repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
UDST/urbansim | urbansim/models/dcm.py | MNLDiscreteChoiceModel.from_yaml | def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a DiscreteChoiceModel instance from a saved YAML configuration.
Arguments are mutally exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
MNLDiscreteChoiceModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
model = cls(
cfg['model_expression'],
cfg['sample_size'],
probability_mode=cfg.get('probability_mode', 'full_product'),
choice_mode=cfg.get('choice_mode', 'individual'),
choosers_fit_filters=cfg.get('choosers_fit_filters', None),
choosers_predict_filters=cfg.get('choosers_predict_filters', None),
alts_fit_filters=cfg.get('alts_fit_filters', None),
alts_predict_filters=cfg.get('alts_predict_filters', None),
interaction_predict_filters=cfg.get(
'interaction_predict_filters', None),
estimation_sample_size=cfg.get('estimation_sample_size', None),
prediction_sample_size=cfg.get('prediction_sample_size', None),
choice_column=cfg.get('choice_column', None),
name=cfg.get('name', None)
)
if cfg.get('log_likelihoods', None):
model.log_likelihoods = cfg['log_likelihoods']
if cfg.get('fit_parameters', None):
model.fit_parameters = pd.DataFrame(cfg['fit_parameters'])
logger.debug('loaded LCM model {} from YAML'.format(model.name))
return model | python | def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a DiscreteChoiceModel instance from a saved YAML configuration.
Arguments are mutally exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
MNLDiscreteChoiceModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
model = cls(
cfg['model_expression'],
cfg['sample_size'],
probability_mode=cfg.get('probability_mode', 'full_product'),
choice_mode=cfg.get('choice_mode', 'individual'),
choosers_fit_filters=cfg.get('choosers_fit_filters', None),
choosers_predict_filters=cfg.get('choosers_predict_filters', None),
alts_fit_filters=cfg.get('alts_fit_filters', None),
alts_predict_filters=cfg.get('alts_predict_filters', None),
interaction_predict_filters=cfg.get(
'interaction_predict_filters', None),
estimation_sample_size=cfg.get('estimation_sample_size', None),
prediction_sample_size=cfg.get('prediction_sample_size', None),
choice_column=cfg.get('choice_column', None),
name=cfg.get('name', None)
)
if cfg.get('log_likelihoods', None):
model.log_likelihoods = cfg['log_likelihoods']
if cfg.get('fit_parameters', None):
model.fit_parameters = pd.DataFrame(cfg['fit_parameters'])
logger.debug('loaded LCM model {} from YAML'.format(model.name))
return model | [
"def",
"from_yaml",
"(",
"cls",
",",
"yaml_str",
"=",
"None",
",",
"str_or_buffer",
"=",
"None",
")",
":",
"cfg",
"=",
"yamlio",
".",
"yaml_to_dict",
"(",
"yaml_str",
",",
"str_or_buffer",
")",
"model",
"=",
"cls",
"(",
"cfg",
"[",
"'model_expression'",
"]",
",",
"cfg",
"[",
"'sample_size'",
"]",
",",
"probability_mode",
"=",
"cfg",
".",
"get",
"(",
"'probability_mode'",
",",
"'full_product'",
")",
",",
"choice_mode",
"=",
"cfg",
".",
"get",
"(",
"'choice_mode'",
",",
"'individual'",
")",
",",
"choosers_fit_filters",
"=",
"cfg",
".",
"get",
"(",
"'choosers_fit_filters'",
",",
"None",
")",
",",
"choosers_predict_filters",
"=",
"cfg",
".",
"get",
"(",
"'choosers_predict_filters'",
",",
"None",
")",
",",
"alts_fit_filters",
"=",
"cfg",
".",
"get",
"(",
"'alts_fit_filters'",
",",
"None",
")",
",",
"alts_predict_filters",
"=",
"cfg",
".",
"get",
"(",
"'alts_predict_filters'",
",",
"None",
")",
",",
"interaction_predict_filters",
"=",
"cfg",
".",
"get",
"(",
"'interaction_predict_filters'",
",",
"None",
")",
",",
"estimation_sample_size",
"=",
"cfg",
".",
"get",
"(",
"'estimation_sample_size'",
",",
"None",
")",
",",
"prediction_sample_size",
"=",
"cfg",
".",
"get",
"(",
"'prediction_sample_size'",
",",
"None",
")",
",",
"choice_column",
"=",
"cfg",
".",
"get",
"(",
"'choice_column'",
",",
"None",
")",
",",
"name",
"=",
"cfg",
".",
"get",
"(",
"'name'",
",",
"None",
")",
")",
"if",
"cfg",
".",
"get",
"(",
"'log_likelihoods'",
",",
"None",
")",
":",
"model",
".",
"log_likelihoods",
"=",
"cfg",
"[",
"'log_likelihoods'",
"]",
"if",
"cfg",
".",
"get",
"(",
"'fit_parameters'",
",",
"None",
")",
":",
"model",
".",
"fit_parameters",
"=",
"pd",
".",
"DataFrame",
"(",
"cfg",
"[",
"'fit_parameters'",
"]",
")",
"logger",
".",
"debug",
"(",
"'loaded LCM model {} from YAML'",
".",
"format",
"(",
"model",
".",
"name",
")",
")",
"return",
"model"
] | Create a DiscreteChoiceModel instance from a saved YAML configuration.
Arguments are mutally exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
MNLDiscreteChoiceModel | [
"Create",
"a",
"DiscreteChoiceModel",
"instance",
"from",
"a",
"saved",
"YAML",
"configuration",
".",
"Arguments",
"are",
"mutally",
"exclusive",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L278-L320 | train | 235,800 |
UDST/urbansim | urbansim/models/dcm.py | MNLDiscreteChoiceModel.fit | def fit(self, choosers, alternatives, current_choice):
"""
Fit and save model parameters based on given data.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
current_choice : pandas.Series or any
A Series describing the `alternatives` currently chosen
by the `choosers`. Should have an index matching `choosers`
and values matching the index of `alternatives`.
If a non-Series is given it should be a column in `choosers`.
Returns
-------
log_likelihoods : dict
Dict of log-liklihood values describing the quality of the
model fit. Will have keys 'null', 'convergence', and 'ratio'.
"""
logger.debug('start: fit LCM model {}'.format(self.name))
if not isinstance(current_choice, pd.Series):
current_choice = choosers[current_choice]
choosers, alternatives = self.apply_fit_filters(choosers, alternatives)
if self.estimation_sample_size:
choosers = choosers.loc[np.random.choice(
choosers.index,
min(self.estimation_sample_size, len(choosers)),
replace=False)]
current_choice = current_choice.loc[choosers.index]
_, merged, chosen = interaction.mnl_interaction_dataset(
choosers, alternatives, self.sample_size, current_choice)
model_design = dmatrix(
self.str_model_expression, data=merged, return_type='dataframe')
if len(merged) != model_design.as_matrix().shape[0]:
raise ModelEvaluationError(
'Estimated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
self.log_likelihoods, self.fit_parameters = mnl.mnl_estimate(
model_design.as_matrix(), chosen, self.sample_size)
self.fit_parameters.index = model_design.columns
logger.debug('finish: fit LCM model {}'.format(self.name))
return self.log_likelihoods | python | def fit(self, choosers, alternatives, current_choice):
"""
Fit and save model parameters based on given data.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
current_choice : pandas.Series or any
A Series describing the `alternatives` currently chosen
by the `choosers`. Should have an index matching `choosers`
and values matching the index of `alternatives`.
If a non-Series is given it should be a column in `choosers`.
Returns
-------
log_likelihoods : dict
Dict of log-liklihood values describing the quality of the
model fit. Will have keys 'null', 'convergence', and 'ratio'.
"""
logger.debug('start: fit LCM model {}'.format(self.name))
if not isinstance(current_choice, pd.Series):
current_choice = choosers[current_choice]
choosers, alternatives = self.apply_fit_filters(choosers, alternatives)
if self.estimation_sample_size:
choosers = choosers.loc[np.random.choice(
choosers.index,
min(self.estimation_sample_size, len(choosers)),
replace=False)]
current_choice = current_choice.loc[choosers.index]
_, merged, chosen = interaction.mnl_interaction_dataset(
choosers, alternatives, self.sample_size, current_choice)
model_design = dmatrix(
self.str_model_expression, data=merged, return_type='dataframe')
if len(merged) != model_design.as_matrix().shape[0]:
raise ModelEvaluationError(
'Estimated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
self.log_likelihoods, self.fit_parameters = mnl.mnl_estimate(
model_design.as_matrix(), chosen, self.sample_size)
self.fit_parameters.index = model_design.columns
logger.debug('finish: fit LCM model {}'.format(self.name))
return self.log_likelihoods | [
"def",
"fit",
"(",
"self",
",",
"choosers",
",",
"alternatives",
",",
"current_choice",
")",
":",
"logger",
".",
"debug",
"(",
"'start: fit LCM model {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"if",
"not",
"isinstance",
"(",
"current_choice",
",",
"pd",
".",
"Series",
")",
":",
"current_choice",
"=",
"choosers",
"[",
"current_choice",
"]",
"choosers",
",",
"alternatives",
"=",
"self",
".",
"apply_fit_filters",
"(",
"choosers",
",",
"alternatives",
")",
"if",
"self",
".",
"estimation_sample_size",
":",
"choosers",
"=",
"choosers",
".",
"loc",
"[",
"np",
".",
"random",
".",
"choice",
"(",
"choosers",
".",
"index",
",",
"min",
"(",
"self",
".",
"estimation_sample_size",
",",
"len",
"(",
"choosers",
")",
")",
",",
"replace",
"=",
"False",
")",
"]",
"current_choice",
"=",
"current_choice",
".",
"loc",
"[",
"choosers",
".",
"index",
"]",
"_",
",",
"merged",
",",
"chosen",
"=",
"interaction",
".",
"mnl_interaction_dataset",
"(",
"choosers",
",",
"alternatives",
",",
"self",
".",
"sample_size",
",",
"current_choice",
")",
"model_design",
"=",
"dmatrix",
"(",
"self",
".",
"str_model_expression",
",",
"data",
"=",
"merged",
",",
"return_type",
"=",
"'dataframe'",
")",
"if",
"len",
"(",
"merged",
")",
"!=",
"model_design",
".",
"as_matrix",
"(",
")",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ModelEvaluationError",
"(",
"'Estimated data does not have the same length as input. '",
"'This suggests there are null values in one or more of '",
"'the input columns.'",
")",
"self",
".",
"log_likelihoods",
",",
"self",
".",
"fit_parameters",
"=",
"mnl",
".",
"mnl_estimate",
"(",
"model_design",
".",
"as_matrix",
"(",
")",
",",
"chosen",
",",
"self",
".",
"sample_size",
")",
"self",
".",
"fit_parameters",
".",
"index",
"=",
"model_design",
".",
"columns",
"logger",
".",
"debug",
"(",
"'finish: fit LCM model {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"return",
"self",
".",
"log_likelihoods"
] | Fit and save model parameters based on given data.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
current_choice : pandas.Series or any
A Series describing the `alternatives` currently chosen
by the `choosers`. Should have an index matching `choosers`
and values matching the index of `alternatives`.
If a non-Series is given it should be a column in `choosers`.
Returns
-------
log_likelihoods : dict
Dict of log-liklihood values describing the quality of the
model fit. Will have keys 'null', 'convergence', and 'ratio'. | [
"Fit",
"and",
"save",
"model",
"parameters",
"based",
"on",
"given",
"data",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L371-L427 | train | 235,801 |
UDST/urbansim | urbansim/models/dcm.py | MNLDiscreteChoiceModel.probabilities | def probabilities(self, choosers, alternatives, filter_tables=True):
"""
Returns the probabilities for a set of choosers to choose
from among a set of alternatives.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
filter_tables : bool, optional
If True, filter `choosers` and `alternatives` with prediction
filters before calculating probabilities.
Returns
-------
probabilities : pandas.Series
Probability of selection associated with each chooser
and alternative. Index will be a MultiIndex with alternative
IDs in the inner index and chooser IDs in the out index.
"""
logger.debug('start: calculate probabilities for LCM model {}'.format(
self.name))
self.assert_fitted()
if filter_tables:
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
if self.prediction_sample_size is not None:
sample_size = self.prediction_sample_size
else:
sample_size = len(alternatives)
if self.probability_mode == 'single_chooser':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers.head(1), alternatives, sample_size)
elif self.probability_mode == 'full_product':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers, alternatives, sample_size)
else:
raise ValueError(
'Unrecognized probability_mode option: {}'.format(
self.probability_mode))
merged = util.apply_filter_query(
merged, self.interaction_predict_filters)
model_design = dmatrix(
self.str_model_expression, data=merged, return_type='dataframe')
if len(merged) != model_design.as_matrix().shape[0]:
raise ModelEvaluationError(
'Simulated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
# get the order of the coefficients in the same order as the
# columns in the design matrix
coeffs = [self.fit_parameters['Coefficient'][x]
for x in model_design.columns]
# probabilities are returned from mnl_simulate as a 2d array
# with choosers along rows and alternatives along columns
if self.probability_mode == 'single_chooser':
numalts = len(merged)
else:
numalts = sample_size
probabilities = mnl.mnl_simulate(
model_design.as_matrix(),
coeffs,
numalts=numalts, returnprobs=True)
# want to turn probabilities into a Series with a MultiIndex
# of chooser IDs and alternative IDs.
# indexing by chooser ID will get you the probabilities
# across alternatives for that chooser
mi = pd.MultiIndex.from_arrays(
[merged['join_index'].values, merged.index.values],
names=('chooser_id', 'alternative_id'))
probabilities = pd.Series(probabilities.flatten(), index=mi)
logger.debug('finish: calculate probabilities for LCM model {}'.format(
self.name))
return probabilities | python | def probabilities(self, choosers, alternatives, filter_tables=True):
"""
Returns the probabilities for a set of choosers to choose
from among a set of alternatives.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
filter_tables : bool, optional
If True, filter `choosers` and `alternatives` with prediction
filters before calculating probabilities.
Returns
-------
probabilities : pandas.Series
Probability of selection associated with each chooser
and alternative. Index will be a MultiIndex with alternative
IDs in the inner index and chooser IDs in the out index.
"""
logger.debug('start: calculate probabilities for LCM model {}'.format(
self.name))
self.assert_fitted()
if filter_tables:
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
if self.prediction_sample_size is not None:
sample_size = self.prediction_sample_size
else:
sample_size = len(alternatives)
if self.probability_mode == 'single_chooser':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers.head(1), alternatives, sample_size)
elif self.probability_mode == 'full_product':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers, alternatives, sample_size)
else:
raise ValueError(
'Unrecognized probability_mode option: {}'.format(
self.probability_mode))
merged = util.apply_filter_query(
merged, self.interaction_predict_filters)
model_design = dmatrix(
self.str_model_expression, data=merged, return_type='dataframe')
if len(merged) != model_design.as_matrix().shape[0]:
raise ModelEvaluationError(
'Simulated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
# get the order of the coefficients in the same order as the
# columns in the design matrix
coeffs = [self.fit_parameters['Coefficient'][x]
for x in model_design.columns]
# probabilities are returned from mnl_simulate as a 2d array
# with choosers along rows and alternatives along columns
if self.probability_mode == 'single_chooser':
numalts = len(merged)
else:
numalts = sample_size
probabilities = mnl.mnl_simulate(
model_design.as_matrix(),
coeffs,
numalts=numalts, returnprobs=True)
# want to turn probabilities into a Series with a MultiIndex
# of chooser IDs and alternative IDs.
# indexing by chooser ID will get you the probabilities
# across alternatives for that chooser
mi = pd.MultiIndex.from_arrays(
[merged['join_index'].values, merged.index.values],
names=('chooser_id', 'alternative_id'))
probabilities = pd.Series(probabilities.flatten(), index=mi)
logger.debug('finish: calculate probabilities for LCM model {}'.format(
self.name))
return probabilities | [
"def",
"probabilities",
"(",
"self",
",",
"choosers",
",",
"alternatives",
",",
"filter_tables",
"=",
"True",
")",
":",
"logger",
".",
"debug",
"(",
"'start: calculate probabilities for LCM model {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"self",
".",
"assert_fitted",
"(",
")",
"if",
"filter_tables",
":",
"choosers",
",",
"alternatives",
"=",
"self",
".",
"apply_predict_filters",
"(",
"choosers",
",",
"alternatives",
")",
"if",
"self",
".",
"prediction_sample_size",
"is",
"not",
"None",
":",
"sample_size",
"=",
"self",
".",
"prediction_sample_size",
"else",
":",
"sample_size",
"=",
"len",
"(",
"alternatives",
")",
"if",
"self",
".",
"probability_mode",
"==",
"'single_chooser'",
":",
"_",
",",
"merged",
",",
"_",
"=",
"interaction",
".",
"mnl_interaction_dataset",
"(",
"choosers",
".",
"head",
"(",
"1",
")",
",",
"alternatives",
",",
"sample_size",
")",
"elif",
"self",
".",
"probability_mode",
"==",
"'full_product'",
":",
"_",
",",
"merged",
",",
"_",
"=",
"interaction",
".",
"mnl_interaction_dataset",
"(",
"choosers",
",",
"alternatives",
",",
"sample_size",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unrecognized probability_mode option: {}'",
".",
"format",
"(",
"self",
".",
"probability_mode",
")",
")",
"merged",
"=",
"util",
".",
"apply_filter_query",
"(",
"merged",
",",
"self",
".",
"interaction_predict_filters",
")",
"model_design",
"=",
"dmatrix",
"(",
"self",
".",
"str_model_expression",
",",
"data",
"=",
"merged",
",",
"return_type",
"=",
"'dataframe'",
")",
"if",
"len",
"(",
"merged",
")",
"!=",
"model_design",
".",
"as_matrix",
"(",
")",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ModelEvaluationError",
"(",
"'Simulated data does not have the same length as input. '",
"'This suggests there are null values in one or more of '",
"'the input columns.'",
")",
"# get the order of the coefficients in the same order as the",
"# columns in the design matrix",
"coeffs",
"=",
"[",
"self",
".",
"fit_parameters",
"[",
"'Coefficient'",
"]",
"[",
"x",
"]",
"for",
"x",
"in",
"model_design",
".",
"columns",
"]",
"# probabilities are returned from mnl_simulate as a 2d array",
"# with choosers along rows and alternatives along columns",
"if",
"self",
".",
"probability_mode",
"==",
"'single_chooser'",
":",
"numalts",
"=",
"len",
"(",
"merged",
")",
"else",
":",
"numalts",
"=",
"sample_size",
"probabilities",
"=",
"mnl",
".",
"mnl_simulate",
"(",
"model_design",
".",
"as_matrix",
"(",
")",
",",
"coeffs",
",",
"numalts",
"=",
"numalts",
",",
"returnprobs",
"=",
"True",
")",
"# want to turn probabilities into a Series with a MultiIndex",
"# of chooser IDs and alternative IDs.",
"# indexing by chooser ID will get you the probabilities",
"# across alternatives for that chooser",
"mi",
"=",
"pd",
".",
"MultiIndex",
".",
"from_arrays",
"(",
"[",
"merged",
"[",
"'join_index'",
"]",
".",
"values",
",",
"merged",
".",
"index",
".",
"values",
"]",
",",
"names",
"=",
"(",
"'chooser_id'",
",",
"'alternative_id'",
")",
")",
"probabilities",
"=",
"pd",
".",
"Series",
"(",
"probabilities",
".",
"flatten",
"(",
")",
",",
"index",
"=",
"mi",
")",
"logger",
".",
"debug",
"(",
"'finish: calculate probabilities for LCM model {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"return",
"probabilities"
] | Returns the probabilities for a set of choosers to choose
from among a set of alternatives.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
filter_tables : bool, optional
If True, filter `choosers` and `alternatives` with prediction
filters before calculating probabilities.
Returns
-------
probabilities : pandas.Series
Probability of selection associated with each chooser
and alternative. Index will be a MultiIndex with alternative
IDs in the inner index and chooser IDs in the out index. | [
"Returns",
"the",
"probabilities",
"for",
"a",
"set",
"of",
"choosers",
"to",
"choose",
"from",
"among",
"a",
"set",
"of",
"alternatives",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L474-L560 | train | 235,802 |
UDST/urbansim | urbansim/models/dcm.py | MNLDiscreteChoiceModel.summed_probabilities | def summed_probabilities(self, choosers, alternatives):
"""
Calculate total probability associated with each alternative.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Total probability associated with each alternative.
"""
def normalize(s):
return s / s.sum()
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
probs = self.probabilities(choosers, alternatives, filter_tables=False)
# groupby the the alternatives ID and sum
if self.probability_mode == 'single_chooser':
return (
normalize(probs) * len(choosers)
).reset_index(level=0, drop=True)
elif self.probability_mode == 'full_product':
return probs.groupby(level=0).apply(normalize)\
.groupby(level=1).sum()
else:
raise ValueError(
'Unrecognized probability_mode option: {}'.format(
self.probability_mode)) | python | def summed_probabilities(self, choosers, alternatives):
"""
Calculate total probability associated with each alternative.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Total probability associated with each alternative.
"""
def normalize(s):
return s / s.sum()
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
probs = self.probabilities(choosers, alternatives, filter_tables=False)
# groupby the the alternatives ID and sum
if self.probability_mode == 'single_chooser':
return (
normalize(probs) * len(choosers)
).reset_index(level=0, drop=True)
elif self.probability_mode == 'full_product':
return probs.groupby(level=0).apply(normalize)\
.groupby(level=1).sum()
else:
raise ValueError(
'Unrecognized probability_mode option: {}'.format(
self.probability_mode)) | [
"def",
"summed_probabilities",
"(",
"self",
",",
"choosers",
",",
"alternatives",
")",
":",
"def",
"normalize",
"(",
"s",
")",
":",
"return",
"s",
"/",
"s",
".",
"sum",
"(",
")",
"choosers",
",",
"alternatives",
"=",
"self",
".",
"apply_predict_filters",
"(",
"choosers",
",",
"alternatives",
")",
"probs",
"=",
"self",
".",
"probabilities",
"(",
"choosers",
",",
"alternatives",
",",
"filter_tables",
"=",
"False",
")",
"# groupby the the alternatives ID and sum",
"if",
"self",
".",
"probability_mode",
"==",
"'single_chooser'",
":",
"return",
"(",
"normalize",
"(",
"probs",
")",
"*",
"len",
"(",
"choosers",
")",
")",
".",
"reset_index",
"(",
"level",
"=",
"0",
",",
"drop",
"=",
"True",
")",
"elif",
"self",
".",
"probability_mode",
"==",
"'full_product'",
":",
"return",
"probs",
".",
"groupby",
"(",
"level",
"=",
"0",
")",
".",
"apply",
"(",
"normalize",
")",
".",
"groupby",
"(",
"level",
"=",
"1",
")",
".",
"sum",
"(",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unrecognized probability_mode option: {}'",
".",
"format",
"(",
"self",
".",
"probability_mode",
")",
")"
] | Calculate total probability associated with each alternative.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Total probability associated with each alternative. | [
"Calculate",
"total",
"probability",
"associated",
"with",
"each",
"alternative",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L562-L597 | train | 235,803 |
UDST/urbansim | urbansim/models/dcm.py | MNLDiscreteChoiceModel.predict | def predict(self, choosers, alternatives, debug=False):
"""
Choose from among alternatives for a group of agents.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
debug : bool
If debug is set to true, will set the variable "sim_pdf" on
the object to store the probabilities for mapping of the
outcome.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers.
"""
self.assert_fitted()
logger.debug('start: predict LCM model {}'.format(self.name))
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
if len(choosers) == 0:
return pd.Series()
if len(alternatives) == 0:
return pd.Series(index=choosers.index)
probabilities = self.probabilities(
choosers, alternatives, filter_tables=False)
if debug:
self.sim_pdf = probabilities
if self.choice_mode == 'aggregate':
choices = unit_choice(
choosers.index.values,
probabilities.index.get_level_values('alternative_id').values,
probabilities.values)
elif self.choice_mode == 'individual':
def mkchoice(probs):
probs.reset_index(0, drop=True, inplace=True)
return np.random.choice(
probs.index.values, p=probs.values / probs.sum())
choices = probabilities.groupby(level='chooser_id', sort=False)\
.apply(mkchoice)
else:
raise ValueError(
'Unrecognized choice_mode option: {}'.format(self.choice_mode))
logger.debug('finish: predict LCM model {}'.format(self.name))
return choices | python | def predict(self, choosers, alternatives, debug=False):
"""
Choose from among alternatives for a group of agents.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
debug : bool
If debug is set to true, will set the variable "sim_pdf" on
the object to store the probabilities for mapping of the
outcome.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers.
"""
self.assert_fitted()
logger.debug('start: predict LCM model {}'.format(self.name))
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
if len(choosers) == 0:
return pd.Series()
if len(alternatives) == 0:
return pd.Series(index=choosers.index)
probabilities = self.probabilities(
choosers, alternatives, filter_tables=False)
if debug:
self.sim_pdf = probabilities
if self.choice_mode == 'aggregate':
choices = unit_choice(
choosers.index.values,
probabilities.index.get_level_values('alternative_id').values,
probabilities.values)
elif self.choice_mode == 'individual':
def mkchoice(probs):
probs.reset_index(0, drop=True, inplace=True)
return np.random.choice(
probs.index.values, p=probs.values / probs.sum())
choices = probabilities.groupby(level='chooser_id', sort=False)\
.apply(mkchoice)
else:
raise ValueError(
'Unrecognized choice_mode option: {}'.format(self.choice_mode))
logger.debug('finish: predict LCM model {}'.format(self.name))
return choices | [
"def",
"predict",
"(",
"self",
",",
"choosers",
",",
"alternatives",
",",
"debug",
"=",
"False",
")",
":",
"self",
".",
"assert_fitted",
"(",
")",
"logger",
".",
"debug",
"(",
"'start: predict LCM model {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"choosers",
",",
"alternatives",
"=",
"self",
".",
"apply_predict_filters",
"(",
"choosers",
",",
"alternatives",
")",
"if",
"len",
"(",
"choosers",
")",
"==",
"0",
":",
"return",
"pd",
".",
"Series",
"(",
")",
"if",
"len",
"(",
"alternatives",
")",
"==",
"0",
":",
"return",
"pd",
".",
"Series",
"(",
"index",
"=",
"choosers",
".",
"index",
")",
"probabilities",
"=",
"self",
".",
"probabilities",
"(",
"choosers",
",",
"alternatives",
",",
"filter_tables",
"=",
"False",
")",
"if",
"debug",
":",
"self",
".",
"sim_pdf",
"=",
"probabilities",
"if",
"self",
".",
"choice_mode",
"==",
"'aggregate'",
":",
"choices",
"=",
"unit_choice",
"(",
"choosers",
".",
"index",
".",
"values",
",",
"probabilities",
".",
"index",
".",
"get_level_values",
"(",
"'alternative_id'",
")",
".",
"values",
",",
"probabilities",
".",
"values",
")",
"elif",
"self",
".",
"choice_mode",
"==",
"'individual'",
":",
"def",
"mkchoice",
"(",
"probs",
")",
":",
"probs",
".",
"reset_index",
"(",
"0",
",",
"drop",
"=",
"True",
",",
"inplace",
"=",
"True",
")",
"return",
"np",
".",
"random",
".",
"choice",
"(",
"probs",
".",
"index",
".",
"values",
",",
"p",
"=",
"probs",
".",
"values",
"/",
"probs",
".",
"sum",
"(",
")",
")",
"choices",
"=",
"probabilities",
".",
"groupby",
"(",
"level",
"=",
"'chooser_id'",
",",
"sort",
"=",
"False",
")",
".",
"apply",
"(",
"mkchoice",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unrecognized choice_mode option: {}'",
".",
"format",
"(",
"self",
".",
"choice_mode",
")",
")",
"logger",
".",
"debug",
"(",
"'finish: predict LCM model {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"return",
"choices"
] | Choose from among alternatives for a group of agents.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
debug : bool
If debug is set to true, will set the variable "sim_pdf" on
the object to store the probabilities for mapping of the
outcome.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers. | [
"Choose",
"from",
"among",
"alternatives",
"for",
"a",
"group",
"of",
"agents",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L599-L657 | train | 235,804 |
UDST/urbansim | urbansim/models/dcm.py | MNLDiscreteChoiceModel.to_dict | def to_dict(self):
"""
Return a dict respresentation of an MNLDiscreteChoiceModel
instance.
"""
return {
'model_type': 'discretechoice',
'model_expression': self.model_expression,
'sample_size': self.sample_size,
'name': self.name,
'probability_mode': self.probability_mode,
'choice_mode': self.choice_mode,
'choosers_fit_filters': self.choosers_fit_filters,
'choosers_predict_filters': self.choosers_predict_filters,
'alts_fit_filters': self.alts_fit_filters,
'alts_predict_filters': self.alts_predict_filters,
'interaction_predict_filters': self.interaction_predict_filters,
'estimation_sample_size': self.estimation_sample_size,
'prediction_sample_size': self.prediction_sample_size,
'choice_column': self.choice_column,
'fitted': self.fitted,
'log_likelihoods': self.log_likelihoods,
'fit_parameters': (yamlio.frame_to_yaml_safe(self.fit_parameters)
if self.fitted else None)
} | python | def to_dict(self):
"""
Return a dict respresentation of an MNLDiscreteChoiceModel
instance.
"""
return {
'model_type': 'discretechoice',
'model_expression': self.model_expression,
'sample_size': self.sample_size,
'name': self.name,
'probability_mode': self.probability_mode,
'choice_mode': self.choice_mode,
'choosers_fit_filters': self.choosers_fit_filters,
'choosers_predict_filters': self.choosers_predict_filters,
'alts_fit_filters': self.alts_fit_filters,
'alts_predict_filters': self.alts_predict_filters,
'interaction_predict_filters': self.interaction_predict_filters,
'estimation_sample_size': self.estimation_sample_size,
'prediction_sample_size': self.prediction_sample_size,
'choice_column': self.choice_column,
'fitted': self.fitted,
'log_likelihoods': self.log_likelihoods,
'fit_parameters': (yamlio.frame_to_yaml_safe(self.fit_parameters)
if self.fitted else None)
} | [
"def",
"to_dict",
"(",
"self",
")",
":",
"return",
"{",
"'model_type'",
":",
"'discretechoice'",
",",
"'model_expression'",
":",
"self",
".",
"model_expression",
",",
"'sample_size'",
":",
"self",
".",
"sample_size",
",",
"'name'",
":",
"self",
".",
"name",
",",
"'probability_mode'",
":",
"self",
".",
"probability_mode",
",",
"'choice_mode'",
":",
"self",
".",
"choice_mode",
",",
"'choosers_fit_filters'",
":",
"self",
".",
"choosers_fit_filters",
",",
"'choosers_predict_filters'",
":",
"self",
".",
"choosers_predict_filters",
",",
"'alts_fit_filters'",
":",
"self",
".",
"alts_fit_filters",
",",
"'alts_predict_filters'",
":",
"self",
".",
"alts_predict_filters",
",",
"'interaction_predict_filters'",
":",
"self",
".",
"interaction_predict_filters",
",",
"'estimation_sample_size'",
":",
"self",
".",
"estimation_sample_size",
",",
"'prediction_sample_size'",
":",
"self",
".",
"prediction_sample_size",
",",
"'choice_column'",
":",
"self",
".",
"choice_column",
",",
"'fitted'",
":",
"self",
".",
"fitted",
",",
"'log_likelihoods'",
":",
"self",
".",
"log_likelihoods",
",",
"'fit_parameters'",
":",
"(",
"yamlio",
".",
"frame_to_yaml_safe",
"(",
"self",
".",
"fit_parameters",
")",
"if",
"self",
".",
"fitted",
"else",
"None",
")",
"}"
] | Return a dict respresentation of an MNLDiscreteChoiceModel
instance. | [
"Return",
"a",
"dict",
"respresentation",
"of",
"an",
"MNLDiscreteChoiceModel",
"instance",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L659-L684 | train | 235,805 |
UDST/urbansim | urbansim/models/dcm.py | MNLDiscreteChoiceModel.choosers_columns_used | def choosers_columns_used(self):
"""
Columns from the choosers table that are used for filtering.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.choosers_predict_filters),
util.columns_in_filters(self.choosers_fit_filters)))) | python | def choosers_columns_used(self):
"""
Columns from the choosers table that are used for filtering.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.choosers_predict_filters),
util.columns_in_filters(self.choosers_fit_filters)))) | [
"def",
"choosers_columns_used",
"(",
"self",
")",
":",
"return",
"list",
"(",
"tz",
".",
"unique",
"(",
"tz",
".",
"concatv",
"(",
"util",
".",
"columns_in_filters",
"(",
"self",
".",
"choosers_predict_filters",
")",
",",
"util",
".",
"columns_in_filters",
"(",
"self",
".",
"choosers_fit_filters",
")",
")",
")",
")"
] | Columns from the choosers table that are used for filtering. | [
"Columns",
"from",
"the",
"choosers",
"table",
"that",
"are",
"used",
"for",
"filtering",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L712-L719 | train | 235,806 |
UDST/urbansim | urbansim/models/dcm.py | MNLDiscreteChoiceModel.interaction_columns_used | def interaction_columns_used(self):
"""
Columns from the interaction dataset used for filtering and in
the model. These may come originally from either the choosers or
alternatives tables.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.interaction_predict_filters),
util.columns_in_formula(self.model_expression)))) | python | def interaction_columns_used(self):
"""
Columns from the interaction dataset used for filtering and in
the model. These may come originally from either the choosers or
alternatives tables.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.interaction_predict_filters),
util.columns_in_formula(self.model_expression)))) | [
"def",
"interaction_columns_used",
"(",
"self",
")",
":",
"return",
"list",
"(",
"tz",
".",
"unique",
"(",
"tz",
".",
"concatv",
"(",
"util",
".",
"columns_in_filters",
"(",
"self",
".",
"interaction_predict_filters",
")",
",",
"util",
".",
"columns_in_formula",
"(",
"self",
".",
"model_expression",
")",
")",
")",
")"
] | Columns from the interaction dataset used for filtering and in
the model. These may come originally from either the choosers or
alternatives tables. | [
"Columns",
"from",
"the",
"interaction",
"dataset",
"used",
"for",
"filtering",
"and",
"in",
"the",
"model",
".",
"These",
"may",
"come",
"originally",
"from",
"either",
"the",
"choosers",
"or",
"alternatives",
"tables",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L730-L739 | train | 235,807 |
UDST/urbansim | urbansim/models/dcm.py | MNLDiscreteChoiceModel.predict_from_cfg | def predict_from_cfg(cls, choosers, alternatives, cfgname=None, cfg=None,
alternative_ratio=2.0, debug=False):
"""
Simulate choices for the specified choosers
Parameters
----------
choosers : DataFrame
A dataframe of agents doing the choosing.
alternatives : DataFrame
A dataframe of locations which the choosers are locating in and
which have a supply.
cfgname : string
The name of the yaml config file from which to read the discrete
choice model.
cfg: string
an ordered yaml string of the model discrete choice model configuration.
Used to read config from memory in lieu of loading cfgname from disk.
alternative_ratio : float, optional
Above the ratio of alternatives to choosers (default of 2.0),
the alternatives will be sampled to meet this ratio
(for performance reasons).
debug : boolean, optional (default False)
Whether to generate debug information on the model.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers.
lcm : MNLDiscreteChoiceModel which was used to predict
"""
logger.debug('start: predict from configuration {}'.format(cfgname))
if cfgname:
lcm = cls.from_yaml(str_or_buffer=cfgname)
elif cfg:
lcm = cls.from_yaml(yaml_str=cfg)
else:
msg = 'predict_from_cfg requires a configuration via the cfgname or cfg arguments'
logger.error(msg)
raise ValueError(msg)
if len(alternatives) > len(choosers) * alternative_ratio:
logger.info(
("Alternative ratio exceeded: %d alternatives "
"and only %d choosers") %
(len(alternatives), len(choosers)))
idxes = np.random.choice(
alternatives.index, size=int(len(choosers) *
alternative_ratio),
replace=False)
alternatives = alternatives.loc[idxes]
logger.info(
" after sampling %d alternatives are available\n" %
len(alternatives))
new_units = lcm.predict(choosers, alternatives, debug=debug)
print("Assigned %d choosers to new units" % len(new_units.dropna()))
logger.debug('finish: predict from configuration {}'.format(cfgname))
return new_units, lcm | python | def predict_from_cfg(cls, choosers, alternatives, cfgname=None, cfg=None,
alternative_ratio=2.0, debug=False):
"""
Simulate choices for the specified choosers
Parameters
----------
choosers : DataFrame
A dataframe of agents doing the choosing.
alternatives : DataFrame
A dataframe of locations which the choosers are locating in and
which have a supply.
cfgname : string
The name of the yaml config file from which to read the discrete
choice model.
cfg: string
an ordered yaml string of the model discrete choice model configuration.
Used to read config from memory in lieu of loading cfgname from disk.
alternative_ratio : float, optional
Above the ratio of alternatives to choosers (default of 2.0),
the alternatives will be sampled to meet this ratio
(for performance reasons).
debug : boolean, optional (default False)
Whether to generate debug information on the model.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers.
lcm : MNLDiscreteChoiceModel which was used to predict
"""
logger.debug('start: predict from configuration {}'.format(cfgname))
if cfgname:
lcm = cls.from_yaml(str_or_buffer=cfgname)
elif cfg:
lcm = cls.from_yaml(yaml_str=cfg)
else:
msg = 'predict_from_cfg requires a configuration via the cfgname or cfg arguments'
logger.error(msg)
raise ValueError(msg)
if len(alternatives) > len(choosers) * alternative_ratio:
logger.info(
("Alternative ratio exceeded: %d alternatives "
"and only %d choosers") %
(len(alternatives), len(choosers)))
idxes = np.random.choice(
alternatives.index, size=int(len(choosers) *
alternative_ratio),
replace=False)
alternatives = alternatives.loc[idxes]
logger.info(
" after sampling %d alternatives are available\n" %
len(alternatives))
new_units = lcm.predict(choosers, alternatives, debug=debug)
print("Assigned %d choosers to new units" % len(new_units.dropna()))
logger.debug('finish: predict from configuration {}'.format(cfgname))
return new_units, lcm | [
"def",
"predict_from_cfg",
"(",
"cls",
",",
"choosers",
",",
"alternatives",
",",
"cfgname",
"=",
"None",
",",
"cfg",
"=",
"None",
",",
"alternative_ratio",
"=",
"2.0",
",",
"debug",
"=",
"False",
")",
":",
"logger",
".",
"debug",
"(",
"'start: predict from configuration {}'",
".",
"format",
"(",
"cfgname",
")",
")",
"if",
"cfgname",
":",
"lcm",
"=",
"cls",
".",
"from_yaml",
"(",
"str_or_buffer",
"=",
"cfgname",
")",
"elif",
"cfg",
":",
"lcm",
"=",
"cls",
".",
"from_yaml",
"(",
"yaml_str",
"=",
"cfg",
")",
"else",
":",
"msg",
"=",
"'predict_from_cfg requires a configuration via the cfgname or cfg arguments'",
"logger",
".",
"error",
"(",
"msg",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"if",
"len",
"(",
"alternatives",
")",
">",
"len",
"(",
"choosers",
")",
"*",
"alternative_ratio",
":",
"logger",
".",
"info",
"(",
"(",
"\"Alternative ratio exceeded: %d alternatives \"",
"\"and only %d choosers\"",
")",
"%",
"(",
"len",
"(",
"alternatives",
")",
",",
"len",
"(",
"choosers",
")",
")",
")",
"idxes",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"alternatives",
".",
"index",
",",
"size",
"=",
"int",
"(",
"len",
"(",
"choosers",
")",
"*",
"alternative_ratio",
")",
",",
"replace",
"=",
"False",
")",
"alternatives",
"=",
"alternatives",
".",
"loc",
"[",
"idxes",
"]",
"logger",
".",
"info",
"(",
"\" after sampling %d alternatives are available\\n\"",
"%",
"len",
"(",
"alternatives",
")",
")",
"new_units",
"=",
"lcm",
".",
"predict",
"(",
"choosers",
",",
"alternatives",
",",
"debug",
"=",
"debug",
")",
"print",
"(",
"\"Assigned %d choosers to new units\"",
"%",
"len",
"(",
"new_units",
".",
"dropna",
"(",
")",
")",
")",
"logger",
".",
"debug",
"(",
"'finish: predict from configuration {}'",
".",
"format",
"(",
"cfgname",
")",
")",
"return",
"new_units",
",",
"lcm"
] | Simulate choices for the specified choosers
Parameters
----------
choosers : DataFrame
A dataframe of agents doing the choosing.
alternatives : DataFrame
A dataframe of locations which the choosers are locating in and
which have a supply.
cfgname : string
The name of the yaml config file from which to read the discrete
choice model.
cfg: string
an ordered yaml string of the model discrete choice model configuration.
Used to read config from memory in lieu of loading cfgname from disk.
alternative_ratio : float, optional
Above the ratio of alternatives to choosers (default of 2.0),
the alternatives will be sampled to meet this ratio
(for performance reasons).
debug : boolean, optional (default False)
Whether to generate debug information on the model.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers.
lcm : MNLDiscreteChoiceModel which was used to predict | [
"Simulate",
"choices",
"for",
"the",
"specified",
"choosers"
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L787-L847 | train | 235,808 |
UDST/urbansim | urbansim/models/dcm.py | MNLDiscreteChoiceModelGroup.add_model_from_params | def add_model_from_params(
self, name, model_expression, sample_size,
probability_mode='full_product', choice_mode='individual',
choosers_fit_filters=None, choosers_predict_filters=None,
alts_fit_filters=None, alts_predict_filters=None,
interaction_predict_filters=None, estimation_sample_size=None,
prediction_sample_size=None, choice_column=None):
"""
Add a model by passing parameters through to MNLDiscreteChoiceModel.
Parameters
----------
name
Must match a segment in the choosers table.
model_expression : str, iterable, or dict
A patsy model expression. Should contain only a right-hand side.
sample_size : int
Number of choices to sample for estimating the model.
probability_mode : str, optional
Specify the method to use for calculating probabilities
during prediction.
Available string options are 'single_chooser' and 'full_product'.
In "single chooser" mode one agent is chosen for calculating
probabilities across all alternatives. In "full product" mode
probabilities are calculated for every chooser across all
alternatives.
choice_mode : str or callable, optional
Specify the method to use for making choices among alternatives.
Available string options are 'individual' and 'aggregate'.
In "individual" mode choices will be made separately for each
chooser. In "aggregate" mode choices are made for all choosers at
once. Aggregate mode implies that an alternative chosen by one
agent is unavailable to other agents and that the same
probabilities can be used for all choosers.
choosers_fit_filters : list of str, optional
Filters applied to choosers table before fitting the model.
choosers_predict_filters : list of str, optional
Filters applied to the choosers table before calculating
new data points.
alts_fit_filters : list of str, optional
Filters applied to the alternatives table before fitting the model.
alts_predict_filters : list of str, optional
Filters applied to the alternatives table before calculating
new data points.
interaction_predict_filters : list of str, optional
Filters applied to the merged choosers/alternatives table
before predicting agent choices.
estimation_sample_size : int, optional
Whether to sample choosers during estimation
(needs to be applied after choosers_fit_filters)
prediction_sample_size : int, optional
Whether (and how much) to sample alternatives during prediction.
Note that this can lead to multiple choosers picking the same
alternative.
choice_column : optional
Name of the column in the `alternatives` table that choosers
should choose. e.g. the 'building_id' column. If not provided
the alternatives index is used.
"""
logger.debug('adding model {} to LCM group {}'.format(name, self.name))
self.models[name] = MNLDiscreteChoiceModel(
model_expression, sample_size,
probability_mode, choice_mode,
choosers_fit_filters, choosers_predict_filters,
alts_fit_filters, alts_predict_filters,
interaction_predict_filters, estimation_sample_size,
prediction_sample_size, choice_column, name) | python | def add_model_from_params(
self, name, model_expression, sample_size,
probability_mode='full_product', choice_mode='individual',
choosers_fit_filters=None, choosers_predict_filters=None,
alts_fit_filters=None, alts_predict_filters=None,
interaction_predict_filters=None, estimation_sample_size=None,
prediction_sample_size=None, choice_column=None):
"""
Add a model by passing parameters through to MNLDiscreteChoiceModel.
Parameters
----------
name
Must match a segment in the choosers table.
model_expression : str, iterable, or dict
A patsy model expression. Should contain only a right-hand side.
sample_size : int
Number of choices to sample for estimating the model.
probability_mode : str, optional
Specify the method to use for calculating probabilities
during prediction.
Available string options are 'single_chooser' and 'full_product'.
In "single chooser" mode one agent is chosen for calculating
probabilities across all alternatives. In "full product" mode
probabilities are calculated for every chooser across all
alternatives.
choice_mode : str or callable, optional
Specify the method to use for making choices among alternatives.
Available string options are 'individual' and 'aggregate'.
In "individual" mode choices will be made separately for each
chooser. In "aggregate" mode choices are made for all choosers at
once. Aggregate mode implies that an alternative chosen by one
agent is unavailable to other agents and that the same
probabilities can be used for all choosers.
choosers_fit_filters : list of str, optional
Filters applied to choosers table before fitting the model.
choosers_predict_filters : list of str, optional
Filters applied to the choosers table before calculating
new data points.
alts_fit_filters : list of str, optional
Filters applied to the alternatives table before fitting the model.
alts_predict_filters : list of str, optional
Filters applied to the alternatives table before calculating
new data points.
interaction_predict_filters : list of str, optional
Filters applied to the merged choosers/alternatives table
before predicting agent choices.
estimation_sample_size : int, optional
Whether to sample choosers during estimation
(needs to be applied after choosers_fit_filters)
prediction_sample_size : int, optional
Whether (and how much) to sample alternatives during prediction.
Note that this can lead to multiple choosers picking the same
alternative.
choice_column : optional
Name of the column in the `alternatives` table that choosers
should choose. e.g. the 'building_id' column. If not provided
the alternatives index is used.
"""
logger.debug('adding model {} to LCM group {}'.format(name, self.name))
self.models[name] = MNLDiscreteChoiceModel(
model_expression, sample_size,
probability_mode, choice_mode,
choosers_fit_filters, choosers_predict_filters,
alts_fit_filters, alts_predict_filters,
interaction_predict_filters, estimation_sample_size,
prediction_sample_size, choice_column, name) | [
"def",
"add_model_from_params",
"(",
"self",
",",
"name",
",",
"model_expression",
",",
"sample_size",
",",
"probability_mode",
"=",
"'full_product'",
",",
"choice_mode",
"=",
"'individual'",
",",
"choosers_fit_filters",
"=",
"None",
",",
"choosers_predict_filters",
"=",
"None",
",",
"alts_fit_filters",
"=",
"None",
",",
"alts_predict_filters",
"=",
"None",
",",
"interaction_predict_filters",
"=",
"None",
",",
"estimation_sample_size",
"=",
"None",
",",
"prediction_sample_size",
"=",
"None",
",",
"choice_column",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'adding model {} to LCM group {}'",
".",
"format",
"(",
"name",
",",
"self",
".",
"name",
")",
")",
"self",
".",
"models",
"[",
"name",
"]",
"=",
"MNLDiscreteChoiceModel",
"(",
"model_expression",
",",
"sample_size",
",",
"probability_mode",
",",
"choice_mode",
",",
"choosers_fit_filters",
",",
"choosers_predict_filters",
",",
"alts_fit_filters",
",",
"alts_predict_filters",
",",
"interaction_predict_filters",
",",
"estimation_sample_size",
",",
"prediction_sample_size",
",",
"choice_column",
",",
"name",
")"
] | Add a model by passing parameters through to MNLDiscreteChoiceModel.
Parameters
----------
name
Must match a segment in the choosers table.
model_expression : str, iterable, or dict
A patsy model expression. Should contain only a right-hand side.
sample_size : int
Number of choices to sample for estimating the model.
probability_mode : str, optional
Specify the method to use for calculating probabilities
during prediction.
Available string options are 'single_chooser' and 'full_product'.
In "single chooser" mode one agent is chosen for calculating
probabilities across all alternatives. In "full product" mode
probabilities are calculated for every chooser across all
alternatives.
choice_mode : str or callable, optional
Specify the method to use for making choices among alternatives.
Available string options are 'individual' and 'aggregate'.
In "individual" mode choices will be made separately for each
chooser. In "aggregate" mode choices are made for all choosers at
once. Aggregate mode implies that an alternative chosen by one
agent is unavailable to other agents and that the same
probabilities can be used for all choosers.
choosers_fit_filters : list of str, optional
Filters applied to choosers table before fitting the model.
choosers_predict_filters : list of str, optional
Filters applied to the choosers table before calculating
new data points.
alts_fit_filters : list of str, optional
Filters applied to the alternatives table before fitting the model.
alts_predict_filters : list of str, optional
Filters applied to the alternatives table before calculating
new data points.
interaction_predict_filters : list of str, optional
Filters applied to the merged choosers/alternatives table
before predicting agent choices.
estimation_sample_size : int, optional
Whether to sample choosers during estimation
(needs to be applied after choosers_fit_filters)
prediction_sample_size : int, optional
Whether (and how much) to sample alternatives during prediction.
Note that this can lead to multiple choosers picking the same
alternative.
choice_column : optional
Name of the column in the `alternatives` table that choosers
should choose. e.g. the 'building_id' column. If not provided
the alternatives index is used. | [
"Add",
"a",
"model",
"by",
"passing",
"parameters",
"through",
"to",
"MNLDiscreteChoiceModel",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L893-L960 | train | 235,809 |
UDST/urbansim | urbansim/models/dcm.py | MNLDiscreteChoiceModelGroup.apply_fit_filters | def apply_fit_filters(self, choosers, alternatives):
"""
Filter `choosers` and `alternatives` for fitting.
This is done by filtering each submodel and concatenating
the results.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
Returns
-------
filtered_choosers, filtered_alts : pandas.DataFrame
"""
ch = []
alts = []
for name, df in self._iter_groups(choosers):
filtered_choosers, filtered_alts = \
self.models[name].apply_fit_filters(df, alternatives)
ch.append(filtered_choosers)
alts.append(filtered_alts)
return pd.concat(ch), pd.concat(alts) | python | def apply_fit_filters(self, choosers, alternatives):
"""
Filter `choosers` and `alternatives` for fitting.
This is done by filtering each submodel and concatenating
the results.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
Returns
-------
filtered_choosers, filtered_alts : pandas.DataFrame
"""
ch = []
alts = []
for name, df in self._iter_groups(choosers):
filtered_choosers, filtered_alts = \
self.models[name].apply_fit_filters(df, alternatives)
ch.append(filtered_choosers)
alts.append(filtered_alts)
return pd.concat(ch), pd.concat(alts) | [
"def",
"apply_fit_filters",
"(",
"self",
",",
"choosers",
",",
"alternatives",
")",
":",
"ch",
"=",
"[",
"]",
"alts",
"=",
"[",
"]",
"for",
"name",
",",
"df",
"in",
"self",
".",
"_iter_groups",
"(",
"choosers",
")",
":",
"filtered_choosers",
",",
"filtered_alts",
"=",
"self",
".",
"models",
"[",
"name",
"]",
".",
"apply_fit_filters",
"(",
"df",
",",
"alternatives",
")",
"ch",
".",
"append",
"(",
"filtered_choosers",
")",
"alts",
".",
"append",
"(",
"filtered_alts",
")",
"return",
"pd",
".",
"concat",
"(",
"ch",
")",
",",
"pd",
".",
"concat",
"(",
"alts",
")"
] | Filter `choosers` and `alternatives` for fitting.
This is done by filtering each submodel and concatenating
the results.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
Returns
-------
filtered_choosers, filtered_alts : pandas.DataFrame | [
"Filter",
"choosers",
"and",
"alternatives",
"for",
"fitting",
".",
"This",
"is",
"done",
"by",
"filtering",
"each",
"submodel",
"and",
"concatenating",
"the",
"results",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L986-L1014 | train | 235,810 |
UDST/urbansim | urbansim/models/dcm.py | MNLDiscreteChoiceModelGroup.fit | def fit(self, choosers, alternatives, current_choice):
"""
Fit and save models based on given data after segmenting
the `choosers` table.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column with the same name as the .segmentation_col
attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
current_choice
Name of column in `choosers` that indicates which alternative
they have currently chosen.
Returns
-------
log_likelihoods : dict of dict
Keys will be model names and values will be dictionaries of
log-liklihood values as returned by MNLDiscreteChoiceModel.fit.
"""
with log_start_finish(
'fit models in LCM group {}'.format(self.name), logger):
return {
name: self.models[name].fit(df, alternatives, current_choice)
for name, df in self._iter_groups(choosers)} | python | def fit(self, choosers, alternatives, current_choice):
"""
Fit and save models based on given data after segmenting
the `choosers` table.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column with the same name as the .segmentation_col
attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
current_choice
Name of column in `choosers` that indicates which alternative
they have currently chosen.
Returns
-------
log_likelihoods : dict of dict
Keys will be model names and values will be dictionaries of
log-liklihood values as returned by MNLDiscreteChoiceModel.fit.
"""
with log_start_finish(
'fit models in LCM group {}'.format(self.name), logger):
return {
name: self.models[name].fit(df, alternatives, current_choice)
for name, df in self._iter_groups(choosers)} | [
"def",
"fit",
"(",
"self",
",",
"choosers",
",",
"alternatives",
",",
"current_choice",
")",
":",
"with",
"log_start_finish",
"(",
"'fit models in LCM group {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
",",
"logger",
")",
":",
"return",
"{",
"name",
":",
"self",
".",
"models",
"[",
"name",
"]",
".",
"fit",
"(",
"df",
",",
"alternatives",
",",
"current_choice",
")",
"for",
"name",
",",
"df",
"in",
"self",
".",
"_iter_groups",
"(",
"choosers",
")",
"}"
] | Fit and save models based on given data after segmenting
the `choosers` table.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column with the same name as the .segmentation_col
attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
current_choice
Name of column in `choosers` that indicates which alternative
they have currently chosen.
Returns
-------
log_likelihoods : dict of dict
Keys will be model names and values will be dictionaries of
log-liklihood values as returned by MNLDiscreteChoiceModel.fit. | [
"Fit",
"and",
"save",
"models",
"based",
"on",
"given",
"data",
"after",
"segmenting",
"the",
"choosers",
"table",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1049-L1078 | train | 235,811 |
UDST/urbansim | urbansim/models/dcm.py | MNLDiscreteChoiceModelGroup.fitted | def fitted(self):
"""
Whether all models in the group have been fitted.
"""
return (all(m.fitted for m in self.models.values())
if self.models else False) | python | def fitted(self):
"""
Whether all models in the group have been fitted.
"""
return (all(m.fitted for m in self.models.values())
if self.models else False) | [
"def",
"fitted",
"(",
"self",
")",
":",
"return",
"(",
"all",
"(",
"m",
".",
"fitted",
"for",
"m",
"in",
"self",
".",
"models",
".",
"values",
"(",
")",
")",
"if",
"self",
".",
"models",
"else",
"False",
")"
] | Whether all models in the group have been fitted. | [
"Whether",
"all",
"models",
"in",
"the",
"group",
"have",
"been",
"fitted",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1081-L1087 | train | 235,812 |
UDST/urbansim | urbansim/models/dcm.py | MNLDiscreteChoiceModelGroup.summed_probabilities | def summed_probabilities(self, choosers, alternatives):
"""
Returns the sum of probabilities for alternatives across all
chooser segments.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column matching the .segmentation_col attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Summed probabilities from each segment added together.
"""
if len(alternatives) == 0 or len(choosers) == 0:
return pd.Series()
logger.debug(
'start: calculate summed probabilities in LCM group {}'.format(
self.name))
probs = []
for name, df in self._iter_groups(choosers):
probs.append(
self.models[name].summed_probabilities(df, alternatives))
add = tz.curry(pd.Series.add, fill_value=0)
probs = tz.reduce(add, probs)
logger.debug(
'finish: calculate summed probabilities in LCM group {}'.format(
self.name))
return probs | python | def summed_probabilities(self, choosers, alternatives):
"""
Returns the sum of probabilities for alternatives across all
chooser segments.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column matching the .segmentation_col attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Summed probabilities from each segment added together.
"""
if len(alternatives) == 0 or len(choosers) == 0:
return pd.Series()
logger.debug(
'start: calculate summed probabilities in LCM group {}'.format(
self.name))
probs = []
for name, df in self._iter_groups(choosers):
probs.append(
self.models[name].summed_probabilities(df, alternatives))
add = tz.curry(pd.Series.add, fill_value=0)
probs = tz.reduce(add, probs)
logger.debug(
'finish: calculate summed probabilities in LCM group {}'.format(
self.name))
return probs | [
"def",
"summed_probabilities",
"(",
"self",
",",
"choosers",
",",
"alternatives",
")",
":",
"if",
"len",
"(",
"alternatives",
")",
"==",
"0",
"or",
"len",
"(",
"choosers",
")",
"==",
"0",
":",
"return",
"pd",
".",
"Series",
"(",
")",
"logger",
".",
"debug",
"(",
"'start: calculate summed probabilities in LCM group {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"probs",
"=",
"[",
"]",
"for",
"name",
",",
"df",
"in",
"self",
".",
"_iter_groups",
"(",
"choosers",
")",
":",
"probs",
".",
"append",
"(",
"self",
".",
"models",
"[",
"name",
"]",
".",
"summed_probabilities",
"(",
"df",
",",
"alternatives",
")",
")",
"add",
"=",
"tz",
".",
"curry",
"(",
"pd",
".",
"Series",
".",
"add",
",",
"fill_value",
"=",
"0",
")",
"probs",
"=",
"tz",
".",
"reduce",
"(",
"add",
",",
"probs",
")",
"logger",
".",
"debug",
"(",
"'finish: calculate summed probabilities in LCM group {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"return",
"probs"
] | Returns the sum of probabilities for alternatives across all
chooser segments.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column matching the .segmentation_col attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Summed probabilities from each segment added together. | [
"Returns",
"the",
"sum",
"of",
"probabilities",
"for",
"alternatives",
"across",
"all",
"chooser",
"segments",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1119-L1156 | train | 235,813 |
UDST/urbansim | urbansim/models/dcm.py | SegmentedMNLDiscreteChoiceModel.from_yaml | def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a SegmentedMNLDiscreteChoiceModel instance from a saved YAML
configuration. Arguments are mutally exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
SegmentedMNLDiscreteChoiceModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
default_model_expr = cfg['default_config']['model_expression']
seg = cls(
cfg['segmentation_col'],
cfg['sample_size'],
cfg['probability_mode'],
cfg['choice_mode'],
cfg['choosers_fit_filters'],
cfg['choosers_predict_filters'],
cfg['alts_fit_filters'],
cfg['alts_predict_filters'],
cfg['interaction_predict_filters'],
cfg['estimation_sample_size'],
cfg['prediction_sample_size'],
cfg['choice_column'],
default_model_expr,
cfg['remove_alts'],
cfg['name'])
if "models" not in cfg:
cfg["models"] = {}
for name, m in cfg['models'].items():
m['model_expression'] = m.get(
'model_expression', default_model_expr)
m['sample_size'] = cfg['sample_size']
m['probability_mode'] = cfg['probability_mode']
m['choice_mode'] = cfg['choice_mode']
m['choosers_fit_filters'] = None
m['choosers_predict_filters'] = None
m['alts_fit_filters'] = None
m['alts_predict_filters'] = None
m['interaction_predict_filters'] = \
cfg['interaction_predict_filters']
m['estimation_sample_size'] = cfg['estimation_sample_size']
m['prediction_sample_size'] = cfg['prediction_sample_size']
m['choice_column'] = cfg['choice_column']
model = MNLDiscreteChoiceModel.from_yaml(
yamlio.convert_to_yaml(m, None))
seg._group.add_model(model)
logger.debug(
'loaded segmented LCM model {} from YAML'.format(seg.name))
return seg | python | def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a SegmentedMNLDiscreteChoiceModel instance from a saved YAML
configuration. Arguments are mutally exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
SegmentedMNLDiscreteChoiceModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
default_model_expr = cfg['default_config']['model_expression']
seg = cls(
cfg['segmentation_col'],
cfg['sample_size'],
cfg['probability_mode'],
cfg['choice_mode'],
cfg['choosers_fit_filters'],
cfg['choosers_predict_filters'],
cfg['alts_fit_filters'],
cfg['alts_predict_filters'],
cfg['interaction_predict_filters'],
cfg['estimation_sample_size'],
cfg['prediction_sample_size'],
cfg['choice_column'],
default_model_expr,
cfg['remove_alts'],
cfg['name'])
if "models" not in cfg:
cfg["models"] = {}
for name, m in cfg['models'].items():
m['model_expression'] = m.get(
'model_expression', default_model_expr)
m['sample_size'] = cfg['sample_size']
m['probability_mode'] = cfg['probability_mode']
m['choice_mode'] = cfg['choice_mode']
m['choosers_fit_filters'] = None
m['choosers_predict_filters'] = None
m['alts_fit_filters'] = None
m['alts_predict_filters'] = None
m['interaction_predict_filters'] = \
cfg['interaction_predict_filters']
m['estimation_sample_size'] = cfg['estimation_sample_size']
m['prediction_sample_size'] = cfg['prediction_sample_size']
m['choice_column'] = cfg['choice_column']
model = MNLDiscreteChoiceModel.from_yaml(
yamlio.convert_to_yaml(m, None))
seg._group.add_model(model)
logger.debug(
'loaded segmented LCM model {} from YAML'.format(seg.name))
return seg | [
"def",
"from_yaml",
"(",
"cls",
",",
"yaml_str",
"=",
"None",
",",
"str_or_buffer",
"=",
"None",
")",
":",
"cfg",
"=",
"yamlio",
".",
"yaml_to_dict",
"(",
"yaml_str",
",",
"str_or_buffer",
")",
"default_model_expr",
"=",
"cfg",
"[",
"'default_config'",
"]",
"[",
"'model_expression'",
"]",
"seg",
"=",
"cls",
"(",
"cfg",
"[",
"'segmentation_col'",
"]",
",",
"cfg",
"[",
"'sample_size'",
"]",
",",
"cfg",
"[",
"'probability_mode'",
"]",
",",
"cfg",
"[",
"'choice_mode'",
"]",
",",
"cfg",
"[",
"'choosers_fit_filters'",
"]",
",",
"cfg",
"[",
"'choosers_predict_filters'",
"]",
",",
"cfg",
"[",
"'alts_fit_filters'",
"]",
",",
"cfg",
"[",
"'alts_predict_filters'",
"]",
",",
"cfg",
"[",
"'interaction_predict_filters'",
"]",
",",
"cfg",
"[",
"'estimation_sample_size'",
"]",
",",
"cfg",
"[",
"'prediction_sample_size'",
"]",
",",
"cfg",
"[",
"'choice_column'",
"]",
",",
"default_model_expr",
",",
"cfg",
"[",
"'remove_alts'",
"]",
",",
"cfg",
"[",
"'name'",
"]",
")",
"if",
"\"models\"",
"not",
"in",
"cfg",
":",
"cfg",
"[",
"\"models\"",
"]",
"=",
"{",
"}",
"for",
"name",
",",
"m",
"in",
"cfg",
"[",
"'models'",
"]",
".",
"items",
"(",
")",
":",
"m",
"[",
"'model_expression'",
"]",
"=",
"m",
".",
"get",
"(",
"'model_expression'",
",",
"default_model_expr",
")",
"m",
"[",
"'sample_size'",
"]",
"=",
"cfg",
"[",
"'sample_size'",
"]",
"m",
"[",
"'probability_mode'",
"]",
"=",
"cfg",
"[",
"'probability_mode'",
"]",
"m",
"[",
"'choice_mode'",
"]",
"=",
"cfg",
"[",
"'choice_mode'",
"]",
"m",
"[",
"'choosers_fit_filters'",
"]",
"=",
"None",
"m",
"[",
"'choosers_predict_filters'",
"]",
"=",
"None",
"m",
"[",
"'alts_fit_filters'",
"]",
"=",
"None",
"m",
"[",
"'alts_predict_filters'",
"]",
"=",
"None",
"m",
"[",
"'interaction_predict_filters'",
"]",
"=",
"cfg",
"[",
"'interaction_predict_filters'",
"]",
"m",
"[",
"'estimation_sample_size'",
"]",
"=",
"cfg",
"[",
"'estimation_sample_size'",
"]",
"m",
"[",
"'prediction_sample_size'",
"]",
"=",
"cfg",
"[",
"'prediction_sample_size'",
"]",
"m",
"[",
"'choice_column'",
"]",
"=",
"cfg",
"[",
"'choice_column'",
"]",
"model",
"=",
"MNLDiscreteChoiceModel",
".",
"from_yaml",
"(",
"yamlio",
".",
"convert_to_yaml",
"(",
"m",
",",
"None",
")",
")",
"seg",
".",
"_group",
".",
"add_model",
"(",
"model",
")",
"logger",
".",
"debug",
"(",
"'loaded segmented LCM model {} from YAML'",
".",
"format",
"(",
"seg",
".",
"name",
")",
")",
"return",
"seg"
] | Create a SegmentedMNLDiscreteChoiceModel instance from a saved YAML
configuration. Arguments are mutally exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
SegmentedMNLDiscreteChoiceModel | [
"Create",
"a",
"SegmentedMNLDiscreteChoiceModel",
"instance",
"from",
"a",
"saved",
"YAML",
"configuration",
".",
"Arguments",
"are",
"mutally",
"exclusive",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1334-L1397 | train | 235,814 |
UDST/urbansim | urbansim/models/dcm.py | SegmentedMNLDiscreteChoiceModel.add_segment | def add_segment(self, name, model_expression=None):
"""
Add a new segment with its own model expression.
Parameters
----------
name
Segment name. Must match a segment in the groupby of the data.
model_expression : str or dict, optional
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
If not given the default model will be used, which must not be
None.
"""
logger.debug('adding LCM model {} to segmented model {}'.format(
name, self.name))
if not model_expression:
if not self.default_model_expr:
raise ValueError(
'No default model available, '
'you must supply a model expression.')
model_expression = self.default_model_expr
# we'll take care of some of the filtering this side before
# segmentation
self._group.add_model_from_params(
name=name,
model_expression=model_expression,
sample_size=self.sample_size,
probability_mode=self.probability_mode,
choice_mode=self.choice_mode,
choosers_fit_filters=None,
choosers_predict_filters=None,
alts_fit_filters=None,
alts_predict_filters=None,
interaction_predict_filters=self.interaction_predict_filters,
estimation_sample_size=self.estimation_sample_size,
choice_column=self.choice_column) | python | def add_segment(self, name, model_expression=None):
"""
Add a new segment with its own model expression.
Parameters
----------
name
Segment name. Must match a segment in the groupby of the data.
model_expression : str or dict, optional
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
If not given the default model will be used, which must not be
None.
"""
logger.debug('adding LCM model {} to segmented model {}'.format(
name, self.name))
if not model_expression:
if not self.default_model_expr:
raise ValueError(
'No default model available, '
'you must supply a model expression.')
model_expression = self.default_model_expr
# we'll take care of some of the filtering this side before
# segmentation
self._group.add_model_from_params(
name=name,
model_expression=model_expression,
sample_size=self.sample_size,
probability_mode=self.probability_mode,
choice_mode=self.choice_mode,
choosers_fit_filters=None,
choosers_predict_filters=None,
alts_fit_filters=None,
alts_predict_filters=None,
interaction_predict_filters=self.interaction_predict_filters,
estimation_sample_size=self.estimation_sample_size,
choice_column=self.choice_column) | [
"def",
"add_segment",
"(",
"self",
",",
"name",
",",
"model_expression",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'adding LCM model {} to segmented model {}'",
".",
"format",
"(",
"name",
",",
"self",
".",
"name",
")",
")",
"if",
"not",
"model_expression",
":",
"if",
"not",
"self",
".",
"default_model_expr",
":",
"raise",
"ValueError",
"(",
"'No default model available, '",
"'you must supply a model expression.'",
")",
"model_expression",
"=",
"self",
".",
"default_model_expr",
"# we'll take care of some of the filtering this side before",
"# segmentation",
"self",
".",
"_group",
".",
"add_model_from_params",
"(",
"name",
"=",
"name",
",",
"model_expression",
"=",
"model_expression",
",",
"sample_size",
"=",
"self",
".",
"sample_size",
",",
"probability_mode",
"=",
"self",
".",
"probability_mode",
",",
"choice_mode",
"=",
"self",
".",
"choice_mode",
",",
"choosers_fit_filters",
"=",
"None",
",",
"choosers_predict_filters",
"=",
"None",
",",
"alts_fit_filters",
"=",
"None",
",",
"alts_predict_filters",
"=",
"None",
",",
"interaction_predict_filters",
"=",
"self",
".",
"interaction_predict_filters",
",",
"estimation_sample_size",
"=",
"self",
".",
"estimation_sample_size",
",",
"choice_column",
"=",
"self",
".",
"choice_column",
")"
] | Add a new segment with its own model expression.
Parameters
----------
name
Segment name. Must match a segment in the groupby of the data.
model_expression : str or dict, optional
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
If not given the default model will be used, which must not be
None. | [
"Add",
"a",
"new",
"segment",
"with",
"its",
"own",
"model",
"expression",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1399-L1437 | train | 235,815 |
UDST/urbansim | urbansim/models/dcm.py | SegmentedMNLDiscreteChoiceModel.fit | def fit(self, choosers, alternatives, current_choice):
"""
Fit and save models based on given data after segmenting
the `choosers` table. Segments that have not already been explicitly
added will be automatically added with default model.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column with the same name as the .segmentation_col
attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
current_choice
Name of column in `choosers` that indicates which alternative
they have currently chosen.
Returns
-------
log_likelihoods : dict of dict
Keys will be model names and values will be dictionaries of
log-liklihood values as returned by MNLDiscreteChoiceModel.fit.
"""
logger.debug('start: fit models in segmented LCM {}'.format(self.name))
choosers, alternatives = self.apply_fit_filters(choosers, alternatives)
unique = choosers[self.segmentation_col].unique()
# Remove any existing segments that may no longer have counterparts
# in the data. This can happen when loading a saved model and then
# calling this method with data that no longer has segments that
# were there the last time this was called.
gone = set(self._group.models) - set(unique)
for g in gone:
del self._group.models[g]
for x in unique:
if x not in self._group.models:
self.add_segment(x)
results = self._group.fit(choosers, alternatives, current_choice)
logger.debug(
'finish: fit models in segmented LCM {}'.format(self.name))
return results | python | def fit(self, choosers, alternatives, current_choice):
"""
Fit and save models based on given data after segmenting
the `choosers` table. Segments that have not already been explicitly
added will be automatically added with default model.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column with the same name as the .segmentation_col
attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
current_choice
Name of column in `choosers` that indicates which alternative
they have currently chosen.
Returns
-------
log_likelihoods : dict of dict
Keys will be model names and values will be dictionaries of
log-liklihood values as returned by MNLDiscreteChoiceModel.fit.
"""
logger.debug('start: fit models in segmented LCM {}'.format(self.name))
choosers, alternatives = self.apply_fit_filters(choosers, alternatives)
unique = choosers[self.segmentation_col].unique()
# Remove any existing segments that may no longer have counterparts
# in the data. This can happen when loading a saved model and then
# calling this method with data that no longer has segments that
# were there the last time this was called.
gone = set(self._group.models) - set(unique)
for g in gone:
del self._group.models[g]
for x in unique:
if x not in self._group.models:
self.add_segment(x)
results = self._group.fit(choosers, alternatives, current_choice)
logger.debug(
'finish: fit models in segmented LCM {}'.format(self.name))
return results | [
"def",
"fit",
"(",
"self",
",",
"choosers",
",",
"alternatives",
",",
"current_choice",
")",
":",
"logger",
".",
"debug",
"(",
"'start: fit models in segmented LCM {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"choosers",
",",
"alternatives",
"=",
"self",
".",
"apply_fit_filters",
"(",
"choosers",
",",
"alternatives",
")",
"unique",
"=",
"choosers",
"[",
"self",
".",
"segmentation_col",
"]",
".",
"unique",
"(",
")",
"# Remove any existing segments that may no longer have counterparts",
"# in the data. This can happen when loading a saved model and then",
"# calling this method with data that no longer has segments that",
"# were there the last time this was called.",
"gone",
"=",
"set",
"(",
"self",
".",
"_group",
".",
"models",
")",
"-",
"set",
"(",
"unique",
")",
"for",
"g",
"in",
"gone",
":",
"del",
"self",
".",
"_group",
".",
"models",
"[",
"g",
"]",
"for",
"x",
"in",
"unique",
":",
"if",
"x",
"not",
"in",
"self",
".",
"_group",
".",
"models",
":",
"self",
".",
"add_segment",
"(",
"x",
")",
"results",
"=",
"self",
".",
"_group",
".",
"fit",
"(",
"choosers",
",",
"alternatives",
",",
"current_choice",
")",
"logger",
".",
"debug",
"(",
"'finish: fit models in segmented LCM {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"return",
"results"
] | Fit and save models based on given data after segmenting
the `choosers` table. Segments that have not already been explicitly
added will be automatically added with default model.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column with the same name as the .segmentation_col
attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
current_choice
Name of column in `choosers` that indicates which alternative
they have currently chosen.
Returns
-------
log_likelihoods : dict of dict
Keys will be model names and values will be dictionaries of
log-liklihood values as returned by MNLDiscreteChoiceModel.fit. | [
"Fit",
"and",
"save",
"models",
"based",
"on",
"given",
"data",
"after",
"segmenting",
"the",
"choosers",
"table",
".",
"Segments",
"that",
"have",
"not",
"already",
"been",
"explicitly",
"added",
"will",
"be",
"automatically",
"added",
"with",
"default",
"model",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1480-L1526 | train | 235,816 |
UDST/urbansim | urbansim/models/dcm.py | SegmentedMNLDiscreteChoiceModel._filter_choosers_alts | def _filter_choosers_alts(self, choosers, alternatives):
"""
Apply filters to the choosers and alts tables.
"""
return (
util.apply_filter_query(
choosers, self.choosers_predict_filters),
util.apply_filter_query(
alternatives, self.alts_predict_filters)) | python | def _filter_choosers_alts(self, choosers, alternatives):
"""
Apply filters to the choosers and alts tables.
"""
return (
util.apply_filter_query(
choosers, self.choosers_predict_filters),
util.apply_filter_query(
alternatives, self.alts_predict_filters)) | [
"def",
"_filter_choosers_alts",
"(",
"self",
",",
"choosers",
",",
"alternatives",
")",
":",
"return",
"(",
"util",
".",
"apply_filter_query",
"(",
"choosers",
",",
"self",
".",
"choosers_predict_filters",
")",
",",
"util",
".",
"apply_filter_query",
"(",
"alternatives",
",",
"self",
".",
"alts_predict_filters",
")",
")"
] | Apply filters to the choosers and alts tables. | [
"Apply",
"filters",
"to",
"the",
"choosers",
"and",
"alts",
"tables",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1536-L1545 | train | 235,817 |
UDST/urbansim | scripts/cache_to_hdf5.py | cache_to_df | def cache_to_df(dir_path):
"""
Convert a directory of binary array data files to a Pandas DataFrame.
Parameters
----------
dir_path : str
"""
table = {}
for attrib in glob.glob(os.path.join(dir_path, '*')):
attrib_name, attrib_ext = os.path.splitext(os.path.basename(attrib))
if attrib_ext == '.lf8':
attrib_data = np.fromfile(attrib, np.float64)
table[attrib_name] = attrib_data
elif attrib_ext == '.lf4':
attrib_data = np.fromfile(attrib, np.float32)
table[attrib_name] = attrib_data
elif attrib_ext == '.li2':
attrib_data = np.fromfile(attrib, np.int16)
table[attrib_name] = attrib_data
elif attrib_ext == '.li4':
attrib_data = np.fromfile(attrib, np.int32)
table[attrib_name] = attrib_data
elif attrib_ext == '.li8':
attrib_data = np.fromfile(attrib, np.int64)
table[attrib_name] = attrib_data
elif attrib_ext == '.ib1':
attrib_data = np.fromfile(attrib, np.bool_)
table[attrib_name] = attrib_data
elif attrib_ext.startswith('.iS'):
length_string = int(attrib_ext[3:])
attrib_data = np.fromfile(attrib, ('a' + str(length_string)))
table[attrib_name] = attrib_data
else:
print('Array {} is not a recognized data type'.format(attrib))
df = pd.DataFrame(table)
return df | python | def cache_to_df(dir_path):
"""
Convert a directory of binary array data files to a Pandas DataFrame.
Parameters
----------
dir_path : str
"""
table = {}
for attrib in glob.glob(os.path.join(dir_path, '*')):
attrib_name, attrib_ext = os.path.splitext(os.path.basename(attrib))
if attrib_ext == '.lf8':
attrib_data = np.fromfile(attrib, np.float64)
table[attrib_name] = attrib_data
elif attrib_ext == '.lf4':
attrib_data = np.fromfile(attrib, np.float32)
table[attrib_name] = attrib_data
elif attrib_ext == '.li2':
attrib_data = np.fromfile(attrib, np.int16)
table[attrib_name] = attrib_data
elif attrib_ext == '.li4':
attrib_data = np.fromfile(attrib, np.int32)
table[attrib_name] = attrib_data
elif attrib_ext == '.li8':
attrib_data = np.fromfile(attrib, np.int64)
table[attrib_name] = attrib_data
elif attrib_ext == '.ib1':
attrib_data = np.fromfile(attrib, np.bool_)
table[attrib_name] = attrib_data
elif attrib_ext.startswith('.iS'):
length_string = int(attrib_ext[3:])
attrib_data = np.fromfile(attrib, ('a' + str(length_string)))
table[attrib_name] = attrib_data
else:
print('Array {} is not a recognized data type'.format(attrib))
df = pd.DataFrame(table)
return df | [
"def",
"cache_to_df",
"(",
"dir_path",
")",
":",
"table",
"=",
"{",
"}",
"for",
"attrib",
"in",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"'*'",
")",
")",
":",
"attrib_name",
",",
"attrib_ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"attrib",
")",
")",
"if",
"attrib_ext",
"==",
"'.lf8'",
":",
"attrib_data",
"=",
"np",
".",
"fromfile",
"(",
"attrib",
",",
"np",
".",
"float64",
")",
"table",
"[",
"attrib_name",
"]",
"=",
"attrib_data",
"elif",
"attrib_ext",
"==",
"'.lf4'",
":",
"attrib_data",
"=",
"np",
".",
"fromfile",
"(",
"attrib",
",",
"np",
".",
"float32",
")",
"table",
"[",
"attrib_name",
"]",
"=",
"attrib_data",
"elif",
"attrib_ext",
"==",
"'.li2'",
":",
"attrib_data",
"=",
"np",
".",
"fromfile",
"(",
"attrib",
",",
"np",
".",
"int16",
")",
"table",
"[",
"attrib_name",
"]",
"=",
"attrib_data",
"elif",
"attrib_ext",
"==",
"'.li4'",
":",
"attrib_data",
"=",
"np",
".",
"fromfile",
"(",
"attrib",
",",
"np",
".",
"int32",
")",
"table",
"[",
"attrib_name",
"]",
"=",
"attrib_data",
"elif",
"attrib_ext",
"==",
"'.li8'",
":",
"attrib_data",
"=",
"np",
".",
"fromfile",
"(",
"attrib",
",",
"np",
".",
"int64",
")",
"table",
"[",
"attrib_name",
"]",
"=",
"attrib_data",
"elif",
"attrib_ext",
"==",
"'.ib1'",
":",
"attrib_data",
"=",
"np",
".",
"fromfile",
"(",
"attrib",
",",
"np",
".",
"bool_",
")",
"table",
"[",
"attrib_name",
"]",
"=",
"attrib_data",
"elif",
"attrib_ext",
".",
"startswith",
"(",
"'.iS'",
")",
":",
"length_string",
"=",
"int",
"(",
"attrib_ext",
"[",
"3",
":",
"]",
")",
"attrib_data",
"=",
"np",
".",
"fromfile",
"(",
"attrib",
",",
"(",
"'a'",
"+",
"str",
"(",
"length_string",
")",
")",
")",
"table",
"[",
"attrib_name",
"]",
"=",
"attrib_data",
"else",
":",
"print",
"(",
"'Array {} is not a recognized data type'",
".",
"format",
"(",
"attrib",
")",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"table",
")",
"return",
"df"
] | Convert a directory of binary array data files to a Pandas DataFrame.
Parameters
----------
dir_path : str | [
"Convert",
"a",
"directory",
"of",
"binary",
"array",
"data",
"files",
"to",
"a",
"Pandas",
"DataFrame",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/scripts/cache_to_hdf5.py#L14-L60 | train | 235,818 |
UDST/urbansim | scripts/cache_to_hdf5.py | convert_dirs | def convert_dirs(base_dir, hdf_name, complib=None, complevel=0):
"""
Convert nested set of directories to
"""
print('Converting directories in {}'.format(base_dir))
dirs = glob.glob(os.path.join(base_dir, '*'))
dirs = {d for d in dirs if os.path.basename(d) in DIRECTORIES}
if not dirs:
raise RuntimeError('No direcotries found matching known data.')
store = pd.HDFStore(
hdf_name, mode='w', complevel=complevel, complib=complib)
for dirpath in dirs:
dirname = os.path.basename(dirpath)
print(dirname)
df = cache_to_df(dirpath)
if dirname == 'travel_data':
keys = ['from_zone_id', 'to_zone_id']
elif dirname == 'annual_employment_control_totals':
keys = ['sector_id', 'year', 'home_based_status']
elif dirname == 'annual_job_relocation_rates':
keys = ['sector_id']
elif dirname == 'annual_household_control_totals':
keys = ['year']
elif dirname == 'annual_household_relocation_rates':
keys = ['age_of_head_max', 'age_of_head_min',
'income_min', 'income_max']
elif dirname == 'building_sqft_per_job':
keys = ['zone_id', 'building_type_id']
elif dirname == 'counties':
keys = ['county_id']
elif dirname == 'development_event_history':
keys = ['building_id']
elif dirname == 'target_vacancies':
keys = ['building_type_id', 'year']
else:
keys = [dirname[:-1] + '_id']
if dirname != 'annual_household_relocation_rates':
df = df.set_index(keys)
for colname in df.columns:
if df[colname].dtype == np.float64:
df[colname] = df[colname].astype(np.float32)
elif df[colname].dtype == np.int64:
df[colname] = df[colname].astype(np.int32)
else:
df[colname] = df[colname]
df.info()
print(os.linesep)
store.put(dirname, df)
store.close() | python | def convert_dirs(base_dir, hdf_name, complib=None, complevel=0):
"""
Convert nested set of directories to
"""
print('Converting directories in {}'.format(base_dir))
dirs = glob.glob(os.path.join(base_dir, '*'))
dirs = {d for d in dirs if os.path.basename(d) in DIRECTORIES}
if not dirs:
raise RuntimeError('No direcotries found matching known data.')
store = pd.HDFStore(
hdf_name, mode='w', complevel=complevel, complib=complib)
for dirpath in dirs:
dirname = os.path.basename(dirpath)
print(dirname)
df = cache_to_df(dirpath)
if dirname == 'travel_data':
keys = ['from_zone_id', 'to_zone_id']
elif dirname == 'annual_employment_control_totals':
keys = ['sector_id', 'year', 'home_based_status']
elif dirname == 'annual_job_relocation_rates':
keys = ['sector_id']
elif dirname == 'annual_household_control_totals':
keys = ['year']
elif dirname == 'annual_household_relocation_rates':
keys = ['age_of_head_max', 'age_of_head_min',
'income_min', 'income_max']
elif dirname == 'building_sqft_per_job':
keys = ['zone_id', 'building_type_id']
elif dirname == 'counties':
keys = ['county_id']
elif dirname == 'development_event_history':
keys = ['building_id']
elif dirname == 'target_vacancies':
keys = ['building_type_id', 'year']
else:
keys = [dirname[:-1] + '_id']
if dirname != 'annual_household_relocation_rates':
df = df.set_index(keys)
for colname in df.columns:
if df[colname].dtype == np.float64:
df[colname] = df[colname].astype(np.float32)
elif df[colname].dtype == np.int64:
df[colname] = df[colname].astype(np.int32)
else:
df[colname] = df[colname]
df.info()
print(os.linesep)
store.put(dirname, df)
store.close() | [
"def",
"convert_dirs",
"(",
"base_dir",
",",
"hdf_name",
",",
"complib",
"=",
"None",
",",
"complevel",
"=",
"0",
")",
":",
"print",
"(",
"'Converting directories in {}'",
".",
"format",
"(",
"base_dir",
")",
")",
"dirs",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"'*'",
")",
")",
"dirs",
"=",
"{",
"d",
"for",
"d",
"in",
"dirs",
"if",
"os",
".",
"path",
".",
"basename",
"(",
"d",
")",
"in",
"DIRECTORIES",
"}",
"if",
"not",
"dirs",
":",
"raise",
"RuntimeError",
"(",
"'No direcotries found matching known data.'",
")",
"store",
"=",
"pd",
".",
"HDFStore",
"(",
"hdf_name",
",",
"mode",
"=",
"'w'",
",",
"complevel",
"=",
"complevel",
",",
"complib",
"=",
"complib",
")",
"for",
"dirpath",
"in",
"dirs",
":",
"dirname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"dirpath",
")",
"print",
"(",
"dirname",
")",
"df",
"=",
"cache_to_df",
"(",
"dirpath",
")",
"if",
"dirname",
"==",
"'travel_data'",
":",
"keys",
"=",
"[",
"'from_zone_id'",
",",
"'to_zone_id'",
"]",
"elif",
"dirname",
"==",
"'annual_employment_control_totals'",
":",
"keys",
"=",
"[",
"'sector_id'",
",",
"'year'",
",",
"'home_based_status'",
"]",
"elif",
"dirname",
"==",
"'annual_job_relocation_rates'",
":",
"keys",
"=",
"[",
"'sector_id'",
"]",
"elif",
"dirname",
"==",
"'annual_household_control_totals'",
":",
"keys",
"=",
"[",
"'year'",
"]",
"elif",
"dirname",
"==",
"'annual_household_relocation_rates'",
":",
"keys",
"=",
"[",
"'age_of_head_max'",
",",
"'age_of_head_min'",
",",
"'income_min'",
",",
"'income_max'",
"]",
"elif",
"dirname",
"==",
"'building_sqft_per_job'",
":",
"keys",
"=",
"[",
"'zone_id'",
",",
"'building_type_id'",
"]",
"elif",
"dirname",
"==",
"'counties'",
":",
"keys",
"=",
"[",
"'county_id'",
"]",
"elif",
"dirname",
"==",
"'development_event_history'",
":",
"keys",
"=",
"[",
"'building_id'",
"]",
"elif",
"dirname",
"==",
"'target_vacancies'",
":",
"keys",
"=",
"[",
"'building_type_id'",
",",
"'year'",
"]",
"else",
":",
"keys",
"=",
"[",
"dirname",
"[",
":",
"-",
"1",
"]",
"+",
"'_id'",
"]",
"if",
"dirname",
"!=",
"'annual_household_relocation_rates'",
":",
"df",
"=",
"df",
".",
"set_index",
"(",
"keys",
")",
"for",
"colname",
"in",
"df",
".",
"columns",
":",
"if",
"df",
"[",
"colname",
"]",
".",
"dtype",
"==",
"np",
".",
"float64",
":",
"df",
"[",
"colname",
"]",
"=",
"df",
"[",
"colname",
"]",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"elif",
"df",
"[",
"colname",
"]",
".",
"dtype",
"==",
"np",
".",
"int64",
":",
"df",
"[",
"colname",
"]",
"=",
"df",
"[",
"colname",
"]",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"else",
":",
"df",
"[",
"colname",
"]",
"=",
"df",
"[",
"colname",
"]",
"df",
".",
"info",
"(",
")",
"print",
"(",
"os",
".",
"linesep",
")",
"store",
".",
"put",
"(",
"dirname",
",",
"df",
")",
"store",
".",
"close",
"(",
")"
] | Convert nested set of directories to | [
"Convert",
"nested",
"set",
"of",
"directories",
"to"
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/scripts/cache_to_hdf5.py#L72-L130 | train | 235,819 |
def get_run_number():
    """
    Get a run number for this execution of the model system, for
    identifying the output hdf5 files).

    Returns
    -------
    The integer number for this run of the model system.

    Notes
    -----
    Reads and increments a ``RUNNUM`` counter file under ``$DATA_HOME``
    (current directory if unset). Not safe against concurrent runs --
    two simultaneous processes can read the same number.
    """
    runnum_path = os.path.join(os.getenv('DATA_HOME', "."), 'RUNNUM')
    try:
        # context manager guarantees the handle is closed even if the
        # contents are not a valid integer
        with open(runnum_path, 'r') as f:
            num = int(f.read())
    except (OSError, IOError, ValueError):
        # no counter file yet, or unreadable/garbage contents: restart at 1
        num = 1
    with open(runnum_path, 'w') as f:
        f.write(str(num + 1))
    return num
"def",
"get_run_number",
"(",
")",
":",
"try",
":",
"f",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getenv",
"(",
"'DATA_HOME'",
",",
"\".\"",
")",
",",
"'RUNNUM'",
")",
",",
"'r'",
")",
"num",
"=",
"int",
"(",
"f",
".",
"read",
"(",
")",
")",
"f",
".",
"close",
"(",
")",
"except",
"Exception",
":",
"num",
"=",
"1",
"f",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getenv",
"(",
"'DATA_HOME'",
",",
"\".\"",
")",
",",
"'RUNNUM'",
")",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"str",
"(",
"num",
"+",
"1",
")",
")",
"f",
".",
"close",
"(",
")",
"return",
"num"
] | Get a run number for this execution of the model system, for
identifying the output hdf5 files).
Returns
-------
The integer number for this run of the model system. | [
"Get",
"a",
"run",
"number",
"for",
"this",
"execution",
"of",
"the",
"model",
"system",
"for",
"identifying",
"the",
"output",
"hdf5",
"files",
")",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L97-L115 | train | 235,820 |
def compute_range(travel_data, attr, travel_time_attr, dist, agg=np.sum):
    """
    Compute a zone-based accessibility query using the urbansim format
    travel data dataframe.

    Parameters
    ----------
    travel_data : dataframe
        The dataframe of urbansim format travel data. Has from_zone_id as
        first index, to_zone_id as second index, and different impedances
        between zones as columns.
    attr : series
        The attr to aggregate. Should be indexed by zone_id and the values
        will be aggregated.
    travel_time_attr : string
        The column name in travel_data to use as the impedance.
    dist : float
        The max distance to aggregate up to
    agg : function, optional, np.sum by default
        The numpy function to use for aggregation
    """
    # move to_zone_id out of the index so it can be used as a lookup key
    reachable = travel_data.reset_index(level=1)
    # keep only the zone pairs within the impedance threshold
    reachable = reachable[reachable[travel_time_attr] < dist]
    # attach the attribute value of each destination zone
    reachable["attr"] = attr[reachable.to_zone_id].values
    # aggregate over the origin zone (the remaining index level)
    return reachable.groupby(level=0).attr.apply(agg)
"def",
"compute_range",
"(",
"travel_data",
",",
"attr",
",",
"travel_time_attr",
",",
"dist",
",",
"agg",
"=",
"np",
".",
"sum",
")",
":",
"travel_data",
"=",
"travel_data",
".",
"reset_index",
"(",
"level",
"=",
"1",
")",
"travel_data",
"=",
"travel_data",
"[",
"travel_data",
"[",
"travel_time_attr",
"]",
"<",
"dist",
"]",
"travel_data",
"[",
"\"attr\"",
"]",
"=",
"attr",
"[",
"travel_data",
".",
"to_zone_id",
"]",
".",
"values",
"return",
"travel_data",
".",
"groupby",
"(",
"level",
"=",
"0",
")",
".",
"attr",
".",
"apply",
"(",
"agg",
")"
] | Compute a zone-based accessibility query using the urbansim format
travel data dataframe.
Parameters
----------
travel_data : dataframe
The dataframe of urbansim format travel data. Has from_zone_id as
first index, to_zone_id as second index, and different impedances
between zones as columns.
attr : series
The attr to aggregate. Should be indexed by zone_id and the values
will be aggregated.
travel_time_attr : string
The column name in travel_data to use as the impedance.
dist : float
The max distance to aggregate up to
agg : function, optional, np.sum by default
The numpy function to use for aggregation | [
"Compute",
"a",
"zone",
"-",
"based",
"accessibility",
"query",
"using",
"the",
"urbansim",
"format",
"travel",
"data",
"dataframe",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L118-L142 | train | 235,821 |
def reindex(series1, series2):
    """
    This reindexes the first series by the second series. This is an extremely
    common operation that does not appear to be in Pandas at this time.
    If anyone knows of an easier way to do this in Pandas, please inform the
    UrbanSim developers.
    The canonical example would be a parcel series which has an index which is
    parcel_ids and a value which you want to fetch, let's say it's land_area.
    Another dataset, let's say of buildings has a series which indicate the
    parcel_ids that the buildings are located on, but which does not have
    land_area. If you pass parcels.land_area as the first series and
    buildings.parcel_id as the second series, this function returns a series
    which is indexed by buildings and has land_area as values and can be
    added to the buildings dataset.
    In short, this is a join on to a different table using a foreign key
    stored in the current table, but with only one attribute rather than
    for a full dataset.
    This is very similar to the pandas "loc" function or "reindex" function,
    but neither of those functions return the series indexed on the current
    table. In both of those cases, the series would be indexed on the foreign
    table and would require a second step to change the index.
    """
    # a merge turns out to be much faster than .loc for this lookup
    keys = pd.DataFrame({"left": series2})
    values = pd.DataFrame({"right": series1})
    merged = keys.merge(values, left_on="left", right_index=True, how="left")
    return merged.right
"def",
"reindex",
"(",
"series1",
",",
"series2",
")",
":",
"# turns out the merge is much faster than the .loc below",
"df",
"=",
"pd",
".",
"merge",
"(",
"pd",
".",
"DataFrame",
"(",
"{",
"\"left\"",
":",
"series2",
"}",
")",
",",
"pd",
".",
"DataFrame",
"(",
"{",
"\"right\"",
":",
"series1",
"}",
")",
",",
"left_on",
"=",
"\"left\"",
",",
"right_index",
"=",
"True",
",",
"how",
"=",
"\"left\"",
")",
"return",
"df",
".",
"right"
] | This reindexes the first series by the second series. This is an extremely
common operation that does not appear to be in Pandas at this time.
If anyone knows of an easier way to do this in Pandas, please inform the
UrbanSim developers.
The canonical example would be a parcel series which has an index which is
parcel_ids and a value which you want to fetch, let's say it's land_area.
Another dataset, let's say of buildings has a series which indicate the
parcel_ids that the buildings are located on, but which does not have
land_area. If you pass parcels.land_area as the first series and
buildings.parcel_id as the second series, this function returns a series
which is indexed by buildings and has land_area as values and can be
added to the buildings dataset.
In short, this is a join on to a different table using a foreign key
stored in the current table, but with only one attribute rather than
for a full dataset.
This is very similar to the pandas "loc" function or "reindex" function,
but neither of those functions return the series indexed on the current
table. In both of those cases, the series would be indexed on the foreign
table and would require a second step to change the index. | [
"This",
"reindexes",
"the",
"first",
"series",
"by",
"the",
"second",
"series",
".",
"This",
"is",
"an",
"extremely",
"common",
"operation",
"that",
"does",
"not",
"appear",
"to",
"be",
"in",
"Pandas",
"at",
"this",
"time",
".",
"If",
"anyone",
"knows",
"of",
"an",
"easier",
"way",
"to",
"do",
"this",
"in",
"Pandas",
"please",
"inform",
"the",
"UrbanSim",
"developers",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L145-L177 | train | 235,822 |
def df64bitto32bit(tbl):
    """
    Convert a Pandas dataframe from 64 bit types to 32 bit types to save
    memory or disk space.

    Parameters
    ----------
    tbl : The dataframe to convert

    Returns
    -------
    The converted dataframe
    """
    # build a fresh frame so the input is left untouched; each column is
    # downcast individually by the series-level helper
    converted = pd.DataFrame(index=tbl.index)
    for name, col in tbl.items():
        converted[name] = series64bitto32bit(col)
    return converted
"def",
"df64bitto32bit",
"(",
"tbl",
")",
":",
"newtbl",
"=",
"pd",
".",
"DataFrame",
"(",
"index",
"=",
"tbl",
".",
"index",
")",
"for",
"colname",
"in",
"tbl",
".",
"columns",
":",
"newtbl",
"[",
"colname",
"]",
"=",
"series64bitto32bit",
"(",
"tbl",
"[",
"colname",
"]",
")",
"return",
"newtbl"
] | Convert a Pandas dataframe from 64 bit types to 32 bit types to save
memory or disk space.
Parameters
----------
tbl : The dataframe to convert
Returns
-------
The converted dataframe | [
"Convert",
"a",
"Pandas",
"dataframe",
"from",
"64",
"bit",
"types",
"to",
"32",
"bit",
"types",
"to",
"save",
"memory",
"or",
"disk",
"space",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L320-L336 | train | 235,823 |
def series64bitto32bit(s):
    """
    Convert a Pandas series from 64 bit types to 32 bit types to save
    memory or disk space.

    Parameters
    ----------
    s : The series to convert

    Returns
    -------
    The converted series
    """
    # only the two 64-bit numeric dtypes are downcast; anything else
    # (object, bool, already-32-bit, ...) passes through unchanged
    if s.dtype == np.int64:
        return s.astype('int32')
    if s.dtype == np.float64:
        return s.astype('float32')
    return s
"def",
"series64bitto32bit",
"(",
"s",
")",
":",
"if",
"s",
".",
"dtype",
"==",
"np",
".",
"float64",
":",
"return",
"s",
".",
"astype",
"(",
"'float32'",
")",
"elif",
"s",
".",
"dtype",
"==",
"np",
".",
"int64",
":",
"return",
"s",
".",
"astype",
"(",
"'int32'",
")",
"return",
"s"
] | Convert a Pandas series from 64 bit types to 32 bit types to save
memory or disk space.
Parameters
----------
s : The series to convert
Returns
-------
The converted series | [
"Convert",
"a",
"Pandas",
"series",
"from",
"64",
"bit",
"types",
"to",
"32",
"bit",
"types",
"to",
"save",
"memory",
"or",
"disk",
"space",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L339-L356 | train | 235,824 |
def pandasdfsummarytojson(df, ndigits=3):
    """
    Convert the result of a

    Parameters
    ----------
    df : The result of a Pandas describe operation.
    ndigits : int, optional - The number of significant digits to round to.

    Returns
    -------
    A json object which captures the describe. Keys are field names and
    values are dictionaries with all of the indexes returned by the Pandas
    describe.
    """
    # transpose so that each row corresponds to one original field
    transposed = df.transpose()
    summary = {}
    for field, stats in transposed.iterrows():
        summary[field] = _pandassummarytojson(stats, ndigits)
    return summary
"def",
"pandasdfsummarytojson",
"(",
"df",
",",
"ndigits",
"=",
"3",
")",
":",
"df",
"=",
"df",
".",
"transpose",
"(",
")",
"return",
"{",
"k",
":",
"_pandassummarytojson",
"(",
"v",
",",
"ndigits",
")",
"for",
"k",
",",
"v",
"in",
"df",
".",
"iterrows",
"(",
")",
"}"
] | Convert the result of a
Parameters
----------
df : The result of a Pandas describe operation.
ndigits : int, optional - The number of significant digits to round to.
Returns
-------
A json object which captures the describe. Keys are field names and
values are dictionaries with all of the indexes returned by the Pandas
describe. | [
"Convert",
"the",
"result",
"of",
"a"
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L363-L379 | train | 235,825 |
def column_map(tables, columns):
    """
    Take a list of tables and a list of column names and resolve which
    columns come from which table.

    Parameters
    ----------
    tables : sequence of _DataFrameWrapper or _TableFuncWrapper
        Could also be sequence of modified pandas.DataFrames, the important
        thing is that they have ``.name`` and ``.columns`` attributes.
    columns : sequence of str
        The column names of interest.

    Returns
    -------
    col_map : dict
        Maps table names to lists of column names.

    Raises
    ------
    RuntimeError
        If any of the requested columns is not found in any table.
    """
    if not columns:
        return {t.name: None for t in tables}

    columns = set(columns)
    colmap = {
        t.name: list(set(t.columns).intersection(columns)) for t in tables}
    # set().union(*...) replaces the previous tz.reduce: same result, no
    # third-party dependency, and it no longer raises on an empty table list
    foundcols = set().union(*(set(v) for v in colmap.values()))
    if foundcols != columns:
        raise RuntimeError('Not all required columns were found. '
                           'Missing: {}'.format(list(columns - foundcols)))
    return colmap
"def",
"column_map",
"(",
"tables",
",",
"columns",
")",
":",
"if",
"not",
"columns",
":",
"return",
"{",
"t",
".",
"name",
":",
"None",
"for",
"t",
"in",
"tables",
"}",
"columns",
"=",
"set",
"(",
"columns",
")",
"colmap",
"=",
"{",
"t",
".",
"name",
":",
"list",
"(",
"set",
"(",
"t",
".",
"columns",
")",
".",
"intersection",
"(",
"columns",
")",
")",
"for",
"t",
"in",
"tables",
"}",
"foundcols",
"=",
"tz",
".",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
".",
"union",
"(",
"y",
")",
",",
"(",
"set",
"(",
"v",
")",
"for",
"v",
"in",
"colmap",
".",
"values",
"(",
")",
")",
")",
"if",
"foundcols",
"!=",
"columns",
":",
"raise",
"RuntimeError",
"(",
"'Not all required columns were found. '",
"'Missing: {}'",
".",
"format",
"(",
"list",
"(",
"columns",
"-",
"foundcols",
")",
")",
")",
"return",
"colmap"
] | Take a list of tables and a list of column names and resolve which
columns come from which table.
Parameters
----------
tables : sequence of _DataFrameWrapper or _TableFuncWrapper
Could also be sequence of modified pandas.DataFrames, the important
thing is that they have ``.name`` and ``.columns`` attributes.
columns : sequence of str
The column names of interest.
Returns
-------
col_map : dict
Maps table names to lists of column names. | [
"Take",
"a",
"list",
"of",
"tables",
"and",
"a",
"list",
"of",
"column",
"names",
"and",
"resolve",
"which",
"columns",
"come",
"from",
"which",
"table",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L382-L410 | train | 235,826 |
def column_list(tables, columns):
    """
    Take a list of tables and a list of column names and return the columns
    that are present in the tables.

    Parameters
    ----------
    tables : sequence of _DataFrameWrapper or _TableFuncWrapper
        Could also be sequence of modified pandas.DataFrames, the important
        thing is that they have ``.name`` and ``.columns`` attributes.
    columns : sequence of str
        The column names of interest.

    Returns
    -------
    cols : list
        Lists of column names available in the tables. Order is arbitrary
        (it comes from a set intersection).
    """
    columns = set(columns)
    # set().union(*...) replaces the previous tz.reduce: same result, no
    # third-party dependency, and it no longer raises on an empty table list
    foundcols = set().union(*(set(t.columns) for t in tables))
    return list(columns.intersection(foundcols))
"def",
"column_list",
"(",
"tables",
",",
"columns",
")",
":",
"columns",
"=",
"set",
"(",
"columns",
")",
"foundcols",
"=",
"tz",
".",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
".",
"union",
"(",
"y",
")",
",",
"(",
"set",
"(",
"t",
".",
"columns",
")",
"for",
"t",
"in",
"tables",
")",
")",
"return",
"list",
"(",
"columns",
".",
"intersection",
"(",
"foundcols",
")",
")"
] | Take a list of tables and a list of column names and return the columns
that are present in the tables.
Parameters
----------
tables : sequence of _DataFrameWrapper or _TableFuncWrapper
Could also be sequence of modified pandas.DataFrames, the important
thing is that they have ``.name`` and ``.columns`` attributes.
columns : sequence of str
The column names of interest.
Returns
-------
cols : list
Lists of column names available in the tables. | [
"Take",
"a",
"list",
"of",
"tables",
"and",
"a",
"list",
"of",
"column",
"names",
"and",
"return",
"the",
"columns",
"that",
"are",
"present",
"in",
"the",
"tables",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/misc.py#L413-L434 | train | 235,827 |
def accounting_sample_replace(total, data, accounting_column, prob_column=None,
                              max_iterations=50):
    """
    Sample rows with accounting with replacement.

    Parameters
    ----------
    total : int
        The control total the sampled rows will attempt to match.
    data: pandas.DataFrame
        Table to sample from.
    accounting_column: string
        Name of column with accounting totals/quantities to apply towards
        the control.
    prob_column: string, optional, default None
        Name of the column in the data to provide probabilities or weights.
    max_iterations: int, optional, default 50
        When using an accounting attribute, the maximum number of sampling
        iterations that will be applied.

    Returns
    -------
    sample_rows : pandas.DataFrame
        Table containing the sample (the closest result found; empty if the
        control total is 0).
    matched: bool
        Indicates if the total was matched exactly.
    """
    # check for probabilities
    p = get_probs(data, prob_column)

    # determine avg number of accounting items per sample
    # (e.g. persons per household)
    per_sample = data[accounting_column].sum() / (1.0 * len(data.index.values))

    remaining = total
    sample_rows = pd.DataFrame()

    # track the best result seen so far; seeding with the empty sample means
    # a control total of 0 returns an empty frame instead of None
    closest = sample_rows
    closest_remain = abs(total)

    for i in range(0, max_iterations):
        # stop if we've hit the control
        if remaining == 0:
            break

        # if sampling with probabilities, re-calc the # of items per sample
        # after the initial sample, this way the sample size reflects the
        # probabilities
        if p is not None and i == 1:
            per_sample = (
                sample_rows[accounting_column].sum() /
                (1.0 * len(sample_rows)))

        # update the sample
        num_samples = int(math.ceil(math.fabs(remaining) / per_sample))
        if remaining > 0:
            # we're short, add to the sample
            curr_ids = np.random.choice(data.index.values, num_samples, p=p)
            sample_rows = pd.concat([sample_rows, data.loc[curr_ids]])
        else:
            # we've overshot, remove from existing samples (FIFO)
            sample_rows = sample_rows.iloc[num_samples:].copy()

        # update the total and remember the closest result so far
        remaining = total - sample_rows[accounting_column].sum()
        if abs(remaining) < closest_remain:
            closest_remain = abs(remaining)
            closest = sample_rows

    # derive the match from the best remaining gap instead of a flag set at
    # the top of the loop: an exact match achieved on the final iteration
    # (which the loop never re-enters to observe) is now reported correctly
    matched = closest_remain == 0
    return closest, matched
"def",
"accounting_sample_replace",
"(",
"total",
",",
"data",
",",
"accounting_column",
",",
"prob_column",
"=",
"None",
",",
"max_iterations",
"=",
"50",
")",
":",
"# check for probabilities",
"p",
"=",
"get_probs",
"(",
"data",
",",
"prob_column",
")",
"# determine avg number of accounting items per sample (e.g. persons per household)",
"per_sample",
"=",
"data",
"[",
"accounting_column",
"]",
".",
"sum",
"(",
")",
"/",
"(",
"1.0",
"*",
"len",
"(",
"data",
".",
"index",
".",
"values",
")",
")",
"curr_total",
"=",
"0",
"remaining",
"=",
"total",
"sample_rows",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"closest",
"=",
"None",
"closest_remain",
"=",
"total",
"matched",
"=",
"False",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"max_iterations",
")",
":",
"# stop if we've hit the control",
"if",
"remaining",
"==",
"0",
":",
"matched",
"=",
"True",
"break",
"# if sampling with probabilities, re-caclc the # of items per sample",
"# after the initial sample, this way the sample size reflects the probabilities",
"if",
"p",
"is",
"not",
"None",
"and",
"i",
"==",
"1",
":",
"per_sample",
"=",
"sample_rows",
"[",
"accounting_column",
"]",
".",
"sum",
"(",
")",
"/",
"(",
"1.0",
"*",
"len",
"(",
"sample_rows",
")",
")",
"# update the sample",
"num_samples",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"math",
".",
"fabs",
"(",
"remaining",
")",
"/",
"per_sample",
")",
")",
"if",
"remaining",
">",
"0",
":",
"# we're short, add to the sample",
"curr_ids",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"data",
".",
"index",
".",
"values",
",",
"num_samples",
",",
"p",
"=",
"p",
")",
"sample_rows",
"=",
"pd",
".",
"concat",
"(",
"[",
"sample_rows",
",",
"data",
".",
"loc",
"[",
"curr_ids",
"]",
"]",
")",
"else",
":",
"# we've overshot, remove from existing samples (FIFO)",
"sample_rows",
"=",
"sample_rows",
".",
"iloc",
"[",
"num_samples",
":",
"]",
".",
"copy",
"(",
")",
"# update the total and check for the closest result",
"curr_total",
"=",
"sample_rows",
"[",
"accounting_column",
"]",
".",
"sum",
"(",
")",
"remaining",
"=",
"total",
"-",
"curr_total",
"if",
"abs",
"(",
"remaining",
")",
"<",
"closest_remain",
":",
"closest_remain",
"=",
"abs",
"(",
"remaining",
")",
"closest",
"=",
"sample_rows",
"return",
"closest",
",",
"matched"
] | Sample rows with accounting with replacement.
Parameters
----------
total : int
The control total the sampled rows will attempt to match.
data: pandas.DataFrame
Table to sample from.
accounting_column: string
Name of column with accounting totals/quantities to apply towards the control.
prob_column: string, optional, default None
Name of the column in the data to provide probabilities or weights.
max_iterations: int, optional, default 50
When using an accounting attribute, the maximum number of sampling iterations
that will be applied.
Returns
-------
sample_rows : pandas.DataFrame
Table containing the sample.
matched: bool
Indicates if the total was matched exactly. | [
"Sample",
"rows",
"with",
"accounting",
"with",
"replacement",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/sampling.py#L35-L105 | train | 235,828 |
def accounting_sample_no_replace(total, data, accounting_column, prob_column=None):
    """
    Samples rows with accounting without replacement.

    Parameters
    ----------
    total : int
        The control total the sampled rows will attempt to match.
    data: pandas.DataFrame
        Table to sample from.
    accounting_column: string
        Name of column with accounting totals/quantities to apply towards
        the control.
    prob_column: string, optional, default None
        Name of the column in the data to provide probabilities or weights.

    Returns
    -------
    sample_rows : pandas.DataFrame
        Table containing the sample.
    matched: bool
        Indicates if the total was matched exactly.

    Raises
    ------
    ValueError
        If the control total exceeds the sum of the accounting column.
    """
    # make sure this is even feasible
    if total > data[accounting_column].sum():
        raise ValueError('Control total exceeds the available samples')

    # check for probabilities
    p = get_probs(data, prob_column)

    # shuffle the rows
    if p is None:
        # random shuffle
        shuff_idx = np.random.permutation(data.index.values)
    else:
        # weighted shuffle via the exponential-keys trick: rows with larger
        # keys come first with probability proportional to their weights.
        # NOTE: sort_values returns a new series -- the result must be
        # re-assigned (previously it was discarded, so the weighted shuffle
        # silently never happened)
        ran_p = pd.Series(np.power(np.random.rand(len(p)), 1.0 / p),
                          index=data.index)
        ran_p = ran_p.sort_values(ascending=False)
        shuff_idx = ran_p.index.values

    # initial sample: the longest prefix of the shuffle whose accounting
    # total does not exceed the control
    shuffle = data.loc[shuff_idx]
    csum = np.cumsum(shuffle[accounting_column].values)
    pos = np.searchsorted(csum, total, 'right')
    sample = shuffle.iloc[:pos]

    # refine the sample: walk the remaining rows and add any that still fit
    sample_idx = sample.index.values
    shortage = total - sample[accounting_column].sum()
    for idx, row in shuffle.iloc[pos:].iterrows():
        if shortage == 0:
            # we've matched
            break
        # add the current element if it doesn't exceed the total
        cnt = row[accounting_column]
        if cnt <= shortage:
            sample_idx = np.append(sample_idx, idx)
            shortage -= cnt

    # derive the match from the final shortage so a total satisfied by the
    # very last candidate row (loop exits without re-checking) is reported
    matched = shortage == 0
    return shuffle.loc[sample_idx].copy(), matched
"""
Samples rows with accounting without replacement.
Parameters
----------
total : int
The control total the sampled rows will attempt to match.
data: pandas.DataFrame
Table to sample from.
accounting_column: string
Name of column with accounting totals/quantities to apply towards the control.
prob_column: string, optional, default None
Name of the column in the data to provide probabilities or weights.
Returns
-------
sample_rows : pandas.DataFrame
Table containing the sample.
matched: bool
Indicates if the total was matched exactly.
"""
# make sure this is even feasible
if total > data[accounting_column].sum():
raise ValueError('Control total exceeds the available samples')
# check for probabilities
p = get_probs(data, prob_column)
# shuffle the rows
if p is None:
# random shuffle
shuff_idx = np.random.permutation(data.index.values)
else:
# weighted shuffle
ran_p = pd.Series(np.power(np.random.rand(len(p)), 1.0 / p), index=data.index)
ran_p.sort_values(ascending=False)
shuff_idx = ran_p.index.values
# get the initial sample
shuffle = data.loc[shuff_idx]
csum = np.cumsum(shuffle[accounting_column].values)
pos = np.searchsorted(csum, total, 'right')
sample = shuffle.iloc[:pos]
# refine the sample
sample_idx = sample.index.values
sample_total = sample[accounting_column].sum()
shortage = total - sample_total
matched = False
for idx, row in shuffle.iloc[pos:].iterrows():
if shortage == 0:
# we've matached
matched = True
break
# add the current element if it doesnt exceed the total
cnt = row[accounting_column]
if cnt <= shortage:
sample_idx = np.append(sample_idx, idx)
shortage -= cnt
return shuffle.loc[sample_idx].copy(), matched | [
"def",
"accounting_sample_no_replace",
"(",
"total",
",",
"data",
",",
"accounting_column",
",",
"prob_column",
"=",
"None",
")",
":",
"# make sure this is even feasible",
"if",
"total",
">",
"data",
"[",
"accounting_column",
"]",
".",
"sum",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'Control total exceeds the available samples'",
")",
"# check for probabilities",
"p",
"=",
"get_probs",
"(",
"data",
",",
"prob_column",
")",
"# shuffle the rows",
"if",
"p",
"is",
"None",
":",
"# random shuffle",
"shuff_idx",
"=",
"np",
".",
"random",
".",
"permutation",
"(",
"data",
".",
"index",
".",
"values",
")",
"else",
":",
"# weighted shuffle",
"ran_p",
"=",
"pd",
".",
"Series",
"(",
"np",
".",
"power",
"(",
"np",
".",
"random",
".",
"rand",
"(",
"len",
"(",
"p",
")",
")",
",",
"1.0",
"/",
"p",
")",
",",
"index",
"=",
"data",
".",
"index",
")",
"ran_p",
".",
"sort_values",
"(",
"ascending",
"=",
"False",
")",
"shuff_idx",
"=",
"ran_p",
".",
"index",
".",
"values",
"# get the initial sample",
"shuffle",
"=",
"data",
".",
"loc",
"[",
"shuff_idx",
"]",
"csum",
"=",
"np",
".",
"cumsum",
"(",
"shuffle",
"[",
"accounting_column",
"]",
".",
"values",
")",
"pos",
"=",
"np",
".",
"searchsorted",
"(",
"csum",
",",
"total",
",",
"'right'",
")",
"sample",
"=",
"shuffle",
".",
"iloc",
"[",
":",
"pos",
"]",
"# refine the sample",
"sample_idx",
"=",
"sample",
".",
"index",
".",
"values",
"sample_total",
"=",
"sample",
"[",
"accounting_column",
"]",
".",
"sum",
"(",
")",
"shortage",
"=",
"total",
"-",
"sample_total",
"matched",
"=",
"False",
"for",
"idx",
",",
"row",
"in",
"shuffle",
".",
"iloc",
"[",
"pos",
":",
"]",
".",
"iterrows",
"(",
")",
":",
"if",
"shortage",
"==",
"0",
":",
"# we've matached",
"matched",
"=",
"True",
"break",
"# add the current element if it doesnt exceed the total",
"cnt",
"=",
"row",
"[",
"accounting_column",
"]",
"if",
"cnt",
"<=",
"shortage",
":",
"sample_idx",
"=",
"np",
".",
"append",
"(",
"sample_idx",
",",
"idx",
")",
"shortage",
"-=",
"cnt",
"return",
"shuffle",
".",
"loc",
"[",
"sample_idx",
"]",
".",
"copy",
"(",
")",
",",
"matched"
] | Samples rows with accounting without replacement.
Parameters
----------
total : int
The control total the sampled rows will attempt to match.
data: pandas.DataFrame
Table to sample from.
accounting_column: string
Name of column with accounting totals/quantities to apply towards the control.
prob_column: string, optional, default None
Name of the column in the data to provide probabilities or weights.
Returns
-------
sample_rows : pandas.DataFrame
Table containing the sample.
matched: bool
Indicates if the total was matched exactly. | [
"Samples",
"rows",
"with",
"accounting",
"without",
"replacement",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/sampling.py#L108-L172 | train | 235,829 |
UDST/urbansim | urbansim/developer/sqftproforma.py | SqFtProFormaConfig._convert_types | def _convert_types(self):
"""
convert lists and dictionaries that are useful for users to
np vectors that are usable by machines
"""
self.fars = np.array(self.fars)
self.parking_rates = np.array([self.parking_rates[use] for use in self.uses])
self.res_ratios = {}
assert len(self.uses) == len(self.residential_uses)
for k, v in self.forms.items():
self.forms[k] = np.array([self.forms[k].get(use, 0.0) for use in self.uses])
# normalize if not already
self.forms[k] /= self.forms[k].sum()
self.res_ratios[k] = pd.Series(self.forms[k])[self.residential_uses].sum()
self.costs = np.transpose(np.array([self.costs[use] for use in self.uses])) | python | def _convert_types(self):
"""
convert lists and dictionaries that are useful for users to
np vectors that are usable by machines
"""
self.fars = np.array(self.fars)
self.parking_rates = np.array([self.parking_rates[use] for use in self.uses])
self.res_ratios = {}
assert len(self.uses) == len(self.residential_uses)
for k, v in self.forms.items():
self.forms[k] = np.array([self.forms[k].get(use, 0.0) for use in self.uses])
# normalize if not already
self.forms[k] /= self.forms[k].sum()
self.res_ratios[k] = pd.Series(self.forms[k])[self.residential_uses].sum()
self.costs = np.transpose(np.array([self.costs[use] for use in self.uses])) | [
"def",
"_convert_types",
"(",
"self",
")",
":",
"self",
".",
"fars",
"=",
"np",
".",
"array",
"(",
"self",
".",
"fars",
")",
"self",
".",
"parking_rates",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"parking_rates",
"[",
"use",
"]",
"for",
"use",
"in",
"self",
".",
"uses",
"]",
")",
"self",
".",
"res_ratios",
"=",
"{",
"}",
"assert",
"len",
"(",
"self",
".",
"uses",
")",
"==",
"len",
"(",
"self",
".",
"residential_uses",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"forms",
".",
"items",
"(",
")",
":",
"self",
".",
"forms",
"[",
"k",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"forms",
"[",
"k",
"]",
".",
"get",
"(",
"use",
",",
"0.0",
")",
"for",
"use",
"in",
"self",
".",
"uses",
"]",
")",
"# normalize if not already",
"self",
".",
"forms",
"[",
"k",
"]",
"/=",
"self",
".",
"forms",
"[",
"k",
"]",
".",
"sum",
"(",
")",
"self",
".",
"res_ratios",
"[",
"k",
"]",
"=",
"pd",
".",
"Series",
"(",
"self",
".",
"forms",
"[",
"k",
"]",
")",
"[",
"self",
".",
"residential_uses",
"]",
".",
"sum",
"(",
")",
"self",
".",
"costs",
"=",
"np",
".",
"transpose",
"(",
"np",
".",
"array",
"(",
"[",
"self",
".",
"costs",
"[",
"use",
"]",
"for",
"use",
"in",
"self",
".",
"uses",
"]",
")",
")"
] | convert lists and dictionaries that are useful for users to
np vectors that are usable by machines | [
"convert",
"lists",
"and",
"dictionaries",
"that",
"are",
"useful",
"for",
"users",
"to",
"np",
"vectors",
"that",
"are",
"usable",
"by",
"machines"
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/sqftproforma.py#L192-L207 | train | 235,830 |
UDST/urbansim | urbansim/developer/sqftproforma.py | SqFtProForma._building_cost | def _building_cost(self, use_mix, stories):
"""
Generate building cost for a set of buildings
Parameters
----------
use_mix : array
The mix of uses for this form
stories : series
A Pandas Series of stories
Returns
-------
array
The cost per sqft for this unit mix and height.
"""
c = self.config
# stories to heights
heights = stories * c.height_per_story
# cost index for this height
costs = np.searchsorted(c.heights_for_costs, heights)
# this will get set to nan later
costs[np.isnan(heights)] = 0
# compute cost with matrix multiply
costs = np.dot(np.squeeze(c.costs[costs.astype('int32')]), use_mix)
# some heights aren't allowed - cost should be nan
costs[np.isnan(stories).flatten()] = np.nan
return costs.flatten() | python | def _building_cost(self, use_mix, stories):
"""
Generate building cost for a set of buildings
Parameters
----------
use_mix : array
The mix of uses for this form
stories : series
A Pandas Series of stories
Returns
-------
array
The cost per sqft for this unit mix and height.
"""
c = self.config
# stories to heights
heights = stories * c.height_per_story
# cost index for this height
costs = np.searchsorted(c.heights_for_costs, heights)
# this will get set to nan later
costs[np.isnan(heights)] = 0
# compute cost with matrix multiply
costs = np.dot(np.squeeze(c.costs[costs.astype('int32')]), use_mix)
# some heights aren't allowed - cost should be nan
costs[np.isnan(stories).flatten()] = np.nan
return costs.flatten() | [
"def",
"_building_cost",
"(",
"self",
",",
"use_mix",
",",
"stories",
")",
":",
"c",
"=",
"self",
".",
"config",
"# stories to heights",
"heights",
"=",
"stories",
"*",
"c",
".",
"height_per_story",
"# cost index for this height",
"costs",
"=",
"np",
".",
"searchsorted",
"(",
"c",
".",
"heights_for_costs",
",",
"heights",
")",
"# this will get set to nan later",
"costs",
"[",
"np",
".",
"isnan",
"(",
"heights",
")",
"]",
"=",
"0",
"# compute cost with matrix multiply",
"costs",
"=",
"np",
".",
"dot",
"(",
"np",
".",
"squeeze",
"(",
"c",
".",
"costs",
"[",
"costs",
".",
"astype",
"(",
"'int32'",
")",
"]",
")",
",",
"use_mix",
")",
"# some heights aren't allowed - cost should be nan",
"costs",
"[",
"np",
".",
"isnan",
"(",
"stories",
")",
".",
"flatten",
"(",
")",
"]",
"=",
"np",
".",
"nan",
"return",
"costs",
".",
"flatten",
"(",
")"
] | Generate building cost for a set of buildings
Parameters
----------
use_mix : array
The mix of uses for this form
stories : series
A Pandas Series of stories
Returns
-------
array
The cost per sqft for this unit mix and height. | [
"Generate",
"building",
"cost",
"for",
"a",
"set",
"of",
"buildings"
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/sqftproforma.py#L279-L307 | train | 235,831 |
UDST/urbansim | urbansim/developer/sqftproforma.py | SqFtProForma._generate_lookup | def _generate_lookup(self):
"""
Run the developer model on all possible inputs specified in the
configuration object - not generally called by the user. This part
computes the final cost per sqft of the building to construct and
then turns it into the yearly rent necessary to make break even on
that cost.
"""
c = self.config
# get all the building forms we can use
keys = c.forms.keys()
keys = sorted(keys)
df_d = {}
for name in keys:
# get the use distribution for each
uses_distrib = c.forms[name]
for parking_config in c.parking_configs:
# going to make a dataframe to store values to make
# pro forma results transparent
df = pd.DataFrame(index=c.fars)
df['far'] = c.fars
df['pclsz'] = c.tiled_parcel_sizes
building_bulk = np.reshape(
c.parcel_sizes, (-1, 1)) * np.reshape(c.fars, (1, -1))
building_bulk = np.reshape(building_bulk, (-1, 1))
# need to converge in on exactly how much far is available for
# deck pkg
if parking_config == 'deck':
building_bulk /= (1.0 + np.sum(uses_distrib * c.parking_rates) *
c.parking_sqft_d[parking_config] /
c.sqft_per_rate)
df['building_sqft'] = building_bulk
parkingstalls = building_bulk * \
np.sum(uses_distrib * c.parking_rates) / c.sqft_per_rate
parking_cost = (c.parking_cost_d[parking_config] *
parkingstalls *
c.parking_sqft_d[parking_config])
df['spaces'] = parkingstalls
if parking_config == 'underground':
df['park_sqft'] = parkingstalls * \
c.parking_sqft_d[parking_config]
stories = building_bulk / c.tiled_parcel_sizes
if parking_config == 'deck':
df['park_sqft'] = parkingstalls * \
c.parking_sqft_d[parking_config]
stories = ((building_bulk + parkingstalls *
c.parking_sqft_d[parking_config]) /
c.tiled_parcel_sizes)
if parking_config == 'surface':
stories = building_bulk / \
(c.tiled_parcel_sizes - parkingstalls *
c.parking_sqft_d[parking_config])
df['park_sqft'] = 0
# not all fars support surface parking
stories[stories < 0.0] = np.nan
# I think we can assume that stories over 3
# do not work with surface parking
stories[stories > 5.0] = np.nan
df['total_built_sqft'] = df.building_sqft + df.park_sqft
df['parking_sqft_ratio'] = df.park_sqft / df.total_built_sqft
stories /= c.parcel_coverage
df['stories'] = np.ceil(stories)
df['height'] = df.stories * c.height_per_story
df['build_cost_sqft'] = self._building_cost(uses_distrib, stories)
df['build_cost'] = df.build_cost_sqft * df.building_sqft
df['park_cost'] = parking_cost
df['cost'] = df.build_cost + df.park_cost
df['ave_cost_sqft'] = (df.cost / df.total_built_sqft) * c.profit_factor
if name == 'retail':
df['ave_cost_sqft'][c.fars > c.max_retail_height] = np.nan
if name == 'industrial':
df['ave_cost_sqft'][c.fars > c.max_industrial_height] = np.nan
df_d[(name, parking_config)] = df
self.dev_d = df_d | python | def _generate_lookup(self):
"""
Run the developer model on all possible inputs specified in the
configuration object - not generally called by the user. This part
computes the final cost per sqft of the building to construct and
then turns it into the yearly rent necessary to make break even on
that cost.
"""
c = self.config
# get all the building forms we can use
keys = c.forms.keys()
keys = sorted(keys)
df_d = {}
for name in keys:
# get the use distribution for each
uses_distrib = c.forms[name]
for parking_config in c.parking_configs:
# going to make a dataframe to store values to make
# pro forma results transparent
df = pd.DataFrame(index=c.fars)
df['far'] = c.fars
df['pclsz'] = c.tiled_parcel_sizes
building_bulk = np.reshape(
c.parcel_sizes, (-1, 1)) * np.reshape(c.fars, (1, -1))
building_bulk = np.reshape(building_bulk, (-1, 1))
# need to converge in on exactly how much far is available for
# deck pkg
if parking_config == 'deck':
building_bulk /= (1.0 + np.sum(uses_distrib * c.parking_rates) *
c.parking_sqft_d[parking_config] /
c.sqft_per_rate)
df['building_sqft'] = building_bulk
parkingstalls = building_bulk * \
np.sum(uses_distrib * c.parking_rates) / c.sqft_per_rate
parking_cost = (c.parking_cost_d[parking_config] *
parkingstalls *
c.parking_sqft_d[parking_config])
df['spaces'] = parkingstalls
if parking_config == 'underground':
df['park_sqft'] = parkingstalls * \
c.parking_sqft_d[parking_config]
stories = building_bulk / c.tiled_parcel_sizes
if parking_config == 'deck':
df['park_sqft'] = parkingstalls * \
c.parking_sqft_d[parking_config]
stories = ((building_bulk + parkingstalls *
c.parking_sqft_d[parking_config]) /
c.tiled_parcel_sizes)
if parking_config == 'surface':
stories = building_bulk / \
(c.tiled_parcel_sizes - parkingstalls *
c.parking_sqft_d[parking_config])
df['park_sqft'] = 0
# not all fars support surface parking
stories[stories < 0.0] = np.nan
# I think we can assume that stories over 3
# do not work with surface parking
stories[stories > 5.0] = np.nan
df['total_built_sqft'] = df.building_sqft + df.park_sqft
df['parking_sqft_ratio'] = df.park_sqft / df.total_built_sqft
stories /= c.parcel_coverage
df['stories'] = np.ceil(stories)
df['height'] = df.stories * c.height_per_story
df['build_cost_sqft'] = self._building_cost(uses_distrib, stories)
df['build_cost'] = df.build_cost_sqft * df.building_sqft
df['park_cost'] = parking_cost
df['cost'] = df.build_cost + df.park_cost
df['ave_cost_sqft'] = (df.cost / df.total_built_sqft) * c.profit_factor
if name == 'retail':
df['ave_cost_sqft'][c.fars > c.max_retail_height] = np.nan
if name == 'industrial':
df['ave_cost_sqft'][c.fars > c.max_industrial_height] = np.nan
df_d[(name, parking_config)] = df
self.dev_d = df_d | [
"def",
"_generate_lookup",
"(",
"self",
")",
":",
"c",
"=",
"self",
".",
"config",
"# get all the building forms we can use",
"keys",
"=",
"c",
".",
"forms",
".",
"keys",
"(",
")",
"keys",
"=",
"sorted",
"(",
"keys",
")",
"df_d",
"=",
"{",
"}",
"for",
"name",
"in",
"keys",
":",
"# get the use distribution for each",
"uses_distrib",
"=",
"c",
".",
"forms",
"[",
"name",
"]",
"for",
"parking_config",
"in",
"c",
".",
"parking_configs",
":",
"# going to make a dataframe to store values to make",
"# pro forma results transparent",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"index",
"=",
"c",
".",
"fars",
")",
"df",
"[",
"'far'",
"]",
"=",
"c",
".",
"fars",
"df",
"[",
"'pclsz'",
"]",
"=",
"c",
".",
"tiled_parcel_sizes",
"building_bulk",
"=",
"np",
".",
"reshape",
"(",
"c",
".",
"parcel_sizes",
",",
"(",
"-",
"1",
",",
"1",
")",
")",
"*",
"np",
".",
"reshape",
"(",
"c",
".",
"fars",
",",
"(",
"1",
",",
"-",
"1",
")",
")",
"building_bulk",
"=",
"np",
".",
"reshape",
"(",
"building_bulk",
",",
"(",
"-",
"1",
",",
"1",
")",
")",
"# need to converge in on exactly how much far is available for",
"# deck pkg",
"if",
"parking_config",
"==",
"'deck'",
":",
"building_bulk",
"/=",
"(",
"1.0",
"+",
"np",
".",
"sum",
"(",
"uses_distrib",
"*",
"c",
".",
"parking_rates",
")",
"*",
"c",
".",
"parking_sqft_d",
"[",
"parking_config",
"]",
"/",
"c",
".",
"sqft_per_rate",
")",
"df",
"[",
"'building_sqft'",
"]",
"=",
"building_bulk",
"parkingstalls",
"=",
"building_bulk",
"*",
"np",
".",
"sum",
"(",
"uses_distrib",
"*",
"c",
".",
"parking_rates",
")",
"/",
"c",
".",
"sqft_per_rate",
"parking_cost",
"=",
"(",
"c",
".",
"parking_cost_d",
"[",
"parking_config",
"]",
"*",
"parkingstalls",
"*",
"c",
".",
"parking_sqft_d",
"[",
"parking_config",
"]",
")",
"df",
"[",
"'spaces'",
"]",
"=",
"parkingstalls",
"if",
"parking_config",
"==",
"'underground'",
":",
"df",
"[",
"'park_sqft'",
"]",
"=",
"parkingstalls",
"*",
"c",
".",
"parking_sqft_d",
"[",
"parking_config",
"]",
"stories",
"=",
"building_bulk",
"/",
"c",
".",
"tiled_parcel_sizes",
"if",
"parking_config",
"==",
"'deck'",
":",
"df",
"[",
"'park_sqft'",
"]",
"=",
"parkingstalls",
"*",
"c",
".",
"parking_sqft_d",
"[",
"parking_config",
"]",
"stories",
"=",
"(",
"(",
"building_bulk",
"+",
"parkingstalls",
"*",
"c",
".",
"parking_sqft_d",
"[",
"parking_config",
"]",
")",
"/",
"c",
".",
"tiled_parcel_sizes",
")",
"if",
"parking_config",
"==",
"'surface'",
":",
"stories",
"=",
"building_bulk",
"/",
"(",
"c",
".",
"tiled_parcel_sizes",
"-",
"parkingstalls",
"*",
"c",
".",
"parking_sqft_d",
"[",
"parking_config",
"]",
")",
"df",
"[",
"'park_sqft'",
"]",
"=",
"0",
"# not all fars support surface parking",
"stories",
"[",
"stories",
"<",
"0.0",
"]",
"=",
"np",
".",
"nan",
"# I think we can assume that stories over 3",
"# do not work with surface parking",
"stories",
"[",
"stories",
">",
"5.0",
"]",
"=",
"np",
".",
"nan",
"df",
"[",
"'total_built_sqft'",
"]",
"=",
"df",
".",
"building_sqft",
"+",
"df",
".",
"park_sqft",
"df",
"[",
"'parking_sqft_ratio'",
"]",
"=",
"df",
".",
"park_sqft",
"/",
"df",
".",
"total_built_sqft",
"stories",
"/=",
"c",
".",
"parcel_coverage",
"df",
"[",
"'stories'",
"]",
"=",
"np",
".",
"ceil",
"(",
"stories",
")",
"df",
"[",
"'height'",
"]",
"=",
"df",
".",
"stories",
"*",
"c",
".",
"height_per_story",
"df",
"[",
"'build_cost_sqft'",
"]",
"=",
"self",
".",
"_building_cost",
"(",
"uses_distrib",
",",
"stories",
")",
"df",
"[",
"'build_cost'",
"]",
"=",
"df",
".",
"build_cost_sqft",
"*",
"df",
".",
"building_sqft",
"df",
"[",
"'park_cost'",
"]",
"=",
"parking_cost",
"df",
"[",
"'cost'",
"]",
"=",
"df",
".",
"build_cost",
"+",
"df",
".",
"park_cost",
"df",
"[",
"'ave_cost_sqft'",
"]",
"=",
"(",
"df",
".",
"cost",
"/",
"df",
".",
"total_built_sqft",
")",
"*",
"c",
".",
"profit_factor",
"if",
"name",
"==",
"'retail'",
":",
"df",
"[",
"'ave_cost_sqft'",
"]",
"[",
"c",
".",
"fars",
">",
"c",
".",
"max_retail_height",
"]",
"=",
"np",
".",
"nan",
"if",
"name",
"==",
"'industrial'",
":",
"df",
"[",
"'ave_cost_sqft'",
"]",
"[",
"c",
".",
"fars",
">",
"c",
".",
"max_industrial_height",
"]",
"=",
"np",
".",
"nan",
"df_d",
"[",
"(",
"name",
",",
"parking_config",
")",
"]",
"=",
"df",
"self",
".",
"dev_d",
"=",
"df_d"
] | Run the developer model on all possible inputs specified in the
configuration object - not generally called by the user. This part
computes the final cost per sqft of the building to construct and
then turns it into the yearly rent necessary to make break even on
that cost. | [
"Run",
"the",
"developer",
"model",
"on",
"all",
"possible",
"inputs",
"specified",
"in",
"the",
"configuration",
"object",
"-",
"not",
"generally",
"called",
"by",
"the",
"user",
".",
"This",
"part",
"computes",
"the",
"final",
"cost",
"per",
"sqft",
"of",
"the",
"building",
"to",
"construct",
"and",
"then",
"turns",
"it",
"into",
"the",
"yearly",
"rent",
"necessary",
"to",
"make",
"break",
"even",
"on",
"that",
"cost",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/sqftproforma.py#L309-L398 | train | 235,832 |
UDST/urbansim | urbansim/developer/sqftproforma.py | SqFtProForma.lookup | def lookup(self, form, df, only_built=True, pass_through=None):
"""
This function does the developer model lookups for all the actual input data.
Parameters
----------
form : string
One of the forms specified in the configuration file
df: dataframe
Pass in a single data frame which is indexed by parcel_id and has the
following columns
only_built : bool
Whether to return only those buildings that are profitable and allowed
by zoning, or whether to return as much information as possible, even if
unlikely to be built (can be used when development might be subsidized
or when debugging)
pass_through : list of strings
List of field names to take from the input parcel frame and pass
to the output feasibility frame - is usually used for debugging
purposes - these fields will be passed all the way through
developer
Input Dataframe Columns
rent : dataframe
A set of columns, one for each of the uses passed in the configuration.
Values are yearly rents for that use. Typical column names would be
"residential", "retail", "industrial" and "office"
land_cost : series
A series representing the CURRENT yearly rent for each parcel. Used to
compute acquisition costs for the parcel.
parcel_size : series
A series representing the parcel size for each parcel.
max_far : series
A series representing the maximum far allowed by zoning. Buildings
will not be built above these fars.
max_height : series
A series representing the maxmium height allowed by zoning. Buildings
will not be built above these heights. Will pick between the min of
the far and height, will ignore on of them if one is nan, but will not
build if both are nan.
max_dua : series, optional
A series representing the maximum dwelling units per acre allowed by
zoning. If max_dua is passed, the average unit size should be passed
below to translate from dua to floor space.
ave_unit_size : series, optional
This is required if max_dua is passed above, otherwise it is optional.
This is the same as the parameter to Developer.pick() (it should be the
same series).
Returns
-------
index : Series, int
parcel identifiers
building_sqft : Series, float
The number of square feet for the building to build. Keep in mind
this includes parking and common space. Will need a helpful function
to convert from gross square feet to actual usable square feet in
residential units.
building_cost : Series, float
The cost of constructing the building as given by the
ave_cost_per_sqft from the cost model (for this FAR) and the number
of square feet.
total_cost : Series, float
The cost of constructing the building plus the cost of acquisition of
the current parcel/building.
building_revenue : Series, float
The NPV of the revenue for the building to be built, which is the
number of square feet times the yearly rent divided by the cap
rate (with a few adjustment factors including building efficiency).
max_profit_far : Series, float
The FAR of the maximum profit building (constrained by the max_far and
max_height from the input dataframe).
max_profit :
The profit for the maximum profit building (constrained by the max_far
and max_height from the input dataframe).
"""
df = pd.concat(self._lookup_parking_cfg(form, parking_config, df, only_built,
pass_through)
for parking_config in self.config.parking_configs)
if len(df) == 0:
return pd.DataFrame()
max_profit_ind = df.pivot(
columns="parking_config",
values="max_profit").idxmax(axis=1).to_frame("parking_config")
df.set_index(["parking_config"], append=True, inplace=True)
max_profit_ind.set_index(["parking_config"], append=True, inplace=True)
# get the max_profit idx
return df.loc[max_profit_ind.index].reset_index(1) | python | def lookup(self, form, df, only_built=True, pass_through=None):
"""
This function does the developer model lookups for all the actual input data.
Parameters
----------
form : string
One of the forms specified in the configuration file
df: dataframe
Pass in a single data frame which is indexed by parcel_id and has the
following columns
only_built : bool
Whether to return only those buildings that are profitable and allowed
by zoning, or whether to return as much information as possible, even if
unlikely to be built (can be used when development might be subsidized
or when debugging)
pass_through : list of strings
List of field names to take from the input parcel frame and pass
to the output feasibility frame - is usually used for debugging
purposes - these fields will be passed all the way through
developer
Input Dataframe Columns
rent : dataframe
A set of columns, one for each of the uses passed in the configuration.
Values are yearly rents for that use. Typical column names would be
"residential", "retail", "industrial" and "office"
land_cost : series
A series representing the CURRENT yearly rent for each parcel. Used to
compute acquisition costs for the parcel.
parcel_size : series
A series representing the parcel size for each parcel.
max_far : series
A series representing the maximum far allowed by zoning. Buildings
will not be built above these fars.
max_height : series
A series representing the maxmium height allowed by zoning. Buildings
will not be built above these heights. Will pick between the min of
the far and height, will ignore on of them if one is nan, but will not
build if both are nan.
max_dua : series, optional
A series representing the maximum dwelling units per acre allowed by
zoning. If max_dua is passed, the average unit size should be passed
below to translate from dua to floor space.
ave_unit_size : series, optional
This is required if max_dua is passed above, otherwise it is optional.
This is the same as the parameter to Developer.pick() (it should be the
same series).
Returns
-------
index : Series, int
parcel identifiers
building_sqft : Series, float
The number of square feet for the building to build. Keep in mind
this includes parking and common space. Will need a helpful function
to convert from gross square feet to actual usable square feet in
residential units.
building_cost : Series, float
The cost of constructing the building as given by the
ave_cost_per_sqft from the cost model (for this FAR) and the number
of square feet.
total_cost : Series, float
The cost of constructing the building plus the cost of acquisition of
the current parcel/building.
building_revenue : Series, float
The NPV of the revenue for the building to be built, which is the
number of square feet times the yearly rent divided by the cap
rate (with a few adjustment factors including building efficiency).
max_profit_far : Series, float
The FAR of the maximum profit building (constrained by the max_far and
max_height from the input dataframe).
max_profit :
The profit for the maximum profit building (constrained by the max_far
and max_height from the input dataframe).
"""
df = pd.concat(self._lookup_parking_cfg(form, parking_config, df, only_built,
pass_through)
for parking_config in self.config.parking_configs)
if len(df) == 0:
return pd.DataFrame()
max_profit_ind = df.pivot(
columns="parking_config",
values="max_profit").idxmax(axis=1).to_frame("parking_config")
df.set_index(["parking_config"], append=True, inplace=True)
max_profit_ind.set_index(["parking_config"], append=True, inplace=True)
# get the max_profit idx
return df.loc[max_profit_ind.index].reset_index(1) | [
"def",
"lookup",
"(",
"self",
",",
"form",
",",
"df",
",",
"only_built",
"=",
"True",
",",
"pass_through",
"=",
"None",
")",
":",
"df",
"=",
"pd",
".",
"concat",
"(",
"self",
".",
"_lookup_parking_cfg",
"(",
"form",
",",
"parking_config",
",",
"df",
",",
"only_built",
",",
"pass_through",
")",
"for",
"parking_config",
"in",
"self",
".",
"config",
".",
"parking_configs",
")",
"if",
"len",
"(",
"df",
")",
"==",
"0",
":",
"return",
"pd",
".",
"DataFrame",
"(",
")",
"max_profit_ind",
"=",
"df",
".",
"pivot",
"(",
"columns",
"=",
"\"parking_config\"",
",",
"values",
"=",
"\"max_profit\"",
")",
".",
"idxmax",
"(",
"axis",
"=",
"1",
")",
".",
"to_frame",
"(",
"\"parking_config\"",
")",
"df",
".",
"set_index",
"(",
"[",
"\"parking_config\"",
"]",
",",
"append",
"=",
"True",
",",
"inplace",
"=",
"True",
")",
"max_profit_ind",
".",
"set_index",
"(",
"[",
"\"parking_config\"",
"]",
",",
"append",
"=",
"True",
",",
"inplace",
"=",
"True",
")",
"# get the max_profit idx",
"return",
"df",
".",
"loc",
"[",
"max_profit_ind",
".",
"index",
"]",
".",
"reset_index",
"(",
"1",
")"
] | This function does the developer model lookups for all the actual input data.
Parameters
----------
form : string
One of the forms specified in the configuration file
df: dataframe
Pass in a single data frame which is indexed by parcel_id and has the
following columns
only_built : bool
Whether to return only those buildings that are profitable and allowed
by zoning, or whether to return as much information as possible, even if
unlikely to be built (can be used when development might be subsidized
or when debugging)
pass_through : list of strings
List of field names to take from the input parcel frame and pass
to the output feasibility frame - is usually used for debugging
purposes - these fields will be passed all the way through
developer
Input Dataframe Columns
rent : dataframe
A set of columns, one for each of the uses passed in the configuration.
Values are yearly rents for that use. Typical column names would be
"residential", "retail", "industrial" and "office"
land_cost : series
A series representing the CURRENT yearly rent for each parcel. Used to
compute acquisition costs for the parcel.
parcel_size : series
A series representing the parcel size for each parcel.
max_far : series
A series representing the maximum far allowed by zoning. Buildings
will not be built above these fars.
max_height : series
A series representing the maxmium height allowed by zoning. Buildings
will not be built above these heights. Will pick between the min of
the far and height, will ignore on of them if one is nan, but will not
build if both are nan.
max_dua : series, optional
A series representing the maximum dwelling units per acre allowed by
zoning. If max_dua is passed, the average unit size should be passed
below to translate from dua to floor space.
ave_unit_size : series, optional
This is required if max_dua is passed above, otherwise it is optional.
This is the same as the parameter to Developer.pick() (it should be the
same series).
Returns
-------
index : Series, int
parcel identifiers
building_sqft : Series, float
The number of square feet for the building to build. Keep in mind
this includes parking and common space. Will need a helpful function
to convert from gross square feet to actual usable square feet in
residential units.
building_cost : Series, float
The cost of constructing the building as given by the
ave_cost_per_sqft from the cost model (for this FAR) and the number
of square feet.
total_cost : Series, float
The cost of constructing the building plus the cost of acquisition of
the current parcel/building.
building_revenue : Series, float
The NPV of the revenue for the building to be built, which is the
number of square feet times the yearly rent divided by the cap
rate (with a few adjustment factors including building efficiency).
max_profit_far : Series, float
The FAR of the maximum profit building (constrained by the max_far and
max_height from the input dataframe).
max_profit :
The profit for the maximum profit building (constrained by the max_far
and max_height from the input dataframe). | [
"This",
"function",
"does",
"the",
"developer",
"model",
"lookups",
"for",
"all",
"the",
"actual",
"input",
"data",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/sqftproforma.py#L445-L537 | train | 235,833 |
UDST/urbansim | urbansim/developer/sqftproforma.py | SqFtProForma._debug_output | def _debug_output(self):
"""
this code creates the debugging plots to understand
the behavior of the hypothetical building model
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
c = self.config
df_d = self.dev_d
keys = df_d.keys()
keys = sorted(keys)
for key in keys:
logger.debug("\n" + str(key) + "\n")
logger.debug(df_d[key])
for form in self.config.forms:
logger.debug("\n" + str(key) + "\n")
logger.debug(self.get_ave_cost_sqft(form, "surface"))
keys = c.forms.keys()
keys = sorted(keys)
cnt = 1
share = None
fig = plt.figure(figsize=(12, 3 * len(keys)))
fig.suptitle('Profitable rents by use', fontsize=40)
for name in keys:
sumdf = None
for parking_config in c.parking_configs:
df = df_d[(name, parking_config)]
if sumdf is None:
sumdf = pd.DataFrame(df['far'])
sumdf[parking_config] = df['ave_cost_sqft']
far = sumdf['far']
del sumdf['far']
if share is None:
share = plt.subplot(len(keys) / 2, 2, cnt)
else:
plt.subplot(len(keys) / 2, 2, cnt, sharex=share, sharey=share)
handles = plt.plot(far, sumdf)
plt.ylabel('even_rent')
plt.xlabel('FAR')
plt.title('Rents for use type %s' % name)
plt.legend(
handles, c.parking_configs, loc='lower right',
title='Parking type')
cnt += 1
plt.savefig('even_rents.png', bbox_inches=0) | python | def _debug_output(self):
"""
this code creates the debugging plots to understand
the behavior of the hypothetical building model
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
c = self.config
df_d = self.dev_d
keys = df_d.keys()
keys = sorted(keys)
for key in keys:
logger.debug("\n" + str(key) + "\n")
logger.debug(df_d[key])
for form in self.config.forms:
logger.debug("\n" + str(key) + "\n")
logger.debug(self.get_ave_cost_sqft(form, "surface"))
keys = c.forms.keys()
keys = sorted(keys)
cnt = 1
share = None
fig = plt.figure(figsize=(12, 3 * len(keys)))
fig.suptitle('Profitable rents by use', fontsize=40)
for name in keys:
sumdf = None
for parking_config in c.parking_configs:
df = df_d[(name, parking_config)]
if sumdf is None:
sumdf = pd.DataFrame(df['far'])
sumdf[parking_config] = df['ave_cost_sqft']
far = sumdf['far']
del sumdf['far']
if share is None:
share = plt.subplot(len(keys) / 2, 2, cnt)
else:
plt.subplot(len(keys) / 2, 2, cnt, sharex=share, sharey=share)
handles = plt.plot(far, sumdf)
plt.ylabel('even_rent')
plt.xlabel('FAR')
plt.title('Rents for use type %s' % name)
plt.legend(
handles, c.parking_configs, loc='lower right',
title='Parking type')
cnt += 1
plt.savefig('even_rents.png', bbox_inches=0) | [
"def",
"_debug_output",
"(",
"self",
")",
":",
"import",
"matplotlib",
"matplotlib",
".",
"use",
"(",
"'Agg'",
")",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"c",
"=",
"self",
".",
"config",
"df_d",
"=",
"self",
".",
"dev_d",
"keys",
"=",
"df_d",
".",
"keys",
"(",
")",
"keys",
"=",
"sorted",
"(",
"keys",
")",
"for",
"key",
"in",
"keys",
":",
"logger",
".",
"debug",
"(",
"\"\\n\"",
"+",
"str",
"(",
"key",
")",
"+",
"\"\\n\"",
")",
"logger",
".",
"debug",
"(",
"df_d",
"[",
"key",
"]",
")",
"for",
"form",
"in",
"self",
".",
"config",
".",
"forms",
":",
"logger",
".",
"debug",
"(",
"\"\\n\"",
"+",
"str",
"(",
"key",
")",
"+",
"\"\\n\"",
")",
"logger",
".",
"debug",
"(",
"self",
".",
"get_ave_cost_sqft",
"(",
"form",
",",
"\"surface\"",
")",
")",
"keys",
"=",
"c",
".",
"forms",
".",
"keys",
"(",
")",
"keys",
"=",
"sorted",
"(",
"keys",
")",
"cnt",
"=",
"1",
"share",
"=",
"None",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"12",
",",
"3",
"*",
"len",
"(",
"keys",
")",
")",
")",
"fig",
".",
"suptitle",
"(",
"'Profitable rents by use'",
",",
"fontsize",
"=",
"40",
")",
"for",
"name",
"in",
"keys",
":",
"sumdf",
"=",
"None",
"for",
"parking_config",
"in",
"c",
".",
"parking_configs",
":",
"df",
"=",
"df_d",
"[",
"(",
"name",
",",
"parking_config",
")",
"]",
"if",
"sumdf",
"is",
"None",
":",
"sumdf",
"=",
"pd",
".",
"DataFrame",
"(",
"df",
"[",
"'far'",
"]",
")",
"sumdf",
"[",
"parking_config",
"]",
"=",
"df",
"[",
"'ave_cost_sqft'",
"]",
"far",
"=",
"sumdf",
"[",
"'far'",
"]",
"del",
"sumdf",
"[",
"'far'",
"]",
"if",
"share",
"is",
"None",
":",
"share",
"=",
"plt",
".",
"subplot",
"(",
"len",
"(",
"keys",
")",
"/",
"2",
",",
"2",
",",
"cnt",
")",
"else",
":",
"plt",
".",
"subplot",
"(",
"len",
"(",
"keys",
")",
"/",
"2",
",",
"2",
",",
"cnt",
",",
"sharex",
"=",
"share",
",",
"sharey",
"=",
"share",
")",
"handles",
"=",
"plt",
".",
"plot",
"(",
"far",
",",
"sumdf",
")",
"plt",
".",
"ylabel",
"(",
"'even_rent'",
")",
"plt",
".",
"xlabel",
"(",
"'FAR'",
")",
"plt",
".",
"title",
"(",
"'Rents for use type %s'",
"%",
"name",
")",
"plt",
".",
"legend",
"(",
"handles",
",",
"c",
".",
"parking_configs",
",",
"loc",
"=",
"'lower right'",
",",
"title",
"=",
"'Parking type'",
")",
"cnt",
"+=",
"1",
"plt",
".",
"savefig",
"(",
"'even_rents.png'",
",",
"bbox_inches",
"=",
"0",
")"
] | this code creates the debugging plots to understand
the behavior of the hypothetical building model | [
"this",
"code",
"creates",
"the",
"debugging",
"plots",
"to",
"understand",
"the",
"behavior",
"of",
"the",
"hypothetical",
"building",
"model"
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/sqftproforma.py#L666-L716 | train | 235,834 |
UDST/urbansim | urbansim/models/transition.py | add_rows | def add_rows(data, nrows, starting_index=None, accounting_column=None):
"""
Add rows to data table according to a given nrows.
New rows will have their IDs set to NaN.
Parameters
----------
data : pandas.DataFrame
nrows : int
Number of rows to add.
starting_index : int, optional
The starting index from which to calculate indexes for the new
rows. If not given the max + 1 of the index of `data` will be used.
accounting_column: string, optional
Name of column with accounting totals/quanties to apply towards the control. If not provided
then row counts will be used for accounting.
Returns
-------
updated : pandas.DataFrame
Table with rows added. New rows will have their index values
set to NaN.
added : pandas.Index
New indexes of the rows that were added.
copied : pandas.Index
Indexes of rows that were copied. A row copied multiple times
will have multiple entries.
"""
logger.debug('start: adding {} rows in transition model'.format(nrows))
if nrows == 0:
return data, _empty_index(), _empty_index()
if not starting_index:
starting_index = data.index.values.max() + 1
new_rows = sample_rows(nrows, data, accounting_column=accounting_column)
copied_index = new_rows.index
added_index = pd.Index(np.arange(
starting_index, starting_index + len(new_rows.index), dtype=np.int))
new_rows.index = added_index
logger.debug(
'finish: added {} rows in transition model'.format(len(new_rows)))
return pd.concat([data, new_rows]), added_index, copied_index | python | def add_rows(data, nrows, starting_index=None, accounting_column=None):
"""
Add rows to data table according to a given nrows.
New rows will have their IDs set to NaN.
Parameters
----------
data : pandas.DataFrame
nrows : int
Number of rows to add.
starting_index : int, optional
The starting index from which to calculate indexes for the new
rows. If not given the max + 1 of the index of `data` will be used.
accounting_column: string, optional
Name of column with accounting totals/quanties to apply towards the control. If not provided
then row counts will be used for accounting.
Returns
-------
updated : pandas.DataFrame
Table with rows added. New rows will have their index values
set to NaN.
added : pandas.Index
New indexes of the rows that were added.
copied : pandas.Index
Indexes of rows that were copied. A row copied multiple times
will have multiple entries.
"""
logger.debug('start: adding {} rows in transition model'.format(nrows))
if nrows == 0:
return data, _empty_index(), _empty_index()
if not starting_index:
starting_index = data.index.values.max() + 1
new_rows = sample_rows(nrows, data, accounting_column=accounting_column)
copied_index = new_rows.index
added_index = pd.Index(np.arange(
starting_index, starting_index + len(new_rows.index), dtype=np.int))
new_rows.index = added_index
logger.debug(
'finish: added {} rows in transition model'.format(len(new_rows)))
return pd.concat([data, new_rows]), added_index, copied_index | [
"def",
"add_rows",
"(",
"data",
",",
"nrows",
",",
"starting_index",
"=",
"None",
",",
"accounting_column",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'start: adding {} rows in transition model'",
".",
"format",
"(",
"nrows",
")",
")",
"if",
"nrows",
"==",
"0",
":",
"return",
"data",
",",
"_empty_index",
"(",
")",
",",
"_empty_index",
"(",
")",
"if",
"not",
"starting_index",
":",
"starting_index",
"=",
"data",
".",
"index",
".",
"values",
".",
"max",
"(",
")",
"+",
"1",
"new_rows",
"=",
"sample_rows",
"(",
"nrows",
",",
"data",
",",
"accounting_column",
"=",
"accounting_column",
")",
"copied_index",
"=",
"new_rows",
".",
"index",
"added_index",
"=",
"pd",
".",
"Index",
"(",
"np",
".",
"arange",
"(",
"starting_index",
",",
"starting_index",
"+",
"len",
"(",
"new_rows",
".",
"index",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
")",
"new_rows",
".",
"index",
"=",
"added_index",
"logger",
".",
"debug",
"(",
"'finish: added {} rows in transition model'",
".",
"format",
"(",
"len",
"(",
"new_rows",
")",
")",
")",
"return",
"pd",
".",
"concat",
"(",
"[",
"data",
",",
"new_rows",
"]",
")",
",",
"added_index",
",",
"copied_index"
] | Add rows to data table according to a given nrows.
New rows will have their IDs set to NaN.
Parameters
----------
data : pandas.DataFrame
nrows : int
Number of rows to add.
starting_index : int, optional
The starting index from which to calculate indexes for the new
rows. If not given the max + 1 of the index of `data` will be used.
accounting_column: string, optional
Name of column with accounting totals/quanties to apply towards the control. If not provided
then row counts will be used for accounting.
Returns
-------
updated : pandas.DataFrame
Table with rows added. New rows will have their index values
set to NaN.
added : pandas.Index
New indexes of the rows that were added.
copied : pandas.Index
Indexes of rows that were copied. A row copied multiple times
will have multiple entries. | [
"Add",
"rows",
"to",
"data",
"table",
"according",
"to",
"a",
"given",
"nrows",
".",
"New",
"rows",
"will",
"have",
"their",
"IDs",
"set",
"to",
"NaN",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L24-L68 | train | 235,835 |
UDST/urbansim | urbansim/models/transition.py | remove_rows | def remove_rows(data, nrows, accounting_column=None):
"""
Remove a random `nrows` number of rows from a table.
Parameters
----------
data : DataFrame
nrows : float
Number of rows to remove.
accounting_column: string, optional
Name of column with accounting totals/quanties to apply towards the control. If not provided
then row counts will be used for accounting.
Returns
-------
updated : pandas.DataFrame
Table with random rows removed.
removed : pandas.Index
Indexes of the rows removed from the table.
"""
logger.debug('start: removing {} rows in transition model'.format(nrows))
nrows = abs(nrows) # in case a negative number came in
unit_check = data[accounting_column].sum() if accounting_column else len(data)
if nrows == 0:
return data, _empty_index()
elif nrows > unit_check:
raise ValueError('Number of rows to remove exceeds number of records in table.')
remove_rows = sample_rows(nrows, data, accounting_column=accounting_column, replace=False)
remove_index = remove_rows.index
logger.debug('finish: removed {} rows in transition model'.format(nrows))
return data.loc[data.index.difference(remove_index)], remove_index | python | def remove_rows(data, nrows, accounting_column=None):
"""
Remove a random `nrows` number of rows from a table.
Parameters
----------
data : DataFrame
nrows : float
Number of rows to remove.
accounting_column: string, optional
Name of column with accounting totals/quanties to apply towards the control. If not provided
then row counts will be used for accounting.
Returns
-------
updated : pandas.DataFrame
Table with random rows removed.
removed : pandas.Index
Indexes of the rows removed from the table.
"""
logger.debug('start: removing {} rows in transition model'.format(nrows))
nrows = abs(nrows) # in case a negative number came in
unit_check = data[accounting_column].sum() if accounting_column else len(data)
if nrows == 0:
return data, _empty_index()
elif nrows > unit_check:
raise ValueError('Number of rows to remove exceeds number of records in table.')
remove_rows = sample_rows(nrows, data, accounting_column=accounting_column, replace=False)
remove_index = remove_rows.index
logger.debug('finish: removed {} rows in transition model'.format(nrows))
return data.loc[data.index.difference(remove_index)], remove_index | [
"def",
"remove_rows",
"(",
"data",
",",
"nrows",
",",
"accounting_column",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'start: removing {} rows in transition model'",
".",
"format",
"(",
"nrows",
")",
")",
"nrows",
"=",
"abs",
"(",
"nrows",
")",
"# in case a negative number came in",
"unit_check",
"=",
"data",
"[",
"accounting_column",
"]",
".",
"sum",
"(",
")",
"if",
"accounting_column",
"else",
"len",
"(",
"data",
")",
"if",
"nrows",
"==",
"0",
":",
"return",
"data",
",",
"_empty_index",
"(",
")",
"elif",
"nrows",
">",
"unit_check",
":",
"raise",
"ValueError",
"(",
"'Number of rows to remove exceeds number of records in table.'",
")",
"remove_rows",
"=",
"sample_rows",
"(",
"nrows",
",",
"data",
",",
"accounting_column",
"=",
"accounting_column",
",",
"replace",
"=",
"False",
")",
"remove_index",
"=",
"remove_rows",
".",
"index",
"logger",
".",
"debug",
"(",
"'finish: removed {} rows in transition model'",
".",
"format",
"(",
"nrows",
")",
")",
"return",
"data",
".",
"loc",
"[",
"data",
".",
"index",
".",
"difference",
"(",
"remove_index",
")",
"]",
",",
"remove_index"
] | Remove a random `nrows` number of rows from a table.
Parameters
----------
data : DataFrame
nrows : float
Number of rows to remove.
accounting_column: string, optional
Name of column with accounting totals/quanties to apply towards the control. If not provided
then row counts will be used for accounting.
Returns
-------
updated : pandas.DataFrame
Table with random rows removed.
removed : pandas.Index
Indexes of the rows removed from the table. | [
"Remove",
"a",
"random",
"nrows",
"number",
"of",
"rows",
"from",
"a",
"table",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L71-L104 | train | 235,836 |
UDST/urbansim | urbansim/models/transition.py | _update_linked_table | def _update_linked_table(table, col_name, added, copied, removed):
"""
Copy and update rows in a table that has a column referencing another
table that has had rows added via copying.
Parameters
----------
table : pandas.DataFrame
Table to update with new or removed rows.
col_name : str
Name of column in `table` that corresponds to the index values
in `copied` and `removed`.
added : pandas.Index
Indexes of rows that are new in the linked table.
copied : pandas.Index
Indexes of rows that were copied to make new rows in linked table.
removed : pandas.Index
Indexes of rows that were removed from the linked table.
Returns
-------
updated : pandas.DataFrame
"""
logger.debug('start: update linked table after transition')
# handle removals
table = table.loc[~table[col_name].isin(set(removed))]
if (added is None or len(added) == 0):
return table
# map new IDs to the IDs from which they were copied
id_map = pd.concat([pd.Series(copied, name=col_name), pd.Series(added, name='temp_id')], axis=1)
# join to linked table and assign new id
new_rows = id_map.merge(table, on=col_name)
new_rows.drop(col_name, axis=1, inplace=True)
new_rows.rename(columns={'temp_id': col_name}, inplace=True)
# index the new rows
starting_index = table.index.values.max() + 1
new_rows.index = np.arange(starting_index, starting_index + len(new_rows), dtype=np.int)
logger.debug('finish: update linked table after transition')
return pd.concat([table, new_rows]) | python | def _update_linked_table(table, col_name, added, copied, removed):
"""
Copy and update rows in a table that has a column referencing another
table that has had rows added via copying.
Parameters
----------
table : pandas.DataFrame
Table to update with new or removed rows.
col_name : str
Name of column in `table` that corresponds to the index values
in `copied` and `removed`.
added : pandas.Index
Indexes of rows that are new in the linked table.
copied : pandas.Index
Indexes of rows that were copied to make new rows in linked table.
removed : pandas.Index
Indexes of rows that were removed from the linked table.
Returns
-------
updated : pandas.DataFrame
"""
logger.debug('start: update linked table after transition')
# handle removals
table = table.loc[~table[col_name].isin(set(removed))]
if (added is None or len(added) == 0):
return table
# map new IDs to the IDs from which they were copied
id_map = pd.concat([pd.Series(copied, name=col_name), pd.Series(added, name='temp_id')], axis=1)
# join to linked table and assign new id
new_rows = id_map.merge(table, on=col_name)
new_rows.drop(col_name, axis=1, inplace=True)
new_rows.rename(columns={'temp_id': col_name}, inplace=True)
# index the new rows
starting_index = table.index.values.max() + 1
new_rows.index = np.arange(starting_index, starting_index + len(new_rows), dtype=np.int)
logger.debug('finish: update linked table after transition')
return pd.concat([table, new_rows]) | [
"def",
"_update_linked_table",
"(",
"table",
",",
"col_name",
",",
"added",
",",
"copied",
",",
"removed",
")",
":",
"logger",
".",
"debug",
"(",
"'start: update linked table after transition'",
")",
"# handle removals",
"table",
"=",
"table",
".",
"loc",
"[",
"~",
"table",
"[",
"col_name",
"]",
".",
"isin",
"(",
"set",
"(",
"removed",
")",
")",
"]",
"if",
"(",
"added",
"is",
"None",
"or",
"len",
"(",
"added",
")",
"==",
"0",
")",
":",
"return",
"table",
"# map new IDs to the IDs from which they were copied",
"id_map",
"=",
"pd",
".",
"concat",
"(",
"[",
"pd",
".",
"Series",
"(",
"copied",
",",
"name",
"=",
"col_name",
")",
",",
"pd",
".",
"Series",
"(",
"added",
",",
"name",
"=",
"'temp_id'",
")",
"]",
",",
"axis",
"=",
"1",
")",
"# join to linked table and assign new id",
"new_rows",
"=",
"id_map",
".",
"merge",
"(",
"table",
",",
"on",
"=",
"col_name",
")",
"new_rows",
".",
"drop",
"(",
"col_name",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"new_rows",
".",
"rename",
"(",
"columns",
"=",
"{",
"'temp_id'",
":",
"col_name",
"}",
",",
"inplace",
"=",
"True",
")",
"# index the new rows",
"starting_index",
"=",
"table",
".",
"index",
".",
"values",
".",
"max",
"(",
")",
"+",
"1",
"new_rows",
".",
"index",
"=",
"np",
".",
"arange",
"(",
"starting_index",
",",
"starting_index",
"+",
"len",
"(",
"new_rows",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"logger",
".",
"debug",
"(",
"'finish: update linked table after transition'",
")",
"return",
"pd",
".",
"concat",
"(",
"[",
"table",
",",
"new_rows",
"]",
")"
] | Copy and update rows in a table that has a column referencing another
table that has had rows added via copying.
Parameters
----------
table : pandas.DataFrame
Table to update with new or removed rows.
col_name : str
Name of column in `table` that corresponds to the index values
in `copied` and `removed`.
added : pandas.Index
Indexes of rows that are new in the linked table.
copied : pandas.Index
Indexes of rows that were copied to make new rows in linked table.
removed : pandas.Index
Indexes of rows that were removed from the linked table.
Returns
-------
updated : pandas.DataFrame | [
"Copy",
"and",
"update",
"rows",
"in",
"a",
"table",
"that",
"has",
"a",
"column",
"referencing",
"another",
"table",
"that",
"has",
"had",
"rows",
"added",
"via",
"copying",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L424-L468 | train | 235,837 |
UDST/urbansim | urbansim/models/transition.py | TransitionModel.transition | def transition(self, data, year, linked_tables=None):
"""
Add or remove rows from a table based on population targets.
Parameters
----------
data : pandas.DataFrame
Rows will be removed from or added to this table.
year : int
Year number that will be passed to `transitioner`.
linked_tables : dict of tuple, optional
Dictionary of (table, 'column name') pairs. The column name
should match the index of `data`. Indexes in `data` that
are copied or removed will also be copied and removed in
linked tables. They dictionary keys are used in the
returned `updated_links`.
Returns
-------
updated : pandas.DataFrame
Table with rows removed or added.
added : pandas.Series
Indexes of new rows in `updated`.
updated_links : dict of pandas.DataFrame
"""
logger.debug('start: transition')
linked_tables = linked_tables or {}
updated_links = {}
with log_start_finish('add/remove rows', logger):
updated, added, copied, removed = self.transitioner(data, year)
for table_name, (table, col) in linked_tables.items():
logger.debug('updating linked table {}'.format(table_name))
updated_links[table_name] = \
_update_linked_table(table, col, added, copied, removed)
logger.debug('finish: transition')
return updated, added, updated_links | python | def transition(self, data, year, linked_tables=None):
"""
Add or remove rows from a table based on population targets.
Parameters
----------
data : pandas.DataFrame
Rows will be removed from or added to this table.
year : int
Year number that will be passed to `transitioner`.
linked_tables : dict of tuple, optional
Dictionary of (table, 'column name') pairs. The column name
should match the index of `data`. Indexes in `data` that
are copied or removed will also be copied and removed in
linked tables. They dictionary keys are used in the
returned `updated_links`.
Returns
-------
updated : pandas.DataFrame
Table with rows removed or added.
added : pandas.Series
Indexes of new rows in `updated`.
updated_links : dict of pandas.DataFrame
"""
logger.debug('start: transition')
linked_tables = linked_tables or {}
updated_links = {}
with log_start_finish('add/remove rows', logger):
updated, added, copied, removed = self.transitioner(data, year)
for table_name, (table, col) in linked_tables.items():
logger.debug('updating linked table {}'.format(table_name))
updated_links[table_name] = \
_update_linked_table(table, col, added, copied, removed)
logger.debug('finish: transition')
return updated, added, updated_links | [
"def",
"transition",
"(",
"self",
",",
"data",
",",
"year",
",",
"linked_tables",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'start: transition'",
")",
"linked_tables",
"=",
"linked_tables",
"or",
"{",
"}",
"updated_links",
"=",
"{",
"}",
"with",
"log_start_finish",
"(",
"'add/remove rows'",
",",
"logger",
")",
":",
"updated",
",",
"added",
",",
"copied",
",",
"removed",
"=",
"self",
".",
"transitioner",
"(",
"data",
",",
"year",
")",
"for",
"table_name",
",",
"(",
"table",
",",
"col",
")",
"in",
"linked_tables",
".",
"items",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"'updating linked table {}'",
".",
"format",
"(",
"table_name",
")",
")",
"updated_links",
"[",
"table_name",
"]",
"=",
"_update_linked_table",
"(",
"table",
",",
"col",
",",
"added",
",",
"copied",
",",
"removed",
")",
"logger",
".",
"debug",
"(",
"'finish: transition'",
")",
"return",
"updated",
",",
"added",
",",
"updated_links"
] | Add or remove rows from a table based on population targets.
Parameters
----------
data : pandas.DataFrame
Rows will be removed from or added to this table.
year : int
Year number that will be passed to `transitioner`.
linked_tables : dict of tuple, optional
Dictionary of (table, 'column name') pairs. The column name
should match the index of `data`. Indexes in `data` that
are copied or removed will also be copied and removed in
linked tables. They dictionary keys are used in the
returned `updated_links`.
Returns
-------
updated : pandas.DataFrame
Table with rows removed or added.
added : pandas.Series
Indexes of new rows in `updated`.
updated_links : dict of pandas.DataFrame | [
"Add",
"or",
"remove",
"rows",
"from",
"a",
"table",
"based",
"on",
"population",
"targets",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L486-L525 | train | 235,838 |
UDST/urbansim | urbansim/utils/yamlio.py | series_to_yaml_safe | def series_to_yaml_safe(series, ordered=False):
"""
Convert a pandas Series to a dict that will survive YAML serialization
and re-conversion back to a Series.
Parameters
----------
series : pandas.Series
ordered: bool, optional, default False
If True, an OrderedDict is returned.
Returns
-------
safe : dict or OrderedDict
"""
index = series.index.to_native_types(quoting=True)
values = series.values.tolist()
if ordered:
return OrderedDict(
tuple((k, v)) for k, v in zip(index, values))
else:
return {i: v for i, v in zip(index, values)} | python | def series_to_yaml_safe(series, ordered=False):
"""
Convert a pandas Series to a dict that will survive YAML serialization
and re-conversion back to a Series.
Parameters
----------
series : pandas.Series
ordered: bool, optional, default False
If True, an OrderedDict is returned.
Returns
-------
safe : dict or OrderedDict
"""
index = series.index.to_native_types(quoting=True)
values = series.values.tolist()
if ordered:
return OrderedDict(
tuple((k, v)) for k, v in zip(index, values))
else:
return {i: v for i, v in zip(index, values)} | [
"def",
"series_to_yaml_safe",
"(",
"series",
",",
"ordered",
"=",
"False",
")",
":",
"index",
"=",
"series",
".",
"index",
".",
"to_native_types",
"(",
"quoting",
"=",
"True",
")",
"values",
"=",
"series",
".",
"values",
".",
"tolist",
"(",
")",
"if",
"ordered",
":",
"return",
"OrderedDict",
"(",
"tuple",
"(",
"(",
"k",
",",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"index",
",",
"values",
")",
")",
"else",
":",
"return",
"{",
"i",
":",
"v",
"for",
"i",
",",
"v",
"in",
"zip",
"(",
"index",
",",
"values",
")",
"}"
] | Convert a pandas Series to a dict that will survive YAML serialization
and re-conversion back to a Series.
Parameters
----------
series : pandas.Series
ordered: bool, optional, default False
If True, an OrderedDict is returned.
Returns
-------
safe : dict or OrderedDict | [
"Convert",
"a",
"pandas",
"Series",
"to",
"a",
"dict",
"that",
"will",
"survive",
"YAML",
"serialization",
"and",
"re",
"-",
"conversion",
"back",
"to",
"a",
"Series",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/yamlio.py#L32-L55 | train | 235,839 |
UDST/urbansim | urbansim/utils/yamlio.py | frame_to_yaml_safe | def frame_to_yaml_safe(frame, ordered=False):
"""
Convert a pandas DataFrame to a dictionary that will survive
YAML serialization and re-conversion back to a DataFrame.
Parameters
----------
frame : pandas.DataFrame
ordered: bool, optional, default False
If True, an OrderedDict is returned.
Returns
-------
safe : dict or OrderedDict
"""
if ordered:
return OrderedDict(tuple((col, series_to_yaml_safe(series, True))
for col, series in frame.iteritems()))
else:
return {col: series_to_yaml_safe(series)
for col, series in frame.iteritems()} | python | def frame_to_yaml_safe(frame, ordered=False):
"""
Convert a pandas DataFrame to a dictionary that will survive
YAML serialization and re-conversion back to a DataFrame.
Parameters
----------
frame : pandas.DataFrame
ordered: bool, optional, default False
If True, an OrderedDict is returned.
Returns
-------
safe : dict or OrderedDict
"""
if ordered:
return OrderedDict(tuple((col, series_to_yaml_safe(series, True))
for col, series in frame.iteritems()))
else:
return {col: series_to_yaml_safe(series)
for col, series in frame.iteritems()} | [
"def",
"frame_to_yaml_safe",
"(",
"frame",
",",
"ordered",
"=",
"False",
")",
":",
"if",
"ordered",
":",
"return",
"OrderedDict",
"(",
"tuple",
"(",
"(",
"col",
",",
"series_to_yaml_safe",
"(",
"series",
",",
"True",
")",
")",
"for",
"col",
",",
"series",
"in",
"frame",
".",
"iteritems",
"(",
")",
")",
")",
"else",
":",
"return",
"{",
"col",
":",
"series_to_yaml_safe",
"(",
"series",
")",
"for",
"col",
",",
"series",
"in",
"frame",
".",
"iteritems",
"(",
")",
"}"
] | Convert a pandas DataFrame to a dictionary that will survive
YAML serialization and re-conversion back to a DataFrame.
Parameters
----------
frame : pandas.DataFrame
ordered: bool, optional, default False
If True, an OrderedDict is returned.
Returns
-------
safe : dict or OrderedDict | [
"Convert",
"a",
"pandas",
"DataFrame",
"to",
"a",
"dictionary",
"that",
"will",
"survive",
"YAML",
"serialization",
"and",
"re",
"-",
"conversion",
"back",
"to",
"a",
"DataFrame",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/yamlio.py#L58-L79 | train | 235,840 |
UDST/urbansim | urbansim/utils/yamlio.py | ordered_yaml | def ordered_yaml(cfg, order=None):
"""
Convert a dictionary to a YAML string with preferential ordering
for some keys. Converted string is meant to be fairly human readable.
Parameters
----------
cfg : dict
Dictionary to convert to a YAML string.
order: list
If provided, overrides the default key ordering.
Returns
-------
str
Nicely formatted YAML string.
"""
if order is None:
order = ['name', 'model_type', 'segmentation_col', 'fit_filters',
'predict_filters',
'choosers_fit_filters', 'choosers_predict_filters',
'alts_fit_filters', 'alts_predict_filters',
'interaction_predict_filters',
'choice_column', 'sample_size', 'estimation_sample_size',
'prediction_sample_size',
'model_expression', 'ytransform', 'min_segment_size',
'default_config', 'models', 'coefficients', 'fitted']
s = []
for key in order:
if key not in cfg:
continue
s.append(
yaml.dump({key: cfg[key]}, default_flow_style=False, indent=4))
for key in cfg:
if key in order:
continue
s.append(
yaml.dump({key: cfg[key]}, default_flow_style=False, indent=4))
return '\n'.join(s) | python | def ordered_yaml(cfg, order=None):
"""
Convert a dictionary to a YAML string with preferential ordering
for some keys. Converted string is meant to be fairly human readable.
Parameters
----------
cfg : dict
Dictionary to convert to a YAML string.
order: list
If provided, overrides the default key ordering.
Returns
-------
str
Nicely formatted YAML string.
"""
if order is None:
order = ['name', 'model_type', 'segmentation_col', 'fit_filters',
'predict_filters',
'choosers_fit_filters', 'choosers_predict_filters',
'alts_fit_filters', 'alts_predict_filters',
'interaction_predict_filters',
'choice_column', 'sample_size', 'estimation_sample_size',
'prediction_sample_size',
'model_expression', 'ytransform', 'min_segment_size',
'default_config', 'models', 'coefficients', 'fitted']
s = []
for key in order:
if key not in cfg:
continue
s.append(
yaml.dump({key: cfg[key]}, default_flow_style=False, indent=4))
for key in cfg:
if key in order:
continue
s.append(
yaml.dump({key: cfg[key]}, default_flow_style=False, indent=4))
return '\n'.join(s) | [
"def",
"ordered_yaml",
"(",
"cfg",
",",
"order",
"=",
"None",
")",
":",
"if",
"order",
"is",
"None",
":",
"order",
"=",
"[",
"'name'",
",",
"'model_type'",
",",
"'segmentation_col'",
",",
"'fit_filters'",
",",
"'predict_filters'",
",",
"'choosers_fit_filters'",
",",
"'choosers_predict_filters'",
",",
"'alts_fit_filters'",
",",
"'alts_predict_filters'",
",",
"'interaction_predict_filters'",
",",
"'choice_column'",
",",
"'sample_size'",
",",
"'estimation_sample_size'",
",",
"'prediction_sample_size'",
",",
"'model_expression'",
",",
"'ytransform'",
",",
"'min_segment_size'",
",",
"'default_config'",
",",
"'models'",
",",
"'coefficients'",
",",
"'fitted'",
"]",
"s",
"=",
"[",
"]",
"for",
"key",
"in",
"order",
":",
"if",
"key",
"not",
"in",
"cfg",
":",
"continue",
"s",
".",
"append",
"(",
"yaml",
".",
"dump",
"(",
"{",
"key",
":",
"cfg",
"[",
"key",
"]",
"}",
",",
"default_flow_style",
"=",
"False",
",",
"indent",
"=",
"4",
")",
")",
"for",
"key",
"in",
"cfg",
":",
"if",
"key",
"in",
"order",
":",
"continue",
"s",
".",
"append",
"(",
"yaml",
".",
"dump",
"(",
"{",
"key",
":",
"cfg",
"[",
"key",
"]",
"}",
",",
"default_flow_style",
"=",
"False",
",",
"indent",
"=",
"4",
")",
")",
"return",
"'\\n'",
".",
"join",
"(",
"s",
")"
] | Convert a dictionary to a YAML string with preferential ordering
for some keys. Converted string is meant to be fairly human readable.
Parameters
----------
cfg : dict
Dictionary to convert to a YAML string.
order: list
If provided, overrides the default key ordering.
Returns
-------
str
Nicely formatted YAML string. | [
"Convert",
"a",
"dictionary",
"to",
"a",
"YAML",
"string",
"with",
"preferential",
"ordering",
"for",
"some",
"keys",
".",
"Converted",
"string",
"is",
"meant",
"to",
"be",
"fairly",
"human",
"readable",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/yamlio.py#L92-L134 | train | 235,841 |
UDST/urbansim | urbansim/utils/yamlio.py | convert_to_yaml | def convert_to_yaml(cfg, str_or_buffer):
"""
Convert a dictionary to YAML and return the string or write it out
depending on the type of `str_or_buffer`.
Parameters
----------
cfg : dict or OrderedDict
Dictionary or OrderedDict to convert.
str_or_buffer : None, str, or buffer
If None: the YAML string will be returned.
If string: YAML will be saved to a file.
If buffer: YAML will be written to buffer using the ``.write`` method.
Returns
-------
str or None
YAML string if `str_or_buffer` is None, otherwise None since YAML
is written out to a separate destination.
"""
order = None
if isinstance(cfg, OrderedDict):
order = []
s = ordered_yaml(cfg, order)
if not str_or_buffer:
return s
elif isinstance(str_or_buffer, str):
with open(str_or_buffer, 'w') as f:
f.write(s)
else:
str_or_buffer.write(s) | python | def convert_to_yaml(cfg, str_or_buffer):
"""
Convert a dictionary to YAML and return the string or write it out
depending on the type of `str_or_buffer`.
Parameters
----------
cfg : dict or OrderedDict
Dictionary or OrderedDict to convert.
str_or_buffer : None, str, or buffer
If None: the YAML string will be returned.
If string: YAML will be saved to a file.
If buffer: YAML will be written to buffer using the ``.write`` method.
Returns
-------
str or None
YAML string if `str_or_buffer` is None, otherwise None since YAML
is written out to a separate destination.
"""
order = None
if isinstance(cfg, OrderedDict):
order = []
s = ordered_yaml(cfg, order)
if not str_or_buffer:
return s
elif isinstance(str_or_buffer, str):
with open(str_or_buffer, 'w') as f:
f.write(s)
else:
str_or_buffer.write(s) | [
"def",
"convert_to_yaml",
"(",
"cfg",
",",
"str_or_buffer",
")",
":",
"order",
"=",
"None",
"if",
"isinstance",
"(",
"cfg",
",",
"OrderedDict",
")",
":",
"order",
"=",
"[",
"]",
"s",
"=",
"ordered_yaml",
"(",
"cfg",
",",
"order",
")",
"if",
"not",
"str_or_buffer",
":",
"return",
"s",
"elif",
"isinstance",
"(",
"str_or_buffer",
",",
"str",
")",
":",
"with",
"open",
"(",
"str_or_buffer",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"s",
")",
"else",
":",
"str_or_buffer",
".",
"write",
"(",
"s",
")"
] | Convert a dictionary to YAML and return the string or write it out
depending on the type of `str_or_buffer`.
Parameters
----------
cfg : dict or OrderedDict
Dictionary or OrderedDict to convert.
str_or_buffer : None, str, or buffer
If None: the YAML string will be returned.
If string: YAML will be saved to a file.
If buffer: YAML will be written to buffer using the ``.write`` method.
Returns
-------
str or None
YAML string if `str_or_buffer` is None, otherwise None since YAML
is written out to a separate destination. | [
"Convert",
"a",
"dictionary",
"to",
"YAML",
"and",
"return",
"the",
"string",
"or",
"write",
"it",
"out",
"depending",
"on",
"the",
"type",
"of",
"str_or_buffer",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/utils/yamlio.py#L160-L193 | train | 235,842 |
UDST/urbansim | urbansim/accounts.py | Account.add_transaction | def add_transaction(self, amount, subaccount=None, metadata=None):
"""
Add a new transaction to the account.
Parameters
----------
amount : float
Negative for withdrawls, positive for deposits.
subaccount : object, optional
Any indicator of a subaccount to which this transaction applies.
metadata : dict, optional
Any extra metadata to record with the transaction.
(E.g. Info about where the money is coming from or going.)
May not contain keys 'amount' or 'subaccount'.
"""
metadata = metadata or {}
self.transactions.append(Transaction(amount, subaccount, metadata))
self.balance += amount | python | def add_transaction(self, amount, subaccount=None, metadata=None):
"""
Add a new transaction to the account.
Parameters
----------
amount : float
Negative for withdrawls, positive for deposits.
subaccount : object, optional
Any indicator of a subaccount to which this transaction applies.
metadata : dict, optional
Any extra metadata to record with the transaction.
(E.g. Info about where the money is coming from or going.)
May not contain keys 'amount' or 'subaccount'.
"""
metadata = metadata or {}
self.transactions.append(Transaction(amount, subaccount, metadata))
self.balance += amount | [
"def",
"add_transaction",
"(",
"self",
",",
"amount",
",",
"subaccount",
"=",
"None",
",",
"metadata",
"=",
"None",
")",
":",
"metadata",
"=",
"metadata",
"or",
"{",
"}",
"self",
".",
"transactions",
".",
"append",
"(",
"Transaction",
"(",
"amount",
",",
"subaccount",
",",
"metadata",
")",
")",
"self",
".",
"balance",
"+=",
"amount"
] | Add a new transaction to the account.
Parameters
----------
amount : float
Negative for withdrawls, positive for deposits.
subaccount : object, optional
Any indicator of a subaccount to which this transaction applies.
metadata : dict, optional
Any extra metadata to record with the transaction.
(E.g. Info about where the money is coming from or going.)
May not contain keys 'amount' or 'subaccount'. | [
"Add",
"a",
"new",
"transaction",
"to",
"the",
"account",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/accounts.py#L57-L75 | train | 235,843 |
UDST/urbansim | urbansim/accounts.py | Account.total_transactions_by_subacct | def total_transactions_by_subacct(self, subaccount):
"""
Get the sum of all transactions for a given subaccount.
Parameters
----------
subaccount : object
Identifier of subaccount.
Returns
-------
total : float
"""
return sum(
t.amount for t in self.transactions if t.subaccount == subaccount) | python | def total_transactions_by_subacct(self, subaccount):
"""
Get the sum of all transactions for a given subaccount.
Parameters
----------
subaccount : object
Identifier of subaccount.
Returns
-------
total : float
"""
return sum(
t.amount for t in self.transactions if t.subaccount == subaccount) | [
"def",
"total_transactions_by_subacct",
"(",
"self",
",",
"subaccount",
")",
":",
"return",
"sum",
"(",
"t",
".",
"amount",
"for",
"t",
"in",
"self",
".",
"transactions",
"if",
"t",
".",
"subaccount",
"==",
"subaccount",
")"
] | Get the sum of all transactions for a given subaccount.
Parameters
----------
subaccount : object
Identifier of subaccount.
Returns
-------
total : float | [
"Get",
"the",
"sum",
"of",
"all",
"transactions",
"for",
"a",
"given",
"subaccount",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/accounts.py#L102-L117 | train | 235,844 |
UDST/urbansim | urbansim/accounts.py | Account.to_frame | def to_frame(self):
"""
Return transactions as a pandas DataFrame.
"""
col_names = _column_names_from_metadata(
t.metadata for t in self.transactions)
def trow(t):
return tz.concatv(
(t.amount, t.subaccount),
(t.metadata.get(c) for c in col_names))
rows = [trow(t) for t in self.transactions]
if len(rows) == 0:
return pd.DataFrame(columns=COLS + col_names)
return pd.DataFrame(rows, columns=COLS + col_names) | python | def to_frame(self):
"""
Return transactions as a pandas DataFrame.
"""
col_names = _column_names_from_metadata(
t.metadata for t in self.transactions)
def trow(t):
return tz.concatv(
(t.amount, t.subaccount),
(t.metadata.get(c) for c in col_names))
rows = [trow(t) for t in self.transactions]
if len(rows) == 0:
return pd.DataFrame(columns=COLS + col_names)
return pd.DataFrame(rows, columns=COLS + col_names) | [
"def",
"to_frame",
"(",
"self",
")",
":",
"col_names",
"=",
"_column_names_from_metadata",
"(",
"t",
".",
"metadata",
"for",
"t",
"in",
"self",
".",
"transactions",
")",
"def",
"trow",
"(",
"t",
")",
":",
"return",
"tz",
".",
"concatv",
"(",
"(",
"t",
".",
"amount",
",",
"t",
".",
"subaccount",
")",
",",
"(",
"t",
".",
"metadata",
".",
"get",
"(",
"c",
")",
"for",
"c",
"in",
"col_names",
")",
")",
"rows",
"=",
"[",
"trow",
"(",
"t",
")",
"for",
"t",
"in",
"self",
".",
"transactions",
"]",
"if",
"len",
"(",
"rows",
")",
"==",
"0",
":",
"return",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"COLS",
"+",
"col_names",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"rows",
",",
"columns",
"=",
"COLS",
"+",
"col_names",
")"
] | Return transactions as a pandas DataFrame. | [
"Return",
"transactions",
"as",
"a",
"pandas",
"DataFrame",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/accounts.py#L136-L153 | train | 235,845 |
UDST/urbansim | urbansim/models/util.py | apply_filter_query | def apply_filter_query(df, filters=None):
"""
Use the DataFrame.query method to filter a table down to the
desired rows.
Parameters
----------
df : pandas.DataFrame
filters : list of str or str, optional
List of filters to apply. Will be joined together with
' and ' and passed to DataFrame.query. A string will be passed
straight to DataFrame.query.
If not supplied no filtering will be done.
Returns
-------
filtered_df : pandas.DataFrame
"""
with log_start_finish('apply filter query: {!r}'.format(filters), logger):
if filters:
if isinstance(filters, str):
query = filters
else:
query = ' and '.join(filters)
return df.query(query)
else:
return df | python | def apply_filter_query(df, filters=None):
"""
Use the DataFrame.query method to filter a table down to the
desired rows.
Parameters
----------
df : pandas.DataFrame
filters : list of str or str, optional
List of filters to apply. Will be joined together with
' and ' and passed to DataFrame.query. A string will be passed
straight to DataFrame.query.
If not supplied no filtering will be done.
Returns
-------
filtered_df : pandas.DataFrame
"""
with log_start_finish('apply filter query: {!r}'.format(filters), logger):
if filters:
if isinstance(filters, str):
query = filters
else:
query = ' and '.join(filters)
return df.query(query)
else:
return df | [
"def",
"apply_filter_query",
"(",
"df",
",",
"filters",
"=",
"None",
")",
":",
"with",
"log_start_finish",
"(",
"'apply filter query: {!r}'",
".",
"format",
"(",
"filters",
")",
",",
"logger",
")",
":",
"if",
"filters",
":",
"if",
"isinstance",
"(",
"filters",
",",
"str",
")",
":",
"query",
"=",
"filters",
"else",
":",
"query",
"=",
"' and '",
".",
"join",
"(",
"filters",
")",
"return",
"df",
".",
"query",
"(",
"query",
")",
"else",
":",
"return",
"df"
] | Use the DataFrame.query method to filter a table down to the
desired rows.
Parameters
----------
df : pandas.DataFrame
filters : list of str or str, optional
List of filters to apply. Will be joined together with
' and ' and passed to DataFrame.query. A string will be passed
straight to DataFrame.query.
If not supplied no filtering will be done.
Returns
-------
filtered_df : pandas.DataFrame | [
"Use",
"the",
"DataFrame",
".",
"query",
"method",
"to",
"filter",
"a",
"table",
"down",
"to",
"the",
"desired",
"rows",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L24-L51 | train | 235,846 |
UDST/urbansim | urbansim/models/util.py | _filterize | def _filterize(name, value):
"""
Turn a `name` and `value` into a string expression compatible
the ``DataFrame.query`` method.
Parameters
----------
name : str
Should be the name of a column in the table to which the
filter will be applied.
A suffix of '_max' will result in a "less than" filter,
a suffix of '_min' will result in a "greater than or equal to" filter,
and no recognized suffix will result in an "equal to" filter.
value : any
Value side of filter for comparison to column values.
Returns
-------
filter_exp : str
"""
if name.endswith('_min'):
name = name[:-4]
comp = '>='
elif name.endswith('_max'):
name = name[:-4]
comp = '<'
else:
comp = '=='
result = '{} {} {!r}'.format(name, comp, value)
logger.debug(
'converted name={} and value={} to filter {}'.format(
name, value, result))
return result | python | def _filterize(name, value):
"""
Turn a `name` and `value` into a string expression compatible
the ``DataFrame.query`` method.
Parameters
----------
name : str
Should be the name of a column in the table to which the
filter will be applied.
A suffix of '_max' will result in a "less than" filter,
a suffix of '_min' will result in a "greater than or equal to" filter,
and no recognized suffix will result in an "equal to" filter.
value : any
Value side of filter for comparison to column values.
Returns
-------
filter_exp : str
"""
if name.endswith('_min'):
name = name[:-4]
comp = '>='
elif name.endswith('_max'):
name = name[:-4]
comp = '<'
else:
comp = '=='
result = '{} {} {!r}'.format(name, comp, value)
logger.debug(
'converted name={} and value={} to filter {}'.format(
name, value, result))
return result | [
"def",
"_filterize",
"(",
"name",
",",
"value",
")",
":",
"if",
"name",
".",
"endswith",
"(",
"'_min'",
")",
":",
"name",
"=",
"name",
"[",
":",
"-",
"4",
"]",
"comp",
"=",
"'>='",
"elif",
"name",
".",
"endswith",
"(",
"'_max'",
")",
":",
"name",
"=",
"name",
"[",
":",
"-",
"4",
"]",
"comp",
"=",
"'<'",
"else",
":",
"comp",
"=",
"'=='",
"result",
"=",
"'{} {} {!r}'",
".",
"format",
"(",
"name",
",",
"comp",
",",
"value",
")",
"logger",
".",
"debug",
"(",
"'converted name={} and value={} to filter {}'",
".",
"format",
"(",
"name",
",",
"value",
",",
"result",
")",
")",
"return",
"result"
] | Turn a `name` and `value` into a string expression compatible
the ``DataFrame.query`` method.
Parameters
----------
name : str
Should be the name of a column in the table to which the
filter will be applied.
A suffix of '_max' will result in a "less than" filter,
a suffix of '_min' will result in a "greater than or equal to" filter,
and no recognized suffix will result in an "equal to" filter.
value : any
Value side of filter for comparison to column values.
Returns
-------
filter_exp : str | [
"Turn",
"a",
"name",
"and",
"value",
"into",
"a",
"string",
"expression",
"compatible",
"the",
"DataFrame",
".",
"query",
"method",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L54-L89 | train | 235,847 |
UDST/urbansim | urbansim/models/util.py | str_model_expression | def str_model_expression(expr, add_constant=True):
"""
We support specifying model expressions as strings, lists, or dicts;
but for use with patsy and statsmodels we need a string.
This function will take any of those as input and return a string.
Parameters
----------
expr : str, iterable, or dict
A string will be returned unmodified except to add or remove
a constant.
An iterable sequence will be joined together with ' + '.
A dictionary should have ``right_side`` and, optionally,
``left_side`` keys. The ``right_side`` can be a list or a string
and will be handled as above. If ``left_side`` is present it will
be joined with ``right_side`` with ' ~ '.
add_constant : bool, optional
Whether to add a ' + 1' (if True) or ' - 1' (if False) to the model.
If the expression already has a '+ 1' or '- 1' this option will be
ignored.
Returns
-------
model_expression : str
A string model expression suitable for use with statsmodels and patsy.
"""
if not isinstance(expr, str):
if isinstance(expr, collections.Mapping):
left_side = expr.get('left_side')
right_side = str_model_expression(expr['right_side'], add_constant)
else:
# some kind of iterable like a list
left_side = None
right_side = ' + '.join(expr)
if left_side:
model_expression = ' ~ '.join((left_side, right_side))
else:
model_expression = right_side
else:
model_expression = expr
if not has_constant_expr(model_expression):
if add_constant:
model_expression += ' + 1'
else:
model_expression += ' - 1'
logger.debug(
'converted expression: {!r} to model: {!r}'.format(
expr, model_expression))
return model_expression | python | def str_model_expression(expr, add_constant=True):
"""
We support specifying model expressions as strings, lists, or dicts;
but for use with patsy and statsmodels we need a string.
This function will take any of those as input and return a string.
Parameters
----------
expr : str, iterable, or dict
A string will be returned unmodified except to add or remove
a constant.
An iterable sequence will be joined together with ' + '.
A dictionary should have ``right_side`` and, optionally,
``left_side`` keys. The ``right_side`` can be a list or a string
and will be handled as above. If ``left_side`` is present it will
be joined with ``right_side`` with ' ~ '.
add_constant : bool, optional
Whether to add a ' + 1' (if True) or ' - 1' (if False) to the model.
If the expression already has a '+ 1' or '- 1' this option will be
ignored.
Returns
-------
model_expression : str
A string model expression suitable for use with statsmodels and patsy.
"""
if not isinstance(expr, str):
if isinstance(expr, collections.Mapping):
left_side = expr.get('left_side')
right_side = str_model_expression(expr['right_side'], add_constant)
else:
# some kind of iterable like a list
left_side = None
right_side = ' + '.join(expr)
if left_side:
model_expression = ' ~ '.join((left_side, right_side))
else:
model_expression = right_side
else:
model_expression = expr
if not has_constant_expr(model_expression):
if add_constant:
model_expression += ' + 1'
else:
model_expression += ' - 1'
logger.debug(
'converted expression: {!r} to model: {!r}'.format(
expr, model_expression))
return model_expression | [
"def",
"str_model_expression",
"(",
"expr",
",",
"add_constant",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"expr",
",",
"str",
")",
":",
"if",
"isinstance",
"(",
"expr",
",",
"collections",
".",
"Mapping",
")",
":",
"left_side",
"=",
"expr",
".",
"get",
"(",
"'left_side'",
")",
"right_side",
"=",
"str_model_expression",
"(",
"expr",
"[",
"'right_side'",
"]",
",",
"add_constant",
")",
"else",
":",
"# some kind of iterable like a list",
"left_side",
"=",
"None",
"right_side",
"=",
"' + '",
".",
"join",
"(",
"expr",
")",
"if",
"left_side",
":",
"model_expression",
"=",
"' ~ '",
".",
"join",
"(",
"(",
"left_side",
",",
"right_side",
")",
")",
"else",
":",
"model_expression",
"=",
"right_side",
"else",
":",
"model_expression",
"=",
"expr",
"if",
"not",
"has_constant_expr",
"(",
"model_expression",
")",
":",
"if",
"add_constant",
":",
"model_expression",
"+=",
"' + 1'",
"else",
":",
"model_expression",
"+=",
"' - 1'",
"logger",
".",
"debug",
"(",
"'converted expression: {!r} to model: {!r}'",
".",
"format",
"(",
"expr",
",",
"model_expression",
")",
")",
"return",
"model_expression"
] | We support specifying model expressions as strings, lists, or dicts;
but for use with patsy and statsmodels we need a string.
This function will take any of those as input and return a string.
Parameters
----------
expr : str, iterable, or dict
A string will be returned unmodified except to add or remove
a constant.
An iterable sequence will be joined together with ' + '.
A dictionary should have ``right_side`` and, optionally,
``left_side`` keys. The ``right_side`` can be a list or a string
and will be handled as above. If ``left_side`` is present it will
be joined with ``right_side`` with ' ~ '.
add_constant : bool, optional
Whether to add a ' + 1' (if True) or ' - 1' (if False) to the model.
If the expression already has a '+ 1' or '- 1' this option will be
ignored.
Returns
-------
model_expression : str
A string model expression suitable for use with statsmodels and patsy. | [
"We",
"support",
"specifying",
"model",
"expressions",
"as",
"strings",
"lists",
"or",
"dicts",
";",
"but",
"for",
"use",
"with",
"patsy",
"and",
"statsmodels",
"we",
"need",
"a",
"string",
".",
"This",
"function",
"will",
"take",
"any",
"of",
"those",
"as",
"input",
"and",
"return",
"a",
"string",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L174-L227 | train | 235,848 |
UDST/urbansim | urbansim/models/util.py | sorted_groupby | def sorted_groupby(df, groupby):
"""
Perform a groupby on a DataFrame using a specific column
and assuming that that column is sorted.
Parameters
----------
df : pandas.DataFrame
groupby : object
Column name on which to groupby. This column must be sorted.
Returns
-------
generator
Yields pairs of group_name, DataFrame.
"""
start = 0
prev = df[groupby].iloc[start]
for i, x in enumerate(df[groupby]):
if x != prev:
yield prev, df.iloc[start:i]
prev = x
start = i
# need to send back the last group
yield prev, df.iloc[start:] | python | def sorted_groupby(df, groupby):
"""
Perform a groupby on a DataFrame using a specific column
and assuming that that column is sorted.
Parameters
----------
df : pandas.DataFrame
groupby : object
Column name on which to groupby. This column must be sorted.
Returns
-------
generator
Yields pairs of group_name, DataFrame.
"""
start = 0
prev = df[groupby].iloc[start]
for i, x in enumerate(df[groupby]):
if x != prev:
yield prev, df.iloc[start:i]
prev = x
start = i
# need to send back the last group
yield prev, df.iloc[start:] | [
"def",
"sorted_groupby",
"(",
"df",
",",
"groupby",
")",
":",
"start",
"=",
"0",
"prev",
"=",
"df",
"[",
"groupby",
"]",
".",
"iloc",
"[",
"start",
"]",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"df",
"[",
"groupby",
"]",
")",
":",
"if",
"x",
"!=",
"prev",
":",
"yield",
"prev",
",",
"df",
".",
"iloc",
"[",
"start",
":",
"i",
"]",
"prev",
"=",
"x",
"start",
"=",
"i",
"# need to send back the last group",
"yield",
"prev",
",",
"df",
".",
"iloc",
"[",
"start",
":",
"]"
] | Perform a groupby on a DataFrame using a specific column
and assuming that that column is sorted.
Parameters
----------
df : pandas.DataFrame
groupby : object
Column name on which to groupby. This column must be sorted.
Returns
-------
generator
Yields pairs of group_name, DataFrame. | [
"Perform",
"a",
"groupby",
"on",
"a",
"DataFrame",
"using",
"a",
"specific",
"column",
"and",
"assuming",
"that",
"that",
"column",
"is",
"sorted",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L230-L255 | train | 235,849 |
UDST/urbansim | urbansim/models/util.py | columns_in_filters | def columns_in_filters(filters):
"""
Returns a list of the columns used in a set of query filters.
Parameters
----------
filters : list of str or str
List of the filters as passed passed to ``apply_filter_query``.
Returns
-------
columns : list of str
List of all the strings mentioned in the filters.
"""
if not filters:
return []
if not isinstance(filters, str):
filters = ' '.join(filters)
columns = []
reserved = {'and', 'or', 'in', 'not'}
for toknum, tokval, _, _, _ in generate_tokens(StringIO(filters).readline):
if toknum == NAME and tokval not in reserved:
columns.append(tokval)
return list(tz.unique(columns)) | python | def columns_in_filters(filters):
"""
Returns a list of the columns used in a set of query filters.
Parameters
----------
filters : list of str or str
List of the filters as passed passed to ``apply_filter_query``.
Returns
-------
columns : list of str
List of all the strings mentioned in the filters.
"""
if not filters:
return []
if not isinstance(filters, str):
filters = ' '.join(filters)
columns = []
reserved = {'and', 'or', 'in', 'not'}
for toknum, tokval, _, _, _ in generate_tokens(StringIO(filters).readline):
if toknum == NAME and tokval not in reserved:
columns.append(tokval)
return list(tz.unique(columns)) | [
"def",
"columns_in_filters",
"(",
"filters",
")",
":",
"if",
"not",
"filters",
":",
"return",
"[",
"]",
"if",
"not",
"isinstance",
"(",
"filters",
",",
"str",
")",
":",
"filters",
"=",
"' '",
".",
"join",
"(",
"filters",
")",
"columns",
"=",
"[",
"]",
"reserved",
"=",
"{",
"'and'",
",",
"'or'",
",",
"'in'",
",",
"'not'",
"}",
"for",
"toknum",
",",
"tokval",
",",
"_",
",",
"_",
",",
"_",
"in",
"generate_tokens",
"(",
"StringIO",
"(",
"filters",
")",
".",
"readline",
")",
":",
"if",
"toknum",
"==",
"NAME",
"and",
"tokval",
"not",
"in",
"reserved",
":",
"columns",
".",
"append",
"(",
"tokval",
")",
"return",
"list",
"(",
"tz",
".",
"unique",
"(",
"columns",
")",
")"
] | Returns a list of the columns used in a set of query filters.
Parameters
----------
filters : list of str or str
List of the filters as passed passed to ``apply_filter_query``.
Returns
-------
columns : list of str
List of all the strings mentioned in the filters. | [
"Returns",
"a",
"list",
"of",
"the",
"columns",
"used",
"in",
"a",
"set",
"of",
"query",
"filters",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L258-L286 | train | 235,850 |
UDST/urbansim | urbansim/models/util.py | _tokens_from_patsy | def _tokens_from_patsy(node):
"""
Yields all the individual tokens from within a patsy formula
as parsed by patsy.parse_formula.parse_formula.
Parameters
----------
node : patsy.parse_formula.ParseNode
"""
for n in node.args:
for t in _tokens_from_patsy(n):
yield t
if node.token:
yield node.token | python | def _tokens_from_patsy(node):
"""
Yields all the individual tokens from within a patsy formula
as parsed by patsy.parse_formula.parse_formula.
Parameters
----------
node : patsy.parse_formula.ParseNode
"""
for n in node.args:
for t in _tokens_from_patsy(n):
yield t
if node.token:
yield node.token | [
"def",
"_tokens_from_patsy",
"(",
"node",
")",
":",
"for",
"n",
"in",
"node",
".",
"args",
":",
"for",
"t",
"in",
"_tokens_from_patsy",
"(",
"n",
")",
":",
"yield",
"t",
"if",
"node",
".",
"token",
":",
"yield",
"node",
".",
"token"
] | Yields all the individual tokens from within a patsy formula
as parsed by patsy.parse_formula.parse_formula.
Parameters
----------
node : patsy.parse_formula.ParseNode | [
"Yields",
"all",
"the",
"individual",
"tokens",
"from",
"within",
"a",
"patsy",
"formula",
"as",
"parsed",
"by",
"patsy",
".",
"parse_formula",
".",
"parse_formula",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L289-L304 | train | 235,851 |
UDST/urbansim | urbansim/models/util.py | columns_in_formula | def columns_in_formula(formula):
"""
Returns the names of all the columns used in a patsy formula.
Parameters
----------
formula : str, iterable, or dict
Any formula construction supported by ``str_model_expression``.
Returns
-------
columns : list of str
"""
if formula is None:
return []
formula = str_model_expression(formula, add_constant=False)
columns = []
tokens = map(
lambda x: x.extra,
tz.remove(
lambda x: x.extra is None,
_tokens_from_patsy(patsy.parse_formula.parse_formula(formula))))
for tok in tokens:
# if there are parentheses in the expression we
# want to drop them and everything outside
# and start again from the top
if '(' in tok:
start = tok.find('(') + 1
fin = tok.rfind(')')
columns.extend(columns_in_formula(tok[start:fin]))
else:
for toknum, tokval, _, _, _ in generate_tokens(
StringIO(tok).readline):
if toknum == NAME:
columns.append(tokval)
return list(tz.unique(columns)) | python | def columns_in_formula(formula):
"""
Returns the names of all the columns used in a patsy formula.
Parameters
----------
formula : str, iterable, or dict
Any formula construction supported by ``str_model_expression``.
Returns
-------
columns : list of str
"""
if formula is None:
return []
formula = str_model_expression(formula, add_constant=False)
columns = []
tokens = map(
lambda x: x.extra,
tz.remove(
lambda x: x.extra is None,
_tokens_from_patsy(patsy.parse_formula.parse_formula(formula))))
for tok in tokens:
# if there are parentheses in the expression we
# want to drop them and everything outside
# and start again from the top
if '(' in tok:
start = tok.find('(') + 1
fin = tok.rfind(')')
columns.extend(columns_in_formula(tok[start:fin]))
else:
for toknum, tokval, _, _, _ in generate_tokens(
StringIO(tok).readline):
if toknum == NAME:
columns.append(tokval)
return list(tz.unique(columns)) | [
"def",
"columns_in_formula",
"(",
"formula",
")",
":",
"if",
"formula",
"is",
"None",
":",
"return",
"[",
"]",
"formula",
"=",
"str_model_expression",
"(",
"formula",
",",
"add_constant",
"=",
"False",
")",
"columns",
"=",
"[",
"]",
"tokens",
"=",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"extra",
",",
"tz",
".",
"remove",
"(",
"lambda",
"x",
":",
"x",
".",
"extra",
"is",
"None",
",",
"_tokens_from_patsy",
"(",
"patsy",
".",
"parse_formula",
".",
"parse_formula",
"(",
"formula",
")",
")",
")",
")",
"for",
"tok",
"in",
"tokens",
":",
"# if there are parentheses in the expression we",
"# want to drop them and everything outside",
"# and start again from the top",
"if",
"'('",
"in",
"tok",
":",
"start",
"=",
"tok",
".",
"find",
"(",
"'('",
")",
"+",
"1",
"fin",
"=",
"tok",
".",
"rfind",
"(",
"')'",
")",
"columns",
".",
"extend",
"(",
"columns_in_formula",
"(",
"tok",
"[",
"start",
":",
"fin",
"]",
")",
")",
"else",
":",
"for",
"toknum",
",",
"tokval",
",",
"_",
",",
"_",
",",
"_",
"in",
"generate_tokens",
"(",
"StringIO",
"(",
"tok",
")",
".",
"readline",
")",
":",
"if",
"toknum",
"==",
"NAME",
":",
"columns",
".",
"append",
"(",
"tokval",
")",
"return",
"list",
"(",
"tz",
".",
"unique",
"(",
"columns",
")",
")"
] | Returns the names of all the columns used in a patsy formula.
Parameters
----------
formula : str, iterable, or dict
Any formula construction supported by ``str_model_expression``.
Returns
-------
columns : list of str | [
"Returns",
"the",
"names",
"of",
"all",
"the",
"columns",
"used",
"in",
"a",
"patsy",
"formula",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L307-L347 | train | 235,852 |
UDST/urbansim | urbansim/models/regression.py | fit_model | def fit_model(df, filters, model_expression):
"""
Use statsmodels OLS to construct a model relation.
Parameters
----------
df : pandas.DataFrame
Data to use for fit. Should contain all the columns
referenced in the `model_expression`.
filters : list of str
Any filters to apply before doing the model fit.
model_expression : str
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
Returns
-------
fit : statsmodels.regression.linear_model.OLSResults
"""
df = util.apply_filter_query(df, filters)
model = smf.ols(formula=model_expression, data=df)
if len(model.exog) != len(df):
raise ModelEvaluationError(
'Estimated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
with log_start_finish('statsmodels OLS fit', logger):
return model.fit() | python | def fit_model(df, filters, model_expression):
"""
Use statsmodels OLS to construct a model relation.
Parameters
----------
df : pandas.DataFrame
Data to use for fit. Should contain all the columns
referenced in the `model_expression`.
filters : list of str
Any filters to apply before doing the model fit.
model_expression : str
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
Returns
-------
fit : statsmodels.regression.linear_model.OLSResults
"""
df = util.apply_filter_query(df, filters)
model = smf.ols(formula=model_expression, data=df)
if len(model.exog) != len(df):
raise ModelEvaluationError(
'Estimated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
with log_start_finish('statsmodels OLS fit', logger):
return model.fit() | [
"def",
"fit_model",
"(",
"df",
",",
"filters",
",",
"model_expression",
")",
":",
"df",
"=",
"util",
".",
"apply_filter_query",
"(",
"df",
",",
"filters",
")",
"model",
"=",
"smf",
".",
"ols",
"(",
"formula",
"=",
"model_expression",
",",
"data",
"=",
"df",
")",
"if",
"len",
"(",
"model",
".",
"exog",
")",
"!=",
"len",
"(",
"df",
")",
":",
"raise",
"ModelEvaluationError",
"(",
"'Estimated data does not have the same length as input. '",
"'This suggests there are null values in one or more of '",
"'the input columns.'",
")",
"with",
"log_start_finish",
"(",
"'statsmodels OLS fit'",
",",
"logger",
")",
":",
"return",
"model",
".",
"fit",
"(",
")"
] | Use statsmodels OLS to construct a model relation.
Parameters
----------
df : pandas.DataFrame
Data to use for fit. Should contain all the columns
referenced in the `model_expression`.
filters : list of str
Any filters to apply before doing the model fit.
model_expression : str
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
Returns
-------
fit : statsmodels.regression.linear_model.OLSResults | [
"Use",
"statsmodels",
"OLS",
"to",
"construct",
"a",
"model",
"relation",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L25-L55 | train | 235,853 |
UDST/urbansim | urbansim/models/regression.py | predict | def predict(df, filters, model_fit, ytransform=None):
"""
Apply model to new data to predict new dependent values.
Parameters
----------
df : pandas.DataFrame
filters : list of str
Any filters to apply before doing prediction.
model_fit : statsmodels.regression.linear_model.OLSResults
Result of model estimation.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
By default no transformation is applied.
Returns
-------
result : pandas.Series
Predicted values as a pandas Series. Will have the index of `df`
after applying filters.
"""
df = util.apply_filter_query(df, filters)
with log_start_finish('statsmodels predict', logger):
sim_data = model_fit.predict(df)
if len(sim_data) != len(df):
raise ModelEvaluationError(
'Predicted data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
if ytransform:
sim_data = ytransform(sim_data)
return pd.Series(sim_data, index=df.index) | python | def predict(df, filters, model_fit, ytransform=None):
"""
Apply model to new data to predict new dependent values.
Parameters
----------
df : pandas.DataFrame
filters : list of str
Any filters to apply before doing prediction.
model_fit : statsmodels.regression.linear_model.OLSResults
Result of model estimation.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
By default no transformation is applied.
Returns
-------
result : pandas.Series
Predicted values as a pandas Series. Will have the index of `df`
after applying filters.
"""
df = util.apply_filter_query(df, filters)
with log_start_finish('statsmodels predict', logger):
sim_data = model_fit.predict(df)
if len(sim_data) != len(df):
raise ModelEvaluationError(
'Predicted data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
if ytransform:
sim_data = ytransform(sim_data)
return pd.Series(sim_data, index=df.index) | [
"def",
"predict",
"(",
"df",
",",
"filters",
",",
"model_fit",
",",
"ytransform",
"=",
"None",
")",
":",
"df",
"=",
"util",
".",
"apply_filter_query",
"(",
"df",
",",
"filters",
")",
"with",
"log_start_finish",
"(",
"'statsmodels predict'",
",",
"logger",
")",
":",
"sim_data",
"=",
"model_fit",
".",
"predict",
"(",
"df",
")",
"if",
"len",
"(",
"sim_data",
")",
"!=",
"len",
"(",
"df",
")",
":",
"raise",
"ModelEvaluationError",
"(",
"'Predicted data does not have the same length as input. '",
"'This suggests there are null values in one or more of '",
"'the input columns.'",
")",
"if",
"ytransform",
":",
"sim_data",
"=",
"ytransform",
"(",
"sim_data",
")",
"return",
"pd",
".",
"Series",
"(",
"sim_data",
",",
"index",
"=",
"df",
".",
"index",
")"
] | Apply model to new data to predict new dependent values.
Parameters
----------
df : pandas.DataFrame
filters : list of str
Any filters to apply before doing prediction.
model_fit : statsmodels.regression.linear_model.OLSResults
Result of model estimation.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
By default no transformation is applied.
Returns
-------
result : pandas.Series
Predicted values as a pandas Series. Will have the index of `df`
after applying filters. | [
"Apply",
"model",
"to",
"new",
"data",
"to",
"predict",
"new",
"dependent",
"values",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L58-L97 | train | 235,854 |
UDST/urbansim | urbansim/models/regression.py | _model_fit_to_table | def _model_fit_to_table(fit):
"""
Produce a pandas DataFrame of model fit results from a statsmodels
fit result object.
Parameters
----------
fit : statsmodels.regression.linear_model.RegressionResults
Returns
-------
fit_parameters : pandas.DataFrame
Will have columns 'Coefficient', 'Std. Error', and 'T-Score'.
Index will be model terms.
This frame will also have non-standard attributes
.rsquared and .rsquared_adj with the same meaning and value
as on `fit`.
"""
fit_parameters = pd.DataFrame(
{'Coefficient': fit.params,
'Std. Error': fit.bse,
'T-Score': fit.tvalues})
fit_parameters.rsquared = fit.rsquared
fit_parameters.rsquared_adj = fit.rsquared_adj
return fit_parameters | python | def _model_fit_to_table(fit):
"""
Produce a pandas DataFrame of model fit results from a statsmodels
fit result object.
Parameters
----------
fit : statsmodels.regression.linear_model.RegressionResults
Returns
-------
fit_parameters : pandas.DataFrame
Will have columns 'Coefficient', 'Std. Error', and 'T-Score'.
Index will be model terms.
This frame will also have non-standard attributes
.rsquared and .rsquared_adj with the same meaning and value
as on `fit`.
"""
fit_parameters = pd.DataFrame(
{'Coefficient': fit.params,
'Std. Error': fit.bse,
'T-Score': fit.tvalues})
fit_parameters.rsquared = fit.rsquared
fit_parameters.rsquared_adj = fit.rsquared_adj
return fit_parameters | [
"def",
"_model_fit_to_table",
"(",
"fit",
")",
":",
"fit_parameters",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"'Coefficient'",
":",
"fit",
".",
"params",
",",
"'Std. Error'",
":",
"fit",
".",
"bse",
",",
"'T-Score'",
":",
"fit",
".",
"tvalues",
"}",
")",
"fit_parameters",
".",
"rsquared",
"=",
"fit",
".",
"rsquared",
"fit_parameters",
".",
"rsquared_adj",
"=",
"fit",
".",
"rsquared_adj",
"return",
"fit_parameters"
] | Produce a pandas DataFrame of model fit results from a statsmodels
fit result object.
Parameters
----------
fit : statsmodels.regression.linear_model.RegressionResults
Returns
-------
fit_parameters : pandas.DataFrame
Will have columns 'Coefficient', 'Std. Error', and 'T-Score'.
Index will be model terms.
This frame will also have non-standard attributes
.rsquared and .rsquared_adj with the same meaning and value
as on `fit`. | [
"Produce",
"a",
"pandas",
"DataFrame",
"of",
"model",
"fit",
"results",
"from",
"a",
"statsmodels",
"fit",
"result",
"object",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L178-L204 | train | 235,855 |
UDST/urbansim | urbansim/models/regression.py | _FakeRegressionResults.predict | def predict(self, data):
"""
Predict new values by running data through the fit model.
Parameters
----------
data : pandas.DataFrame
Table with columns corresponding to the RHS of `model_expression`.
Returns
-------
predicted : ndarray
Array of predicted values.
"""
with log_start_finish('_FakeRegressionResults prediction', logger):
model_design = dmatrix(
self._rhs, data=data, return_type='dataframe')
return model_design.dot(self.params).values | python | def predict(self, data):
"""
Predict new values by running data through the fit model.
Parameters
----------
data : pandas.DataFrame
Table with columns corresponding to the RHS of `model_expression`.
Returns
-------
predicted : ndarray
Array of predicted values.
"""
with log_start_finish('_FakeRegressionResults prediction', logger):
model_design = dmatrix(
self._rhs, data=data, return_type='dataframe')
return model_design.dot(self.params).values | [
"def",
"predict",
"(",
"self",
",",
"data",
")",
":",
"with",
"log_start_finish",
"(",
"'_FakeRegressionResults prediction'",
",",
"logger",
")",
":",
"model_design",
"=",
"dmatrix",
"(",
"self",
".",
"_rhs",
",",
"data",
"=",
"data",
",",
"return_type",
"=",
"'dataframe'",
")",
"return",
"model_design",
".",
"dot",
"(",
"self",
".",
"params",
")",
".",
"values"
] | Predict new values by running data through the fit model.
Parameters
----------
data : pandas.DataFrame
Table with columns corresponding to the RHS of `model_expression`.
Returns
-------
predicted : ndarray
Array of predicted values. | [
"Predict",
"new",
"values",
"by",
"running",
"data",
"through",
"the",
"fit",
"model",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L157-L175 | train | 235,856 |
UDST/urbansim | urbansim/models/regression.py | RegressionModel.from_yaml | def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a RegressionModel instance from a saved YAML configuration.
Arguments are mutually exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
RegressionModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
model = cls(
cfg['fit_filters'],
cfg['predict_filters'],
cfg['model_expression'],
YTRANSFORM_MAPPING[cfg['ytransform']],
cfg['name'])
if 'fitted' in cfg and cfg['fitted']:
fit_parameters = pd.DataFrame(cfg['fit_parameters'])
fit_parameters.rsquared = cfg['fit_rsquared']
fit_parameters.rsquared_adj = cfg['fit_rsquared_adj']
model.model_fit = _FakeRegressionResults(
model.str_model_expression,
fit_parameters,
cfg['fit_rsquared'], cfg['fit_rsquared_adj'])
model.fit_parameters = fit_parameters
logger.debug('loaded regression model {} from YAML'.format(model.name))
return model | python | def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a RegressionModel instance from a saved YAML configuration.
Arguments are mutually exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
RegressionModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
model = cls(
cfg['fit_filters'],
cfg['predict_filters'],
cfg['model_expression'],
YTRANSFORM_MAPPING[cfg['ytransform']],
cfg['name'])
if 'fitted' in cfg and cfg['fitted']:
fit_parameters = pd.DataFrame(cfg['fit_parameters'])
fit_parameters.rsquared = cfg['fit_rsquared']
fit_parameters.rsquared_adj = cfg['fit_rsquared_adj']
model.model_fit = _FakeRegressionResults(
model.str_model_expression,
fit_parameters,
cfg['fit_rsquared'], cfg['fit_rsquared_adj'])
model.fit_parameters = fit_parameters
logger.debug('loaded regression model {} from YAML'.format(model.name))
return model | [
"def",
"from_yaml",
"(",
"cls",
",",
"yaml_str",
"=",
"None",
",",
"str_or_buffer",
"=",
"None",
")",
":",
"cfg",
"=",
"yamlio",
".",
"yaml_to_dict",
"(",
"yaml_str",
",",
"str_or_buffer",
")",
"model",
"=",
"cls",
"(",
"cfg",
"[",
"'fit_filters'",
"]",
",",
"cfg",
"[",
"'predict_filters'",
"]",
",",
"cfg",
"[",
"'model_expression'",
"]",
",",
"YTRANSFORM_MAPPING",
"[",
"cfg",
"[",
"'ytransform'",
"]",
"]",
",",
"cfg",
"[",
"'name'",
"]",
")",
"if",
"'fitted'",
"in",
"cfg",
"and",
"cfg",
"[",
"'fitted'",
"]",
":",
"fit_parameters",
"=",
"pd",
".",
"DataFrame",
"(",
"cfg",
"[",
"'fit_parameters'",
"]",
")",
"fit_parameters",
".",
"rsquared",
"=",
"cfg",
"[",
"'fit_rsquared'",
"]",
"fit_parameters",
".",
"rsquared_adj",
"=",
"cfg",
"[",
"'fit_rsquared_adj'",
"]",
"model",
".",
"model_fit",
"=",
"_FakeRegressionResults",
"(",
"model",
".",
"str_model_expression",
",",
"fit_parameters",
",",
"cfg",
"[",
"'fit_rsquared'",
"]",
",",
"cfg",
"[",
"'fit_rsquared_adj'",
"]",
")",
"model",
".",
"fit_parameters",
"=",
"fit_parameters",
"logger",
".",
"debug",
"(",
"'loaded regression model {} from YAML'",
".",
"format",
"(",
"model",
".",
"name",
")",
")",
"return",
"model"
] | Create a RegressionModel instance from a saved YAML configuration.
Arguments are mutually exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
RegressionModel | [
"Create",
"a",
"RegressionModel",
"instance",
"from",
"a",
"saved",
"YAML",
"configuration",
".",
"Arguments",
"are",
"mutually",
"exclusive",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L260-L298 | train | 235,857 |
UDST/urbansim | urbansim/models/regression.py | RegressionModel.predict | def predict(self, data):
"""
Predict a new data set based on an estimated model.
Parameters
----------
data : pandas.DataFrame
Data to use for prediction. Must contain all the columns
referenced by the right-hand side of the `model_expression`.
Returns
-------
result : pandas.Series
Predicted values as a pandas Series. Will have the index of `data`
after applying filters.
"""
self.assert_fitted()
with log_start_finish('predicting model {}'.format(self.name), logger):
return predict(
data, self.predict_filters, self.model_fit, self.ytransform) | python | def predict(self, data):
"""
Predict a new data set based on an estimated model.
Parameters
----------
data : pandas.DataFrame
Data to use for prediction. Must contain all the columns
referenced by the right-hand side of the `model_expression`.
Returns
-------
result : pandas.Series
Predicted values as a pandas Series. Will have the index of `data`
after applying filters.
"""
self.assert_fitted()
with log_start_finish('predicting model {}'.format(self.name), logger):
return predict(
data, self.predict_filters, self.model_fit, self.ytransform) | [
"def",
"predict",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"assert_fitted",
"(",
")",
"with",
"log_start_finish",
"(",
"'predicting model {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
",",
"logger",
")",
":",
"return",
"predict",
"(",
"data",
",",
"self",
".",
"predict_filters",
",",
"self",
".",
"model_fit",
",",
"self",
".",
"ytransform",
")"
] | Predict a new data set based on an estimated model.
Parameters
----------
data : pandas.DataFrame
Data to use for prediction. Must contain all the columns
referenced by the right-hand side of the `model_expression`.
Returns
-------
result : pandas.Series
Predicted values as a pandas Series. Will have the index of `data`
after applying filters. | [
"Predict",
"a",
"new",
"data",
"set",
"based",
"on",
"an",
"estimated",
"model",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L390-L410 | train | 235,858 |
UDST/urbansim | urbansim/models/regression.py | RegressionModel.to_dict | def to_dict(self):
"""
Returns a dictionary representation of a RegressionModel instance.
"""
d = {
'model_type': 'regression',
'name': self.name,
'fit_filters': self.fit_filters,
'predict_filters': self.predict_filters,
'model_expression': self.model_expression,
'ytransform': YTRANSFORM_MAPPING[self.ytransform],
'fitted': self.fitted,
'fit_parameters': None,
'fit_rsquared': None,
'fit_rsquared_adj': None
}
if self.fitted:
d['fit_parameters'] = yamlio.frame_to_yaml_safe(
self.fit_parameters)
d['fit_rsquared'] = float(self.model_fit.rsquared)
d['fit_rsquared_adj'] = float(self.model_fit.rsquared_adj)
return d | python | def to_dict(self):
"""
Returns a dictionary representation of a RegressionModel instance.
"""
d = {
'model_type': 'regression',
'name': self.name,
'fit_filters': self.fit_filters,
'predict_filters': self.predict_filters,
'model_expression': self.model_expression,
'ytransform': YTRANSFORM_MAPPING[self.ytransform],
'fitted': self.fitted,
'fit_parameters': None,
'fit_rsquared': None,
'fit_rsquared_adj': None
}
if self.fitted:
d['fit_parameters'] = yamlio.frame_to_yaml_safe(
self.fit_parameters)
d['fit_rsquared'] = float(self.model_fit.rsquared)
d['fit_rsquared_adj'] = float(self.model_fit.rsquared_adj)
return d | [
"def",
"to_dict",
"(",
"self",
")",
":",
"d",
"=",
"{",
"'model_type'",
":",
"'regression'",
",",
"'name'",
":",
"self",
".",
"name",
",",
"'fit_filters'",
":",
"self",
".",
"fit_filters",
",",
"'predict_filters'",
":",
"self",
".",
"predict_filters",
",",
"'model_expression'",
":",
"self",
".",
"model_expression",
",",
"'ytransform'",
":",
"YTRANSFORM_MAPPING",
"[",
"self",
".",
"ytransform",
"]",
",",
"'fitted'",
":",
"self",
".",
"fitted",
",",
"'fit_parameters'",
":",
"None",
",",
"'fit_rsquared'",
":",
"None",
",",
"'fit_rsquared_adj'",
":",
"None",
"}",
"if",
"self",
".",
"fitted",
":",
"d",
"[",
"'fit_parameters'",
"]",
"=",
"yamlio",
".",
"frame_to_yaml_safe",
"(",
"self",
".",
"fit_parameters",
")",
"d",
"[",
"'fit_rsquared'",
"]",
"=",
"float",
"(",
"self",
".",
"model_fit",
".",
"rsquared",
")",
"d",
"[",
"'fit_rsquared_adj'",
"]",
"=",
"float",
"(",
"self",
".",
"model_fit",
".",
"rsquared_adj",
")",
"return",
"d"
] | Returns a dictionary representation of a RegressionModel instance. | [
"Returns",
"a",
"dictionary",
"representation",
"of",
"a",
"RegressionModel",
"instance",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L412-L436 | train | 235,859 |
UDST/urbansim | urbansim/models/regression.py | RegressionModel.columns_used | def columns_used(self):
"""
Returns all the columns used in this model for filtering
and in the model expression.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.fit_filters),
util.columns_in_filters(self.predict_filters),
util.columns_in_formula(self.model_expression)))) | python | def columns_used(self):
"""
Returns all the columns used in this model for filtering
and in the model expression.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.fit_filters),
util.columns_in_filters(self.predict_filters),
util.columns_in_formula(self.model_expression)))) | [
"def",
"columns_used",
"(",
"self",
")",
":",
"return",
"list",
"(",
"tz",
".",
"unique",
"(",
"tz",
".",
"concatv",
"(",
"util",
".",
"columns_in_filters",
"(",
"self",
".",
"fit_filters",
")",
",",
"util",
".",
"columns_in_filters",
"(",
"self",
".",
"predict_filters",
")",
",",
"util",
".",
"columns_in_formula",
"(",
"self",
".",
"model_expression",
")",
")",
")",
")"
] | Returns all the columns used in this model for filtering
and in the model expression. | [
"Returns",
"all",
"the",
"columns",
"used",
"in",
"this",
"model",
"for",
"filtering",
"and",
"in",
"the",
"model",
"expression",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L460-L469 | train | 235,860 |
UDST/urbansim | urbansim/models/regression.py | RegressionModelGroup.add_model | def add_model(self, model):
"""
Add a `RegressionModel` instance.
Parameters
----------
model : `RegressionModel`
Should have a ``.name`` attribute matching one of
the groupby segments.
"""
logger.debug(
'adding model {} to group {}'.format(model.name, self.name))
self.models[model.name] = model | python | def add_model(self, model):
"""
Add a `RegressionModel` instance.
Parameters
----------
model : `RegressionModel`
Should have a ``.name`` attribute matching one of
the groupby segments.
"""
logger.debug(
'adding model {} to group {}'.format(model.name, self.name))
self.models[model.name] = model | [
"def",
"add_model",
"(",
"self",
",",
"model",
")",
":",
"logger",
".",
"debug",
"(",
"'adding model {} to group {}'",
".",
"format",
"(",
"model",
".",
"name",
",",
"self",
".",
"name",
")",
")",
"self",
".",
"models",
"[",
"model",
".",
"name",
"]",
"=",
"model"
] | Add a `RegressionModel` instance.
Parameters
----------
model : `RegressionModel`
Should have a ``.name`` attribute matching one of
the groupby segments. | [
"Add",
"a",
"RegressionModel",
"instance",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L546-L559 | train | 235,861 |
UDST/urbansim | urbansim/models/regression.py | RegressionModelGroup.add_model_from_params | def add_model_from_params(self, name, fit_filters, predict_filters,
model_expression, ytransform=None):
"""
Add a model by passing arguments through to `RegressionModel`.
Parameters
----------
name : any
Must match a groupby segment name.
fit_filters : list of str
Filters applied before fitting the model.
predict_filters : list of str
Filters applied before calculating new data points.
model_expression : str
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
By default no transformation is applied.
"""
logger.debug(
'adding model {} to group {}'.format(name, self.name))
model = RegressionModel(
fit_filters, predict_filters, model_expression, ytransform, name)
self.models[name] = model | python | def add_model_from_params(self, name, fit_filters, predict_filters,
model_expression, ytransform=None):
"""
Add a model by passing arguments through to `RegressionModel`.
Parameters
----------
name : any
Must match a groupby segment name.
fit_filters : list of str
Filters applied before fitting the model.
predict_filters : list of str
Filters applied before calculating new data points.
model_expression : str
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
By default no transformation is applied.
"""
logger.debug(
'adding model {} to group {}'.format(name, self.name))
model = RegressionModel(
fit_filters, predict_filters, model_expression, ytransform, name)
self.models[name] = model | [
"def",
"add_model_from_params",
"(",
"self",
",",
"name",
",",
"fit_filters",
",",
"predict_filters",
",",
"model_expression",
",",
"ytransform",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'adding model {} to group {}'",
".",
"format",
"(",
"name",
",",
"self",
".",
"name",
")",
")",
"model",
"=",
"RegressionModel",
"(",
"fit_filters",
",",
"predict_filters",
",",
"model_expression",
",",
"ytransform",
",",
"name",
")",
"self",
".",
"models",
"[",
"name",
"]",
"=",
"model"
] | Add a model by passing arguments through to `RegressionModel`.
Parameters
----------
name : any
Must match a groupby segment name.
fit_filters : list of str
Filters applied before fitting the model.
predict_filters : list of str
Filters applied before calculating new data points.
model_expression : str
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
By default no transformation is applied. | [
"Add",
"a",
"model",
"by",
"passing",
"arguments",
"through",
"to",
"RegressionModel",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L561-L590 | train | 235,862 |
UDST/urbansim | urbansim/models/regression.py | RegressionModelGroup.fit | def fit(self, data, debug=False):
"""
Fit each of the models in the group.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true (default false) will pass the debug parameter
to model estimation.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names.
"""
with log_start_finish(
'fitting models in group {}'.format(self.name), logger):
return {name: self.models[name].fit(df, debug=debug)
for name, df in self._iter_groups(data)} | python | def fit(self, data, debug=False):
"""
Fit each of the models in the group.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true (default false) will pass the debug parameter
to model estimation.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names.
"""
with log_start_finish(
'fitting models in group {}'.format(self.name), logger):
return {name: self.models[name].fit(df, debug=debug)
for name, df in self._iter_groups(data)} | [
"def",
"fit",
"(",
"self",
",",
"data",
",",
"debug",
"=",
"False",
")",
":",
"with",
"log_start_finish",
"(",
"'fitting models in group {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
",",
"logger",
")",
":",
"return",
"{",
"name",
":",
"self",
".",
"models",
"[",
"name",
"]",
".",
"fit",
"(",
"df",
",",
"debug",
"=",
"debug",
")",
"for",
"name",
",",
"df",
"in",
"self",
".",
"_iter_groups",
"(",
"data",
")",
"}"
] | Fit each of the models in the group.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true (default false) will pass the debug parameter
to model estimation.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names. | [
"Fit",
"each",
"of",
"the",
"models",
"in",
"the",
"group",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L612-L633 | train | 235,863 |
UDST/urbansim | urbansim/models/regression.py | SegmentedRegressionModel.from_yaml | def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a SegmentedRegressionModel instance from a saved YAML
configuration. Arguments are mutally exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
SegmentedRegressionModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
default_model_expr = cfg['default_config']['model_expression']
default_ytransform = cfg['default_config']['ytransform']
seg = cls(
cfg['segmentation_col'], cfg['fit_filters'],
cfg['predict_filters'], default_model_expr,
YTRANSFORM_MAPPING[default_ytransform], cfg['min_segment_size'],
cfg['name'])
if "models" not in cfg:
cfg["models"] = {}
for name, m in cfg['models'].items():
m['model_expression'] = m.get(
'model_expression', default_model_expr)
m['ytransform'] = m.get('ytransform', default_ytransform)
m['fit_filters'] = None
m['predict_filters'] = None
reg = RegressionModel.from_yaml(yamlio.convert_to_yaml(m, None))
seg._group.add_model(reg)
logger.debug(
'loaded segmented regression model {} from yaml'.format(seg.name))
return seg | python | def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a SegmentedRegressionModel instance from a saved YAML
configuration. Arguments are mutally exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
SegmentedRegressionModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
default_model_expr = cfg['default_config']['model_expression']
default_ytransform = cfg['default_config']['ytransform']
seg = cls(
cfg['segmentation_col'], cfg['fit_filters'],
cfg['predict_filters'], default_model_expr,
YTRANSFORM_MAPPING[default_ytransform], cfg['min_segment_size'],
cfg['name'])
if "models" not in cfg:
cfg["models"] = {}
for name, m in cfg['models'].items():
m['model_expression'] = m.get(
'model_expression', default_model_expr)
m['ytransform'] = m.get('ytransform', default_ytransform)
m['fit_filters'] = None
m['predict_filters'] = None
reg = RegressionModel.from_yaml(yamlio.convert_to_yaml(m, None))
seg._group.add_model(reg)
logger.debug(
'loaded segmented regression model {} from yaml'.format(seg.name))
return seg | [
"def",
"from_yaml",
"(",
"cls",
",",
"yaml_str",
"=",
"None",
",",
"str_or_buffer",
"=",
"None",
")",
":",
"cfg",
"=",
"yamlio",
".",
"yaml_to_dict",
"(",
"yaml_str",
",",
"str_or_buffer",
")",
"default_model_expr",
"=",
"cfg",
"[",
"'default_config'",
"]",
"[",
"'model_expression'",
"]",
"default_ytransform",
"=",
"cfg",
"[",
"'default_config'",
"]",
"[",
"'ytransform'",
"]",
"seg",
"=",
"cls",
"(",
"cfg",
"[",
"'segmentation_col'",
"]",
",",
"cfg",
"[",
"'fit_filters'",
"]",
",",
"cfg",
"[",
"'predict_filters'",
"]",
",",
"default_model_expr",
",",
"YTRANSFORM_MAPPING",
"[",
"default_ytransform",
"]",
",",
"cfg",
"[",
"'min_segment_size'",
"]",
",",
"cfg",
"[",
"'name'",
"]",
")",
"if",
"\"models\"",
"not",
"in",
"cfg",
":",
"cfg",
"[",
"\"models\"",
"]",
"=",
"{",
"}",
"for",
"name",
",",
"m",
"in",
"cfg",
"[",
"'models'",
"]",
".",
"items",
"(",
")",
":",
"m",
"[",
"'model_expression'",
"]",
"=",
"m",
".",
"get",
"(",
"'model_expression'",
",",
"default_model_expr",
")",
"m",
"[",
"'ytransform'",
"]",
"=",
"m",
".",
"get",
"(",
"'ytransform'",
",",
"default_ytransform",
")",
"m",
"[",
"'fit_filters'",
"]",
"=",
"None",
"m",
"[",
"'predict_filters'",
"]",
"=",
"None",
"reg",
"=",
"RegressionModel",
".",
"from_yaml",
"(",
"yamlio",
".",
"convert_to_yaml",
"(",
"m",
",",
"None",
")",
")",
"seg",
".",
"_group",
".",
"add_model",
"(",
"reg",
")",
"logger",
".",
"debug",
"(",
"'loaded segmented regression model {} from yaml'",
".",
"format",
"(",
"seg",
".",
"name",
")",
")",
"return",
"seg"
] | Create a SegmentedRegressionModel instance from a saved YAML
configuration. Arguments are mutally exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
SegmentedRegressionModel | [
"Create",
"a",
"SegmentedRegressionModel",
"instance",
"from",
"a",
"saved",
"YAML",
"configuration",
".",
"Arguments",
"are",
"mutally",
"exclusive",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L726-L768 | train | 235,864 |
UDST/urbansim | urbansim/models/regression.py | SegmentedRegressionModel.add_segment | def add_segment(self, name, model_expression=None, ytransform='default'):
"""
Add a new segment with its own model expression and ytransform.
Parameters
----------
name :
Segment name. Must match a segment in the groupby of the data.
model_expression : str or dict, optional
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
If not given the default model will be used, which must not be
None.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
If not given the default ytransform will be used.
"""
if not model_expression:
if self.default_model_expr is None:
raise ValueError(
'No default model available, '
'you must supply a model experssion.')
model_expression = self.default_model_expr
if ytransform == 'default':
ytransform = self.default_ytransform
# no fit or predict filters, we'll take care of that this side.
self._group.add_model_from_params(
name, None, None, model_expression, ytransform)
logger.debug('added segment {} to model {}'.format(name, self.name)) | python | def add_segment(self, name, model_expression=None, ytransform='default'):
"""
Add a new segment with its own model expression and ytransform.
Parameters
----------
name :
Segment name. Must match a segment in the groupby of the data.
model_expression : str or dict, optional
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
If not given the default model will be used, which must not be
None.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
If not given the default ytransform will be used.
"""
if not model_expression:
if self.default_model_expr is None:
raise ValueError(
'No default model available, '
'you must supply a model experssion.')
model_expression = self.default_model_expr
if ytransform == 'default':
ytransform = self.default_ytransform
# no fit or predict filters, we'll take care of that this side.
self._group.add_model_from_params(
name, None, None, model_expression, ytransform)
logger.debug('added segment {} to model {}'.format(name, self.name)) | [
"def",
"add_segment",
"(",
"self",
",",
"name",
",",
"model_expression",
"=",
"None",
",",
"ytransform",
"=",
"'default'",
")",
":",
"if",
"not",
"model_expression",
":",
"if",
"self",
".",
"default_model_expr",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'No default model available, '",
"'you must supply a model experssion.'",
")",
"model_expression",
"=",
"self",
".",
"default_model_expr",
"if",
"ytransform",
"==",
"'default'",
":",
"ytransform",
"=",
"self",
".",
"default_ytransform",
"# no fit or predict filters, we'll take care of that this side.",
"self",
".",
"_group",
".",
"add_model_from_params",
"(",
"name",
",",
"None",
",",
"None",
",",
"model_expression",
",",
"ytransform",
")",
"logger",
".",
"debug",
"(",
"'added segment {} to model {}'",
".",
"format",
"(",
"name",
",",
"self",
".",
"name",
")",
")"
] | Add a new segment with its own model expression and ytransform.
Parameters
----------
name :
Segment name. Must match a segment in the groupby of the data.
model_expression : str or dict, optional
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
If not given the default model will be used, which must not be
None.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
If not given the default ytransform will be used. | [
"Add",
"a",
"new",
"segment",
"with",
"its",
"own",
"model",
"expression",
"and",
"ytransform",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L770-L806 | train | 235,865 |
UDST/urbansim | urbansim/models/regression.py | SegmentedRegressionModel.fit | def fit(self, data, debug=False):
"""
Fit each segment. Segments that have not already been explicitly
added will be automatically added with default model and ytransform.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true will pass debug to the fit method of each model.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names.
"""
data = util.apply_filter_query(data, self.fit_filters)
unique = data[self.segmentation_col].unique()
value_counts = data[self.segmentation_col].value_counts()
# Remove any existing segments that may no longer have counterparts
# in the data. This can happen when loading a saved model and then
# calling this method with data that no longer has segments that
# were there the last time this was called.
gone = set(self._group.models) - set(unique)
for g in gone:
del self._group.models[g]
for x in unique:
if x not in self._group.models and \
value_counts[x] > self.min_segment_size:
self.add_segment(x)
with log_start_finish(
'fitting models in segmented model {}'.format(self.name),
logger):
return self._group.fit(data, debug=debug) | python | def fit(self, data, debug=False):
"""
Fit each segment. Segments that have not already been explicitly
added will be automatically added with default model and ytransform.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true will pass debug to the fit method of each model.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names.
"""
data = util.apply_filter_query(data, self.fit_filters)
unique = data[self.segmentation_col].unique()
value_counts = data[self.segmentation_col].value_counts()
# Remove any existing segments that may no longer have counterparts
# in the data. This can happen when loading a saved model and then
# calling this method with data that no longer has segments that
# were there the last time this was called.
gone = set(self._group.models) - set(unique)
for g in gone:
del self._group.models[g]
for x in unique:
if x not in self._group.models and \
value_counts[x] > self.min_segment_size:
self.add_segment(x)
with log_start_finish(
'fitting models in segmented model {}'.format(self.name),
logger):
return self._group.fit(data, debug=debug) | [
"def",
"fit",
"(",
"self",
",",
"data",
",",
"debug",
"=",
"False",
")",
":",
"data",
"=",
"util",
".",
"apply_filter_query",
"(",
"data",
",",
"self",
".",
"fit_filters",
")",
"unique",
"=",
"data",
"[",
"self",
".",
"segmentation_col",
"]",
".",
"unique",
"(",
")",
"value_counts",
"=",
"data",
"[",
"self",
".",
"segmentation_col",
"]",
".",
"value_counts",
"(",
")",
"# Remove any existing segments that may no longer have counterparts",
"# in the data. This can happen when loading a saved model and then",
"# calling this method with data that no longer has segments that",
"# were there the last time this was called.",
"gone",
"=",
"set",
"(",
"self",
".",
"_group",
".",
"models",
")",
"-",
"set",
"(",
"unique",
")",
"for",
"g",
"in",
"gone",
":",
"del",
"self",
".",
"_group",
".",
"models",
"[",
"g",
"]",
"for",
"x",
"in",
"unique",
":",
"if",
"x",
"not",
"in",
"self",
".",
"_group",
".",
"models",
"and",
"value_counts",
"[",
"x",
"]",
">",
"self",
".",
"min_segment_size",
":",
"self",
".",
"add_segment",
"(",
"x",
")",
"with",
"log_start_finish",
"(",
"'fitting models in segmented model {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
",",
"logger",
")",
":",
"return",
"self",
".",
"_group",
".",
"fit",
"(",
"data",
",",
"debug",
"=",
"debug",
")"
] | Fit each segment. Segments that have not already been explicitly
added will be automatically added with default model and ytransform.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true will pass debug to the fit method of each model.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names. | [
"Fit",
"each",
"segment",
".",
"Segments",
"that",
"have",
"not",
"already",
"been",
"explicitly",
"added",
"will",
"be",
"automatically",
"added",
"with",
"default",
"model",
"and",
"ytransform",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L808-L847 | train | 235,866 |
UDST/urbansim | urbansim/models/regression.py | SegmentedRegressionModel.columns_used | def columns_used(self):
"""
Returns all the columns used across all models in the group
for filtering and in the model expression.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.fit_filters),
util.columns_in_filters(self.predict_filters),
util.columns_in_formula(self.default_model_expr),
self._group.columns_used(),
[self.segmentation_col]))) | python | def columns_used(self):
"""
Returns all the columns used across all models in the group
for filtering and in the model expression.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.fit_filters),
util.columns_in_filters(self.predict_filters),
util.columns_in_formula(self.default_model_expr),
self._group.columns_used(),
[self.segmentation_col]))) | [
"def",
"columns_used",
"(",
"self",
")",
":",
"return",
"list",
"(",
"tz",
".",
"unique",
"(",
"tz",
".",
"concatv",
"(",
"util",
".",
"columns_in_filters",
"(",
"self",
".",
"fit_filters",
")",
",",
"util",
".",
"columns_in_filters",
"(",
"self",
".",
"predict_filters",
")",
",",
"util",
".",
"columns_in_formula",
"(",
"self",
".",
"default_model_expr",
")",
",",
"self",
".",
"_group",
".",
"columns_used",
"(",
")",
",",
"[",
"self",
".",
"segmentation_col",
"]",
")",
")",
")"
] | Returns all the columns used across all models in the group
for filtering and in the model expression. | [
"Returns",
"all",
"the",
"columns",
"used",
"across",
"all",
"models",
"in",
"the",
"group",
"for",
"filtering",
"and",
"in",
"the",
"model",
"expression",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L956-L967 | train | 235,867 |
UDST/urbansim | urbansim/models/relocation.py | find_movers | def find_movers(choosers, rates, rate_column):
"""
Returns an array of the indexes of the `choosers` that are slated
to move.
Parameters
----------
choosers : pandas.DataFrame
Table of agents from which to find movers.
rates : pandas.DataFrame
Table of relocation rates. Index is unused.
Other columns describe filters on the `choosers`
table so that different segments can have different relocation
rates. Columns that ends with '_max' will be used to create
a "less than" filters, columns that end with '_min' will be
used to create "greater than or equal to" filters.
A column with no suffix will be used to make an 'equal to' filter.
An example `rates` structure:
age_of_head_max age_of_head_min
nan 65
65 40
In this example the `choosers` table would need to have an
'age_of_head' column on which to filter.
nan should be used to flag filters that do not apply
in a given row.
rate_column : object
Name of column in `rates` table that has relocation rates.
Returns
-------
movers : pandas.Index
Suitable for indexing `choosers` by index.
"""
logger.debug('start: find movers for relocation')
relocation_rates = pd.Series(
np.zeros(len(choosers)), index=choosers.index)
for _, row in rates.iterrows():
indexes = util.filter_table(choosers, row, ignore={rate_column}).index
relocation_rates.loc[indexes] = row[rate_column]
movers = relocation_rates.index[
relocation_rates > np.random.random(len(choosers))]
logger.debug('picked {} movers for relocation'.format(len(movers)))
logger.debug('finish: find movers for relocation')
return movers | python | def find_movers(choosers, rates, rate_column):
"""
Returns an array of the indexes of the `choosers` that are slated
to move.
Parameters
----------
choosers : pandas.DataFrame
Table of agents from which to find movers.
rates : pandas.DataFrame
Table of relocation rates. Index is unused.
Other columns describe filters on the `choosers`
table so that different segments can have different relocation
rates. Columns that ends with '_max' will be used to create
a "less than" filters, columns that end with '_min' will be
used to create "greater than or equal to" filters.
A column with no suffix will be used to make an 'equal to' filter.
An example `rates` structure:
age_of_head_max age_of_head_min
nan 65
65 40
In this example the `choosers` table would need to have an
'age_of_head' column on which to filter.
nan should be used to flag filters that do not apply
in a given row.
rate_column : object
Name of column in `rates` table that has relocation rates.
Returns
-------
movers : pandas.Index
Suitable for indexing `choosers` by index.
"""
logger.debug('start: find movers for relocation')
relocation_rates = pd.Series(
np.zeros(len(choosers)), index=choosers.index)
for _, row in rates.iterrows():
indexes = util.filter_table(choosers, row, ignore={rate_column}).index
relocation_rates.loc[indexes] = row[rate_column]
movers = relocation_rates.index[
relocation_rates > np.random.random(len(choosers))]
logger.debug('picked {} movers for relocation'.format(len(movers)))
logger.debug('finish: find movers for relocation')
return movers | [
"def",
"find_movers",
"(",
"choosers",
",",
"rates",
",",
"rate_column",
")",
":",
"logger",
".",
"debug",
"(",
"'start: find movers for relocation'",
")",
"relocation_rates",
"=",
"pd",
".",
"Series",
"(",
"np",
".",
"zeros",
"(",
"len",
"(",
"choosers",
")",
")",
",",
"index",
"=",
"choosers",
".",
"index",
")",
"for",
"_",
",",
"row",
"in",
"rates",
".",
"iterrows",
"(",
")",
":",
"indexes",
"=",
"util",
".",
"filter_table",
"(",
"choosers",
",",
"row",
",",
"ignore",
"=",
"{",
"rate_column",
"}",
")",
".",
"index",
"relocation_rates",
".",
"loc",
"[",
"indexes",
"]",
"=",
"row",
"[",
"rate_column",
"]",
"movers",
"=",
"relocation_rates",
".",
"index",
"[",
"relocation_rates",
">",
"np",
".",
"random",
".",
"random",
"(",
"len",
"(",
"choosers",
")",
")",
"]",
"logger",
".",
"debug",
"(",
"'picked {} movers for relocation'",
".",
"format",
"(",
"len",
"(",
"movers",
")",
")",
")",
"logger",
".",
"debug",
"(",
"'finish: find movers for relocation'",
")",
"return",
"movers"
] | Returns an array of the indexes of the `choosers` that are slated
to move.
Parameters
----------
choosers : pandas.DataFrame
Table of agents from which to find movers.
rates : pandas.DataFrame
Table of relocation rates. Index is unused.
Other columns describe filters on the `choosers`
table so that different segments can have different relocation
rates. Columns that ends with '_max' will be used to create
a "less than" filters, columns that end with '_min' will be
used to create "greater than or equal to" filters.
A column with no suffix will be used to make an 'equal to' filter.
An example `rates` structure:
age_of_head_max age_of_head_min
nan 65
65 40
In this example the `choosers` table would need to have an
'age_of_head' column on which to filter.
nan should be used to flag filters that do not apply
in a given row.
rate_column : object
Name of column in `rates` table that has relocation rates.
Returns
-------
movers : pandas.Index
Suitable for indexing `choosers` by index. | [
"Returns",
"an",
"array",
"of",
"the",
"indexes",
"of",
"the",
"choosers",
"that",
"are",
"slated",
"to",
"move",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/relocation.py#L16-L67 | train | 235,868 |
UDST/urbansim | urbansim/models/supplydemand.py | _calculate_adjustment | def _calculate_adjustment(
lcm, choosers, alternatives, alt_segmenter,
clip_change_low, clip_change_high, multiplier_func=None):
"""
Calculate adjustments to prices to compensate for
supply and demand effects.
Parameters
----------
lcm : LocationChoiceModel
Used to calculate the probability of agents choosing among
alternatives. Must be fully configured and fitted.
choosers : pandas.DataFrame
alternatives : pandas.DataFrame
alt_segmenter : pandas.Series
Will be used to segment alternatives and probabilities to do
comparisons of supply and demand by submarket.
clip_change_low : float
The minimum amount by which to multiply prices each iteration.
clip_change_high : float
The maximum amount by which to multiply prices each iteration.
multiplier_func : function (returns Series, boolean)
A function which takes separate demand and supply Series
and returns a tuple where the first item is a Series with the
ratio of new price to old price (all indexes should be the same) -
by default the ratio of demand to supply is the ratio of the new
price to the old price. The second return value is a
boolean which when True tells this module to stop looping (that
convergence has been satisfied)
Returns
-------
alts_muliplier : pandas.Series
Same index as `alternatives`, values clipped to `clip_change_low`
and `clip_change_high`.
submarkets_multiplier : pandas.Series
Index is unique values from `alt_segmenter`, values are the ratio
of demand / supply for each segment in `alt_segmenter`.
finished : boolean
boolean indicator that this adjustment should be considered the
final adjustment (if True). If false, the iterative algorithm
should continue.
"""
logger.debug('start: calculate supply and demand price adjustment ratio')
# probabilities of agents choosing * number of agents = demand
demand = lcm.summed_probabilities(choosers, alternatives)
# group by submarket
demand = demand.groupby(alt_segmenter.loc[demand.index].values).sum()
# number of alternatives
supply = alt_segmenter.value_counts()
if multiplier_func is not None:
multiplier, finished = multiplier_func(demand, supply)
else:
multiplier, finished = (demand / supply), False
multiplier = multiplier.clip(clip_change_low, clip_change_high)
# broadcast multiplier back to alternatives index
alts_muliplier = multiplier.loc[alt_segmenter]
alts_muliplier.index = alt_segmenter.index
logger.debug(
('finish: calculate supply and demand price adjustment multiplier '
'with mean multiplier {}').format(multiplier.mean()))
return alts_muliplier, multiplier, finished | python | def _calculate_adjustment(
lcm, choosers, alternatives, alt_segmenter,
clip_change_low, clip_change_high, multiplier_func=None):
"""
Calculate adjustments to prices to compensate for
supply and demand effects.
Parameters
----------
lcm : LocationChoiceModel
Used to calculate the probability of agents choosing among
alternatives. Must be fully configured and fitted.
choosers : pandas.DataFrame
alternatives : pandas.DataFrame
alt_segmenter : pandas.Series
Will be used to segment alternatives and probabilities to do
comparisons of supply and demand by submarket.
clip_change_low : float
The minimum amount by which to multiply prices each iteration.
clip_change_high : float
The maximum amount by which to multiply prices each iteration.
multiplier_func : function (returns Series, boolean)
A function which takes separate demand and supply Series
and returns a tuple where the first item is a Series with the
ratio of new price to old price (all indexes should be the same) -
by default the ratio of demand to supply is the ratio of the new
price to the old price. The second return value is a
boolean which when True tells this module to stop looping (that
convergence has been satisfied)
Returns
-------
alts_muliplier : pandas.Series
Same index as `alternatives`, values clipped to `clip_change_low`
and `clip_change_high`.
submarkets_multiplier : pandas.Series
Index is unique values from `alt_segmenter`, values are the ratio
of demand / supply for each segment in `alt_segmenter`.
finished : boolean
boolean indicator that this adjustment should be considered the
final adjustment (if True). If false, the iterative algorithm
should continue.
"""
logger.debug('start: calculate supply and demand price adjustment ratio')
# probabilities of agents choosing * number of agents = demand
demand = lcm.summed_probabilities(choosers, alternatives)
# group by submarket
demand = demand.groupby(alt_segmenter.loc[demand.index].values).sum()
# number of alternatives
supply = alt_segmenter.value_counts()
if multiplier_func is not None:
multiplier, finished = multiplier_func(demand, supply)
else:
multiplier, finished = (demand / supply), False
multiplier = multiplier.clip(clip_change_low, clip_change_high)
# broadcast multiplier back to alternatives index
alts_muliplier = multiplier.loc[alt_segmenter]
alts_muliplier.index = alt_segmenter.index
logger.debug(
('finish: calculate supply and demand price adjustment multiplier '
'with mean multiplier {}').format(multiplier.mean()))
return alts_muliplier, multiplier, finished | [
"def",
"_calculate_adjustment",
"(",
"lcm",
",",
"choosers",
",",
"alternatives",
",",
"alt_segmenter",
",",
"clip_change_low",
",",
"clip_change_high",
",",
"multiplier_func",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'start: calculate supply and demand price adjustment ratio'",
")",
"# probabilities of agents choosing * number of agents = demand",
"demand",
"=",
"lcm",
".",
"summed_probabilities",
"(",
"choosers",
",",
"alternatives",
")",
"# group by submarket",
"demand",
"=",
"demand",
".",
"groupby",
"(",
"alt_segmenter",
".",
"loc",
"[",
"demand",
".",
"index",
"]",
".",
"values",
")",
".",
"sum",
"(",
")",
"# number of alternatives",
"supply",
"=",
"alt_segmenter",
".",
"value_counts",
"(",
")",
"if",
"multiplier_func",
"is",
"not",
"None",
":",
"multiplier",
",",
"finished",
"=",
"multiplier_func",
"(",
"demand",
",",
"supply",
")",
"else",
":",
"multiplier",
",",
"finished",
"=",
"(",
"demand",
"/",
"supply",
")",
",",
"False",
"multiplier",
"=",
"multiplier",
".",
"clip",
"(",
"clip_change_low",
",",
"clip_change_high",
")",
"# broadcast multiplier back to alternatives index",
"alts_muliplier",
"=",
"multiplier",
".",
"loc",
"[",
"alt_segmenter",
"]",
"alts_muliplier",
".",
"index",
"=",
"alt_segmenter",
".",
"index",
"logger",
".",
"debug",
"(",
"(",
"'finish: calculate supply and demand price adjustment multiplier '",
"'with mean multiplier {}'",
")",
".",
"format",
"(",
"multiplier",
".",
"mean",
"(",
")",
")",
")",
"return",
"alts_muliplier",
",",
"multiplier",
",",
"finished"
] | Calculate adjustments to prices to compensate for
supply and demand effects.
Parameters
----------
lcm : LocationChoiceModel
Used to calculate the probability of agents choosing among
alternatives. Must be fully configured and fitted.
choosers : pandas.DataFrame
alternatives : pandas.DataFrame
alt_segmenter : pandas.Series
Will be used to segment alternatives and probabilities to do
comparisons of supply and demand by submarket.
clip_change_low : float
The minimum amount by which to multiply prices each iteration.
clip_change_high : float
The maximum amount by which to multiply prices each iteration.
multiplier_func : function (returns Series, boolean)
A function which takes separate demand and supply Series
and returns a tuple where the first item is a Series with the
ratio of new price to old price (all indexes should be the same) -
by default the ratio of demand to supply is the ratio of the new
price to the old price. The second return value is a
boolean which when True tells this module to stop looping (that
convergence has been satisfied)
Returns
-------
alts_muliplier : pandas.Series
Same index as `alternatives`, values clipped to `clip_change_low`
and `clip_change_high`.
submarkets_multiplier : pandas.Series
Index is unique values from `alt_segmenter`, values are the ratio
of demand / supply for each segment in `alt_segmenter`.
finished : boolean
boolean indicator that this adjustment should be considered the
final adjustment (if True). If false, the iterative algorithm
should continue. | [
"Calculate",
"adjustments",
"to",
"prices",
"to",
"compensate",
"for",
"supply",
"and",
"demand",
"effects",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/supplydemand.py#L15-L81 | train | 235,869 |
UDST/urbansim | urbansim/models/supplydemand.py | supply_and_demand | def supply_and_demand(
lcm, choosers, alternatives, alt_segmenter, price_col,
base_multiplier=None, clip_change_low=0.75, clip_change_high=1.25,
iterations=5, multiplier_func=None):
"""
Adjust real estate prices to compensate for supply and demand effects.
Parameters
----------
lcm : LocationChoiceModel
Used to calculate the probability of agents choosing among
alternatives. Must be fully configured and fitted.
choosers : pandas.DataFrame
alternatives : pandas.DataFrame
alt_segmenter : str, array, or pandas.Series
Will be used to segment alternatives and probabilities to do
comparisons of supply and demand by submarket.
If a string, it is expected to be the name of a column
in `alternatives`. If a Series it should have the same index
as `alternatives`.
price_col : str
The name of the column in `alternatives` that corresponds to price.
This column is what is adjusted by this model.
base_multiplier : pandas.Series, optional
A series describing a starting multiplier for submarket prices.
Index should be submarket IDs.
clip_change_low : float, optional
The minimum amount by which to multiply prices each iteration.
clip_change_high : float, optional
The maximum amount by which to multiply prices each iteration.
iterations : int, optional
Number of times to update prices based on supply/demand comparisons.
multiplier_func : function (returns Series, boolean)
A function which takes separate demand and supply Series
and returns a tuple where the first item is a Series with the
ratio of new price to old price (all indexes should be the same) -
by default the ratio of demand to supply is the ratio of the new
price to the old price. The second return value is a
boolean which when True tells this module to stop looping (that
convergence has been satisfied)
Returns
-------
new_prices : pandas.Series
Equivalent of the `price_col` in `alternatives`.
submarkets_ratios : pandas.Series
Price adjustment ratio for each submarket. If `base_multiplier` is
given this will be a cummulative multiplier including the
`base_multiplier` and the multipliers calculated for this year.
"""
logger.debug('start: calculating supply and demand price adjustment')
# copy alternatives so we don't modify the user's original
alternatives = alternatives.copy()
# if alt_segmenter is a string, get the actual column for segmenting demand
if isinstance(alt_segmenter, str):
alt_segmenter = alternatives[alt_segmenter]
elif isinstance(alt_segmenter, np.array):
alt_segmenter = pd.Series(alt_segmenter, index=alternatives.index)
choosers, alternatives = lcm.apply_predict_filters(choosers, alternatives)
alt_segmenter = alt_segmenter.loc[alternatives.index]
# check base ratio and apply it to prices if given
if base_multiplier is not None:
bm = base_multiplier.loc[alt_segmenter]
bm.index = alt_segmenter.index
alternatives[price_col] = alternatives[price_col] * bm
base_multiplier = base_multiplier.copy()
for _ in range(iterations):
alts_muliplier, submarkets_multiplier, finished = _calculate_adjustment(
lcm, choosers, alternatives, alt_segmenter,
clip_change_low, clip_change_high, multiplier_func=multiplier_func)
alternatives[price_col] = alternatives[price_col] * alts_muliplier
# might need to initialize this for holding cumulative multiplier
if base_multiplier is None:
base_multiplier = pd.Series(
np.ones(len(submarkets_multiplier)),
index=submarkets_multiplier.index)
base_multiplier *= submarkets_multiplier
if finished:
break
logger.debug('finish: calculating supply and demand price adjustment')
return alternatives[price_col], base_multiplier | python | def supply_and_demand(
lcm, choosers, alternatives, alt_segmenter, price_col,
base_multiplier=None, clip_change_low=0.75, clip_change_high=1.25,
iterations=5, multiplier_func=None):
"""
Adjust real estate prices to compensate for supply and demand effects.
Parameters
----------
lcm : LocationChoiceModel
Used to calculate the probability of agents choosing among
alternatives. Must be fully configured and fitted.
choosers : pandas.DataFrame
alternatives : pandas.DataFrame
alt_segmenter : str, array, or pandas.Series
Will be used to segment alternatives and probabilities to do
comparisons of supply and demand by submarket.
If a string, it is expected to be the name of a column
in `alternatives`. If a Series it should have the same index
as `alternatives`.
price_col : str
The name of the column in `alternatives` that corresponds to price.
This column is what is adjusted by this model.
base_multiplier : pandas.Series, optional
A series describing a starting multiplier for submarket prices.
Index should be submarket IDs.
clip_change_low : float, optional
The minimum amount by which to multiply prices each iteration.
clip_change_high : float, optional
The maximum amount by which to multiply prices each iteration.
iterations : int, optional
Number of times to update prices based on supply/demand comparisons.
multiplier_func : function (returns Series, boolean)
A function which takes separate demand and supply Series
and returns a tuple where the first item is a Series with the
ratio of new price to old price (all indexes should be the same) -
by default the ratio of demand to supply is the ratio of the new
price to the old price. The second return value is a
boolean which when True tells this module to stop looping (that
convergence has been satisfied)
Returns
-------
new_prices : pandas.Series
Equivalent of the `price_col` in `alternatives`.
submarkets_ratios : pandas.Series
Price adjustment ratio for each submarket. If `base_multiplier` is
given this will be a cummulative multiplier including the
`base_multiplier` and the multipliers calculated for this year.
"""
logger.debug('start: calculating supply and demand price adjustment')
# copy alternatives so we don't modify the user's original
alternatives = alternatives.copy()
# if alt_segmenter is a string, get the actual column for segmenting demand
if isinstance(alt_segmenter, str):
alt_segmenter = alternatives[alt_segmenter]
elif isinstance(alt_segmenter, np.array):
alt_segmenter = pd.Series(alt_segmenter, index=alternatives.index)
choosers, alternatives = lcm.apply_predict_filters(choosers, alternatives)
alt_segmenter = alt_segmenter.loc[alternatives.index]
# check base ratio and apply it to prices if given
if base_multiplier is not None:
bm = base_multiplier.loc[alt_segmenter]
bm.index = alt_segmenter.index
alternatives[price_col] = alternatives[price_col] * bm
base_multiplier = base_multiplier.copy()
for _ in range(iterations):
alts_muliplier, submarkets_multiplier, finished = _calculate_adjustment(
lcm, choosers, alternatives, alt_segmenter,
clip_change_low, clip_change_high, multiplier_func=multiplier_func)
alternatives[price_col] = alternatives[price_col] * alts_muliplier
# might need to initialize this for holding cumulative multiplier
if base_multiplier is None:
base_multiplier = pd.Series(
np.ones(len(submarkets_multiplier)),
index=submarkets_multiplier.index)
base_multiplier *= submarkets_multiplier
if finished:
break
logger.debug('finish: calculating supply and demand price adjustment')
return alternatives[price_col], base_multiplier | [
"def",
"supply_and_demand",
"(",
"lcm",
",",
"choosers",
",",
"alternatives",
",",
"alt_segmenter",
",",
"price_col",
",",
"base_multiplier",
"=",
"None",
",",
"clip_change_low",
"=",
"0.75",
",",
"clip_change_high",
"=",
"1.25",
",",
"iterations",
"=",
"5",
",",
"multiplier_func",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'start: calculating supply and demand price adjustment'",
")",
"# copy alternatives so we don't modify the user's original",
"alternatives",
"=",
"alternatives",
".",
"copy",
"(",
")",
"# if alt_segmenter is a string, get the actual column for segmenting demand",
"if",
"isinstance",
"(",
"alt_segmenter",
",",
"str",
")",
":",
"alt_segmenter",
"=",
"alternatives",
"[",
"alt_segmenter",
"]",
"elif",
"isinstance",
"(",
"alt_segmenter",
",",
"np",
".",
"array",
")",
":",
"alt_segmenter",
"=",
"pd",
".",
"Series",
"(",
"alt_segmenter",
",",
"index",
"=",
"alternatives",
".",
"index",
")",
"choosers",
",",
"alternatives",
"=",
"lcm",
".",
"apply_predict_filters",
"(",
"choosers",
",",
"alternatives",
")",
"alt_segmenter",
"=",
"alt_segmenter",
".",
"loc",
"[",
"alternatives",
".",
"index",
"]",
"# check base ratio and apply it to prices if given",
"if",
"base_multiplier",
"is",
"not",
"None",
":",
"bm",
"=",
"base_multiplier",
".",
"loc",
"[",
"alt_segmenter",
"]",
"bm",
".",
"index",
"=",
"alt_segmenter",
".",
"index",
"alternatives",
"[",
"price_col",
"]",
"=",
"alternatives",
"[",
"price_col",
"]",
"*",
"bm",
"base_multiplier",
"=",
"base_multiplier",
".",
"copy",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"iterations",
")",
":",
"alts_muliplier",
",",
"submarkets_multiplier",
",",
"finished",
"=",
"_calculate_adjustment",
"(",
"lcm",
",",
"choosers",
",",
"alternatives",
",",
"alt_segmenter",
",",
"clip_change_low",
",",
"clip_change_high",
",",
"multiplier_func",
"=",
"multiplier_func",
")",
"alternatives",
"[",
"price_col",
"]",
"=",
"alternatives",
"[",
"price_col",
"]",
"*",
"alts_muliplier",
"# might need to initialize this for holding cumulative multiplier",
"if",
"base_multiplier",
"is",
"None",
":",
"base_multiplier",
"=",
"pd",
".",
"Series",
"(",
"np",
".",
"ones",
"(",
"len",
"(",
"submarkets_multiplier",
")",
")",
",",
"index",
"=",
"submarkets_multiplier",
".",
"index",
")",
"base_multiplier",
"*=",
"submarkets_multiplier",
"if",
"finished",
":",
"break",
"logger",
".",
"debug",
"(",
"'finish: calculating supply and demand price adjustment'",
")",
"return",
"alternatives",
"[",
"price_col",
"]",
",",
"base_multiplier"
] | Adjust real estate prices to compensate for supply and demand effects.
Parameters
----------
lcm : LocationChoiceModel
Used to calculate the probability of agents choosing among
alternatives. Must be fully configured and fitted.
choosers : pandas.DataFrame
alternatives : pandas.DataFrame
alt_segmenter : str, array, or pandas.Series
Will be used to segment alternatives and probabilities to do
comparisons of supply and demand by submarket.
If a string, it is expected to be the name of a column
in `alternatives`. If a Series it should have the same index
as `alternatives`.
price_col : str
The name of the column in `alternatives` that corresponds to price.
This column is what is adjusted by this model.
base_multiplier : pandas.Series, optional
A series describing a starting multiplier for submarket prices.
Index should be submarket IDs.
clip_change_low : float, optional
The minimum amount by which to multiply prices each iteration.
clip_change_high : float, optional
The maximum amount by which to multiply prices each iteration.
iterations : int, optional
Number of times to update prices based on supply/demand comparisons.
multiplier_func : function (returns Series, boolean)
A function which takes separate demand and supply Series
and returns a tuple where the first item is a Series with the
ratio of new price to old price (all indexes should be the same) -
by default the ratio of demand to supply is the ratio of the new
price to the old price. The second return value is a
boolean which when True tells this module to stop looping (that
convergence has been satisfied)
Returns
-------
new_prices : pandas.Series
Equivalent of the `price_col` in `alternatives`.
submarkets_ratios : pandas.Series
Price adjustment ratio for each submarket. If `base_multiplier` is
given this will be a cummulative multiplier including the
`base_multiplier` and the multipliers calculated for this year. | [
"Adjust",
"real",
"estate",
"prices",
"to",
"compensate",
"for",
"supply",
"and",
"demand",
"effects",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/supplydemand.py#L84-L173 | train | 235,870 |
UDST/urbansim | urbansim/developer/developer.py | Developer._max_form | def _max_form(f, colname):
"""
Assumes dataframe with hierarchical columns with first index equal to the
use and second index equal to the attribute.
e.g. f.columns equal to::
mixedoffice building_cost
building_revenue
building_size
max_profit
max_profit_far
total_cost
industrial building_cost
building_revenue
building_size
max_profit
max_profit_far
total_cost
"""
df = f.stack(level=0)[[colname]].stack().unstack(level=1).reset_index(level=1, drop=True)
return df.idxmax(axis=1) | python | def _max_form(f, colname):
"""
Assumes dataframe with hierarchical columns with first index equal to the
use and second index equal to the attribute.
e.g. f.columns equal to::
mixedoffice building_cost
building_revenue
building_size
max_profit
max_profit_far
total_cost
industrial building_cost
building_revenue
building_size
max_profit
max_profit_far
total_cost
"""
df = f.stack(level=0)[[colname]].stack().unstack(level=1).reset_index(level=1, drop=True)
return df.idxmax(axis=1) | [
"def",
"_max_form",
"(",
"f",
",",
"colname",
")",
":",
"df",
"=",
"f",
".",
"stack",
"(",
"level",
"=",
"0",
")",
"[",
"[",
"colname",
"]",
"]",
".",
"stack",
"(",
")",
".",
"unstack",
"(",
"level",
"=",
"1",
")",
".",
"reset_index",
"(",
"level",
"=",
"1",
",",
"drop",
"=",
"True",
")",
"return",
"df",
".",
"idxmax",
"(",
"axis",
"=",
"1",
")"
] | Assumes dataframe with hierarchical columns with first index equal to the
use and second index equal to the attribute.
e.g. f.columns equal to::
mixedoffice building_cost
building_revenue
building_size
max_profit
max_profit_far
total_cost
industrial building_cost
building_revenue
building_size
max_profit
max_profit_far
total_cost | [
"Assumes",
"dataframe",
"with",
"hierarchical",
"columns",
"with",
"first",
"index",
"equal",
"to",
"the",
"use",
"and",
"second",
"index",
"equal",
"to",
"the",
"attribute",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/developer.py#L23-L44 | train | 235,871 |
UDST/urbansim | urbansim/developer/developer.py | Developer.keep_form_with_max_profit | def keep_form_with_max_profit(self, forms=None):
"""
This converts the dataframe, which shows all profitable forms,
to the form with the greatest profit, so that more profitable
forms outcompete less profitable forms.
Parameters
----------
forms: list of strings
List of forms which compete which other. Can leave some out.
Returns
-------
Nothing. Goes from a multi-index to a single index with only the
most profitable form.
"""
f = self.feasibility
if forms is not None:
f = f[forms]
if len(f) > 0:
mu = self._max_form(f, "max_profit")
indexes = [tuple(x) for x in mu.reset_index().values]
else:
indexes = []
df = f.stack(level=0).loc[indexes]
df.index.names = ["parcel_id", "form"]
df = df.reset_index(level=1)
return df | python | def keep_form_with_max_profit(self, forms=None):
"""
This converts the dataframe, which shows all profitable forms,
to the form with the greatest profit, so that more profitable
forms outcompete less profitable forms.
Parameters
----------
forms: list of strings
List of forms which compete which other. Can leave some out.
Returns
-------
Nothing. Goes from a multi-index to a single index with only the
most profitable form.
"""
f = self.feasibility
if forms is not None:
f = f[forms]
if len(f) > 0:
mu = self._max_form(f, "max_profit")
indexes = [tuple(x) for x in mu.reset_index().values]
else:
indexes = []
df = f.stack(level=0).loc[indexes]
df.index.names = ["parcel_id", "form"]
df = df.reset_index(level=1)
return df | [
"def",
"keep_form_with_max_profit",
"(",
"self",
",",
"forms",
"=",
"None",
")",
":",
"f",
"=",
"self",
".",
"feasibility",
"if",
"forms",
"is",
"not",
"None",
":",
"f",
"=",
"f",
"[",
"forms",
"]",
"if",
"len",
"(",
"f",
")",
">",
"0",
":",
"mu",
"=",
"self",
".",
"_max_form",
"(",
"f",
",",
"\"max_profit\"",
")",
"indexes",
"=",
"[",
"tuple",
"(",
"x",
")",
"for",
"x",
"in",
"mu",
".",
"reset_index",
"(",
")",
".",
"values",
"]",
"else",
":",
"indexes",
"=",
"[",
"]",
"df",
"=",
"f",
".",
"stack",
"(",
"level",
"=",
"0",
")",
".",
"loc",
"[",
"indexes",
"]",
"df",
".",
"index",
".",
"names",
"=",
"[",
"\"parcel_id\"",
",",
"\"form\"",
"]",
"df",
"=",
"df",
".",
"reset_index",
"(",
"level",
"=",
"1",
")",
"return",
"df"
] | This converts the dataframe, which shows all profitable forms,
to the form with the greatest profit, so that more profitable
forms outcompete less profitable forms.
Parameters
----------
forms: list of strings
List of forms which compete which other. Can leave some out.
Returns
-------
Nothing. Goes from a multi-index to a single index with only the
most profitable form. | [
"This",
"converts",
"the",
"dataframe",
"which",
"shows",
"all",
"profitable",
"forms",
"to",
"the",
"form",
"with",
"the",
"greatest",
"profit",
"so",
"that",
"more",
"profitable",
"forms",
"outcompete",
"less",
"profitable",
"forms",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/developer.py#L46-L75 | train | 235,872 |
UDST/urbansim | urbansim/developer/developer.py | Developer.compute_units_to_build | def compute_units_to_build(num_agents, num_units, target_vacancy):
"""
Compute number of units to build to match target vacancy.
Parameters
----------
num_agents : int
number of agents that need units in the region
num_units : int
number of units in buildings
target_vacancy : float (0-1.0)
target vacancy rate
Returns
-------
number_of_units : int
the number of units that need to be built
"""
print("Number of agents: {:,}".format(num_agents))
print("Number of agent spaces: {:,}".format(int(num_units)))
assert target_vacancy < 1.0
target_units = int(max(num_agents / (1 - target_vacancy) - num_units, 0))
print("Current vacancy = {:.2f}"
.format(1 - num_agents / float(num_units)))
print("Target vacancy = {:.2f}, target of new units = {:,}"
.format(target_vacancy, target_units))
return target_units | python | def compute_units_to_build(num_agents, num_units, target_vacancy):
"""
Compute number of units to build to match target vacancy.
Parameters
----------
num_agents : int
number of agents that need units in the region
num_units : int
number of units in buildings
target_vacancy : float (0-1.0)
target vacancy rate
Returns
-------
number_of_units : int
the number of units that need to be built
"""
print("Number of agents: {:,}".format(num_agents))
print("Number of agent spaces: {:,}".format(int(num_units)))
assert target_vacancy < 1.0
target_units = int(max(num_agents / (1 - target_vacancy) - num_units, 0))
print("Current vacancy = {:.2f}"
.format(1 - num_agents / float(num_units)))
print("Target vacancy = {:.2f}, target of new units = {:,}"
.format(target_vacancy, target_units))
return target_units | [
"def",
"compute_units_to_build",
"(",
"num_agents",
",",
"num_units",
",",
"target_vacancy",
")",
":",
"print",
"(",
"\"Number of agents: {:,}\"",
".",
"format",
"(",
"num_agents",
")",
")",
"print",
"(",
"\"Number of agent spaces: {:,}\"",
".",
"format",
"(",
"int",
"(",
"num_units",
")",
")",
")",
"assert",
"target_vacancy",
"<",
"1.0",
"target_units",
"=",
"int",
"(",
"max",
"(",
"num_agents",
"/",
"(",
"1",
"-",
"target_vacancy",
")",
"-",
"num_units",
",",
"0",
")",
")",
"print",
"(",
"\"Current vacancy = {:.2f}\"",
".",
"format",
"(",
"1",
"-",
"num_agents",
"/",
"float",
"(",
"num_units",
")",
")",
")",
"print",
"(",
"\"Target vacancy = {:.2f}, target of new units = {:,}\"",
".",
"format",
"(",
"target_vacancy",
",",
"target_units",
")",
")",
"return",
"target_units"
] | Compute number of units to build to match target vacancy.
Parameters
----------
num_agents : int
number of agents that need units in the region
num_units : int
number of units in buildings
target_vacancy : float (0-1.0)
target vacancy rate
Returns
-------
number_of_units : int
the number of units that need to be built | [
"Compute",
"number",
"of",
"units",
"to",
"build",
"to",
"match",
"target",
"vacancy",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/developer.py#L78-L104 | train | 235,873 |
UDST/urbansim | urbansim/developer/developer.py | Developer.pick | def pick(self, form, target_units, parcel_size, ave_unit_size,
current_units, max_parcel_size=200000, min_unit_size=400,
drop_after_build=True, residential=True, bldg_sqft_per_job=400.0,
profit_to_prob_func=None):
"""
Choose the buildings from the list that are feasible to build in
order to match the specified demand.
Parameters
----------
form : string or list
One or more of the building forms from the pro forma specification -
e.g. "residential" or "mixedresidential" - these are configuration
parameters passed previously to the pro forma. If more than one form
is passed the forms compete with each other (based on profitability)
for which one gets built in order to meet demand.
target_units : int
The number of units to build. For non-residential buildings this
should be passed as the number of job spaces that need to be created.
parcel_size : series
The size of the parcels. This was passed to feasibility as well,
but should be passed here as well. Index should be parcel_ids.
ave_unit_size : series
The average residential unit size around each parcel - this is
indexed by parcel, but is usually a disaggregated version of a
zonal or accessibility aggregation.
bldg_sqft_per_job : float (default 400.0)
The average square feet per job for this building form.
min_unit_size : float
Values less than this number in ave_unit_size will be set to this
number. Deals with cases where units are currently not built.
current_units : series
The current number of units on the parcel. Is used to compute the
net number of units produced by the developer model. Many times
the developer model is redeveloping units (demolishing them) and
is trying to meet a total number of net units produced.
max_parcel_size : float
Parcels larger than this size will not be considered for
development - usually large parcels should be specified manually
in a development projects table.
drop_after_build : bool
Whether or not to drop parcels from consideration after they
have been chosen for development. Usually this is true so as
to not develop the same parcel twice.
residential: bool
If creating non-residential buildings set this to false and
developer will fill in job_spaces rather than residential_units
profit_to_prob_func: function
As there are so many ways to turn the development feasibility
into a probability to select it for building, the user may pass
a function which takes the feasibility dataframe and returns
a series of probabilities. If no function is passed, the behavior
of this method will not change
Returns
-------
None if thar are no feasible buildings
new_buildings : dataframe
DataFrame of buildings to add. These buildings are rows from the
DataFrame that is returned from feasibility.
"""
if len(self.feasibility) == 0:
# no feasible buildings, might as well bail
return
if form is None:
df = self.feasibility
elif isinstance(form, list):
df = self.keep_form_with_max_profit(form)
else:
df = self.feasibility[form]
# feasible buildings only for this building type
df = df[df.max_profit_far > 0]
ave_unit_size[ave_unit_size < min_unit_size] = min_unit_size
df["ave_unit_size"] = ave_unit_size
df["parcel_size"] = parcel_size
df['current_units'] = current_units
df = df[df.parcel_size < max_parcel_size]
df['residential_units'] = (df.residential_sqft / df.ave_unit_size).round()
df['job_spaces'] = (df.non_residential_sqft / bldg_sqft_per_job).round()
if residential:
df['net_units'] = df.residential_units - df.current_units
else:
df['net_units'] = df.job_spaces - df.current_units
df = df[df.net_units > 0]
if len(df) == 0:
print("WARNING THERE ARE NO FEASIBLE BUILDING TO CHOOSE FROM")
return
# print "Describe of net units\n", df.net_units.describe()
print("Sum of net units that are profitable: {:,}"
.format(int(df.net_units.sum())))
if profit_to_prob_func:
p = profit_to_prob_func(df)
else:
df['max_profit_per_size'] = df.max_profit / df.parcel_size
p = df.max_profit_per_size.values / df.max_profit_per_size.sum()
if df.net_units.sum() < target_units:
print("WARNING THERE WERE NOT ENOUGH PROFITABLE UNITS TO",
"MATCH DEMAND")
build_idx = df.index.values
elif target_units <= 0:
build_idx = []
else:
# we don't know how many developments we will need, as they differ in net_units.
# If all developments have net_units of 1 than we need target_units of them.
# So we choose the smaller of available developments and target_units.
choices = np.random.choice(df.index.values, size=min(len(df.index), target_units),
replace=False, p=p)
tot_units = df.net_units.loc[choices].values.cumsum()
ind = int(np.searchsorted(tot_units, target_units, side="left")) + 1
build_idx = choices[:ind]
if drop_after_build:
self.feasibility = self.feasibility.drop(build_idx)
new_df = df.loc[build_idx]
new_df.index.name = "parcel_id"
return new_df.reset_index() | python | def pick(self, form, target_units, parcel_size, ave_unit_size,
current_units, max_parcel_size=200000, min_unit_size=400,
drop_after_build=True, residential=True, bldg_sqft_per_job=400.0,
profit_to_prob_func=None):
"""
Choose the buildings from the list that are feasible to build in
order to match the specified demand.
Parameters
----------
form : string or list
One or more of the building forms from the pro forma specification -
e.g. "residential" or "mixedresidential" - these are configuration
parameters passed previously to the pro forma. If more than one form
is passed the forms compete with each other (based on profitability)
for which one gets built in order to meet demand.
target_units : int
The number of units to build. For non-residential buildings this
should be passed as the number of job spaces that need to be created.
parcel_size : series
The size of the parcels. This was passed to feasibility as well,
but should be passed here as well. Index should be parcel_ids.
ave_unit_size : series
The average residential unit size around each parcel - this is
indexed by parcel, but is usually a disaggregated version of a
zonal or accessibility aggregation.
bldg_sqft_per_job : float (default 400.0)
The average square feet per job for this building form.
min_unit_size : float
Values less than this number in ave_unit_size will be set to this
number. Deals with cases where units are currently not built.
current_units : series
The current number of units on the parcel. Is used to compute the
net number of units produced by the developer model. Many times
the developer model is redeveloping units (demolishing them) and
is trying to meet a total number of net units produced.
max_parcel_size : float
Parcels larger than this size will not be considered for
development - usually large parcels should be specified manually
in a development projects table.
drop_after_build : bool
Whether or not to drop parcels from consideration after they
have been chosen for development. Usually this is true so as
to not develop the same parcel twice.
residential: bool
If creating non-residential buildings set this to false and
developer will fill in job_spaces rather than residential_units
profit_to_prob_func: function
As there are so many ways to turn the development feasibility
into a probability to select it for building, the user may pass
a function which takes the feasibility dataframe and returns
a series of probabilities. If no function is passed, the behavior
of this method will not change
Returns
-------
None if thar are no feasible buildings
new_buildings : dataframe
DataFrame of buildings to add. These buildings are rows from the
DataFrame that is returned from feasibility.
"""
if len(self.feasibility) == 0:
# no feasible buildings, might as well bail
return
if form is None:
df = self.feasibility
elif isinstance(form, list):
df = self.keep_form_with_max_profit(form)
else:
df = self.feasibility[form]
# feasible buildings only for this building type
df = df[df.max_profit_far > 0]
ave_unit_size[ave_unit_size < min_unit_size] = min_unit_size
df["ave_unit_size"] = ave_unit_size
df["parcel_size"] = parcel_size
df['current_units'] = current_units
df = df[df.parcel_size < max_parcel_size]
df['residential_units'] = (df.residential_sqft / df.ave_unit_size).round()
df['job_spaces'] = (df.non_residential_sqft / bldg_sqft_per_job).round()
if residential:
df['net_units'] = df.residential_units - df.current_units
else:
df['net_units'] = df.job_spaces - df.current_units
df = df[df.net_units > 0]
if len(df) == 0:
print("WARNING THERE ARE NO FEASIBLE BUILDING TO CHOOSE FROM")
return
# print "Describe of net units\n", df.net_units.describe()
print("Sum of net units that are profitable: {:,}"
.format(int(df.net_units.sum())))
if profit_to_prob_func:
p = profit_to_prob_func(df)
else:
df['max_profit_per_size'] = df.max_profit / df.parcel_size
p = df.max_profit_per_size.values / df.max_profit_per_size.sum()
if df.net_units.sum() < target_units:
print("WARNING THERE WERE NOT ENOUGH PROFITABLE UNITS TO",
"MATCH DEMAND")
build_idx = df.index.values
elif target_units <= 0:
build_idx = []
else:
# we don't know how many developments we will need, as they differ in net_units.
# If all developments have net_units of 1 than we need target_units of them.
# So we choose the smaller of available developments and target_units.
choices = np.random.choice(df.index.values, size=min(len(df.index), target_units),
replace=False, p=p)
tot_units = df.net_units.loc[choices].values.cumsum()
ind = int(np.searchsorted(tot_units, target_units, side="left")) + 1
build_idx = choices[:ind]
if drop_after_build:
self.feasibility = self.feasibility.drop(build_idx)
new_df = df.loc[build_idx]
new_df.index.name = "parcel_id"
return new_df.reset_index() | [
"def",
"pick",
"(",
"self",
",",
"form",
",",
"target_units",
",",
"parcel_size",
",",
"ave_unit_size",
",",
"current_units",
",",
"max_parcel_size",
"=",
"200000",
",",
"min_unit_size",
"=",
"400",
",",
"drop_after_build",
"=",
"True",
",",
"residential",
"=",
"True",
",",
"bldg_sqft_per_job",
"=",
"400.0",
",",
"profit_to_prob_func",
"=",
"None",
")",
":",
"if",
"len",
"(",
"self",
".",
"feasibility",
")",
"==",
"0",
":",
"# no feasible buildings, might as well bail",
"return",
"if",
"form",
"is",
"None",
":",
"df",
"=",
"self",
".",
"feasibility",
"elif",
"isinstance",
"(",
"form",
",",
"list",
")",
":",
"df",
"=",
"self",
".",
"keep_form_with_max_profit",
"(",
"form",
")",
"else",
":",
"df",
"=",
"self",
".",
"feasibility",
"[",
"form",
"]",
"# feasible buildings only for this building type",
"df",
"=",
"df",
"[",
"df",
".",
"max_profit_far",
">",
"0",
"]",
"ave_unit_size",
"[",
"ave_unit_size",
"<",
"min_unit_size",
"]",
"=",
"min_unit_size",
"df",
"[",
"\"ave_unit_size\"",
"]",
"=",
"ave_unit_size",
"df",
"[",
"\"parcel_size\"",
"]",
"=",
"parcel_size",
"df",
"[",
"'current_units'",
"]",
"=",
"current_units",
"df",
"=",
"df",
"[",
"df",
".",
"parcel_size",
"<",
"max_parcel_size",
"]",
"df",
"[",
"'residential_units'",
"]",
"=",
"(",
"df",
".",
"residential_sqft",
"/",
"df",
".",
"ave_unit_size",
")",
".",
"round",
"(",
")",
"df",
"[",
"'job_spaces'",
"]",
"=",
"(",
"df",
".",
"non_residential_sqft",
"/",
"bldg_sqft_per_job",
")",
".",
"round",
"(",
")",
"if",
"residential",
":",
"df",
"[",
"'net_units'",
"]",
"=",
"df",
".",
"residential_units",
"-",
"df",
".",
"current_units",
"else",
":",
"df",
"[",
"'net_units'",
"]",
"=",
"df",
".",
"job_spaces",
"-",
"df",
".",
"current_units",
"df",
"=",
"df",
"[",
"df",
".",
"net_units",
">",
"0",
"]",
"if",
"len",
"(",
"df",
")",
"==",
"0",
":",
"print",
"(",
"\"WARNING THERE ARE NO FEASIBLE BUILDING TO CHOOSE FROM\"",
")",
"return",
"# print \"Describe of net units\\n\", df.net_units.describe()",
"print",
"(",
"\"Sum of net units that are profitable: {:,}\"",
".",
"format",
"(",
"int",
"(",
"df",
".",
"net_units",
".",
"sum",
"(",
")",
")",
")",
")",
"if",
"profit_to_prob_func",
":",
"p",
"=",
"profit_to_prob_func",
"(",
"df",
")",
"else",
":",
"df",
"[",
"'max_profit_per_size'",
"]",
"=",
"df",
".",
"max_profit",
"/",
"df",
".",
"parcel_size",
"p",
"=",
"df",
".",
"max_profit_per_size",
".",
"values",
"/",
"df",
".",
"max_profit_per_size",
".",
"sum",
"(",
")",
"if",
"df",
".",
"net_units",
".",
"sum",
"(",
")",
"<",
"target_units",
":",
"print",
"(",
"\"WARNING THERE WERE NOT ENOUGH PROFITABLE UNITS TO\"",
",",
"\"MATCH DEMAND\"",
")",
"build_idx",
"=",
"df",
".",
"index",
".",
"values",
"elif",
"target_units",
"<=",
"0",
":",
"build_idx",
"=",
"[",
"]",
"else",
":",
"# we don't know how many developments we will need, as they differ in net_units.",
"# If all developments have net_units of 1 than we need target_units of them.",
"# So we choose the smaller of available developments and target_units.",
"choices",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"df",
".",
"index",
".",
"values",
",",
"size",
"=",
"min",
"(",
"len",
"(",
"df",
".",
"index",
")",
",",
"target_units",
")",
",",
"replace",
"=",
"False",
",",
"p",
"=",
"p",
")",
"tot_units",
"=",
"df",
".",
"net_units",
".",
"loc",
"[",
"choices",
"]",
".",
"values",
".",
"cumsum",
"(",
")",
"ind",
"=",
"int",
"(",
"np",
".",
"searchsorted",
"(",
"tot_units",
",",
"target_units",
",",
"side",
"=",
"\"left\"",
")",
")",
"+",
"1",
"build_idx",
"=",
"choices",
"[",
":",
"ind",
"]",
"if",
"drop_after_build",
":",
"self",
".",
"feasibility",
"=",
"self",
".",
"feasibility",
".",
"drop",
"(",
"build_idx",
")",
"new_df",
"=",
"df",
".",
"loc",
"[",
"build_idx",
"]",
"new_df",
".",
"index",
".",
"name",
"=",
"\"parcel_id\"",
"return",
"new_df",
".",
"reset_index",
"(",
")"
] | Choose the buildings from the list that are feasible to build in
order to match the specified demand.
Parameters
----------
form : string or list
One or more of the building forms from the pro forma specification -
e.g. "residential" or "mixedresidential" - these are configuration
parameters passed previously to the pro forma. If more than one form
is passed the forms compete with each other (based on profitability)
for which one gets built in order to meet demand.
target_units : int
The number of units to build. For non-residential buildings this
should be passed as the number of job spaces that need to be created.
parcel_size : series
The size of the parcels. This was passed to feasibility as well,
but should be passed here as well. Index should be parcel_ids.
ave_unit_size : series
The average residential unit size around each parcel - this is
indexed by parcel, but is usually a disaggregated version of a
zonal or accessibility aggregation.
bldg_sqft_per_job : float (default 400.0)
The average square feet per job for this building form.
min_unit_size : float
Values less than this number in ave_unit_size will be set to this
number. Deals with cases where units are currently not built.
current_units : series
The current number of units on the parcel. Is used to compute the
net number of units produced by the developer model. Many times
the developer model is redeveloping units (demolishing them) and
is trying to meet a total number of net units produced.
max_parcel_size : float
Parcels larger than this size will not be considered for
development - usually large parcels should be specified manually
in a development projects table.
drop_after_build : bool
Whether or not to drop parcels from consideration after they
have been chosen for development. Usually this is true so as
to not develop the same parcel twice.
residential: bool
If creating non-residential buildings set this to false and
developer will fill in job_spaces rather than residential_units
profit_to_prob_func: function
As there are so many ways to turn the development feasibility
into a probability to select it for building, the user may pass
a function which takes the feasibility dataframe and returns
a series of probabilities. If no function is passed, the behavior
of this method will not change
Returns
-------
None if thar are no feasible buildings
new_buildings : dataframe
DataFrame of buildings to add. These buildings are rows from the
DataFrame that is returned from feasibility. | [
"Choose",
"the",
"buildings",
"from",
"the",
"list",
"that",
"are",
"feasible",
"to",
"build",
"in",
"order",
"to",
"match",
"the",
"specified",
"demand",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/developer.py#L106-L231 | train | 235,874 |
linkedin/luminol | src/luminol/__init__.py | Luminol._analyze_root_causes | def _analyze_root_causes(self):
"""
Conduct root cause analysis.
The first metric of the list is taken as the root cause right now.
"""
causes = {}
for a in self.anomalies:
try:
causes[a] = self.correlations[a][0]
except IndexError:
raise exceptions.InvalidDataFormat('luminol.luminol: dict correlations contains empty list.')
self.causes = causes | python | def _analyze_root_causes(self):
"""
Conduct root cause analysis.
The first metric of the list is taken as the root cause right now.
"""
causes = {}
for a in self.anomalies:
try:
causes[a] = self.correlations[a][0]
except IndexError:
raise exceptions.InvalidDataFormat('luminol.luminol: dict correlations contains empty list.')
self.causes = causes | [
"def",
"_analyze_root_causes",
"(",
"self",
")",
":",
"causes",
"=",
"{",
"}",
"for",
"a",
"in",
"self",
".",
"anomalies",
":",
"try",
":",
"causes",
"[",
"a",
"]",
"=",
"self",
".",
"correlations",
"[",
"a",
"]",
"[",
"0",
"]",
"except",
"IndexError",
":",
"raise",
"exceptions",
".",
"InvalidDataFormat",
"(",
"'luminol.luminol: dict correlations contains empty list.'",
")",
"self",
".",
"causes",
"=",
"causes"
] | Conduct root cause analysis.
The first metric of the list is taken as the root cause right now. | [
"Conduct",
"root",
"cause",
"analysis",
".",
"The",
"first",
"metric",
"of",
"the",
"list",
"is",
"taken",
"as",
"the",
"root",
"cause",
"right",
"now",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/__init__.py#L32-L43 | train | 235,875 |
linkedin/luminol | src/luminol/correlator.py | Correlator._sanity_check | def _sanity_check(self):
"""
Check if the time series have more than two data points.
"""
if len(self.time_series_a) < 2 or len(self.time_series_b) < 2:
raise exceptions.NotEnoughDataPoints('luminol.Correlator: Too few data points!') | python | def _sanity_check(self):
"""
Check if the time series have more than two data points.
"""
if len(self.time_series_a) < 2 or len(self.time_series_b) < 2:
raise exceptions.NotEnoughDataPoints('luminol.Correlator: Too few data points!') | [
"def",
"_sanity_check",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"time_series_a",
")",
"<",
"2",
"or",
"len",
"(",
"self",
".",
"time_series_b",
")",
"<",
"2",
":",
"raise",
"exceptions",
".",
"NotEnoughDataPoints",
"(",
"'luminol.Correlator: Too few data points!'",
")"
] | Check if the time series have more than two data points. | [
"Check",
"if",
"the",
"time",
"series",
"have",
"more",
"than",
"two",
"data",
"points",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/correlator.py#L92-L97 | train | 235,876 |
linkedin/luminol | src/luminol/correlator.py | Correlator._correlate | def _correlate(self):
"""
Run correlation algorithm.
"""
a = self.algorithm(**self.algorithm_params)
self.correlation_result = a.run() | python | def _correlate(self):
"""
Run correlation algorithm.
"""
a = self.algorithm(**self.algorithm_params)
self.correlation_result = a.run() | [
"def",
"_correlate",
"(",
"self",
")",
":",
"a",
"=",
"self",
".",
"algorithm",
"(",
"*",
"*",
"self",
".",
"algorithm_params",
")",
"self",
".",
"correlation_result",
"=",
"a",
".",
"run",
"(",
")"
] | Run correlation algorithm. | [
"Run",
"correlation",
"algorithm",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/correlator.py#L99-L104 | train | 235,877 |
linkedin/luminol | demo/src/rca.py | RCA._analyze | def _analyze(self):
"""
Analyzes if a matrix has anomalies.
If any anomaly is found, determine if the matrix correlates with any other matrixes.
To be implemented.
"""
output = defaultdict(list)
output_by_name = defaultdict(list)
scores = self.anomaly_detector.get_all_scores()
if self.anomalies:
for anomaly in self.anomalies:
metrix_scores = scores
start_t, end_t = anomaly.get_time_window()
t = anomaly.exact_timestamp
# Compute extended start timestamp and extended end timestamp.
room = (end_t - start_t) / 2
if not room:
room = 30
extended_start_t = start_t - room
extended_end_t = end_t + room
metrix_scores_cropped = metrix_scores.crop(extended_start_t, extended_end_t)
# Adjust the two timestamps if not enough data points are included.
while len(metrix_scores_cropped) < 2:
extended_start_t = extended_start_t - room
extended_end_t = extended_end_t + room
metrix_scores_cropped = metrix_scores.crop(extended_start_t, extended_end_t)
# Correlate with other metrics
for entry in self.related_metrices:
try:
entry_correlation_result = Correlator(self.metrix, entry, time_period=(extended_start_t, extended_end_t),
use_anomaly_score=True).get_correlation_result()
record = extended_start_t, extended_end_t, entry_correlation_result.__dict__, entry
record_by_name = extended_start_t, extended_end_t, entry_correlation_result.__dict__
output[t].append(record)
output_by_name[entry].append(record_by_name)
except exceptions.NotEnoughDataPoints:
pass
self.output = output
self.output_by_name = output_by_name | python | def _analyze(self):
"""
Analyzes if a matrix has anomalies.
If any anomaly is found, determine if the matrix correlates with any other matrixes.
To be implemented.
"""
output = defaultdict(list)
output_by_name = defaultdict(list)
scores = self.anomaly_detector.get_all_scores()
if self.anomalies:
for anomaly in self.anomalies:
metrix_scores = scores
start_t, end_t = anomaly.get_time_window()
t = anomaly.exact_timestamp
# Compute extended start timestamp and extended end timestamp.
room = (end_t - start_t) / 2
if not room:
room = 30
extended_start_t = start_t - room
extended_end_t = end_t + room
metrix_scores_cropped = metrix_scores.crop(extended_start_t, extended_end_t)
# Adjust the two timestamps if not enough data points are included.
while len(metrix_scores_cropped) < 2:
extended_start_t = extended_start_t - room
extended_end_t = extended_end_t + room
metrix_scores_cropped = metrix_scores.crop(extended_start_t, extended_end_t)
# Correlate with other metrics
for entry in self.related_metrices:
try:
entry_correlation_result = Correlator(self.metrix, entry, time_period=(extended_start_t, extended_end_t),
use_anomaly_score=True).get_correlation_result()
record = extended_start_t, extended_end_t, entry_correlation_result.__dict__, entry
record_by_name = extended_start_t, extended_end_t, entry_correlation_result.__dict__
output[t].append(record)
output_by_name[entry].append(record_by_name)
except exceptions.NotEnoughDataPoints:
pass
self.output = output
self.output_by_name = output_by_name | [
"def",
"_analyze",
"(",
"self",
")",
":",
"output",
"=",
"defaultdict",
"(",
"list",
")",
"output_by_name",
"=",
"defaultdict",
"(",
"list",
")",
"scores",
"=",
"self",
".",
"anomaly_detector",
".",
"get_all_scores",
"(",
")",
"if",
"self",
".",
"anomalies",
":",
"for",
"anomaly",
"in",
"self",
".",
"anomalies",
":",
"metrix_scores",
"=",
"scores",
"start_t",
",",
"end_t",
"=",
"anomaly",
".",
"get_time_window",
"(",
")",
"t",
"=",
"anomaly",
".",
"exact_timestamp",
"# Compute extended start timestamp and extended end timestamp.",
"room",
"=",
"(",
"end_t",
"-",
"start_t",
")",
"/",
"2",
"if",
"not",
"room",
":",
"room",
"=",
"30",
"extended_start_t",
"=",
"start_t",
"-",
"room",
"extended_end_t",
"=",
"end_t",
"+",
"room",
"metrix_scores_cropped",
"=",
"metrix_scores",
".",
"crop",
"(",
"extended_start_t",
",",
"extended_end_t",
")",
"# Adjust the two timestamps if not enough data points are included.",
"while",
"len",
"(",
"metrix_scores_cropped",
")",
"<",
"2",
":",
"extended_start_t",
"=",
"extended_start_t",
"-",
"room",
"extended_end_t",
"=",
"extended_end_t",
"+",
"room",
"metrix_scores_cropped",
"=",
"metrix_scores",
".",
"crop",
"(",
"extended_start_t",
",",
"extended_end_t",
")",
"# Correlate with other metrics",
"for",
"entry",
"in",
"self",
".",
"related_metrices",
":",
"try",
":",
"entry_correlation_result",
"=",
"Correlator",
"(",
"self",
".",
"metrix",
",",
"entry",
",",
"time_period",
"=",
"(",
"extended_start_t",
",",
"extended_end_t",
")",
",",
"use_anomaly_score",
"=",
"True",
")",
".",
"get_correlation_result",
"(",
")",
"record",
"=",
"extended_start_t",
",",
"extended_end_t",
",",
"entry_correlation_result",
".",
"__dict__",
",",
"entry",
"record_by_name",
"=",
"extended_start_t",
",",
"extended_end_t",
",",
"entry_correlation_result",
".",
"__dict__",
"output",
"[",
"t",
"]",
".",
"append",
"(",
"record",
")",
"output_by_name",
"[",
"entry",
"]",
".",
"append",
"(",
"record_by_name",
")",
"except",
"exceptions",
".",
"NotEnoughDataPoints",
":",
"pass",
"self",
".",
"output",
"=",
"output",
"self",
".",
"output_by_name",
"=",
"output_by_name"
] | Analyzes if a matrix has anomalies.
If any anomaly is found, determine if the matrix correlates with any other matrixes.
To be implemented. | [
"Analyzes",
"if",
"a",
"matrix",
"has",
"anomalies",
".",
"If",
"any",
"anomaly",
"is",
"found",
"determine",
"if",
"the",
"matrix",
"correlates",
"with",
"any",
"other",
"matrixes",
".",
"To",
"be",
"implemented",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/demo/src/rca.py#L49-L92 | train | 235,878 |
linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/default_detector.py | DefaultDetector._set_scores | def _set_scores(self):
"""
Set anomaly scores using a weighted sum.
"""
anom_scores_ema = self.exp_avg_detector.run()
anom_scores_deri = self.derivative_detector.run()
anom_scores = {}
for timestamp in anom_scores_ema.timestamps:
# Compute a weighted anomaly score.
anom_scores[timestamp] = max(anom_scores_ema[timestamp],
anom_scores_ema[timestamp] * DEFAULT_DETECTOR_EMA_WEIGHT + anom_scores_deri[timestamp] * (1 - DEFAULT_DETECTOR_EMA_WEIGHT))
# If ema score is significant enough, take the bigger one of the weighted score and deri score.
if anom_scores_ema[timestamp] > DEFAULT_DETECTOR_EMA_SIGNIFICANT:
anom_scores[timestamp] = max(anom_scores[timestamp], anom_scores_deri[timestamp])
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | python | def _set_scores(self):
"""
Set anomaly scores using a weighted sum.
"""
anom_scores_ema = self.exp_avg_detector.run()
anom_scores_deri = self.derivative_detector.run()
anom_scores = {}
for timestamp in anom_scores_ema.timestamps:
# Compute a weighted anomaly score.
anom_scores[timestamp] = max(anom_scores_ema[timestamp],
anom_scores_ema[timestamp] * DEFAULT_DETECTOR_EMA_WEIGHT + anom_scores_deri[timestamp] * (1 - DEFAULT_DETECTOR_EMA_WEIGHT))
# If ema score is significant enough, take the bigger one of the weighted score and deri score.
if anom_scores_ema[timestamp] > DEFAULT_DETECTOR_EMA_SIGNIFICANT:
anom_scores[timestamp] = max(anom_scores[timestamp], anom_scores_deri[timestamp])
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | [
"def",
"_set_scores",
"(",
"self",
")",
":",
"anom_scores_ema",
"=",
"self",
".",
"exp_avg_detector",
".",
"run",
"(",
")",
"anom_scores_deri",
"=",
"self",
".",
"derivative_detector",
".",
"run",
"(",
")",
"anom_scores",
"=",
"{",
"}",
"for",
"timestamp",
"in",
"anom_scores_ema",
".",
"timestamps",
":",
"# Compute a weighted anomaly score.",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"max",
"(",
"anom_scores_ema",
"[",
"timestamp",
"]",
",",
"anom_scores_ema",
"[",
"timestamp",
"]",
"*",
"DEFAULT_DETECTOR_EMA_WEIGHT",
"+",
"anom_scores_deri",
"[",
"timestamp",
"]",
"*",
"(",
"1",
"-",
"DEFAULT_DETECTOR_EMA_WEIGHT",
")",
")",
"# If ema score is significant enough, take the bigger one of the weighted score and deri score.",
"if",
"anom_scores_ema",
"[",
"timestamp",
"]",
">",
"DEFAULT_DETECTOR_EMA_SIGNIFICANT",
":",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"max",
"(",
"anom_scores",
"[",
"timestamp",
"]",
",",
"anom_scores_deri",
"[",
"timestamp",
"]",
")",
"self",
".",
"anom_scores",
"=",
"TimeSeries",
"(",
"self",
".",
"_denoise_scores",
"(",
"anom_scores",
")",
")"
] | Set anomaly scores using a weighted sum. | [
"Set",
"anomaly",
"scores",
"using",
"a",
"weighted",
"sum",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/default_detector.py#L35-L49 | train | 235,879 |
linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/derivative_detector.py | DerivativeDetector._compute_derivatives | def _compute_derivatives(self):
"""
Compute derivatives of the time series.
"""
derivatives = []
for i, (timestamp, value) in enumerate(self.time_series_items):
if i > 0:
pre_item = self.time_series_items[i - 1]
pre_timestamp = pre_item[0]
pre_value = pre_item[1]
td = timestamp - pre_timestamp
derivative = (value - pre_value) / td if td != 0 else value - pre_value
derivative = abs(derivative)
derivatives.append(derivative)
# First timestamp is assigned the same derivative as the second timestamp.
if derivatives:
derivatives.insert(0, derivatives[0])
self.derivatives = derivatives | python | def _compute_derivatives(self):
"""
Compute derivatives of the time series.
"""
derivatives = []
for i, (timestamp, value) in enumerate(self.time_series_items):
if i > 0:
pre_item = self.time_series_items[i - 1]
pre_timestamp = pre_item[0]
pre_value = pre_item[1]
td = timestamp - pre_timestamp
derivative = (value - pre_value) / td if td != 0 else value - pre_value
derivative = abs(derivative)
derivatives.append(derivative)
# First timestamp is assigned the same derivative as the second timestamp.
if derivatives:
derivatives.insert(0, derivatives[0])
self.derivatives = derivatives | [
"def",
"_compute_derivatives",
"(",
"self",
")",
":",
"derivatives",
"=",
"[",
"]",
"for",
"i",
",",
"(",
"timestamp",
",",
"value",
")",
"in",
"enumerate",
"(",
"self",
".",
"time_series_items",
")",
":",
"if",
"i",
">",
"0",
":",
"pre_item",
"=",
"self",
".",
"time_series_items",
"[",
"i",
"-",
"1",
"]",
"pre_timestamp",
"=",
"pre_item",
"[",
"0",
"]",
"pre_value",
"=",
"pre_item",
"[",
"1",
"]",
"td",
"=",
"timestamp",
"-",
"pre_timestamp",
"derivative",
"=",
"(",
"value",
"-",
"pre_value",
")",
"/",
"td",
"if",
"td",
"!=",
"0",
"else",
"value",
"-",
"pre_value",
"derivative",
"=",
"abs",
"(",
"derivative",
")",
"derivatives",
".",
"append",
"(",
"derivative",
")",
"# First timestamp is assigned the same derivative as the second timestamp.",
"if",
"derivatives",
":",
"derivatives",
".",
"insert",
"(",
"0",
",",
"derivatives",
"[",
"0",
"]",
")",
"self",
".",
"derivatives",
"=",
"derivatives"
] | Compute derivatives of the time series. | [
"Compute",
"derivatives",
"of",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/derivative_detector.py#L38-L55 | train | 235,880 |
linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py | BitmapDetector._sanity_check | def _sanity_check(self):
"""
Check if there are enough data points.
"""
windows = self.lag_window_size + self.future_window_size
if (not self.lag_window_size or not self.future_window_size or self.time_series_length < windows or windows < DEFAULT_BITMAP_MINIMAL_POINTS_IN_WINDOWS):
raise exceptions.NotEnoughDataPoints
# If window size is too big, too many data points will be assigned a score of 0 in the first lag window
# and the last future window.
if self.lag_window_size > DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS:
self.lag_window_size = DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS
if self.future_window_size > DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS:
self.future_window_size = DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS | python | def _sanity_check(self):
"""
Check if there are enough data points.
"""
windows = self.lag_window_size + self.future_window_size
if (not self.lag_window_size or not self.future_window_size or self.time_series_length < windows or windows < DEFAULT_BITMAP_MINIMAL_POINTS_IN_WINDOWS):
raise exceptions.NotEnoughDataPoints
# If window size is too big, too many data points will be assigned a score of 0 in the first lag window
# and the last future window.
if self.lag_window_size > DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS:
self.lag_window_size = DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS
if self.future_window_size > DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS:
self.future_window_size = DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS | [
"def",
"_sanity_check",
"(",
"self",
")",
":",
"windows",
"=",
"self",
".",
"lag_window_size",
"+",
"self",
".",
"future_window_size",
"if",
"(",
"not",
"self",
".",
"lag_window_size",
"or",
"not",
"self",
".",
"future_window_size",
"or",
"self",
".",
"time_series_length",
"<",
"windows",
"or",
"windows",
"<",
"DEFAULT_BITMAP_MINIMAL_POINTS_IN_WINDOWS",
")",
":",
"raise",
"exceptions",
".",
"NotEnoughDataPoints",
"# If window size is too big, too many data points will be assigned a score of 0 in the first lag window",
"# and the last future window.",
"if",
"self",
".",
"lag_window_size",
">",
"DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS",
":",
"self",
".",
"lag_window_size",
"=",
"DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS",
"if",
"self",
".",
"future_window_size",
">",
"DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS",
":",
"self",
".",
"future_window_size",
"=",
"DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS"
] | Check if there are enough data points. | [
"Check",
"if",
"there",
"are",
"enough",
"data",
"points",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py#L60-L73 | train | 235,881 |
linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py | BitmapDetector._generate_SAX | def _generate_SAX(self):
"""
Generate SAX representation for all values of the time series.
"""
sections = {}
self.value_min = self.time_series.min()
self.value_max = self.time_series.max()
# Break the whole value range into different sections.
section_height = (self.value_max - self.value_min) / self.precision
for section_number in range(self.precision):
sections[section_number] = self.value_min + section_number * section_height
# Generate SAX representation.
self.sax = ''.join(self._generate_SAX_single(sections, value) for value in self.time_series.values) | python | def _generate_SAX(self):
"""
Generate SAX representation for all values of the time series.
"""
sections = {}
self.value_min = self.time_series.min()
self.value_max = self.time_series.max()
# Break the whole value range into different sections.
section_height = (self.value_max - self.value_min) / self.precision
for section_number in range(self.precision):
sections[section_number] = self.value_min + section_number * section_height
# Generate SAX representation.
self.sax = ''.join(self._generate_SAX_single(sections, value) for value in self.time_series.values) | [
"def",
"_generate_SAX",
"(",
"self",
")",
":",
"sections",
"=",
"{",
"}",
"self",
".",
"value_min",
"=",
"self",
".",
"time_series",
".",
"min",
"(",
")",
"self",
".",
"value_max",
"=",
"self",
".",
"time_series",
".",
"max",
"(",
")",
"# Break the whole value range into different sections.",
"section_height",
"=",
"(",
"self",
".",
"value_max",
"-",
"self",
".",
"value_min",
")",
"/",
"self",
".",
"precision",
"for",
"section_number",
"in",
"range",
"(",
"self",
".",
"precision",
")",
":",
"sections",
"[",
"section_number",
"]",
"=",
"self",
".",
"value_min",
"+",
"section_number",
"*",
"section_height",
"# Generate SAX representation.",
"self",
".",
"sax",
"=",
"''",
".",
"join",
"(",
"self",
".",
"_generate_SAX_single",
"(",
"sections",
",",
"value",
")",
"for",
"value",
"in",
"self",
".",
"time_series",
".",
"values",
")"
] | Generate SAX representation for all values of the time series. | [
"Generate",
"SAX",
"representation",
"for",
"all",
"values",
"of",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py#L92-L104 | train | 235,882 |
linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py | BitmapDetector._set_scores | def _set_scores(self):
"""
Compute anomaly scores for the time series by sliding both lagging window and future window.
"""
anom_scores = {}
self._generate_SAX()
self._construct_all_SAX_chunk_dict()
length = self.time_series_length
lws = self.lag_window_size
fws = self.future_window_size
for i, timestamp in enumerate(self.time_series.timestamps):
if i < lws or i > length - fws:
anom_scores[timestamp] = 0
else:
anom_scores[timestamp] = self._compute_anom_score_between_two_windows(i)
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | python | def _set_scores(self):
"""
Compute anomaly scores for the time series by sliding both lagging window and future window.
"""
anom_scores = {}
self._generate_SAX()
self._construct_all_SAX_chunk_dict()
length = self.time_series_length
lws = self.lag_window_size
fws = self.future_window_size
for i, timestamp in enumerate(self.time_series.timestamps):
if i < lws or i > length - fws:
anom_scores[timestamp] = 0
else:
anom_scores[timestamp] = self._compute_anom_score_between_two_windows(i)
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | [
"def",
"_set_scores",
"(",
"self",
")",
":",
"anom_scores",
"=",
"{",
"}",
"self",
".",
"_generate_SAX",
"(",
")",
"self",
".",
"_construct_all_SAX_chunk_dict",
"(",
")",
"length",
"=",
"self",
".",
"time_series_length",
"lws",
"=",
"self",
".",
"lag_window_size",
"fws",
"=",
"self",
".",
"future_window_size",
"for",
"i",
",",
"timestamp",
"in",
"enumerate",
"(",
"self",
".",
"time_series",
".",
"timestamps",
")",
":",
"if",
"i",
"<",
"lws",
"or",
"i",
">",
"length",
"-",
"fws",
":",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"0",
"else",
":",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"self",
".",
"_compute_anom_score_between_two_windows",
"(",
"i",
")",
"self",
".",
"anom_scores",
"=",
"TimeSeries",
"(",
"self",
".",
"_denoise_scores",
"(",
"anom_scores",
")",
")"
] | Compute anomaly scores for the time series by sliding both lagging window and future window. | [
"Compute",
"anomaly",
"scores",
"for",
"the",
"time",
"series",
"by",
"sliding",
"both",
"lagging",
"window",
"and",
"future",
"window",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py#L196-L212 | train | 235,883 |
linkedin/luminol | src/luminol/algorithms/correlator_algorithms/cross_correlator.py | CrossCorrelator._detect_correlation | def _detect_correlation(self):
"""
Detect correlation by computing correlation coefficients for all allowed shift steps,
then take the maximum.
"""
correlations = []
shifted_correlations = []
self.time_series_a.normalize()
self.time_series_b.normalize()
a, b = self.time_series_a.align(self.time_series_b)
a_values, b_values = a.values, b.values
a_avg, b_avg = a.average(), b.average()
a_stdev, b_stdev = a.stdev(), b.stdev()
n = len(a)
denom = a_stdev * b_stdev * n
# Find the maximal shift steps according to the maximal shift seconds.
allowed_shift_step = self._find_allowed_shift(a.timestamps)
if allowed_shift_step:
shift_upper_bound = allowed_shift_step
shift_lower_bound = -allowed_shift_step
else:
shift_upper_bound = 1
shift_lower_bound = 0
for delay in range(shift_lower_bound, shift_upper_bound):
delay_in_seconds = a.timestamps[abs(delay)] - a.timestamps[0]
if delay < 0:
delay_in_seconds = -delay_in_seconds
s = 0
for i in range(n):
j = i + delay
if j < 0 or j >= n:
continue
else:
s += ((a_values[i] - a_avg) * (b_values[j] - b_avg))
r = s / denom if denom != 0 else s
correlations.append([delay_in_seconds, r])
# Take shift into account to create a "shifted correlation coefficient".
if self.max_shift_milliseconds:
shifted_correlations.append(r * (1 + float(delay_in_seconds) / self.max_shift_milliseconds * self.shift_impact))
else:
shifted_correlations.append(r)
max_correlation = list(max(correlations, key=lambda k: k[1]))
max_shifted_correlation = max(shifted_correlations)
max_correlation.append(max_shifted_correlation)
self.correlation_result = CorrelationResult(*max_correlation) | python | def _detect_correlation(self):
"""
Detect correlation by computing correlation coefficients for all allowed shift steps,
then take the maximum.
"""
correlations = []
shifted_correlations = []
self.time_series_a.normalize()
self.time_series_b.normalize()
a, b = self.time_series_a.align(self.time_series_b)
a_values, b_values = a.values, b.values
a_avg, b_avg = a.average(), b.average()
a_stdev, b_stdev = a.stdev(), b.stdev()
n = len(a)
denom = a_stdev * b_stdev * n
# Find the maximal shift steps according to the maximal shift seconds.
allowed_shift_step = self._find_allowed_shift(a.timestamps)
if allowed_shift_step:
shift_upper_bound = allowed_shift_step
shift_lower_bound = -allowed_shift_step
else:
shift_upper_bound = 1
shift_lower_bound = 0
for delay in range(shift_lower_bound, shift_upper_bound):
delay_in_seconds = a.timestamps[abs(delay)] - a.timestamps[0]
if delay < 0:
delay_in_seconds = -delay_in_seconds
s = 0
for i in range(n):
j = i + delay
if j < 0 or j >= n:
continue
else:
s += ((a_values[i] - a_avg) * (b_values[j] - b_avg))
r = s / denom if denom != 0 else s
correlations.append([delay_in_seconds, r])
# Take shift into account to create a "shifted correlation coefficient".
if self.max_shift_milliseconds:
shifted_correlations.append(r * (1 + float(delay_in_seconds) / self.max_shift_milliseconds * self.shift_impact))
else:
shifted_correlations.append(r)
max_correlation = list(max(correlations, key=lambda k: k[1]))
max_shifted_correlation = max(shifted_correlations)
max_correlation.append(max_shifted_correlation)
self.correlation_result = CorrelationResult(*max_correlation) | [
"def",
"_detect_correlation",
"(",
"self",
")",
":",
"correlations",
"=",
"[",
"]",
"shifted_correlations",
"=",
"[",
"]",
"self",
".",
"time_series_a",
".",
"normalize",
"(",
")",
"self",
".",
"time_series_b",
".",
"normalize",
"(",
")",
"a",
",",
"b",
"=",
"self",
".",
"time_series_a",
".",
"align",
"(",
"self",
".",
"time_series_b",
")",
"a_values",
",",
"b_values",
"=",
"a",
".",
"values",
",",
"b",
".",
"values",
"a_avg",
",",
"b_avg",
"=",
"a",
".",
"average",
"(",
")",
",",
"b",
".",
"average",
"(",
")",
"a_stdev",
",",
"b_stdev",
"=",
"a",
".",
"stdev",
"(",
")",
",",
"b",
".",
"stdev",
"(",
")",
"n",
"=",
"len",
"(",
"a",
")",
"denom",
"=",
"a_stdev",
"*",
"b_stdev",
"*",
"n",
"# Find the maximal shift steps according to the maximal shift seconds.",
"allowed_shift_step",
"=",
"self",
".",
"_find_allowed_shift",
"(",
"a",
".",
"timestamps",
")",
"if",
"allowed_shift_step",
":",
"shift_upper_bound",
"=",
"allowed_shift_step",
"shift_lower_bound",
"=",
"-",
"allowed_shift_step",
"else",
":",
"shift_upper_bound",
"=",
"1",
"shift_lower_bound",
"=",
"0",
"for",
"delay",
"in",
"range",
"(",
"shift_lower_bound",
",",
"shift_upper_bound",
")",
":",
"delay_in_seconds",
"=",
"a",
".",
"timestamps",
"[",
"abs",
"(",
"delay",
")",
"]",
"-",
"a",
".",
"timestamps",
"[",
"0",
"]",
"if",
"delay",
"<",
"0",
":",
"delay_in_seconds",
"=",
"-",
"delay_in_seconds",
"s",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"j",
"=",
"i",
"+",
"delay",
"if",
"j",
"<",
"0",
"or",
"j",
">=",
"n",
":",
"continue",
"else",
":",
"s",
"+=",
"(",
"(",
"a_values",
"[",
"i",
"]",
"-",
"a_avg",
")",
"*",
"(",
"b_values",
"[",
"j",
"]",
"-",
"b_avg",
")",
")",
"r",
"=",
"s",
"/",
"denom",
"if",
"denom",
"!=",
"0",
"else",
"s",
"correlations",
".",
"append",
"(",
"[",
"delay_in_seconds",
",",
"r",
"]",
")",
"# Take shift into account to create a \"shifted correlation coefficient\".",
"if",
"self",
".",
"max_shift_milliseconds",
":",
"shifted_correlations",
".",
"append",
"(",
"r",
"*",
"(",
"1",
"+",
"float",
"(",
"delay_in_seconds",
")",
"/",
"self",
".",
"max_shift_milliseconds",
"*",
"self",
".",
"shift_impact",
")",
")",
"else",
":",
"shifted_correlations",
".",
"append",
"(",
"r",
")",
"max_correlation",
"=",
"list",
"(",
"max",
"(",
"correlations",
",",
"key",
"=",
"lambda",
"k",
":",
"k",
"[",
"1",
"]",
")",
")",
"max_shifted_correlation",
"=",
"max",
"(",
"shifted_correlations",
")",
"max_correlation",
".",
"append",
"(",
"max_shifted_correlation",
")",
"self",
".",
"correlation_result",
"=",
"CorrelationResult",
"(",
"*",
"max_correlation",
")"
] | Detect correlation by computing correlation coefficients for all allowed shift steps,
then take the maximum. | [
"Detect",
"correlation",
"by",
"computing",
"correlation",
"coefficients",
"for",
"all",
"allowed",
"shift",
"steps",
"then",
"take",
"the",
"maximum",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/correlator_algorithms/cross_correlator.py#L39-L83 | train | 235,884 |
linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/exp_avg_detector.py | ExpAvgDetector._compute_anom_data_using_window | def _compute_anom_data_using_window(self):
"""
Compute anomaly scores using a lagging window.
"""
anom_scores = {}
values = self.time_series.values
stdev = numpy.std(values)
for i, (timestamp, value) in enumerate(self.time_series_items):
if i < self.lag_window_size:
anom_score = self._compute_anom_score(values[:i + 1], value)
else:
anom_score = self._compute_anom_score(values[i - self.lag_window_size: i + 1], value)
if stdev:
anom_scores[timestamp] = anom_score / stdev
else:
anom_scores[timestamp] = anom_score
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | python | def _compute_anom_data_using_window(self):
"""
Compute anomaly scores using a lagging window.
"""
anom_scores = {}
values = self.time_series.values
stdev = numpy.std(values)
for i, (timestamp, value) in enumerate(self.time_series_items):
if i < self.lag_window_size:
anom_score = self._compute_anom_score(values[:i + 1], value)
else:
anom_score = self._compute_anom_score(values[i - self.lag_window_size: i + 1], value)
if stdev:
anom_scores[timestamp] = anom_score / stdev
else:
anom_scores[timestamp] = anom_score
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | [
"def",
"_compute_anom_data_using_window",
"(",
"self",
")",
":",
"anom_scores",
"=",
"{",
"}",
"values",
"=",
"self",
".",
"time_series",
".",
"values",
"stdev",
"=",
"numpy",
".",
"std",
"(",
"values",
")",
"for",
"i",
",",
"(",
"timestamp",
",",
"value",
")",
"in",
"enumerate",
"(",
"self",
".",
"time_series_items",
")",
":",
"if",
"i",
"<",
"self",
".",
"lag_window_size",
":",
"anom_score",
"=",
"self",
".",
"_compute_anom_score",
"(",
"values",
"[",
":",
"i",
"+",
"1",
"]",
",",
"value",
")",
"else",
":",
"anom_score",
"=",
"self",
".",
"_compute_anom_score",
"(",
"values",
"[",
"i",
"-",
"self",
".",
"lag_window_size",
":",
"i",
"+",
"1",
"]",
",",
"value",
")",
"if",
"stdev",
":",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"anom_score",
"/",
"stdev",
"else",
":",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"anom_score",
"self",
".",
"anom_scores",
"=",
"TimeSeries",
"(",
"self",
".",
"_denoise_scores",
"(",
"anom_scores",
")",
")"
] | Compute anomaly scores using a lagging window. | [
"Compute",
"anomaly",
"scores",
"using",
"a",
"lagging",
"window",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/exp_avg_detector.py#L53-L69 | train | 235,885 |
linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/exp_avg_detector.py | ExpAvgDetector._compute_anom_data_decay_all | def _compute_anom_data_decay_all(self):
"""
Compute anomaly scores using a lagging window covering all the data points before.
"""
anom_scores = {}
values = self.time_series.values
ema = utils.compute_ema(self.smoothing_factor, values)
stdev = numpy.std(values)
for i, (timestamp, value) in enumerate(self.time_series_items):
anom_score = abs((value - ema[i]) / stdev) if stdev else value - ema[i]
anom_scores[timestamp] = anom_score
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | python | def _compute_anom_data_decay_all(self):
"""
Compute anomaly scores using a lagging window covering all the data points before.
"""
anom_scores = {}
values = self.time_series.values
ema = utils.compute_ema(self.smoothing_factor, values)
stdev = numpy.std(values)
for i, (timestamp, value) in enumerate(self.time_series_items):
anom_score = abs((value - ema[i]) / stdev) if stdev else value - ema[i]
anom_scores[timestamp] = anom_score
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | [
"def",
"_compute_anom_data_decay_all",
"(",
"self",
")",
":",
"anom_scores",
"=",
"{",
"}",
"values",
"=",
"self",
".",
"time_series",
".",
"values",
"ema",
"=",
"utils",
".",
"compute_ema",
"(",
"self",
".",
"smoothing_factor",
",",
"values",
")",
"stdev",
"=",
"numpy",
".",
"std",
"(",
"values",
")",
"for",
"i",
",",
"(",
"timestamp",
",",
"value",
")",
"in",
"enumerate",
"(",
"self",
".",
"time_series_items",
")",
":",
"anom_score",
"=",
"abs",
"(",
"(",
"value",
"-",
"ema",
"[",
"i",
"]",
")",
"/",
"stdev",
")",
"if",
"stdev",
"else",
"value",
"-",
"ema",
"[",
"i",
"]",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"anom_score",
"self",
".",
"anom_scores",
"=",
"TimeSeries",
"(",
"self",
".",
"_denoise_scores",
"(",
"anom_scores",
")",
")"
] | Compute anomaly scores using a lagging window covering all the data points before. | [
"Compute",
"anomaly",
"scores",
"using",
"a",
"lagging",
"window",
"covering",
"all",
"the",
"data",
"points",
"before",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/exp_avg_detector.py#L71-L82 | train | 235,886 |
linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries._generic_binary_op | def _generic_binary_op(self, other, op):
"""
Perform the method operation specified in the op parameter on the values
within the instance's time series values and either another time series
or a constant number value.
:param other: Time series of values or a constant number to use in calculations with instance's time series.
:param func op: The method to perform the calculation between the values.
:return: :class:`TimeSeries` object.
"""
output = {}
if isinstance(other, TimeSeries):
for key, value in self.items():
if key in other:
try:
result = op(value, other[key])
if result is NotImplemented:
other_type = type(other[key])
other_op = vars(other_type).get(op.__name__)
if other_op:
output[key] = other_op(other_type(value), other[key])
else:
output[key] = result
except ZeroDivisionError:
continue
else:
for key, value in self.items():
try:
result = op(value, other)
if result is NotImplemented:
other_type = type(other)
other_op = vars(other_type).get(op.__name__)
if other_op:
output[key] = other_op(other_type(value), other)
else:
output[key] = result
except ZeroDivisionError:
continue
if output:
return TimeSeries(output)
else:
raise ValueError('TimeSeries data was empty or invalid.') | python | def _generic_binary_op(self, other, op):
"""
Perform the method operation specified in the op parameter on the values
within the instance's time series values and either another time series
or a constant number value.
:param other: Time series of values or a constant number to use in calculations with instance's time series.
:param func op: The method to perform the calculation between the values.
:return: :class:`TimeSeries` object.
"""
output = {}
if isinstance(other, TimeSeries):
for key, value in self.items():
if key in other:
try:
result = op(value, other[key])
if result is NotImplemented:
other_type = type(other[key])
other_op = vars(other_type).get(op.__name__)
if other_op:
output[key] = other_op(other_type(value), other[key])
else:
output[key] = result
except ZeroDivisionError:
continue
else:
for key, value in self.items():
try:
result = op(value, other)
if result is NotImplemented:
other_type = type(other)
other_op = vars(other_type).get(op.__name__)
if other_op:
output[key] = other_op(other_type(value), other)
else:
output[key] = result
except ZeroDivisionError:
continue
if output:
return TimeSeries(output)
else:
raise ValueError('TimeSeries data was empty or invalid.') | [
"def",
"_generic_binary_op",
"(",
"self",
",",
"other",
",",
"op",
")",
":",
"output",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"other",
",",
"TimeSeries",
")",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"other",
":",
"try",
":",
"result",
"=",
"op",
"(",
"value",
",",
"other",
"[",
"key",
"]",
")",
"if",
"result",
"is",
"NotImplemented",
":",
"other_type",
"=",
"type",
"(",
"other",
"[",
"key",
"]",
")",
"other_op",
"=",
"vars",
"(",
"other_type",
")",
".",
"get",
"(",
"op",
".",
"__name__",
")",
"if",
"other_op",
":",
"output",
"[",
"key",
"]",
"=",
"other_op",
"(",
"other_type",
"(",
"value",
")",
",",
"other",
"[",
"key",
"]",
")",
"else",
":",
"output",
"[",
"key",
"]",
"=",
"result",
"except",
"ZeroDivisionError",
":",
"continue",
"else",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"items",
"(",
")",
":",
"try",
":",
"result",
"=",
"op",
"(",
"value",
",",
"other",
")",
"if",
"result",
"is",
"NotImplemented",
":",
"other_type",
"=",
"type",
"(",
"other",
")",
"other_op",
"=",
"vars",
"(",
"other_type",
")",
".",
"get",
"(",
"op",
".",
"__name__",
")",
"if",
"other_op",
":",
"output",
"[",
"key",
"]",
"=",
"other_op",
"(",
"other_type",
"(",
"value",
")",
",",
"other",
")",
"else",
":",
"output",
"[",
"key",
"]",
"=",
"result",
"except",
"ZeroDivisionError",
":",
"continue",
"if",
"output",
":",
"return",
"TimeSeries",
"(",
"output",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'TimeSeries data was empty or invalid.'",
")"
] | Perform the method operation specified in the op parameter on the values
within the instance's time series values and either another time series
or a constant number value.
:param other: Time series of values or a constant number to use in calculations with instance's time series.
:param func op: The method to perform the calculation between the values.
:return: :class:`TimeSeries` object. | [
"Perform",
"the",
"method",
"operation",
"specified",
"in",
"the",
"op",
"parameter",
"on",
"the",
"values",
"within",
"the",
"instance",
"s",
"time",
"series",
"values",
"and",
"either",
"another",
"time",
"series",
"or",
"a",
"constant",
"number",
"value",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L150-L192 | train | 235,887 |
linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries._get_value_type | def _get_value_type(self, other):
"""
Get the object type of the value within the values portion of the time series.
:return: `type` of object
"""
if self.values:
return type(self.values[0])
elif isinstance(other, TimeSeries) and other.values:
return type(other.values[0])
else:
raise ValueError('Cannot perform arithmetic on empty time series.') | python | def _get_value_type(self, other):
"""
Get the object type of the value within the values portion of the time series.
:return: `type` of object
"""
if self.values:
return type(self.values[0])
elif isinstance(other, TimeSeries) and other.values:
return type(other.values[0])
else:
raise ValueError('Cannot perform arithmetic on empty time series.') | [
"def",
"_get_value_type",
"(",
"self",
",",
"other",
")",
":",
"if",
"self",
".",
"values",
":",
"return",
"type",
"(",
"self",
".",
"values",
"[",
"0",
"]",
")",
"elif",
"isinstance",
"(",
"other",
",",
"TimeSeries",
")",
"and",
"other",
".",
"values",
":",
"return",
"type",
"(",
"other",
".",
"values",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Cannot perform arithmetic on empty time series.'",
")"
] | Get the object type of the value within the values portion of the time series.
:return: `type` of object | [
"Get",
"the",
"object",
"type",
"of",
"the",
"value",
"within",
"the",
"values",
"portion",
"of",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L194-L205 | train | 235,888 |
linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.smooth | def smooth(self, smoothing_factor):
"""
return a new time series which is a exponential smoothed version of the original data series.
soomth forward once, backward once, and then take the average.
:param float smoothing_factor: smoothing factor
:return: :class:`TimeSeries` object.
"""
forward_smooth = {}
backward_smooth = {}
output = {}
if self:
pre = self.values[0]
next = self.values[-1]
for key, value in self.items():
forward_smooth[key] = smoothing_factor * pre + (1 - smoothing_factor) * value
pre = forward_smooth[key]
for key, value in reversed(self.items()):
backward_smooth[key] = smoothing_factor * next + (1 - smoothing_factor) * value
next = backward_smooth[key]
for key in forward_smooth.keys():
output[key] = (forward_smooth[key] + backward_smooth[key]) / 2
return TimeSeries(output) | python | def smooth(self, smoothing_factor):
"""
return a new time series which is a exponential smoothed version of the original data series.
soomth forward once, backward once, and then take the average.
:param float smoothing_factor: smoothing factor
:return: :class:`TimeSeries` object.
"""
forward_smooth = {}
backward_smooth = {}
output = {}
if self:
pre = self.values[0]
next = self.values[-1]
for key, value in self.items():
forward_smooth[key] = smoothing_factor * pre + (1 - smoothing_factor) * value
pre = forward_smooth[key]
for key, value in reversed(self.items()):
backward_smooth[key] = smoothing_factor * next + (1 - smoothing_factor) * value
next = backward_smooth[key]
for key in forward_smooth.keys():
output[key] = (forward_smooth[key] + backward_smooth[key]) / 2
return TimeSeries(output) | [
"def",
"smooth",
"(",
"self",
",",
"smoothing_factor",
")",
":",
"forward_smooth",
"=",
"{",
"}",
"backward_smooth",
"=",
"{",
"}",
"output",
"=",
"{",
"}",
"if",
"self",
":",
"pre",
"=",
"self",
".",
"values",
"[",
"0",
"]",
"next",
"=",
"self",
".",
"values",
"[",
"-",
"1",
"]",
"for",
"key",
",",
"value",
"in",
"self",
".",
"items",
"(",
")",
":",
"forward_smooth",
"[",
"key",
"]",
"=",
"smoothing_factor",
"*",
"pre",
"+",
"(",
"1",
"-",
"smoothing_factor",
")",
"*",
"value",
"pre",
"=",
"forward_smooth",
"[",
"key",
"]",
"for",
"key",
",",
"value",
"in",
"reversed",
"(",
"self",
".",
"items",
"(",
")",
")",
":",
"backward_smooth",
"[",
"key",
"]",
"=",
"smoothing_factor",
"*",
"next",
"+",
"(",
"1",
"-",
"smoothing_factor",
")",
"*",
"value",
"next",
"=",
"backward_smooth",
"[",
"key",
"]",
"for",
"key",
"in",
"forward_smooth",
".",
"keys",
"(",
")",
":",
"output",
"[",
"key",
"]",
"=",
"(",
"forward_smooth",
"[",
"key",
"]",
"+",
"backward_smooth",
"[",
"key",
"]",
")",
"/",
"2",
"return",
"TimeSeries",
"(",
"output",
")"
] | return a new time series which is a exponential smoothed version of the original data series.
soomth forward once, backward once, and then take the average.
:param float smoothing_factor: smoothing factor
:return: :class:`TimeSeries` object. | [
"return",
"a",
"new",
"time",
"series",
"which",
"is",
"a",
"exponential",
"smoothed",
"version",
"of",
"the",
"original",
"data",
"series",
".",
"soomth",
"forward",
"once",
"backward",
"once",
"and",
"then",
"take",
"the",
"average",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L248-L272 | train | 235,889 |
linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.add_offset | def add_offset(self, offset):
"""
Return a new time series with all timestamps incremented by some offset.
:param int offset: The number of seconds to offset the time series.
:return: `None`
"""
self.timestamps = [ts + offset for ts in self.timestamps] | python | def add_offset(self, offset):
"""
Return a new time series with all timestamps incremented by some offset.
:param int offset: The number of seconds to offset the time series.
:return: `None`
"""
self.timestamps = [ts + offset for ts in self.timestamps] | [
"def",
"add_offset",
"(",
"self",
",",
"offset",
")",
":",
"self",
".",
"timestamps",
"=",
"[",
"ts",
"+",
"offset",
"for",
"ts",
"in",
"self",
".",
"timestamps",
"]"
] | Return a new time series with all timestamps incremented by some offset.
:param int offset: The number of seconds to offset the time series.
:return: `None` | [
"Return",
"a",
"new",
"time",
"series",
"with",
"all",
"timestamps",
"incremented",
"by",
"some",
"offset",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L274-L281 | train | 235,890 |
linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.normalize | def normalize(self):
"""
Return a new time series with all values normalized to 0 to 1.
:return: `None`
"""
maximum = self.max()
if maximum:
self.values = [value / maximum for value in self.values] | python | def normalize(self):
"""
Return a new time series with all values normalized to 0 to 1.
:return: `None`
"""
maximum = self.max()
if maximum:
self.values = [value / maximum for value in self.values] | [
"def",
"normalize",
"(",
"self",
")",
":",
"maximum",
"=",
"self",
".",
"max",
"(",
")",
"if",
"maximum",
":",
"self",
".",
"values",
"=",
"[",
"value",
"/",
"maximum",
"for",
"value",
"in",
"self",
".",
"values",
"]"
] | Return a new time series with all values normalized to 0 to 1.
:return: `None` | [
"Return",
"a",
"new",
"time",
"series",
"with",
"all",
"values",
"normalized",
"to",
"0",
"to",
"1",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L283-L291 | train | 235,891 |
linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.crop | def crop(self, start_timestamp, end_timestamp):
"""
Return a new TimeSeries object contains all the timstamps and values within
the specified range.
:param int start_timestamp: the start timestamp value
:param int end_timestamp: the end timestamp value
:return: :class:`TimeSeries` object.
"""
output = {}
for key, value in self.items():
if key >= start_timestamp and key <= end_timestamp:
output[key] = value
if output:
return TimeSeries(output)
else:
raise ValueError('TimeSeries data was empty or invalid.') | python | def crop(self, start_timestamp, end_timestamp):
"""
Return a new TimeSeries object contains all the timstamps and values within
the specified range.
:param int start_timestamp: the start timestamp value
:param int end_timestamp: the end timestamp value
:return: :class:`TimeSeries` object.
"""
output = {}
for key, value in self.items():
if key >= start_timestamp and key <= end_timestamp:
output[key] = value
if output:
return TimeSeries(output)
else:
raise ValueError('TimeSeries data was empty or invalid.') | [
"def",
"crop",
"(",
"self",
",",
"start_timestamp",
",",
"end_timestamp",
")",
":",
"output",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"self",
".",
"items",
"(",
")",
":",
"if",
"key",
">=",
"start_timestamp",
"and",
"key",
"<=",
"end_timestamp",
":",
"output",
"[",
"key",
"]",
"=",
"value",
"if",
"output",
":",
"return",
"TimeSeries",
"(",
"output",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'TimeSeries data was empty or invalid.'",
")"
] | Return a new TimeSeries object contains all the timstamps and values within
the specified range.
:param int start_timestamp: the start timestamp value
:param int end_timestamp: the end timestamp value
:return: :class:`TimeSeries` object. | [
"Return",
"a",
"new",
"TimeSeries",
"object",
"contains",
"all",
"the",
"timstamps",
"and",
"values",
"within",
"the",
"specified",
"range",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L293-L310 | train | 235,892 |
linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.average | def average(self, default=None):
"""
Calculate the average value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the average value or `None`.
"""
return numpy.asscalar(numpy.average(self.values)) if self.values else default | python | def average(self, default=None):
"""
Calculate the average value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the average value or `None`.
"""
return numpy.asscalar(numpy.average(self.values)) if self.values else default | [
"def",
"average",
"(",
"self",
",",
"default",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"asscalar",
"(",
"numpy",
".",
"average",
"(",
"self",
".",
"values",
")",
")",
"if",
"self",
".",
"values",
"else",
"default"
] | Calculate the average value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the average value or `None`. | [
"Calculate",
"the",
"average",
"value",
"over",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L312-L319 | train | 235,893 |
linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.median | def median(self, default=None):
"""
Calculate the median value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the median value or `None`.
"""
return numpy.asscalar(numpy.median(self.values)) if self.values else default | python | def median(self, default=None):
"""
Calculate the median value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the median value or `None`.
"""
return numpy.asscalar(numpy.median(self.values)) if self.values else default | [
"def",
"median",
"(",
"self",
",",
"default",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"asscalar",
"(",
"numpy",
".",
"median",
"(",
"self",
".",
"values",
")",
")",
"if",
"self",
".",
"values",
"else",
"default"
] | Calculate the median value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the median value or `None`. | [
"Calculate",
"the",
"median",
"value",
"over",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L321-L328 | train | 235,894 |
linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.max | def max(self, default=None):
"""
Calculate the maximum value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the maximum value or `None`.
"""
return numpy.asscalar(numpy.max(self.values)) if self.values else default | python | def max(self, default=None):
"""
Calculate the maximum value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the maximum value or `None`.
"""
return numpy.asscalar(numpy.max(self.values)) if self.values else default | [
"def",
"max",
"(",
"self",
",",
"default",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"asscalar",
"(",
"numpy",
".",
"max",
"(",
"self",
".",
"values",
")",
")",
"if",
"self",
".",
"values",
"else",
"default"
] | Calculate the maximum value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the maximum value or `None`. | [
"Calculate",
"the",
"maximum",
"value",
"over",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L330-L337 | train | 235,895 |
linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.min | def min(self, default=None):
"""
Calculate the minimum value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the maximum value or `None`.
"""
return numpy.asscalar(numpy.min(self.values)) if self.values else default | python | def min(self, default=None):
"""
Calculate the minimum value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the maximum value or `None`.
"""
return numpy.asscalar(numpy.min(self.values)) if self.values else default | [
"def",
"min",
"(",
"self",
",",
"default",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"asscalar",
"(",
"numpy",
".",
"min",
"(",
"self",
".",
"values",
")",
")",
"if",
"self",
".",
"values",
"else",
"default"
] | Calculate the minimum value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the maximum value or `None`. | [
"Calculate",
"the",
"minimum",
"value",
"over",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L339-L346 | train | 235,896 |
linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.percentile | def percentile(self, n, default=None):
"""
Calculate the Nth Percentile value over the time series.
:param int n: Integer value of the percentile to calculate.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the Nth percentile value or `None`.
"""
return numpy.asscalar(numpy.percentile(self.values, n)) if self.values else default | python | def percentile(self, n, default=None):
"""
Calculate the Nth Percentile value over the time series.
:param int n: Integer value of the percentile to calculate.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the Nth percentile value or `None`.
"""
return numpy.asscalar(numpy.percentile(self.values, n)) if self.values else default | [
"def",
"percentile",
"(",
"self",
",",
"n",
",",
"default",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"asscalar",
"(",
"numpy",
".",
"percentile",
"(",
"self",
".",
"values",
",",
"n",
")",
")",
"if",
"self",
".",
"values",
"else",
"default"
] | Calculate the Nth Percentile value over the time series.
:param int n: Integer value of the percentile to calculate.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the Nth percentile value or `None`. | [
"Calculate",
"the",
"Nth",
"Percentile",
"value",
"over",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L348-L356 | train | 235,897 |
linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.stdev | def stdev(self, default=None):
"""
Calculate the standard deviation of the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the standard deviation value or `None`.
"""
return numpy.asscalar(numpy.std(self.values)) if self.values else default | python | def stdev(self, default=None):
"""
Calculate the standard deviation of the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the standard deviation value or `None`.
"""
return numpy.asscalar(numpy.std(self.values)) if self.values else default | [
"def",
"stdev",
"(",
"self",
",",
"default",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"asscalar",
"(",
"numpy",
".",
"std",
"(",
"self",
".",
"values",
")",
")",
"if",
"self",
".",
"values",
"else",
"default"
] | Calculate the standard deviation of the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the standard deviation value or `None`. | [
"Calculate",
"the",
"standard",
"deviation",
"of",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L358-L365 | train | 235,898 |
linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.sum | def sum(self, default=None):
"""
Calculate the sum of all the values in the times series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the sum or `None`.
"""
return numpy.asscalar(numpy.sum(self.values)) if self.values else default | python | def sum(self, default=None):
"""
Calculate the sum of all the values in the times series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the sum or `None`.
"""
return numpy.asscalar(numpy.sum(self.values)) if self.values else default | [
"def",
"sum",
"(",
"self",
",",
"default",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"asscalar",
"(",
"numpy",
".",
"sum",
"(",
"self",
".",
"values",
")",
")",
"if",
"self",
".",
"values",
"else",
"default"
] | Calculate the sum of all the values in the times series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the sum or `None`. | [
"Calculate",
"the",
"sum",
"of",
"all",
"the",
"values",
"in",
"the",
"times",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L367-L374 | train | 235,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.