text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert(self, value, view):
"""Check that the value is a string and matches the pattern. """ |
# Accept only string values; when a regex pattern was configured for this
# template, the string must also match it.
if isinstance(value, BASESTRING):
    if self.pattern and not self.regex.match(value):
        # Pattern mismatch: report the failure against this view.
        self.fail(
            u"must match the pattern {0}".format(self.pattern),
            view
        )
    return value
else:
    # Not a string at all.
    # NOTE(review): the True flag presumably marks a type (vs. value)
    # error -- confirm against fail()'s signature.
    self.fail(u'must be a string', view, True) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert(self, value, view):
"""Ensure that the value follows at least one template. """ |
# Try each allowed candidate template in turn; the first one that
# converts the value cleanly wins.
is_mapping = isinstance(self.template, MappingTemplate)
for candidate in self.allowed:
    try:
        if is_mapping:
            # Mapping templates must be re-rooted at the parent view so
            # the candidate is looked up under this view's key.
            if isinstance(candidate, Filename) and \
                    candidate.relative_to:
                # Filenames with relative_to depend on sibling values,
                # so build a template that includes those relatives.
                next_template = candidate.template_with_relatives(
                    view,
                    self.template
                )
                next_template.subtemplates[view.key] = as_template(
                    candidate
                )
            else:
                next_template = MappingTemplate({view.key: candidate})
            return view.parent.get(next_template)[view.key]
        else:
            return view.get(candidate)
    except ConfigTemplateError:
        # Template-definition errors are bugs; never mask them.
        raise
    except ConfigError:
        # Candidate rejected the value -- try the next one.
        pass
    except ValueError as exc:
        # Normalize value errors into template errors.
        raise ConfigTemplateError(exc)
# No candidate accepted the value.
self.fail(
    u'must be one of {0}, not {1}'.format(
        repr(self.allowed), repr(value)
    ),
    view
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def export_live_eggs(self, env=False):
    """Add all of the eggs in the current environment to PYTHONPATH.

    Collects every ``*.egg`` entry already on ``sys.path`` plus this
    distribution's ``egg_info`` base directory, de-duplicates them while
    preserving order, and writes the result to
    ``os.environ['PYTHONPATH']``.

    NOTE(review): any pre-existing PYTHONPATH value is overwritten, not
    extended -- confirm that is the intended behavior.
    """
    path_eggs = [p for p in sys.path if p.endswith('.egg')]
    command = self.get_finalized_command("egg_info")
    egg_base = path.abspath(command.egg_base)
    # dict.fromkeys de-duplicates while keeping discovery order; the
    # previous set() produced a nondeterministic PYTHONPATH ordering.
    unique_path_eggs = dict.fromkeys(path_eggs + [egg_base])
    # Use os.pathsep (';' on Windows, ':' elsewhere) rather than a
    # hard-coded ':' so the variable is valid on every platform.
    os.environ['PYTHONPATH'] = os.pathsep.join(unique_path_eggs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_from_environment():
    """Look up the JavaScript runtime named by the EXECJS_RUNTIME environment variable.

    Returns the matching runtime object, or None when the variable is
    unset, empty, or names a runtime that is not available.
    """
    requested = os.environ.get("EXECJS_RUNTIME")
    if not requested:
        return None
    try:
        runtime = _find_runtime_by_name(requested)
    except exceptions.RuntimeUnavailableError:
        return None
    return runtime
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split_table_cells(self, row):
    """Iterate over the table cells in a row, yielding (cell, start_column) pairs.

    Columns are 1-based. Content before the first unescaped ``|`` is
    discarded (it is not a cell). Escape sequences: ``\\n`` becomes a
    newline, ``\\|`` and ``\\\\`` become the literal character; any other
    backslash pair is kept verbatim.
    """
    chars = iter(row)
    col = 0
    start_col = col + 1
    cell = ''
    first_cell = True
    while True:
        char = next(chars, None)
        col += 1
        if char == '|':
            if first_cell:
                # Content before the first | is not a real cell: discard
                # it and start accumulating the first actual cell.
                first_cell = False
                cell = ''
                start_col = col + 1
            else:
                yield (cell, start_col)
                cell = ''
                start_col = col + 1
        elif char == '\\':
            char = next(chars, None)
            col += 1
            if char is None:
                # Trailing backslash: keep it literally. A bare next()
                # here would raise StopIteration, which PEP 479 turns
                # into a RuntimeError inside a generator.
                cell += '\\'
                break
            if char == 'n':
                cell += '\n'
            else:
                if char not in ['|', '\\']:
                    cell += '\\'
                cell += char
        elif char:
            cell += char
        else:
            # End of row (next() returned None).
            break
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_freq(data_series, freq, atomic_freq="1 Min", series_type="cumulative"):
"""Resample data to a different frequency. This method can be used to upsample or downsample meter data. The assumption it makes to do so is that meter data is constant and averaged over the given periods. For instance, to convert billing-period data to daily data, this method first upsamples to the atomic frequency (1 minute freqency, by default), "spreading" usage evenly across all minutes in each period. Then it downsamples to hourly frequency and returns that result. With instantaneous series, the data is copied to all contiguous time intervals and the mean over `freq` is returned. **Caveats**: - This method gives a fair amount of flexibility in resampling as long as you are OK with the assumption that usage is constant over the period (this assumption is generally broken in observed data at large enough frequencies, so this caveat should not be taken lightly). Parameters data_series : :any:`pandas.Series` Data to resample. Should have a :any:`pandas.DatetimeIndex`. freq : :any:`str` The frequency to resample to. This should be given in a form recognized by the :any:`pandas.Series.resample` method. atomic_freq : :any:`str`, optional The "atomic" frequency of the intermediate data form. This can be adjusted to a higher atomic frequency to increase speed or memory performance. series_type : :any:`str`, {'cumulative', ‘instantaneous’}, default 'cumulative' Type of data sampling. 'cumulative' data can be spread over smaller time intervals and is aggregated using addition (e.g. meter data). 'instantaneous' data is copied (not spread) over smaller time intervals and is aggregated by averaging (e.g. weather data). Returns ------- resampled_data : :any:`pandas.Series` Data resampled to the given frequency. """ |
# TODO(philngo): make sure this complies with CalTRACK 2.2.2.1
if not isinstance(data_series, pd.Series):
    raise ValueError(
        "expected series, got object with class {}".format(data_series.__class__)
    )
if data_series.empty:
    return data_series
series = remove_duplicates(data_series)
target_freq = pd.Timedelta(atomic_freq)
# Length of each period = gap to the next index value; the final period
# has no successor, so it gets NaT (its value cannot be spread).
timedeltas = (series.index[1:] - series.index[:-1]).append(
    pd.TimedeltaIndex([pd.NaT])
)
if series_type == "cumulative":
    # Fraction of each period's total that belongs to one atomic slot.
    spread_factor = target_freq.total_seconds() / timedeltas.total_seconds()
    series_spread = series * spread_factor
    atomic_series = series_spread.asfreq(atomic_freq, method="ffill")
    resampled = atomic_series.resample(freq).sum()
    # .sum() yields 0 for all-NaN bins; use .mean() (NaN for such bins)
    # as a mask to restore NaNs where there was no underlying data.
    resampled_with_nans = atomic_series.resample(freq).mean()
    resampled = resampled[resampled_with_nans.notnull()].reindex(resampled.index)
elif series_type == "instantaneous":
    # Instantaneous values are copied (not spread), then averaged.
    atomic_series = series.asfreq(atomic_freq, method="ffill")
    resampled = atomic_series.resample(freq).mean()
# NOTE(review): an unrecognized series_type leaves `resampled` unbound
# and raises NameError below -- consider validating explicitly.
if resampled.index[-1] < series.index[-1]:
    # this adds a null at the end using the target frequency
    last_index = pd.date_range(resampled.index[-1], freq=freq, periods=2)[1:]
    resampled = (
        pd.concat([resampled, pd.Series(np.nan, index=last_index)])
        .resample(freq)
        .mean()
    )
return resampled |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_baseline_data( data, start=None, end=None, max_days=365, allow_billing_period_overshoot=False, ignore_billing_period_gap_for_day_count=False, ):
""" Filter down to baseline period data. .. note:: For compliance with CalTRACK, set ``max_days=365`` (section 2.2.1.1). Parameters data : :any:`pandas.DataFrame` or :any:`pandas.Series` The data to filter to baseline data. This data will be filtered down to an acceptable baseline period according to the dates passed as `start` and `end`, or the maximum period specified with `max_days`. start : :any:`datetime.datetime` A timezone-aware datetime that represents the earliest allowable start date for the baseline data. The stricter of this or `max_days` is used to determine the earliest allowable baseline period date. end : :any:`datetime.datetime` A timezone-aware datetime that represents the latest allowable end date for the baseline data, i.e., the latest date for which data is available before the intervention begins. max_days : :any:`int`, default 365 The maximum length of the period. Ignored if `end` is not set. The stricter of this or `start` is used to determine the earliest allowable baseline period date. allow_billing_period_overshoot : :any:`bool`, default False If True, count `max_days` from the end of the last billing data period that ends before the `end` date, rather than from the exact `end` date. Otherwise use the exact `end` date as the cutoff. ignore_billing_period_gap_for_day_count : :any:`bool`, default False If True, instead of going back `max_days` from either the `end` date or end of the last billing period before that date (depending on the value of the `allow_billing_period_overshoot` setting) and excluding the last period that began before that date, first check to see if excluding or including that period gets closer to a total of `max_days` of data. For example, with `max_days=365`, if an exact 365 period would targeted Feb 15, but the billing period went from Jan 20 to Feb 20, exclude that period for a total of ~360 days of data, because that's closer to 365 than ~390 days, which would be the total if that period was included. 
If, on the other hand, that period started Feb 10 and went to Mar 10, include the period, because ~370 days of data is closer to 365 than ~340. Returns ------- baseline_data, warnings : :any:`tuple` of (:any:`pandas.DataFrame` or :any:`pandas.Series`, :any:`list` of :any:`eemeter.EEMeterWarning`) Data for only the specified baseline period and any associated warnings. """ |
# `max_days` and `start` are mutually exclusive ways to bound the
# beginning of the baseline period.
if max_days is not None:
    if start is not None:
        raise ValueError( # pragma: no cover
            "If max_days is set, start cannot be set: start={}, max_days={}.".format(
                start, max_days
            )
        )
start_inf = False
if start is None:
    # py datetime min/max are out of range of pd.Timestamp min/max
    start_target = pytz.UTC.localize(pd.Timestamp.min)
    start_inf = True
else:
    start_target = start
end_inf = False
if end is None:
    end_limit = pytz.UTC.localize(pd.Timestamp.max)
    end_inf = True
else:
    end_limit = end
# copying prevents setting on slice warnings
data_before_end_limit = data[:end_limit].copy()
if ignore_billing_period_gap_for_day_count:
    # Count back from the last available data point rather than from
    # the requested end date.
    end_limit = data_before_end_limit.index.max()
if not end_inf and max_days is not None:
    start_target = end_limit - timedelta(days=max_days)
if allow_billing_period_overshoot:
    # adjust start limit to get a selection closest to max_days
    # also consider ffill for get_loc method - always picks previous
    try:
        loc = data_before_end_limit.index.get_loc(start_target, method="nearest")
    except (KeyError, IndexError): # pragma: no cover
        # Index lookup failed (e.g., empty index): fall back to the
        # unadjusted target.
        baseline_data = data_before_end_limit
        start_limit = start_target
    else:
        start_limit = data_before_end_limit.index[loc]
        baseline_data = data_before_end_limit[start_limit:].copy()
else:
    # use hard limit for baseline start
    start_limit = start_target
    baseline_data = data_before_end_limit[start_limit:].copy()
if baseline_data.dropna().empty:
    raise NoBaselineDataError()
# NOTE(review): the final value is nulled -- presumably the last index
# marks a period boundary rather than a usage value; confirm.
baseline_data.iloc[-1] = np.nan
data_end = data.index.max()
data_start = data.index.min()
return (
    baseline_data,
    _make_baseline_warnings(
        end_inf, start_inf, data_start, data_end, start_limit, end_limit
    ),
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_reporting_data( data, start=None, end=None, max_days=365, allow_billing_period_overshoot=False, ignore_billing_period_gap_for_day_count=False, ):
""" Filter down to reporting period data. Parameters data : :any:`pandas.DataFrame` or :any:`pandas.Series` The data to filter to reporting data. This data will be filtered down to an acceptable reporting period according to the dates passed as `start` and `end`, or the maximum period specified with `max_days`. start : :any:`datetime.datetime` A timezone-aware datetime that represents the earliest allowable start date for the reporting data, i.e., the earliest date for which data is available after the intervention begins. end : :any:`datetime.datetime` A timezone-aware datetime that represents the latest allowable end date for the reporting data. The stricter of this or `max_days` is used to determine the latest allowable reporting period date. max_days : :any:`int`, default 365 The maximum length of the period. Ignored if `start` is not set. The stricter of this or `end` is used to determine the latest allowable reporting period date. allow_billing_period_overshoot : :any:`bool`, default False If True, count `max_days` from the start of the first billing data period that starts after the `start` date, rather than from the exact `start` date. Otherwise use the exact `start` date as the cutoff. ignore_billing_period_gap_for_day_count : :any:`bool`, default False If True, instead of going forward `max_days` from either the `start` date or the `start` of the first billing period after that date (depending on the value of the `allow_billing_period_overshoot` setting) and excluding the first period that ended after that date, first check to see if excluding or including that period gets closer to a total of `max_days` of data. For example, with `max_days=365`, if an exact 365 period would targeted Feb 15, but the billing period went from Jan 20 to Feb 20, include that period for a total of ~370 days of data, because that's closer to 365 than ~340 days, which would be the total if that period was excluded. 
If, on the other hand, that period started Feb 10 and went to Mar 10, exclude the period, because ~360 days of data is closer to 365 than ~390. Returns ------- reporting_data, warnings : :any:`tuple` of (:any:`pandas.DataFrame` or :any:`pandas.Series`, :any:`list` of :any:`eemeter.EEMeterWarning`) Data for only the specified reporting period and any associated warnings. """ |
# `max_days` and `end` are mutually exclusive ways to bound the end of
# the reporting period.
if max_days is not None:
    if end is not None:
        raise ValueError( # pragma: no cover
            "If max_days is set, end cannot be set: end={}, max_days={}.".format(
                end, max_days
            )
        )
start_inf = False
if start is None:
    # py datetime min/max are out of range of pd.Timestamp min/max
    start_limit = pytz.UTC.localize(pd.Timestamp.min)
    start_inf = True
else:
    start_limit = start
end_inf = False
if end is None:
    end_target = pytz.UTC.localize(pd.Timestamp.max)
    end_inf = True
else:
    end_target = end
# copying prevents setting on slice warnings
data_after_start_limit = data[start_limit:].copy()
if ignore_billing_period_gap_for_day_count:
    # Count forward from the first available data point rather than
    # from the requested start date.
    start_limit = data_after_start_limit.index.min()
if not start_inf and max_days is not None:
    end_target = start_limit + timedelta(days=max_days)
if allow_billing_period_overshoot:
    # adjust end limit to get a selection closest to max_days
    # also consider bfill for get_loc method - always picks next
    try:
        loc = data_after_start_limit.index.get_loc(end_target, method="nearest")
    except (KeyError, IndexError): # pragma: no cover
        # Index lookup failed (e.g., empty index): fall back to the
        # unadjusted target.
        reporting_data = data_after_start_limit
        end_limit = end_target
    else:
        end_limit = data_after_start_limit.index[loc]
        reporting_data = data_after_start_limit[:end_limit].copy()
else:
    # use hard limit for reporting end
    end_limit = end_target
    reporting_data = data_after_start_limit[:end_limit].copy()
if reporting_data.dropna().empty:
    raise NoReportingDataError()
# NOTE(review): the final value is nulled -- presumably the last index
# marks a period boundary rather than a usage value; confirm.
reporting_data.iloc[-1] = np.nan
data_end = data.index.max()
data_start = data.index.min()
return (
    reporting_data,
    _make_reporting_warnings(
        end_inf, start_inf, data_start, data_end, start_limit, end_limit
    ),
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def modeled_savings( baseline_model, reporting_model, result_index, temperature_data, with_disaggregated=False, confidence_level=0.90, predict_kwargs=None, ):
""" Compute modeled savings, i.e., savings in which baseline and reporting usage values are based on models. This is appropriate for annualizing or weather normalizing models. Parameters baseline_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel` Model to use for predicting pre-intervention usage. reporting_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel` Model to use for predicting post-intervention usage. result_index : :any:`pandas.DatetimeIndex` The dates for which usage should be modeled. temperature_data : :any:`pandas.Series` Hourly-frequency timeseries of temperature data during the modeled period. with_disaggregated : :any:`bool`, optional If True, calculate modeled disaggregated usage estimates and savings. confidence_level : :any:`float`, optional The two-tailed confidence level used to calculate the t-statistic used in calculation of the error bands. Ignored if not computing error bands. predict_kwargs : :any:`dict`, optional Extra kwargs to pass to the baseline_model.predict and reporting_model.predict methods. Returns ------- results : :any:`pandas.DataFrame` DataFrame with modeled savings, indexed with the result_index. Will include the following columns: - ``modeled_baseline_usage`` - ``modeled_reporting_usage`` - ``modeled_savings`` If `with_disaggregated` is set to True, the following columns will also be in the results DataFrame: - ``modeled_baseline_base_load`` - ``modeled_baseline_cooling_load`` - ``modeled_baseline_heating_load`` - ``modeled_reporting_base_load`` - ``modeled_reporting_cooling_load`` - ``modeled_reporting_heating_load`` - ``modeled_base_load_savings`` - ``modeled_cooling_load_savings`` - ``modeled_heating_load_savings`` error_bands : :any:`dict`, optional If baseline_model and reporting_model are instances of CalTRACKUsagePerDayModelResults, will also return a dictionary of FSU and error bands for the aggregated energy savings over the normal year period. """ |
prediction_index = result_index
if predict_kwargs is None:
    predict_kwargs = {}
model_type = None # generic
# Only CalTRACK usage-per-day results support disaggregation and error
# bands; any other model type takes the generic path.
if isinstance(baseline_model, CalTRACKUsagePerDayModelResults):
    model_type = "usage_per_day"
if model_type == "usage_per_day" and with_disaggregated:
    predict_kwargs["with_disaggregated"] = True
def _predicted_usage(model):
    # Run the model over the result index; keep only the result frame.
    model_prediction = model.predict(
        prediction_index, temperature_data, **predict_kwargs
    )
    predicted_usage = model_prediction.result
    return predicted_usage
predicted_baseline_usage = _predicted_usage(baseline_model)
predicted_reporting_usage = _predicted_usage(reporting_model)
modeled_baseline_usage = predicted_baseline_usage["predicted_usage"].to_frame(
    "modeled_baseline_usage"
)
modeled_reporting_usage = predicted_reporting_usage["predicted_usage"].to_frame(
    "modeled_reporting_usage"
)
def modeled_savings_func(row):
    # Savings = baseline usage minus reporting usage, row by row.
    return row.modeled_baseline_usage - row.modeled_reporting_usage
results = modeled_baseline_usage.join(modeled_reporting_usage).assign(
    modeled_savings=modeled_savings_func
)
if model_type == "usage_per_day" and with_disaggregated:
    # Rename the per-component columns so baseline and reporting frames
    # can be joined side by side without collisions.
    modeled_baseline_usage_disaggregated = predicted_baseline_usage[
        ["base_load", "heating_load", "cooling_load"]
    ].rename(
        columns={
            "base_load": "modeled_baseline_base_load",
            "heating_load": "modeled_baseline_heating_load",
            "cooling_load": "modeled_baseline_cooling_load",
        }
    )
    modeled_reporting_usage_disaggregated = predicted_reporting_usage[
        ["base_load", "heating_load", "cooling_load"]
    ].rename(
        columns={
            "base_load": "modeled_reporting_base_load",
            "heating_load": "modeled_reporting_heating_load",
            "cooling_load": "modeled_reporting_cooling_load",
        }
    )
    def modeled_base_load_savings_func(row):
        return row.modeled_baseline_base_load - row.modeled_reporting_base_load
    def modeled_heating_load_savings_func(row):
        return (
            row.modeled_baseline_heating_load - row.modeled_reporting_heating_load
        )
    def modeled_cooling_load_savings_func(row):
        return (
            row.modeled_baseline_cooling_load - row.modeled_reporting_cooling_load
        )
    results = (
        results.join(modeled_baseline_usage_disaggregated)
        .join(modeled_reporting_usage_disaggregated)
        .assign(
            modeled_base_load_savings=modeled_base_load_savings_func,
            modeled_heating_load_savings=modeled_heating_load_savings_func,
            modeled_cooling_load_savings=modeled_cooling_load_savings_func,
        )
    )
results = results.dropna().reindex(results.index) # carry NaNs
error_bands = None
if model_type == "usage_per_day": # has totals_metrics
    error_bands = _compute_error_bands_modeled_savings(
        baseline_model.totals_metrics,
        reporting_model.totals_metrics,
        results,
        baseline_model.interval,
        reporting_model.interval,
        confidence_level,
    )
return results, error_bands |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _caltrack_predict_design_matrix( model_type, model_params, data, disaggregated=False, input_averages=False, output_averages=False, ):
""" An internal CalTRACK predict method for use with a design matrix of the form used in model fitting. Given a set model type, parameters, and daily temperatures, return model predictions. Parameters model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). model_params : :any:`dict` Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`. data : :any:`pandas.DataFrame` Data over which to predict. Assumed to be like the format of the data used for fitting, although it need only have the columns. If not giving data with a `pandas.DatetimeIndex` it must have the column `n_days`, representing the number of days per prediction period (otherwise inferred from DatetimeIndex). disaggregated : :any:`bool`, optional If True, return results as a :any:`pandas.DataFrame` with columns ``'base_load'``, ``'heating_load'``, and ``'cooling_load'`` input_averages : :any:`bool`, optional If HDD and CDD columns expressed as period totals, select False. If HDD and CDD columns expressed as period averages, select True. If prediction period is daily, results should be the same either way. Matters for billing. output_averages : :any:`bool`, optional If True, prediction returned as averages (not totals). If False, returned as totals. Returns ------- prediction : :any:`pandas.Series` or :any:`pandas.DataFrame` Returns results as series unless ``disaggregated=True``. """ |
# Column templates: all-zeros / all-ones series aligned to the data.
zeros = pd.Series(0, index=data.index)
ones = zeros + 1
if isinstance(data.index, pd.DatetimeIndex):
    days_per_period = day_counts(data.index)
else:
    try:
        days_per_period = data["n_days"]
    except KeyError:
        raise ValueError("Data needs DatetimeIndex or an n_days column.")
# TODO(philngo): handle different degree day methods and hourly temperatures
if model_type in ["intercept_only", "hdd_only", "cdd_only", "cdd_hdd"]:
    intercept = _get_parameter_or_raise(model_type, model_params, "intercept")
    if output_averages == False:
        # Totals: scale the per-day intercept by the period length.
        base_load = intercept * days_per_period
    else:
        base_load = intercept * ones
elif model_type is None:
    raise ValueError("Model not valid for prediction: model_type=None")
else:
    raise UnrecognizedModelTypeError(
        "invalid caltrack model type: {}".format(model_type)
    )
if model_type in ["hdd_only", "cdd_hdd"]:
    beta_hdd = _get_parameter_or_raise(model_type, model_params, "beta_hdd")
    heating_balance_point = _get_parameter_or_raise(
        model_type, model_params, "heating_balance_point"
    )
    hdd_column_name = "hdd_%s" % heating_balance_point
    hdd = data[hdd_column_name]
    # Convert between average and total degree-day conventions:
    # multiply by days when going avg->total, divide for total->avg,
    # and leave unchanged when input and output conventions match.
    if input_averages == True and output_averages == False:
        heating_load = hdd * beta_hdd * days_per_period
    elif input_averages == True and output_averages == True:
        heating_load = hdd * beta_hdd
    elif input_averages == False and output_averages == False:
        heating_load = hdd * beta_hdd
    else:
        heating_load = hdd * beta_hdd / days_per_period
else:
    heating_load = zeros
if model_type in ["cdd_only", "cdd_hdd"]:
    beta_cdd = _get_parameter_or_raise(model_type, model_params, "beta_cdd")
    cooling_balance_point = _get_parameter_or_raise(
        model_type, model_params, "cooling_balance_point"
    )
    cdd_column_name = "cdd_%s" % cooling_balance_point
    cdd = data[cdd_column_name]
    # Same average/total conversion as the heating branch.
    if input_averages == True and output_averages == False:
        cooling_load = cdd * beta_cdd * days_per_period
    elif input_averages == True and output_averages == True:
        cooling_load = cdd * beta_cdd
    elif input_averages == False and output_averages == False:
        cooling_load = cdd * beta_cdd
    else:
        cooling_load = cdd * beta_cdd / days_per_period
else:
    cooling_load = zeros
# If any of the rows of input data contained NaNs, restore the NaNs
# Note: If data contains ANY NaNs at all, this declares the entire row a NaN.
# TODO(philngo): Consider making this more nuanced.
def _restore_nans(load):
    # sum(skipna=False) is NaN for any row containing a NaN; keep only
    # fully-populated rows, then reindex to reinsert NaN rows.
    load = load[data.sum(axis=1, skipna=False).notnull()].reindex(data.index)
    return load
base_load = _restore_nans(base_load)
heating_load = _restore_nans(heating_load)
cooling_load = _restore_nans(cooling_load)
if disaggregated:
    return pd.DataFrame(
        {
            "base_load": base_load,
            "heating_load": heating_load,
            "cooling_load": cooling_load,
        }
    )
else:
    return base_load + heating_load + cooling_load |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def caltrack_usage_per_day_predict( model_type, model_params, prediction_index, temperature_data, degree_day_method="daily", with_disaggregated=False, with_design_matrix=False, ):
""" CalTRACK predict method. Given a model type, parameters, hourly temperatures, a :any:`pandas.DatetimeIndex` index over which to predict meter usage, return model predictions as totals for the period (so billing period totals, daily totals, etc.). Optionally include the computed design matrix or disaggregated usage in the output dataframe. Parameters model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). model_params : :any:`dict` Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`. temperature_data : :any:`pandas.DataFrame` Hourly temperature data to use for prediction. Time period should match the ``prediction_index`` argument. prediction_index : :any:`pandas.DatetimeIndex` Time period over which to predict. with_disaggregated : :any:`bool`, optional If True, return results as a :any:`pandas.DataFrame` with columns ``'base_load'``, ``'heating_load'``, and ``'cooling_load'``. with_design_matrix : :any:`bool`, optional If True, return results as a :any:`pandas.DataFrame` with columns ``'n_days'``, ``'n_days_dropped'``, ``n_days_kept``, and ``temperature_mean``. Returns ------- prediction : :any:`pandas.DataFrame` Columns are as follows: - ``predicted_usage``: Predicted usage values computed to match ``prediction_index``. - ``base_load``: modeled base load (only for ``with_disaggregated=True``). - ``cooling_load``: modeled cooling load (only for ``with_disaggregated=True``). - ``heating_load``: modeled heating load (only for ``with_disaggregated=True``). - ``n_days``: number of days in period (only for ``with_design_matrix=True``). - ``n_days_dropped``: number of days dropped because of insufficient data (only for ``with_design_matrix=True``). - ``n_days_kept``: number of days kept because of sufficient data (only for ``with_design_matrix=True``). - ``temperature_mean``: mean temperature during given period. (only for ``with_design_matrix=True``). predict_warnings: :any: list of EEMeterWarning if any. """ |
if model_params is None:
    raise MissingModelParameterError("model_params is None.")
predict_warnings = []
cooling_balance_points = []
heating_balance_points = []
if "cooling_balance_point" in model_params:
    cooling_balance_points.append(model_params["cooling_balance_point"])
if "heating_balance_point" in model_params:
    heating_balance_points.append(model_params["heating_balance_point"])
# Build the degree-day design matrix over the prediction period; totals
# (not per-day means) are requested because predictions are period totals.
design_matrix = compute_temperature_features(
    prediction_index,
    temperature_data,
    heating_balance_points=heating_balance_points,
    cooling_balance_points=cooling_balance_points,
    degree_day_method=degree_day_method,
    use_mean_daily_values=False,
)
if design_matrix.dropna().empty:
    # No usable temperature data: return an empty (but correctly
    # shaped) result with a warning instead of raising.
    if with_disaggregated:
        empty_columns = {
            "predicted_usage": [],
            "base_load": [],
            "heating_load": [],
            "cooling_load": [],
        }
    else:
        empty_columns = {"predicted_usage": []}
    predict_warnings.append(
        EEMeterWarning(
            qualified_name=("eemeter.caltrack.compute_temperature_features"),
            description=(
                "Design matrix empty, compute_temperature_features failed"
            ),
            data={"temperature_data": temperature_data},
        )
    )
    return ModelPrediction(
        pd.DataFrame(empty_columns),
        design_matrix=pd.DataFrame(),
        warnings=predict_warnings,
    )
# Period length in days, derived from kept+dropped counts (hours / 24
# when degree days were computed at hourly resolution).
if degree_day_method == "daily":
    design_matrix["n_days"] = (
        design_matrix.n_days_kept + design_matrix.n_days_dropped
    )
else:
    design_matrix["n_days"] = (
        design_matrix.n_hours_kept + design_matrix.n_hours_dropped
    ) / 24
results = _caltrack_predict_design_matrix(
    model_type,
    model_params,
    design_matrix,
    input_averages=False,
    output_averages=False,
).to_frame("predicted_usage")
if with_disaggregated:
    # Second pass returns base/heating/cooling loads as separate columns.
    disaggregated = _caltrack_predict_design_matrix(
        model_type,
        model_params,
        design_matrix,
        disaggregated=True,
        input_averages=False,
        output_averages=False,
    )
    results = results.join(disaggregated)
if with_design_matrix:
    results = results.join(design_matrix)
return ModelPrediction(
    result=results, design_matrix=design_matrix, warnings=predict_warnings
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_too_few_non_zero_degree_day_warning( model_type, balance_point, degree_day_type, degree_days, minimum_non_zero ):
    """Warn when a candidate model has too few non-zero degree days.

    Counts the strictly positive entries of ``degree_days`` and, when the
    count falls below ``minimum_non_zero``, returns a single-element list
    holding an EEMeterWarning describing the failure; otherwise returns
    an empty list.
    """
    n_non_zero = int((degree_days > 0).sum())
    if n_non_zero >= minimum_non_zero:
        return []
    # Warning identifiers and payload keys embed the model type and the
    # degree-day type (cdd/hdd) so consumers can filter on them.
    warning = EEMeterWarning(
        qualified_name=(
            "eemeter.caltrack_daily.{model_type}.too_few_non_zero_{degree_day_type}".format(
                model_type=model_type, degree_day_type=degree_day_type
            )
        ),
        description=(
            "Number of non-zero daily {degree_day_type} values below accepted minimum."
            " Candidate fit not attempted.".format(
                degree_day_type=degree_day_type.upper()
            )
        ),
        data={
            "n_non_zero_{degree_day_type}".format(
                degree_day_type=degree_day_type
            ): n_non_zero,
            "minimum_non_zero_{degree_day_type}".format(
                degree_day_type=degree_day_type
            ): minimum_non_zero,
            "{degree_day_type}_balance_point".format(
                degree_day_type=degree_day_type
            ): balance_point,
        },
    )
    return [warning]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_total_degree_day_too_low_warning( model_type, balance_point, degree_day_type, avg_degree_days, period_days, minimum_total, ):
    """Warn when the summed degree days fall below an accepted minimum.

    Reconstructs the total from per-period average degree days times the
    number of days in each period; when that total is below
    ``minimum_total``, returns a single-element list holding an
    EEMeterWarning, otherwise an empty list.
    """
    total_degree_days = (avg_degree_days * period_days).sum()
    if total_degree_days >= minimum_total:
        return []
    # Identifiers and payload keys embed the model and degree-day type.
    warning = EEMeterWarning(
        qualified_name=(
            "eemeter.caltrack_daily.{model_type}.total_{degree_day_type}_too_low".format(
                model_type=model_type, degree_day_type=degree_day_type
            )
        ),
        description=(
            "Total {degree_day_type} below accepted minimum."
            " Candidate fit not attempted.".format(
                degree_day_type=degree_day_type.upper()
            )
        ),
        data={
            "total_{degree_day_type}".format(
                degree_day_type=degree_day_type
            ): total_degree_days,
            "total_{degree_day_type}_minimum".format(
                degree_day_type=degree_day_type
            ): minimum_total,
            "{degree_day_type}_balance_point".format(
                degree_day_type=degree_day_type
            ): balance_point,
        },
    )
    return [warning]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_parameter_negative_warning(model_type, model_params, parameter):
    """Return an empty list or a single warning wrapped in a list indicating
    whether a model parameter is negative.

    Parameters
    ----------
    model_type : :any:`str`
        Model type (e.g., ``'cdd_hdd'``).
    model_params : :any:`dict`
        Parameters as stored in
        :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
    parameter : :any:`str`
        The name of the parameter, e.g., ``'intercept'``.

    Returns
    -------
    warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
        Empty list or list of single warning.
    """
    warnings = []
    # A missing parameter defaults to 0, which does not trigger the warning.
    if model_params.get(parameter, 0) < 0:
        warnings.append(
            EEMeterWarning(
                qualified_name=(
                    "eemeter.caltrack_daily.{model_type}.{parameter}_negative".format(
                        model_type=model_type, parameter=parameter
                    )
                ),
                description=(
                    "Model fit {parameter} parameter is negative. Candidate model rejected.".format(
                        parameter=parameter
                    )
                ),
                data=model_params,
            )
        )
    return warnings
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_parameter_p_value_too_high_warning(
    model_type, model_params, parameter, p_value, maximum_p_value
):
    """Return an empty list or a single warning wrapped in a list indicating
    whether a model parameter p-value is too high.

    Parameters
    ----------
    model_type : :any:`str`
        Model type (e.g., ``'cdd_hdd'``).
    model_params : :any:`dict`
        Parameters as stored in
        :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
    parameter : :any:`str`
        The name of the parameter, e.g., ``'intercept'``.
    p_value : :any:`float`
        The p-value of the parameter.
    maximum_p_value : :any:`float`
        The maximum allowable p-value of the parameter.

    Returns
    -------
    warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
        Empty list or list of single warning.
    """
    warnings = []
    if p_value > maximum_p_value:
        # Include both the offending p-value and the threshold alongside the
        # fitted parameters for downstream inspection.
        data = {
            "{}_p_value".format(parameter): p_value,
            "{}_maximum_p_value".format(parameter): maximum_p_value,
        }
        data.update(model_params)
        warnings.append(
            EEMeterWarning(
                qualified_name=(
                    "eemeter.caltrack_daily.{model_type}.{parameter}_p_value_too_high".format(
                        model_type=model_type, parameter=parameter
                    )
                ),
                description=(
                    "Model fit {parameter} p-value is too high. Candidate model rejected.".format(
                        parameter=parameter
                    )
                ),
                data=data,
            )
        )
    return warnings
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_fit_failed_candidate_model(model_type, formula):
    """Return a candidate model that indicates the fitting routine failed.

    Parameters
    ----------
    model_type : :any:`str`
        Model type (e.g., ``'cdd_hdd'``).
    formula : :any:`str`
        The candidate model formula.

    Returns
    -------
    candidate_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
        Candidate model instance with status ``'ERROR'``, and a warning that
        captures the traceback of the exception currently being handled.
    """
    # NOTE: must be called from within an ``except`` block so that
    # traceback.format_exc() captures the actual fitting error.
    warnings = [
        EEMeterWarning(
            qualified_name="eemeter.caltrack_daily.{}.model_results".format(model_type),
            description=(
                "Error encountered in statsmodels.formula.api.ols method. (Empty data?)"
            ),
            data={"traceback": traceback.format_exc()},
        )
    ]
    return CalTRACKUsagePerDayCandidateModel(
        model_type=model_type, formula=formula, status="ERROR", warnings=warnings
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_intercept_only_candidate_models(data, weights_col):
    """Return a list containing a single candidate intercept-only model.

    Parameters
    ----------
    data : :any:`pandas.DataFrame`
        A DataFrame containing at least the column ``meter_value``.
        DataFrames of this form can be made using the
        :any:`eemeter.create_caltrack_daily_design_matrix` or
        :any:`eemeter.create_caltrack_billing_design_matrix` methods.
    weights_col : :any:`str` or None
        The name of the column (if any) in ``data`` to use as weights.

    Returns
    -------
    candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
        List containing a single intercept-only candidate model.
    """
    model_type = "intercept_only"
    formula = "meter_value ~ 1"

    if weights_col is None:
        weights = 1  # equal weighting -> ordinary least squares
    else:
        weights = data[weights_col]

    try:
        model = smf.wls(formula=formula, data=data, weights=weights)
    except Exception:
        return [get_fit_failed_candidate_model(model_type, formula)]

    result = model.fit()

    # CalTrack 3.3.1.3
    model_params = {"intercept": result.params["Intercept"]}

    model_warnings = []

    # CalTrack 3.4.3.2: a negative intercept disqualifies the candidate.
    for parameter in ["intercept"]:
        model_warnings.extend(
            get_parameter_negative_warning(model_type, model_params, parameter)
        )

    if len(model_warnings) > 0:
        status = "DISQUALIFIED"
    else:
        status = "QUALIFIED"

    return [
        CalTRACKUsagePerDayCandidateModel(
            model_type=model_type,
            formula=formula,
            status=status,
            warnings=model_warnings,
            model_params=model_params,
            model=model,
            result=result,
            # an intercept-only model explains no variance by construction
            r_squared_adj=0,
        )
    ]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_single_cdd_only_candidate_model(
    data,
    minimum_non_zero_cdd,
    minimum_total_cdd,
    beta_cdd_maximum_p_value,
    weights_col,
    balance_point,
):
    """Return a single candidate cdd-only model for a particular balance point.

    Parameters
    ----------
    data : :any:`pandas.DataFrame`
        A DataFrame containing at least the columns ``meter_value`` and
        ``cdd_<balance_point>``.
    minimum_non_zero_cdd : :any:`int`
        Minimum allowable number of non-zero cooling degree day values.
    minimum_total_cdd : :any:`float`
        Minimum allowable total sum of cooling degree day values.
    beta_cdd_maximum_p_value : :any:`float`
        The maximum allowable p-value of the beta cdd parameter.
    weights_col : :any:`str` or None
        The name of the column (if any) in ``data`` to use as weights.
    balance_point : :any:`float`
        The cooling balance point for this model.

    Returns
    -------
    candidate_model : :any:`CalTRACKUsagePerDayCandidateModel`
        A single cdd-only candidate model, with any associated warnings.
    """
    model_type = "cdd_only"
    cdd_column = "cdd_%s" % balance_point
    formula = "meter_value ~ %s" % cdd_column

    if weights_col is None:
        weights = 1
    else:
        weights = data[weights_col]
    # Weights, when provided, are day counts per period.
    period_days = weights

    # Data-sufficiency screens: skip fitting entirely if they fail.
    degree_day_warnings = []
    degree_day_warnings.extend(
        get_total_degree_day_too_low_warning(
            model_type,
            balance_point,
            "cdd",
            data[cdd_column],
            period_days,
            minimum_total_cdd,
        )
    )
    degree_day_warnings.extend(
        get_too_few_non_zero_degree_day_warning(
            model_type, balance_point, "cdd", data[cdd_column], minimum_non_zero_cdd
        )
    )
    if len(degree_day_warnings) > 0:
        return CalTRACKUsagePerDayCandidateModel(
            model_type=model_type,
            formula=formula,
            status="NOT ATTEMPTED",
            warnings=degree_day_warnings,
        )

    try:
        model = smf.wls(formula=formula, data=data, weights=weights)
    except Exception:
        return get_fit_failed_candidate_model(model_type, formula)

    result = model.fit()
    r_squared_adj = result.rsquared_adj
    beta_cdd_p_value = result.pvalues[cdd_column]

    # CalTrack 3.3.1.3
    model_params = {
        "intercept": result.params["Intercept"],
        "beta_cdd": result.params[cdd_column],
        "cooling_balance_point": balance_point,
    }

    model_warnings = []

    # CalTrack 3.4.3.2. NOTE: the beta_cdd p-value check is applied for both
    # parameters, mirroring the original implementation.
    for parameter in ["intercept", "beta_cdd"]:
        model_warnings.extend(
            get_parameter_negative_warning(model_type, model_params, parameter)
        )
        model_warnings.extend(
            get_parameter_p_value_too_high_warning(
                model_type,
                model_params,
                parameter,
                beta_cdd_p_value,
                beta_cdd_maximum_p_value,
            )
        )

    if len(model_warnings) > 0:
        status = "DISQUALIFIED"
    else:
        status = "QUALIFIED"

    return CalTRACKUsagePerDayCandidateModel(
        model_type=model_type,
        formula=formula,
        status=status,
        warnings=model_warnings,
        model_params=model_params,
        model=model,
        result=result,
        r_squared_adj=r_squared_adj,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_cdd_only_candidate_models(
    data, minimum_non_zero_cdd, minimum_total_cdd, beta_cdd_maximum_p_value, weights_col
):
    """Return a list of all possible candidate cdd-only models.

    Parameters
    ----------
    data : :any:`pandas.DataFrame`
        A DataFrame containing at least the column ``meter_value`` and 1 to n
        columns with names of the form ``cdd_<balance_point>``. All columns
        with names of this form will be used to fit a candidate model.
    minimum_non_zero_cdd : :any:`int`
        Minimum allowable number of non-zero cooling degree day values.
    minimum_total_cdd : :any:`float`
        Minimum allowable total sum of cooling degree day values.
    beta_cdd_maximum_p_value : :any:`float`
        The maximum allowable p-value of the beta cdd parameter.
    weights_col : :any:`str` or None
        The name of the column (if any) in ``data`` to use as weights.

    Returns
    -------
    candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
        A list of cdd-only candidate models, with any associated warnings.
    """
    # Balance points are encoded in column names, e.g. "cdd_65" -> 65.
    balance_points = [int(col[4:]) for col in data.columns if col.startswith("cdd")]
    candidate_models = [
        get_single_cdd_only_candidate_model(
            data,
            minimum_non_zero_cdd,
            minimum_total_cdd,
            beta_cdd_maximum_p_value,
            weights_col,
            balance_point,
        )
        for balance_point in balance_points
    ]
    return candidate_models
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_single_hdd_only_candidate_model(
    data,
    minimum_non_zero_hdd,
    minimum_total_hdd,
    beta_hdd_maximum_p_value,
    weights_col,
    balance_point,
):
    """Return a single candidate hdd-only model for a particular balance point.

    Parameters
    ----------
    data : :any:`pandas.DataFrame`
        A DataFrame containing at least the columns ``meter_value`` and
        ``hdd_<balance_point>``.
    minimum_non_zero_hdd : :any:`int`
        Minimum allowable number of non-zero heating degree day values.
    minimum_total_hdd : :any:`float`
        Minimum allowable total sum of heating degree day values.
    beta_hdd_maximum_p_value : :any:`float`
        The maximum allowable p-value of the beta hdd parameter.
    weights_col : :any:`str` or None
        The name of the column (if any) in ``data`` to use as weights.
    balance_point : :any:`float`
        The heating balance point for this model.

    Returns
    -------
    candidate_model : :any:`CalTRACKUsagePerDayCandidateModel`
        A single hdd-only candidate model, with any associated warnings.
    """
    model_type = "hdd_only"
    hdd_column = "hdd_%s" % balance_point
    formula = "meter_value ~ %s" % hdd_column

    if weights_col is None:
        weights = 1
    else:
        weights = data[weights_col]
    # Weights, when provided, are day counts per period.
    period_days = weights

    # Data-sufficiency screens: skip fitting entirely if they fail.
    degree_day_warnings = []
    degree_day_warnings.extend(
        get_total_degree_day_too_low_warning(
            model_type,
            balance_point,
            "hdd",
            data[hdd_column],
            period_days,
            minimum_total_hdd,
        )
    )
    degree_day_warnings.extend(
        get_too_few_non_zero_degree_day_warning(
            model_type, balance_point, "hdd", data[hdd_column], minimum_non_zero_hdd
        )
    )
    if len(degree_day_warnings) > 0:
        return CalTRACKUsagePerDayCandidateModel(
            model_type=model_type,
            formula=formula,
            status="NOT ATTEMPTED",
            warnings=degree_day_warnings,
        )

    try:
        model = smf.wls(formula=formula, data=data, weights=weights)
    except Exception:
        return get_fit_failed_candidate_model(model_type, formula)

    result = model.fit()
    r_squared_adj = result.rsquared_adj
    beta_hdd_p_value = result.pvalues[hdd_column]

    # CalTrack 3.3.1.3
    model_params = {
        "intercept": result.params["Intercept"],
        "beta_hdd": result.params[hdd_column],
        "heating_balance_point": balance_point,
    }

    model_warnings = []

    # CalTrack 3.4.3.2. NOTE: the beta_hdd p-value check is applied for both
    # parameters, mirroring the original implementation.
    for parameter in ["intercept", "beta_hdd"]:
        model_warnings.extend(
            get_parameter_negative_warning(model_type, model_params, parameter)
        )
        model_warnings.extend(
            get_parameter_p_value_too_high_warning(
                model_type,
                model_params,
                parameter,
                beta_hdd_p_value,
                beta_hdd_maximum_p_value,
            )
        )

    if len(model_warnings) > 0:
        status = "DISQUALIFIED"
    else:
        status = "QUALIFIED"

    return CalTRACKUsagePerDayCandidateModel(
        model_type=model_type,
        formula=formula,
        status=status,
        warnings=model_warnings,
        model_params=model_params,
        model=model,
        result=result,
        r_squared_adj=r_squared_adj,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_single_cdd_hdd_candidate_model(
    data,
    minimum_non_zero_cdd,
    minimum_non_zero_hdd,
    minimum_total_cdd,
    minimum_total_hdd,
    beta_cdd_maximum_p_value,
    beta_hdd_maximum_p_value,
    weights_col,
    cooling_balance_point,
    heating_balance_point,
):
    """Return and fit a single candidate cdd_hdd model for a particular
    selection of cooling balance point and heating balance point.

    Parameters
    ----------
    data : :any:`pandas.DataFrame`
        A DataFrame containing at least the columns ``meter_value``,
        ``hdd_<heating_balance_point>`` and ``cdd_<cooling_balance_point>``.
    minimum_non_zero_cdd : :any:`int`
        Minimum allowable number of non-zero cooling degree day values.
    minimum_non_zero_hdd : :any:`int`
        Minimum allowable number of non-zero heating degree day values.
    minimum_total_cdd : :any:`float`
        Minimum allowable total sum of cooling degree day values.
    minimum_total_hdd : :any:`float`
        Minimum allowable total sum of heating degree day values.
    beta_cdd_maximum_p_value : :any:`float`
        The maximum allowable p-value of the beta cdd parameter.
    beta_hdd_maximum_p_value : :any:`float`
        The maximum allowable p-value of the beta hdd parameter.
    weights_col : :any:`str` or None
        The name of the column (if any) in ``data`` to use as weights.
    cooling_balance_point : :any:`float`
        The cooling balance point for this model.
    heating_balance_point : :any:`float`
        The heating balance point for this model.

    Returns
    -------
    candidate_model : :any:`CalTRACKUsagePerDayCandidateModel`
        A single cdd-hdd candidate model, with any associated warnings.
    """
    model_type = "cdd_hdd"
    cdd_column = "cdd_%s" % cooling_balance_point
    hdd_column = "hdd_%s" % heating_balance_point
    formula = "meter_value ~ %s + %s" % (cdd_column, hdd_column)

    if weights_col is None:
        weights = 1
    else:
        weights = data[weights_col]
    # Weights, when provided, are day counts per period.
    period_days = weights

    # Data-sufficiency screens for both cdd and hdd; skip fitting if any fail.
    degree_day_warnings = []
    degree_day_warnings.extend(
        get_total_degree_day_too_low_warning(
            model_type,
            cooling_balance_point,
            "cdd",
            data[cdd_column],
            period_days,
            minimum_total_cdd,
        )
    )
    degree_day_warnings.extend(
        get_too_few_non_zero_degree_day_warning(
            model_type,
            cooling_balance_point,
            "cdd",
            data[cdd_column],
            minimum_non_zero_cdd,
        )
    )
    degree_day_warnings.extend(
        get_total_degree_day_too_low_warning(
            model_type,
            heating_balance_point,
            "hdd",
            data[hdd_column],
            period_days,
            minimum_total_hdd,
        )
    )
    degree_day_warnings.extend(
        get_too_few_non_zero_degree_day_warning(
            model_type,
            heating_balance_point,
            "hdd",
            data[hdd_column],
            minimum_non_zero_hdd,
        )
    )
    if len(degree_day_warnings) > 0:
        return CalTRACKUsagePerDayCandidateModel(
            model_type=model_type,
            formula=formula,
            status="NOT ATTEMPTED",
            warnings=degree_day_warnings,
        )

    try:
        model = smf.wls(formula=formula, data=data, weights=weights)
    except Exception:
        return get_fit_failed_candidate_model(model_type, formula)

    result = model.fit()
    r_squared_adj = result.rsquared_adj
    beta_cdd_p_value = result.pvalues[cdd_column]
    beta_hdd_p_value = result.pvalues[hdd_column]

    # CalTrack 3.3.1.3
    model_params = {
        "intercept": result.params["Intercept"],
        "beta_cdd": result.params[cdd_column],
        "beta_hdd": result.params[hdd_column],
        "cooling_balance_point": cooling_balance_point,
        "heating_balance_point": heating_balance_point,
    }

    model_warnings = []

    # CalTrack 3.4.3.2. NOTE: both the cdd and hdd p-value checks are applied
    # for every parameter, mirroring the original implementation.
    for parameter in ["intercept", "beta_cdd", "beta_hdd"]:
        model_warnings.extend(
            get_parameter_negative_warning(model_type, model_params, parameter)
        )
        model_warnings.extend(
            get_parameter_p_value_too_high_warning(
                model_type,
                model_params,
                parameter,
                beta_cdd_p_value,
                beta_cdd_maximum_p_value,
            )
        )
        model_warnings.extend(
            get_parameter_p_value_too_high_warning(
                model_type,
                model_params,
                parameter,
                beta_hdd_p_value,
                beta_hdd_maximum_p_value,
            )
        )

    if len(model_warnings) > 0:
        status = "DISQUALIFIED"
    else:
        status = "QUALIFIED"

    return CalTRACKUsagePerDayCandidateModel(
        model_type=model_type,
        formula=formula,
        status=status,
        warnings=model_warnings,
        model_params=model_params,
        model=model,
        result=result,
        r_squared_adj=r_squared_adj,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_cdd_hdd_candidate_models(
    data,
    minimum_non_zero_cdd,
    minimum_non_zero_hdd,
    minimum_total_cdd,
    minimum_total_hdd,
    beta_cdd_maximum_p_value,
    beta_hdd_maximum_p_value,
    weights_col,
):
    """Return a list of candidate cdd_hdd models over all valid combinations
    of cooling balance point and heating balance point.

    Parameters
    ----------
    data : :any:`pandas.DataFrame`
        A DataFrame containing at least the column ``meter_value`` and 1 to n
        columns each of the form ``hdd_<heating_balance_point>`` and
        ``cdd_<cooling_balance_point>``.
    minimum_non_zero_cdd : :any:`int`
        Minimum allowable number of non-zero cooling degree day values.
    minimum_non_zero_hdd : :any:`int`
        Minimum allowable number of non-zero heating degree day values.
    minimum_total_cdd : :any:`float`
        Minimum allowable total sum of cooling degree day values.
    minimum_total_hdd : :any:`float`
        Minimum allowable total sum of heating degree day values.
    beta_cdd_maximum_p_value : :any:`float`
        The maximum allowable p-value of the beta cdd parameter.
    beta_hdd_maximum_p_value : :any:`float`
        The maximum allowable p-value of the beta hdd parameter.
    weights_col : :any:`str` or None
        The name of the column (if any) in ``data`` to use as weights.

    Returns
    -------
    candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
        A list of cdd_hdd candidate models, with any associated warnings.
    """
    # Balance points are encoded in column names, e.g. "cdd_65" -> 65.
    cooling_balance_points = [
        int(col[4:]) for col in data.columns if col.startswith("cdd")
    ]
    heating_balance_points = [
        int(col[4:]) for col in data.columns if col.startswith("hdd")
    ]

    # CalTrack 3.2.2.1: heating balance point must not exceed cooling balance point.
    candidate_models = [
        get_single_cdd_hdd_candidate_model(
            data,
            minimum_non_zero_cdd,
            minimum_non_zero_hdd,
            minimum_total_cdd,
            minimum_total_hdd,
            beta_cdd_maximum_p_value,
            beta_hdd_maximum_p_value,
            weights_col,
            cooling_balance_point,
            heating_balance_point,
        )
        for cooling_balance_point in cooling_balance_points
        for heating_balance_point in heating_balance_points
        if heating_balance_point <= cooling_balance_point
    ]
    return candidate_models
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def select_best_candidate(candidate_models):
    """Select and return the best candidate model based on adjusted r-squared
    and qualification.

    Parameters
    ----------
    candidate_models : :any:`list` of :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
        Candidate models to select from.

    Returns
    -------
    (best_candidate, warnings) : :any:`tuple` of :any:`eemeter.CalTRACKUsagePerDayCandidateModel` or :any:`None` and :any:`list` of :any:`eemeter.EEMeterWarning`
        The candidate model with the highest adjusted r-squared, or None if
        no candidates qualify, along with a list of warnings about this
        selection (or lack of selection).
    """
    best_r_squared_adj = -np.inf
    best_candidate = None

    # CalTrack 3.4.3.3: among QUALIFIED candidates, pick the highest
    # adjusted r-squared. Strict ">" keeps the first of any ties.
    for candidate in candidate_models:
        if (
            candidate.status == "QUALIFIED"
            and candidate.r_squared_adj > best_r_squared_adj
        ):
            best_candidate = candidate
            best_r_squared_adj = candidate.r_squared_adj

    if best_candidate is None:
        warnings = [
            EEMeterWarning(
                qualified_name="eemeter.caltrack_daily.select_best_candidate.no_candidates",
                description="No qualified model candidates available.",
                data={
                    "status_count:{}".format(status): count
                    for status, count in Counter(
                        [c.status for c in candidate_models]
                    ).items()
                },
            )
        ]
        return None, warnings

    return best_candidate, []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit_caltrack_usage_per_day_model(
    data,
    fit_cdd=True,
    use_billing_presets=False,
    minimum_non_zero_cdd=10,
    minimum_non_zero_hdd=10,
    minimum_total_cdd=20,
    minimum_total_hdd=20,
    beta_cdd_maximum_p_value=1,
    beta_hdd_maximum_p_value=1,
    weights_col=None,
    fit_intercept_only=True,
    fit_cdd_only=True,
    fit_hdd_only=True,
    fit_cdd_hdd=True,
):
    """CalTRACK daily and billing methods using a usage-per-day modeling
    strategy.

    Parameters
    ----------
    data : :any:`pandas.DataFrame`
        A DataFrame containing at least the column ``meter_value`` and 1 to n
        columns each of the form ``hdd_<heating_balance_point>`` and
        ``cdd_<cooling_balance_point>``. Should have a
        :any:`pandas.DatetimeIndex`.
    fit_cdd : :any:`bool`, optional
        If True, fit CDD models unless overridden by ``fit_cdd_only`` or
        ``fit_cdd_hdd`` flags. Should be set to ``False`` for gas meter data.
    use_billing_presets : :any:`bool`, optional
        Use presets appropriate for billing models. Otherwise defaults are
        appropriate for daily models.
    minimum_non_zero_cdd : :any:`int`, optional
        Minimum allowable number of non-zero cooling degree day values.
    minimum_non_zero_hdd : :any:`int`, optional
        Minimum allowable number of non-zero heating degree day values.
    minimum_total_cdd : :any:`float`, optional
        Minimum allowable total sum of cooling degree day values.
    minimum_total_hdd : :any:`float`, optional
        Minimum allowable total sum of heating degree day values.
    beta_cdd_maximum_p_value : :any:`float`, optional
        The maximum allowable p-value of the beta cdd parameter. The default
        (1) is the most permissive possible, for backwards compatibility with
        CalTRACK 1.0 methods.
    beta_hdd_maximum_p_value : :any:`float`, optional
        The maximum allowable p-value of the beta hdd parameter. The default
        (1) is the most permissive possible, for backwards compatibility with
        CalTRACK 1.0 methods.
    weights_col : :any:`str` or None, optional
        The name of the column (if any) in ``data`` to use as weights. Weight
        must be the number of days of data in the period.
    fit_intercept_only : :any:`bool`, optional
        If True, fit and consider intercept_only model candidates.
    fit_cdd_only : :any:`bool`, optional
        If True, fit and consider cdd_only model candidates. Ignored if
        ``fit_cdd=False``.
    fit_hdd_only : :any:`bool`, optional
        If True, fit and consider hdd_only model candidates.
    fit_cdd_hdd : :any:`bool`, optional
        If True, fit and consider cdd_hdd model candidates. Ignored if
        ``fit_cdd=False``.

    Returns
    -------
    model_results : :any:`eemeter.CalTRACKUsagePerDayModelResults`
        Results of running CalTRACK daily method. See
        :any:`eemeter.CalTRACKUsagePerDayModelResults` for more details.
    """
    if use_billing_presets:
        # CalTrack 3.2.2.2.1
        minimum_non_zero_cdd = 0
        minimum_non_zero_hdd = 0
        # CalTrack 3.2.2.2.2
        minimum_total_cdd = 20
        minimum_total_hdd = 20
        # CalTrack 3.4.2: billing-period weighting is mandatory.
        if weights_col is None:
            raise ValueError(
                "If using billing presets, the weights_col argument must be specified."
            )
        interval = "billing"
    else:
        interval = "daily"

    # cleans data to fully NaN rows that have missing temp or meter data
    data = overwrite_partial_rows_with_nan(data)

    if data.dropna().empty:
        return CalTRACKUsagePerDayModelResults(
            status="NO DATA",
            method_name="caltrack_usage_per_day",
            warnings=[
                EEMeterWarning(
                    qualified_name="eemeter.caltrack_usage_per_day.no_data",
                    description=("No data available. Cannot fit model."),
                    data={},
                )
            ],
        )

    # collect all candidate results, then validate all at once
    # CalTrack 3.4.3.1
    candidates = []

    if fit_intercept_only:
        candidates.extend(
            get_intercept_only_candidate_models(data, weights_col=weights_col)
        )

    if fit_hdd_only:
        candidates.extend(
            get_hdd_only_candidate_models(
                data=data,
                minimum_non_zero_hdd=minimum_non_zero_hdd,
                minimum_total_hdd=minimum_total_hdd,
                beta_hdd_maximum_p_value=beta_hdd_maximum_p_value,
                weights_col=weights_col,
            )
        )

    # cdd models ignored for gas
    if fit_cdd:
        if fit_cdd_only:
            candidates.extend(
                get_cdd_only_candidate_models(
                    data=data,
                    minimum_non_zero_cdd=minimum_non_zero_cdd,
                    minimum_total_cdd=minimum_total_cdd,
                    beta_cdd_maximum_p_value=beta_cdd_maximum_p_value,
                    weights_col=weights_col,
                )
            )

        if fit_cdd_hdd:
            candidates.extend(
                get_cdd_hdd_candidate_models(
                    data=data,
                    minimum_non_zero_cdd=minimum_non_zero_cdd,
                    minimum_non_zero_hdd=minimum_non_zero_hdd,
                    minimum_total_cdd=minimum_total_cdd,
                    minimum_total_hdd=minimum_total_hdd,
                    beta_cdd_maximum_p_value=beta_cdd_maximum_p_value,
                    beta_hdd_maximum_p_value=beta_hdd_maximum_p_value,
                    weights_col=weights_col,
                )
            )

    # find best candidate result
    best_candidate, candidate_warnings = select_best_candidate(candidates)

    warnings = candidate_warnings

    if best_candidate is None:
        status = "NO MODEL"
        r_squared_adj = None
    else:
        status = "SUCCESS"
        r_squared_adj = best_candidate.r_squared_adj

    model_result = CalTRACKUsagePerDayModelResults(
        status=status,
        method_name="caltrack_usage_per_day",
        interval=interval,
        model=best_candidate,
        candidates=candidates,
        r_squared_adj=r_squared_adj,
        warnings=warnings,
        settings={
            "fit_cdd": fit_cdd,
            "minimum_non_zero_cdd": minimum_non_zero_cdd,
            "minimum_non_zero_hdd": minimum_non_zero_hdd,
            "minimum_total_cdd": minimum_total_cdd,
            "minimum_total_hdd": minimum_total_hdd,
            "beta_cdd_maximum_p_value": beta_cdd_maximum_p_value,
            "beta_hdd_maximum_p_value": beta_hdd_maximum_p_value,
        },
    )

    if best_candidate is not None:
        # Parameter count feeds degrees-of-freedom adjustments in ModelMetrics;
        # intercepts are not counted here.
        if best_candidate.model_type in ["cdd_hdd"]:
            num_parameters = 2
        elif best_candidate.model_type in ["hdd_only", "cdd_only"]:
            num_parameters = 1
        else:
            num_parameters = 0

        # Metrics on usage-per-day averages...
        predicted_avgs = _caltrack_predict_design_matrix(
            best_candidate.model_type,
            best_candidate.model_params,
            data,
            input_averages=True,
            output_averages=True,
        )
        model_result.avgs_metrics = ModelMetrics(
            data.meter_value, predicted_avgs, num_parameters
        )

        # ...and on period totals (averages scaled by days per period).
        predicted_totals = _caltrack_predict_design_matrix(
            best_candidate.model_type,
            best_candidate.model_params,
            data,
            input_averages=True,
            output_averages=False,
        )
        days_per_period = day_counts(data.index)
        data_totals = data.meter_value * days_per_period
        model_result.totals_metrics = ModelMetrics(
            data_totals, predicted_totals, num_parameters
        )

    return model_result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_caltrack_candidate(
    candidate,
    best=False,
    ax=None,
    title=None,
    figsize=None,
    temp_range=None,
    alpha=None,
    **kwargs
):
    """Plot a CalTRACK candidate model.

    Parameters
    ----------
    candidate : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
        A candidate model with a predict function.
    best : :any:`bool`, optional
        Whether this is the best candidate or not.
    ax : :any:`matplotlib.axes.Axes`, optional
        Existing axes to plot on.
    title : :any:`str`, optional
        Chart title.
    figsize : :any:`tuple`, optional
        (width, height) of chart.
    temp_range : :any:`tuple`, optional
        (min, max) temperatures to plot model.
    alpha : :any:`float` between 0 and 1, optional
        Transparency, 0 fully transparent, 1 fully opaque.
    **kwargs
        Keyword arguments for :any:`matplotlib.axes.Axes.plot`

    Returns
    -------
    ax : :any:`matplotlib.axes.Axes` or None
        Matplotlib axes, or None if the candidate's status is not plottable.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:  # pragma: no cover
        raise ImportError("matplotlib is required for plotting.")

    if figsize is None:
        figsize = (10, 4)

    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)

    # Color encodes candidate status; other statuses are not plotted.
    if candidate.status == "QUALIFIED":
        color = "C2"
    elif candidate.status == "DISQUALIFIED":
        color = "C3"
    else:
        return

    if best:
        color = "C1"
        alpha = 1

    temp_min, temp_max = (30, 90) if temp_range is None else temp_range

    temps = np.arange(temp_min, temp_max)

    data = {"n_days": np.ones(temps.shape)}

    # Predict over a synthetic daily index with hourly-forward-filled temps so
    # the model curve can be drawn against temperature.
    prediction_index = pd.date_range(
        "2017-01-01T00:00:00Z", periods=len(temps), freq="D"
    )
    temps_hourly = pd.Series(temps, index=prediction_index).resample("H").ffill()
    prediction = candidate.predict(
        prediction_index, temps_hourly, "daily"
    ).result.predicted_usage

    plot_kwargs = {"color": color, "alpha": alpha or 0.3}
    plot_kwargs.update(kwargs)
    ax.plot(temps, prediction, **plot_kwargs)

    if title is not None:
        ax.set_title(title)

    return ax
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot(
    self,
    ax=None,
    title=None,
    figsize=None,
    with_candidates=False,
    candidate_alpha=None,
    temp_range=None,
):
    """Plot a model fit.

    Parameters
    ----------
    ax : :any:`matplotlib.axes.Axes`, optional
        Existing axes to plot on.
    title : :any:`str`, optional
        Chart title.
    figsize : :any:`tuple`, optional
        (width, height) of chart.
    with_candidates : :any:`bool`
        If True, also plot candidate models.
    candidate_alpha : :any:`float` between 0 and 1
        Transparency at which to plot candidate models. 0 fully transparent,
        1 fully opaque.
    temp_range : :any:`tuple`, optional
        (min, max) temperatures over which to plot the models.

    Returns
    -------
    ax : :any:`matplotlib.axes.Axes`
        Matplotlib axes.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:  # pragma: no cover
        raise ImportError("matplotlib is required for plotting.")

    if figsize is None:
        figsize = (10, 4)

    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)

    if temp_range is None:
        temp_range = (20, 90)

    # Candidates are drawn first so the best model is drawn on top.
    if with_candidates:
        for candidate in self.candidates:
            candidate.plot(ax=ax, temp_range=temp_range, alpha=candidate_alpha)
    self.model.plot(ax=ax, best=True, temp_range=temp_range)

    if title is not None:
        ax.set_title(title)

    return ax
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_time_series(meter_data, temperature_data, **kwargs):
    """Plot meter and temperature data in dual-axes time series.

    Parameters
    ----------
    meter_data : :any:`pandas.DataFrame`
        A :any:`pandas.DatetimeIndex`-indexed DataFrame of meter data with
        the column ``value``.
    temperature_data : :any:`pandas.Series`
        A :any:`pandas.DatetimeIndex`-indexed Series of temperature data.
    **kwargs
        Arbitrary keyword arguments to pass to
        :any:`plt.subplots <matplotlib.pyplot.subplots>`

    Returns
    -------
    axes : :any:`tuple` of :any:`matplotlib.axes.Axes`
        Tuple of ``(ax_meter_data, ax_temperature_data)``.
    """
    # TODO(philngo): include image in docs.
    try:
        import matplotlib.pyplot as plt
    except ImportError:  # pragma: no cover
        raise ImportError("matplotlib is required for plotting.")

    default_kwargs = {"figsize": (16, 4)}
    default_kwargs.update(kwargs)
    fig, ax1 = plt.subplots(**default_kwargs)

    # Energy use is drawn as steps so each meter period reads as constant.
    ax1.plot(
        meter_data.index,
        meter_data.value,
        color="C0",
        label="Energy Use",
        drawstyle="steps-post",
    )
    ax1.set_ylabel("Energy Use")

    # Temperature shares the x-axis on a second y-axis.
    ax2 = ax1.twinx()
    ax2.plot(
        temperature_data.index,
        temperature_data,
        color="C1",
        label="Temperature",
        alpha=0.8,
    )
    ax2.set_ylabel("Temperature")

    fig.legend()

    return ax1, ax2
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_energy_signature(
    meter_data,
    temperature_data,
    temp_col=None,
    ax=None,
    title=None,
    figsize=None,
    **kwargs
):
    """Plot meter and temperature data as an energy signature scatter plot.

    Parameters
    ----------
    meter_data : :any:`pandas.DataFrame`
        A :any:`pandas.DatetimeIndex`-indexed DataFrame of meter data with
        the column ``value``.
    temperature_data : :any:`pandas.Series`
        A :any:`pandas.DatetimeIndex`-indexed Series of temperature data.
    temp_col : :any:`str`, default ``'temperature_mean'``
        The name of the temperature column.
    ax : :any:`matplotlib.axes.Axes`
        The axis on which to plot.
    title : :any:`str`, optional
        Chart title.
    figsize : :any:`tuple`, optional
        (width, height) of chart.
    **kwargs
        Arbitrary keyword arguments to pass to
        :any:`matplotlib.axes.Axes.scatter`.

    Returns
    -------
    ax : :any:`matplotlib.axes.Axes`
        Matplotlib axes.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:  # pragma: no cover
        raise ImportError("matplotlib is required for plotting.")

    # format data: align usage-per-day values with period-mean temperatures
    temperature_mean = compute_temperature_features(meter_data.index, temperature_data)
    usage_per_day = compute_usage_per_day_feature(meter_data, series_name="meter_value")
    df = merge_features([usage_per_day, temperature_mean.temperature_mean])

    if figsize is None:
        figsize = (10, 4)

    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)

    if temp_col is None:
        temp_col = "temperature_mean"

    ax.scatter(df[temp_col], df.meter_value, **kwargs)
    ax.set_xlabel("Temperature")
    ax.set_ylabel("Energy Use per Day")

    if title is not None:
        ax.set_title(title)

    return ax
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def meter_data_from_csv( filepath_or_buffer, tz=None, start_col="start", value_col="value", gzipped=False, freq=None, **kwargs ):
""" Load meter data from a CSV file. Default format:: start,value 2017-01-01T00:00:00+00:00,0.31 2017-01-02T00:00:00+00:00,0.4 2017-01-03T00:00:00+00:00,0.58 Parameters filepath_or_buffer : :any:`str` or file-handle File path or object. tz : :any:`str`, optional E.g., ``'UTC'`` or ``'US/Pacific'`` start_col : :any:`str`, optional, default ``'start'`` Date period start column. value_col : :any:`str`, optional, default ``'value'`` Value column, can be in any unit. gzipped : :any:`bool`, optional Whether file is gzipped. freq : :any:`str`, optional If given, apply frequency to data using :any:`pandas.DataFrame.resample`. **kwargs Extra keyword arguments to pass to :any:`pandas.read_csv`, such as ``sep='|'``. """ |
read_csv_kwargs = {
"usecols": [start_col, value_col],
"dtype": {value_col: np.float64},
"parse_dates": [start_col],
"index_col": start_col,
}
if gzipped:
read_csv_kwargs.update({"compression": "gzip"})
# allow passing extra kwargs
read_csv_kwargs.update(kwargs)
df = pd.read_csv(filepath_or_buffer, **read_csv_kwargs).tz_localize("UTC")
if tz is not None:
df = df.tz_convert(tz)
if freq == "hourly":
df = df.resample("H").sum()
elif freq == "daily":
df = df.resample("D").sum()
return df |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def temperature_data_from_csv( filepath_or_buffer, tz=None, date_col="dt", temp_col="tempF", gzipped=False, freq=None, **kwargs ):
""" Load temperature data from a CSV file. Default format:: dt,tempF 2017-01-01T00:00:00+00:00,21 2017-01-01T01:00:00+00:00,22.5 2017-01-01T02:00:00+00:00,23.5 Parameters filepath_or_buffer : :any:`str` or file-handle File path or object. tz : :any:`str`, optional E.g., ``'UTC'`` or ``'US/Pacific'`` date_col : :any:`str`, optional, default ``'dt'`` Date period start column. temp_col : :any:`str`, optional, default ``'tempF'`` Temperature column. gzipped : :any:`bool`, optional Whether file is gzipped. freq : :any:`str`, optional If given, apply frequency to data using :any:`pandas.Series.resample`. **kwargs Extra keyword arguments to pass to :any:`pandas.read_csv`, such as ``sep='|'``. """ |
read_csv_kwargs = {
"usecols": [date_col, temp_col],
"dtype": {temp_col: np.float64},
"parse_dates": [date_col],
"index_col": date_col,
}
if gzipped:
read_csv_kwargs.update({"compression": "gzip"})
# allow passing extra kwargs
read_csv_kwargs.update(kwargs)
if tz is None:
tz = "UTC"
df = pd.read_csv(filepath_or_buffer, **read_csv_kwargs).tz_localize(tz)
if freq == "hourly":
df = df.resample("H").sum()
return df[temp_col] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def meter_data_from_json(data, orient="list"):
""" Load meter data from json. Default format:: [ ['2017-01-01T00:00:00+00:00', 3.5], ['2017-02-01T00:00:00+00:00', 0.4], ['2017-03-01T00:00:00+00:00', 0.46], ] Parameters data : :any:`list` List elements are each a rows of data. Returns ------- df : :any:`pandas.DataFrame` DataFrame with a single column (``'value'``) and a :any:`pandas.DatetimeIndex`. """ |
if orient == "list":
df = pd.DataFrame(data, columns=["start", "value"])
df["start"] = pd.DatetimeIndex(df.start).tz_localize("UTC")
df = df.set_index("start")
return df
else:
raise ValueError("orientation not recognized.") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def notify(self):
    """Notify the client.

    The function passed to ``App.respond`` will get called.
    """
    event_name = _NAME + str(self._uuid)
    if flask.has_request_context():
        # Inside a request: emit on the request's socket context.
        emit(event_name)
    else:
        # Outside a request (e.g. scheduled task): use the app's socketio.
        flask.current_app.extensions['socketio'].emit(event_name)
    # Yield to the eventlet hub so the message is actually flushed.
    eventlet.sleep()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(key):
    """Check that *key* is a ``str`` or ``bytes``.

    That's the only valid type of key; anything else raises ``KeyError``.
    """
    if isinstance(key, (str, bytes)):
        return
    raise KeyError('Key must be of type str or bytes, found type {}'.format(type(key)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pack(x: Any) -> bytes:
    """Encode ``x`` into msgpack with additional encoders."""
    try:
        return msgpack.packb(x, default=encoders)
    except TypeError as exc:
        # msgpack raises TypeError for unserializable objects; re-raise as a
        # friendlier project error.  The [16:] slice strips msgpack's fixed
        # message prefix — presumably "can not serialize"; TODO confirm this
        # prefix length against the pinned msgpack version.
        message = ('Serialization error, check the data passed to a do_ command. '
                   'Cannot serialize this object:\n') + str(exc)[16:]
        raise SerializationError(message)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_event(event: Callable) -> Callable:
    """Create an event property from a method signature."""
    @property  # type: ignore
    @wraps(event)
    def actualevent(self):  # pylint: disable=missing-docstring
        # Event methods follow the "on_<name>" convention; strip "on_".
        name = event.__name__[3:]
        try:
            # the getter post processing function
            # is preserved with an underscore
            getter = event(self).__name__
        except AttributeError:
            # No post-processing function was returned by the method.
            getter = None
        return Event(name, self._uuid, getter)  # pylint: disable=protected-access

    return actualevent
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _insert(wrap: str, tag: Optional[str]) -> str:
    """Insert the component tag into the wrapper html.

    Uses a forgiving formatter so other placeholders already present in the
    wrapper (like ``{socket}``) are left untouched.
    https://stackoverflow.com/a/11284026/744520
    """
    if tag is None:
        raise ValueError('tag cannot be None')
    # FormatDict leaves unknown placeholders intact instead of raising.
    mapping = FormatDict(component=tag)
    return string.Formatter().vformat(wrap, (), mapping)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_options(self, labels, values):
    """Replace the drop down fields.

    Parameters
    ----------
    labels : array-like
        List of strings which will be visible to the user.
    values : array-like
        List of values associated with the labels that are hidden from
        the user.

    Returns
    -------
    None
    """
    return [{'label': label, 'value': value}
            for label, value in zip(labels, values)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_options(self, labels: Sequence[str], values: Sequence[Union[str, int]]) -> Sequence[Dict]:
    """Replace the checkbox options.

    Parameters
    ----------
    labels : array-like
        List of strings which will be visible to the user.
    values : array-like
        List of values associated with the labels that are hidden from
        the user.

    Returns
    -------
    None
    """
    return [dict(label=lab, value=val) for lab, val in zip(labels, values)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_options(self, labels, values):
    """Replace the radio button options.

    Parameters
    ----------
    labels : Sequence
        List of strings which will be visible to the user.
    values : Sequence
        List of values associated with the labels that are hidden from
        the user.

    Returns
    -------
    None
    """
    options = []
    for text, data in zip(labels, values):
        options.append({'label': text, 'value': data})
    return options
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def node_version():
    """Get the installed node version as a tuple of ints."""
    raw = check_output(('node', '--version'))
    # Output looks like b"v10.1.0"; drop the leading "v" and split on dots.
    return tuple(int(part) for part in raw.strip()[1:].split(b'.'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    """Invoke the function repeatedly on a timer."""
    # Run the wrapped callable inside the stored app context.
    ret = eventlet.spawn(self.context(self.func))
    # Wait the interval before collecting the result / re-arming.
    eventlet.sleep(self.seconds)
    try:
        ret.wait()
    except Exception:  # pylint: disable=broad-except
        # Keep the scheduler alive when the callback fails; just report it.
        traceback.print_exc()
    # Re-arm by spawning this method again (green-thread "recursion").
    self.thread = eventlet.spawn(self.run)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def overlap(self, other: 'Span'):
    """Detect if two spans overlap."""
    # Two axis-aligned rectangles are disjoint iff one lies entirely to the
    # left of, right of, above, or below the other (end bounds exclusive).
    disjoint = (
        other.column_end <= self.column_start
        or self.column_end <= other.column_start
        or other.row_end <= self.row_start
        or self.row_end <= other.row_start
    )
    return not disjoint
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cells(self) -> Generator[Tuple[int, int], None, None]:
    """Generate all (row, column) cells covered by this span."""
    # Row-major order: all columns of a row before moving to the next row.
    for row in range(self.row_start, self.row_end):
        for column in range(self.column_start, self.column_end):
            yield row, column
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pixels(self, value: float) -> 'Size':
    """Set the size in pixels, returning self for chaining."""
    raise_not_number(value)
    self.maximum = f'{value}px'
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def min_pixels(self, value: float) -> 'Size':
    """Set the minimum size in pixels, returning self for chaining."""
    raise_not_number(value)
    self.minimum = f'{value}px'
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ems(self, value: float) -> 'Size':
    """Set the size in ems, returning self for chaining."""
    raise_not_number(value)
    self.maximum = f'{value}em'
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def min_ems(self, value: float) -> 'Size':
    """Set the minimum size in ems, returning self for chaining."""
    raise_not_number(value)
    self.minimum = f'{value}em'
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fraction(self, value: float) -> 'Size':
    """Set the fraction of free space to use, returning self for chaining."""
    raise_not_number(value)
    self.maximum = f'{value}fr'
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def percent(self, value: float) -> 'Size':
    """Set the percentage of free space to use, returning self for chaining."""
    raise_not_number(value)
    self.maximum = f'{value}%'
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def min_percent(self, value: float) -> 'Size':
    """Set the minimum percentage of free space, returning self for chaining."""
    raise_not_number(value)
    self.minimum = f'{value}%'
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pixels(self, value: int) -> 'Gap':
    """Set the margin in pixels, returning self for chaining."""
    raise_not_number(value)
    self.gap = f'{value}px'
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ems(self, value: int) -> 'Gap':
    """Set the margin in ems, returning self for chaining."""
    raise_not_number(value)
    self.gap = f'{value}em'
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def percent(self, value) -> 'Gap':
    """Set the margin as a percentage, returning self for chaining."""
    raise_not_number(value)
    self.gap = f'{value}%'
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, component: Union[Component, Sequence[Component]]) -> None:
    """Add a widget to the grid in the next available cell.

    Searches over columns then rows for available cells.

    Parameters
    ----------
    component : bowtie._Component
        A Bowtie widget instance.

    """
    try:
        self[Span(*self._available_cell())] = component
    except NoUnusedCellsError:
        # No free cells left: append the component to the most recently
        # added span instead of failing.
        span = list(self._spans.keys())[-1]
        self._spans[span] += component
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _available_cell(self) -> Tuple[int, int]: """Find next available cell first by row then column. First, construct a set containing all cells. Then iterate over the spans and remove occupied cells. """ |
cells = set(itertools.product(range(len(self.rows)), range(len(self.columns))))
for span in self._spans:
for cell in span.cells:
cells.remove(cell)
if not cells:
raise NoUnusedCellsError('No available cells')
return min(cells) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_sidebar(self, component: Component) -> None:
    """Add a widget to the sidebar.

    Parameters
    ----------
    component : bowtie._Component
        Add this component to the sidebar, it will be appended to the end.

    """
    # The sidebar check must come first so a disabled sidebar is reported
    # even when the component is also invalid.
    if not self.sidebar:
        raise NoSidebarError('Set `sidebar=True` if you want to use the sidebar.')
    if not isinstance(component, Component):
        raise ValueError('component must be Component type, found {}'.format(component))
    # self._track_widget(widget)
    self._controllers.append(component)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_route(self, view: View, path: str, exact: bool = True) -> None:
    """Add a view to the app at the given URL path.

    Parameters
    ----------
    view : View
    path : str
    exact : bool, optional

    """
    # Normalize to a leading-slash path (non-empty path assumed).
    if path[0] != '/':
        path = '/' + path
    # Each path may only be registered once.
    for route in self._routes:
        assert path != route.path, 'Cannot use the same path twice'
    self._routes.append(Route(view=view, path=path, exact=exact))
    # All routes serve the same template; the client router handles views.
    self.app.add_url_rule(
        path, path[1:], lambda: render_template('bowtie.html', title=self.title)
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def subscribe(self, *events: Union[Event, Pager]) -> Callable:
    """Call a function in response to an event.

    If more than one event is given, the decorated function will be given
    as many arguments as there are events.  If a pager is subscribed, the
    decorated function is called whenever the pager notifies.

    Parameters
    ----------
    *events : event or pager
        Bowtie event(s); at least one is required.

    """
    try:
        first_event = events[0]
    except IndexError:
        raise IndexError('Must subscribe to at least one event.')
    if len(events) != len(set(events)):
        raise ValueError(
            'Subscribed to the same event multiple times. All events must be unique.'
        )
    if len(events) > 1:
        # check if we are using any non stateful events
        for event in events:
            if isinstance(event, Pager):
                raise NotStatefulEvent('Pagers must be subscribed by itself.')
            if event.getter is None:
                raise NotStatefulEvent(
                    f'{event.uuid}.on_{event.name} is not a stateful event. '
                    'It must be used alone.'
                )

    def decorator(func: Callable) -> Callable:
        """Handle three types of events: pages, uploads, and normal events."""
        if isinstance(first_event, Pager):
            self._pages[first_event] = func
        elif first_event.name == 'upload':
            if first_event.uuid in self._uploads:
                # Upload widgets support a single handler; warn on rebind.
                warnings.warn(
                    ('Overwriting function "{func1}" with function '
                     '"{func2}" for upload object "{obj}".').format(
                        func1=self._uploads[first_event.uuid],
                        func2=func.__name__,
                        obj=COMPONENT_REGISTRY[first_event.uuid]
                    ), Warning)
            self._uploads[first_event.uuid] = func
        else:
            for event in events:
                # need to have `events` here to maintain order of arguments
                # not sure how to deal with mypy typing errors on events so ignoring
                self._subscriptions[event].append((events, func))  # type: ignore
        return func

    return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def schedule(self, seconds: float):
    """Return a decorator that calls its function periodically.

    Parameters
    ----------
    seconds : float
        Minimum interval of function calls.

    """
    def decorator(func: Callable):
        # Each scheduled function gets its own Scheduler bound to this app.
        self._schedules.append(Scheduler(self.app, seconds, func))
    return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build(self, notebook: Optional[str] = None) -> None:
    """Compile the Bowtie application.

    Writes the templates, installs node dependencies with yarn, and
    bundles the app with webpack.

    Parameters
    ----------
    notebook : str, optional
        When given, subprocess output is echoed line by line so it shows
        up in a notebook cell.

    Raises
    ------
    WebpackError
        If Node is too old or the webpack build fails.
    YarnError
        If installing node packages fails.

    """
    # BUG FIX: the original f-string interpolated the `node_version`
    # function object instead of its result.
    version = node_version()
    if version < _MIN_NODE_VERSION:
        raise WebpackError(
            f'Webpack requires at least version {_MIN_NODE_VERSION} of Node, '
            f'found version {version}.'
        )
    packages = self._write_templates()
    # Copy build configs into the build directory on first run only.
    for filename in ['package.json', 'webpack.prod.js', 'webpack.dev.js']:
        if not (self._build_dir / filename).is_file():
            sourcefile = self._package_dir / 'src' / filename
            shutil.copy(sourcefile, self._build_dir)
    # yarn exits 1 for warnings, so only exit codes > 1 are fatal.
    if self._run(['yarn', '--ignore-engines', 'install'], notebook=notebook) > 1:
        raise YarnError('Error installing node packages')
    if packages:
        # BUG FIX: materialize the generator into a set; the original
        # passed the raw generator, which was exhausted by the first
        # membership test in the comprehension below.
        installed = set(self._installed_packages())
        # Package specs look like "name@version"; compare by name only.
        new_packages = [x for x in packages if x.split('@')[0] not in installed]
        if new_packages:
            retval = self._run(
                ['yarn', '--ignore-engines', 'add'] + new_packages, notebook=notebook
            )
            if retval > 1:
                raise YarnError('Error installing node packages')
            elif retval == 1:
                print('Yarn error but trying to continue build')
    retval = self._run([_WEBPACK, '--config', 'webpack.dev.js'], notebook=notebook)
    if retval != 0:
        raise WebpackError('Error building with webpack')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _installed_packages(self) -> Generator[str, None, None]: """Extract installed packages as list from `package.json`.""" |
with (self._build_dir / 'package.json').open('r') as f:
packages = json.load(f)
yield from packages['dependencies'].keys() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_jspath(self) -> Path: """Create the source directory for the build.""" |
src = self._build_dir / 'bowtiejs'
os.makedirs(src, exist_ok=True)
return src |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _run(self, command: List[str], notebook: Optional[str] = None) -> int: """Run command from terminal and notebook and view output from subprocess.""" |
if notebook is None:
return Popen(command, cwd=self._build_dir).wait()
cmd = Popen(command, cwd=self._build_dir, stdout=PIPE, stderr=STDOUT)
while True:
line = cmd.stdout.readline()
if line == b'' and cmd.poll() is not None:
return cmd.poll()
print(line.decode('utf-8'), end='')
raise Exception() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def before_request(self) -> Optional[Response]:
    """Determine if a user is allowed to view this route."""
    auth = request.authorization
    if not auth or not self._check_auth(auth.username, auth.password):
        # Missing or bad credentials: challenge with HTTP Basic auth.
        return Response(
            'Could not verify your access level for that URL.\n'
            'You have to login with proper credentials', 401,
            {'WWW-Authenticate': 'Basic realm="Login Required"'}
        )
    # Record the authenticated user in the session.
    session['logged_in'] = auth.username
    # pylint wants this return statement
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_notebook(fullname: str):
    """Import a notebook as a module."""
    shell = InteractiveShell.instance()
    path = fullname
    # load the notebook object (nbformat version 4)
    with open(path, 'r', encoding='utf-8') as f:
        notebook = read(f, 4)
    # create the module and add it to sys.modules
    mod = types.ModuleType(fullname)
    mod.__file__ = path
    # mod.__loader__ = self
    # Cells may call get_ipython(); make it available in the module ns.
    mod.__dict__['get_ipython'] = get_ipython
    sys.modules[fullname] = mod
    # extra work to ensure that magics that would affect the user_ns
    # actually affect the notebook module's ns
    save_user_ns = shell.user_ns
    shell.user_ns = mod.__dict__
    try:
        for cell in notebook.cells:
            if cell.cell_type == 'code':
                try:
                    # only run valid python code (skip magics / shell lines)
                    ast.parse(cell.source)
                except SyntaxError:
                    continue
                try:
                    # pylint: disable=exec-used
                    exec(cell.source, mod.__dict__)
                except NameError:
                    # Print the offending cell to aid debugging, then re-raise.
                    print(cell.source)
                    raise
    finally:
        # Always restore the shell's original namespace.
        shell.user_ns = save_user_ns
    return mod
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _message(status, content):
    """Send message interface.

    Parameters
    ----------
    status : str
        The type of message
    content : str

    """
    event = f'message.{status}'
    payload = dict(data=pack(content))
    if flask.has_request_context():
        # Inside a request: emit on the request's socket context.
        emit(event, payload)
    else:
        # Outside a request: fall back to the app's socketio extension.
        flask.current_app.extensions['socketio'].emit(event, payload)
    # Yield to the eventlet hub so the message is actually flushed.
    eventlet.sleep()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _make_columns(columns: List[Union[int, str]]) -> List[Dict]: """Transform list of columns into AntTable format.""" |
return [dict(title=str(c),
dataIndex=str(c),
key=str(c))
for c in columns] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _make_data(data) -> Tuple[List[Dict], List[Dict]]:
    """Transform table data into (rows, columns) JSON for AntTable."""
    records = []
    for idx, row in data.iterrows():
        # Column labels and the row key must be strings for the JS side.
        row.index = row.index.astype(str)
        record = row.to_dict()
        record['key'] = str(idx)
        records.append(record)
    return records, Table._make_columns(data.columns)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def toParallelTargets(suite, targets):
    """Produce a list of targets which should be tested in parallel.

    For the most part this will be a list of test modules.  The exception
    is when a dotted name representing something more granular than a
    module was input (like an individual test case or test method).
    """
    # '.' means "current directory", not a specific target; drop it.
    targets = filter(lambda x: x != '.', targets)
    # First, convert the suite to a proto test list - proto tests nicely
    # parse things like the fully dotted name of the test and the
    # finest-grained module it belongs to, which simplifies our job.
    proto_test_list = toProtoTestList(suite)
    # Extract a list of the modules that all of the discovered tests are in
    modules = set([x.module for x in proto_test_list])
    # Get the list of user-specified targets that are NOT modules
    # (i.e. no discovered module name contains the target as a substring).
    non_module_targets = []
    for target in targets:
        if not list(filter(None, [target in x for x in modules])):
            non_module_targets.append(target)
    # Main loop -- iterating through all loaded test methods
    parallel_targets = []
    for test in proto_test_list:
        found = False
        for target in non_module_targets:
            # target is a dotted name of either a test case or test method
            # here test.dotted name is always a dotted name of a method
            if (target in test.dotted_name):
                if target not in parallel_targets:
                    # Explicitly specified targets get their own entry to
                    # run parallel to everything else
                    parallel_targets.append(target)
                found = True
                break
        if found:
            continue
        # This test does not appear to be part of a specified target, so
        # its entire module must have been discovered, so just add the
        # whole module to the list if we haven't already.
        if test.module not in parallel_targets:
            parallel_targets.append(test.module)
    return parallel_targets
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getConfig(filepath=None):  # pragma: no cover
    """Get the Green config file settings.

    All available config files are read.  If settings are in multiple
    configs, the last value encountered wins.  Values specified on the
    command-line take precedence over all config file settings.

    Returns: A ConfigParser object.
    """
    parser = configparser.ConfigParser()
    filepaths = []
    # Files later in the list override earlier ones, so append in
    # increasing priority order.
    # Lowest priority goes first in the list
    home = os.getenv("HOME")
    if home:
        default_filepath = os.path.join(home, ".green")
        if os.path.isfile(default_filepath):
            filepaths.append(default_filepath)
    # Low priority
    env_filepath = os.getenv("GREEN_CONFIG")
    if env_filepath and os.path.isfile(env_filepath):
        filepaths.append(env_filepath)
    # Medium priority
    for cfg_file in ("setup.cfg", ".green"):
        cwd_filepath = os.path.join(os.getcwd(), cfg_file)
        if os.path.isfile(cwd_filepath):
            filepaths.append(cwd_filepath)
    # High priority
    if filepath and os.path.isfile(filepath):
        filepaths.append(filepath)
    if filepaths:
        # Remember which files were loaded (module-level, for reporting).
        global files_loaded
        files_loaded = filepaths
        # Python 3 has parser.read_file(iterator) while Python2 has
        # parser.readfp(obj_with_readline)
        read_func = getattr(parser, 'read_file', getattr(parser, 'readfp'))
        for filepath in filepaths:
            # Users are expected to put a [green] section
            # only if they use setup.cfg
            if filepath.endswith('setup.cfg'):
                with open(filepath) as f:
                    read_func(f)
            else:
                # ConfigFile wraps a bare config so it parses as a section.
                read_func(ConfigFile(filepath))
    return parser
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def debug(message, level=1):
    """Log *message* when its *level* is within the current debug level.

    So we can tune how much debug output we get when we turn it on.
    """
    if level > debug_level:
        return
    # Indent two spaces per level above 1 so nesting is visible.
    indent = '  ' * (level - 1)
    logging.debug(indent + str(message))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(suite, stream, args, testing=False):
    """Run the given test case or test suite with the specified arguments.

    Any args.stream passed in will be wrapped in a GreenStream.
    """
    if not issubclass(GreenStream, type(stream)):
        stream = GreenStream(stream, disable_windows=args.disable_windows,
                             disable_unidecode=args.disable_unidecode)
    result = GreenTestResult(args, stream)

    # Note: Catching SIGINT isn't supported by Python on windows (python
    # "WONTFIX" issue 18040)
    installHandler()
    registerResult(result)

    with warnings.catch_warnings():
        if args.warnings:  # pragma: no cover
            # if args.warnings is set, use it to filter all the warnings
            warnings.simplefilter(args.warnings)
            # if the filter is 'default' or 'always', special-case the
            # warnings from the deprecated unittest methods to show them
            # no more than once per module, because they can be fairly
            # noisy.  The -Wd and -Wa flags can be used to bypass this
            # only when args.warnings is None.
            if args.warnings in ['default', 'always']:
                # BUG FIX: raw string -- '\w' was an invalid escape
                # sequence in a regular string literal (DeprecationWarning
                # since Python 3.6); the pattern bytes are unchanged.
                warnings.filterwarnings('module',
                                        category=DeprecationWarning,
                                        message=r'Please use assert\w+ instead.')

        result.startTestRun()

        pool = LoggingDaemonlessPool(processes=args.processes or None,
                                     initializer=InitializerOrFinalizer(args.initializer),
                                     finalizer=InitializerOrFinalizer(args.finalizer))
        manager = multiprocessing.Manager()
        # One queue per parallel target; the runner streams results back.
        targets = [(target, manager.Queue())
                   for target in toParallelTargets(suite, args.targets)]
        if targets:
            for index, (target, queue) in enumerate(targets):
                if args.run_coverage:
                    coverage_number = index + 1
                else:
                    coverage_number = None
                debug("Sending {} to runner {}".format(target, poolRunner))
                pool.apply_async(
                    poolRunner,
                    (target, queue, coverage_number, args.omit_patterns, args.cov_config_file))
            pool.close()
            for target, queue in targets:
                abort = False
                while True:
                    msg = queue.get()
                    # Sentinel value, we're done
                    if not msg:
                        break
                    else:
                        # Result guaranteed after this message, we're
                        # currently waiting on this test, so print out
                        # the white 'processing...' version of the output
                        result.startTest(msg)
                        proto_test_result = queue.get()
                        result.addProtoTestResult(proto_test_result)
                    if result.shouldStop:
                        abort = True
                        break
                if abort:
                    break
        pool.close()
        pool.join()

        result.stopTestRun()

    removeResult(result)
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def solve_kkt_ir(Q, D, G, A, rx, rs, rz, ry, niter=1):
    """Inefficient iterative refinement."""
    nineq, nz, neq, nBatch = get_sizes(G, A)

    # Regularize the diagonal blocks so the factorization is stable.
    eps = 1e-7
    Q_tilde = Q + eps * torch.eye(nz).type_as(Q).repeat(nBatch, 1, 1)
    D_tilde = D + eps * torch.eye(nineq).type_as(Q).repeat(nBatch, 1, 1)

    # Initial solve of the regularized KKT system.
    dx, ds, dz, dy = factor_solve_kkt_reg(
        Q_tilde, D_tilde, G, A, rx, rs, rz, ry, eps)
    # Residual of the *unregularized* system at the current iterate.
    res = kkt_resid_reg(Q, D, G, A, eps,
                        dx, ds, dz, dy, rx, rs, rz, ry)
    resx, ress, resz, resy = res
    res = resx
    for k in range(niter):
        # Solve for a correction against the negated residuals.
        # resy is None when there are no equality constraints.
        ddx, dds, ddz, ddy = factor_solve_kkt_reg(Q_tilde, D_tilde, G, A, -resx, -ress, -resz,
                                                  -resy if resy is not None else None,
                                                  eps)
        # Apply the correction, skipping absent (None) components.
        dx, ds, dz, dy = [v + dv if v is not None else None
                          for v, dv in zip((dx, ds, dz, dy), (ddx, dds, ddz, ddy))]
        res = kkt_resid_reg(Q, D, G, A, eps,
                            dx, ds, dz, dy, rx, rs, rz, ry)
        resx, ress, resz, resy = res
        # res = torch.cat(resx)
        res = resx

    return dx, ds, dz, dy
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def timeout(seconds=None, use_signals=True, timeout_exception=TimeoutError, exception_message=None):
    """Add a timeout parameter to a function and return it.

    :param seconds: optional time limit in seconds or fractions of a
        second.  If None is passed, no timeout is applied.  This adds some
        flexibility to the usage: you can disable timing out depending on
        the settings.
    :type seconds: float
    :param use_signals: flag indicating whether signals should be used for
        timing the function out or the multiprocessing.  When using
        multiprocessing, timeout granularity is limited to 10ths of a
        second.
    :type use_signals: bool

    :raises: TimeoutError if time limit is reached

    It is illegal to pass anything other than a function as the first
    parameter.  The function is wrapped and returned to the caller.
    """
    def decorate(function):
        # No limit configured: return the function unwrapped.
        if not seconds:
            return function

        if use_signals:
            # SIGALRM-based timeout: only works in the main thread.
            def handler(signum, frame):
                _raise_exception(timeout_exception, exception_message)

            @wraps(function)
            def new_function(*args, **kwargs):
                # A per-call `timeout=` kwarg overrides the decorator value.
                new_seconds = kwargs.pop('timeout', seconds)
                if new_seconds:
                    old = signal.signal(signal.SIGALRM, handler)
                    signal.setitimer(signal.ITIMER_REAL, new_seconds)
                try:
                    return function(*args, **kwargs)
                finally:
                    # Disarm the timer and restore the previous handler.
                    if new_seconds:
                        signal.setitimer(signal.ITIMER_REAL, 0)
                        signal.signal(signal.SIGALRM, old)
            return new_function
        else:
            # Process-based timeout: run the function in a child process.
            @wraps(function)
            def new_function(*args, **kwargs):
                timeout_wrapper = _Timeout(function, timeout_exception, exception_message, seconds)
                return timeout_wrapper(*args, **kwargs)
            return new_function
    return decorate
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _target(queue, function, *args, **kwargs):
    """Run a function with arguments and return output via a queue.

    This is a helper function for the Process created in _Timeout.  It
    runs `function` with the given positional and keyword arguments and
    reports the outcome through `queue` as a `(success, payload)` pair:
    `(True, return_value)` on success, `(False, exception_instance)` on
    failure, so _Timeout's `value` property can re-raise it in the parent.
    """
    try:
        queue.put((True, function(*args, **kwargs)))
    # Deliberately catch everything (including KeyboardInterrupt /
    # SystemExit, as the original bare `except:` did) — any failure in the
    # child must be shipped back to the parent rather than lost.  Binding
    # the instance directly replaces the older sys.exc_info()[1] idiom.
    except BaseException as exc:
        queue.put((False, exc))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cancel(self):
    """Terminate any possible execution of the embedded function.

    Kills the worker process if it is still running, then always raises
    the configured timeout exception via `_raise_exception` — this method
    is only invoked once the deadline has already passed.
    """
    if self.__process.is_alive():
        self.__process.terminate()
    _raise_exception(self.__timeout_exception, self.__exception_message)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ready(self):
    """Read-only property indicating status of "value" property.

    Raises the timeout exception (via cancel()) if the deadline has
    passed; otherwise returns True once the worker has put its result on
    the queue.
    """
    # Past the deadline: cancel() terminates the worker and raises.
    if self.__timeout < time.time():
        self.cancel()
    # NOTE(review): full() being a readiness signal presumably relies on
    # the result queue having maxsize 1 — confirm against __init__; the
    # extra `not empty()` guard looks redundant in that case.
    return self.__queue.full() and not self.__queue.empty()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def value(self):
    """Read-only property containing data returned from function.

    Returns the worker's result when ready; re-raises the worker's
    exception if it failed.  Implicitly returns None when the result is
    not ready yet (accessing `self.ready` may itself raise on timeout).
    """
    if self.ready is True:
        # The queue holds a (success_flag, payload) pair put by _target.
        flag, load = self.__queue.get()
        if flag:
            return load
        # Worker failed: `load` is the exception instance; re-raise here.
        raise load
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bbox2path(xmin, xmax, ymin, ymax):
    """Converts a bounding box 4-tuple to a Path object.

    The rectangle is traversed bottom edge, right edge, reversed top edge,
    reversed left edge, so the resulting Path is continuous and closed.
    """
    bottom_left = xmin + 1j*ymin
    bottom_right = xmax + 1j*ymin
    top_left = xmin + 1j*ymax
    top_right = xmax + 1j*ymax
    bottom = Line(bottom_left, bottom_right)
    top = Line(top_left, top_right)
    right_side = Line(bottom_right, top_right)
    left_side = Line(bottom_left, top_left)
    return Path(bottom, right_side, top.reversed(), left_side.reversed())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transform(curve, tf):
    """Transforms the curve by the homogeneous transformation matrix tf.

    :param curve: a Path, Line, QuadraticBezier, CubicBezier, or Arc.
    :param tf: a 3x3 homogeneous transformation matrix (numpy array).
    :raises TypeError: for any other input type.
    """
    def to_point(p):
        # Embed a complex point as a homogeneous column with w = 1
        # (affected by the translation part of tf).
        return np.array([[p.real], [p.imag], [1.0]])
    def to_vector(z):
        # Embed a complex direction as a homogeneous column with w = 0
        # (NOT affected by translation).
        return np.array([[z.real], [z.imag], [0.0]])
    def to_complex(v):
        # Collapse a homogeneous column vector back to a complex number.
        return v.item(0) + 1j * v.item(1)
    if isinstance(curve, Path):
        # Transform each subsegment independently.
        return Path(*[transform(segment, tf) for segment in curve])
    elif is_bezier_segment(curve):
        # Bezier curves are affine-invariant: transforming the control
        # points transforms the whole curve.
        return bpoints2bezier([to_complex(tf.dot(to_point(p)))
                               for p in curve.bpoints()])
    elif isinstance(curve, Arc):
        new_start = to_complex(tf.dot(to_point(curve.start)))
        new_end = to_complex(tf.dot(to_point(curve.end)))
        # NOTE(review): the radius is mapped as a direction vector while
        # `rotation` is kept unchanged — this looks exact only for
        # transforms without shear or non-uniform scaling; confirm.
        new_radius = to_complex(tf.dot(to_vector(curve.radius)))
        return Arc(new_start, radius=new_radius, rotation=curve.rotation,
                   large_arc=curve.large_arc, sweep=curve.sweep, end=new_end)
    else:
        raise TypeError("Input `curve` should be a Path, Line, "
                        "QuadraticBezier, CubicBezier, or Arc object.")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bezier_unit_tangent(seg, t):
    """Returns the unit tangent of the segment at t.

    Notes
    -----
    If you receive a RuntimeWarning here, promote invalid float operations
    to catchable exceptions with ``old = np.seterr(invalid='raise')``.
    This can be undone with ``np.seterr(**old)``.

    :raises ValueError: if the tangent direction has no well-defined
        limit at t (the error message samples the derivative just before
        and after t).
    """
    assert 0 <= t <= 1
    dseg = seg.derivative(t)
    # Note: dseg might be numpy value, use np.seterr(invalid='raise')
    try:
        unit_tangent = dseg/abs(dseg)
    except (ZeroDivisionError, FloatingPointError):
        # This may be a removable singularity, if so we just need to compute
        # the limit.
        # Note: limit{{dseg / abs(dseg)} = sqrt(limit{dseg**2 / abs(dseg)**2})
        dseg_poly = seg.poly().deriv()
        dseg_abs_squared_poly = (real(dseg_poly) ** 2 +
                                 imag(dseg_poly) ** 2)
        try:
            unit_tangent = csqrt(rational_limit(dseg_poly**2,
                                            dseg_abs_squared_poly, t))
        except ValueError:
            # The limit does not exist either; report the derivative on
            # both sides of t to aid debugging.
            bef = seg.poly().deriv()(t - 1e-4)
            aft = seg.poly().deriv()(t + 1e-4)
            mes = ("Unit tangent appears to not be well-defined at "
                   "t = {}, \n".format(t) +
                   "seg.poly().deriv()(t - 1e-4) = {}\n".format(bef) +
                   "seg.poly().deriv()(t + 1e-4) = {}".format(aft))
            raise ValueError(mes)
    return unit_tangent
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def segment_curvature(self, t, use_inf=False):
    """returns the curvature of the segment at t.

    Notes
    -----
    If you receive a RuntimeWarning, run ``np.seterr(invalid='raise')``
    to turn it into a catchable error.  This can be undone with
    ``np.seterr(**old_settings)`` using the dict that call returns.

    NOTE(review): the `use_inf` parameter is accepted but never read in
    this body — confirm whether an infinite-curvature branch was intended.
    """
    dz = self.derivative(t)
    ddz = self.derivative(t, n=2)
    dx, dy = dz.real, dz.imag
    ddx, ddy = ddz.real, ddz.imag
    # Promote invalid float ops (e.g. 0/0) to exceptions so the degenerate
    # zero-tangent case is routed into the limit computation below.
    old_np_seterr = np.seterr(invalid='raise')
    try:
        # Planar curvature: |x'y'' - y'x''| / (x'^2 + y'^2)^(3/2).
        kappa = abs(dx*ddy - dy*ddx)/sqrt(dx*dx + dy*dy)**3
    except (ZeroDivisionError, FloatingPointError):
        # tangent vector is zero at t, use polytools to find limit
        p = self.poly()
        dp = p.deriv()
        ddp = dp.deriv()
        dx, dy = real(dp), imag(dp)
        ddx, ddy = real(ddp), imag(ddp)
        # Work with squared numerator/denominator so both are polynomials.
        f2 = (dx*ddy - dy*ddx)**2
        g2 = (dx*dx + dy*dy)**3
        lim2 = rational_limit(f2, g2, t)
        if lim2 < 0:  # impossible, must be numerical error
            return 0
        kappa = sqrt(lim2)
    finally:
        # Always restore the caller's floating-point error settings.
        np.seterr(**old_np_seterr)
    return kappa
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def segment_length(curve, start, end, start_point, end_point,
                   error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH, depth=0):
    """Recursively approximates the length by straight lines.

    The parameter interval [start, end] is bisected until replacing the
    single chord with the two half-interval chords changes the estimate by
    at most `error`, and at least `min_depth` bisection levels were done.
    """
    mid = (start + end)/2
    mid_point = curve.point(mid)
    whole_chord = abs(end_point - start_point)
    halves_sum = abs(mid_point - start_point) + abs(end_point - mid_point)
    if depth < min_depth or halves_sum - whole_chord > error:
        # Not converged yet: recurse on both halves and add them up.
        left = segment_length(curve, start, mid, start_point, mid_point,
                              error, min_depth, depth + 1)
        right = segment_length(curve, mid, end, mid_point, end_point,
                               error, min_depth, depth + 1)
        return left + right
    # The two-chord estimate is accurate enough.
    return halves_sum
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def length(self, t0=0, t1=1, error=None, min_depth=None):
    """returns the length of the line segment between t0 and t1.

    A line has constant speed, so this is just the chord length scaled by
    the parameter interval.  `error` and `min_depth` are accepted only for
    interface compatibility with the curved segment types and are ignored.
    """
    return (t1 - t0)*abs(self.end - self.start)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unit_tangent(self, t=None):
    """returns the unit tangent of the segment at t.

    A line's tangent direction is the same everywhere, so `t` is ignored.
    Degenerate (zero-length) lines trip the assertion.
    """
    assert self.end != self.start
    direction = self.end - self.start
    return direction/abs(direction)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def point_to_t(self, point):
    """If the point lies on the Line, returns its `t` parameter.
    If the point does not lie on the Line, returns None.

    Endpoint matches are tested with an absolute tolerance of 1e-6
    (roughly single-precision resolution); interior points are found by
    inverting the Line's degree-one polynomial directly.
    """
    # Endpoints first: within ~6 significant figures counts as exact.
    if np.isclose(point, self.start, rtol=0, atol=1e-6):
        return 0.0
    if np.isclose(point, self.end, rtol=0, atol=1e-6):
        return 1.0
    # Inverting p(t) = p_1*t + p_0 by hand is much faster than calling
    # radialrange(); see the discussion on PR #40:
    # https://github.com/mathandy/svgpathtools/pull/40#issuecomment-358134261
    p = self.poly()
    t = (point - p[0]) / p[1]
    if np.isclose(t.imag, 0) and 0.0 <= t.real <= 1.0:
        return t.real
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scaled(self, sx, sy=None, origin=0j):
    """Scale transform.  See `scale` function for further explanation."""
    # Thin wrapper: all work (including the sy=None uniform-scale default)
    # is delegated to the module-level `scale` helper.
    return scale(self, sx=sx, sy=sy, origin=origin)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def poly(self, return_coeffs=False):
    """returns the quadratic as a Polynomial object.

    The Bernstein form with control points (p0, p1, p2) expands to
    (p0 - 2*p1 + p2)*t**2 + 2*(p1 - p0)*t + p0.  When `return_coeffs` is
    True, the raw coefficient tuple (highest power first) is returned
    instead of an np.poly1d.
    """
    p0, p1, p2 = self.bpoints()
    coeffs = (p0 - 2*p1 + p2, 2*(p1 - p0), p0)
    if return_coeffs:
        return coeffs
    return np.poly1d(coeffs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reversed(self):
    """returns a copy of the QuadraticBezier object with its orientation
    reversed.

    Any cached arc length is carried over to the copy (length is
    direction-independent), with the cached control points updated to the
    reversed order.
    """
    new_quad = QuadraticBezier(self.end, self.control, self.start)
    if self._length_info['length']:
        # Copy the cache dict rather than aliasing it: the original code
        # assigned self._length_info directly, so updating 'bpoints' below
        # also clobbered the ORIGINAL segment's cached control-point
        # record (silently invalidating its length cache).
        new_quad._length_info = dict(self._length_info)
        new_quad._length_info['bpoints'] = (
            self.end, self.control, self.start)
    return new_quad
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def point(self, t):
    """Evaluate the cubic Bezier curve at t using Horner's rule."""
    # algebraically equivalent to
    # P0*(1-t)**3 + 3*P1*t*(1-t)**2 + 3*P2*(1-t)*t**2 + P3*t**3
    # for (P0, P1, P2, P3) = self.bpoints(), grouped by powers of t.
    p0, p1, p2, p3 = self.start, self.control1, self.control2, self.end
    coef1 = 3*(p1 - p0)               # t coefficient
    coef2 = 3*(p0 + p2) - 6*p1        # t**2 coefficient
    coef3 = -p0 + 3*(p1 - p2) + p3    # t**3 coefficient
    return p0 + t*(coef1 + t*(coef2 + t*coef3))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bpoints(self):
    """returns the Bezier control points of the segment."""
    # Order matters: (P0, P1, P2, P3) = (start, control1, control2, end).
    return self.start, self.control1, self.control2, self.end
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reversed(self):
    """returns a copy of the CubicBezier object with its orientation
    reversed.

    Any cached arc length is carried over to the copy (length is
    direction-independent), with the cached control points updated to the
    reversed order.
    """
    new_cub = CubicBezier(self.end, self.control2, self.control1,
                          self.start)
    if self._length_info['length']:
        # Copy the cache dict rather than aliasing it: the original code
        # assigned self._length_info directly, so updating 'bpoints' below
        # also clobbered the ORIGINAL segment's cached control-point
        # record (silently invalidating its length cache).
        new_cub._length_info = dict(self._length_info)
        new_cub._length_info['bpoints'] = (
            self.end, self.control2, self.control1, self.start)
    return new_cub
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def length(self, t0=0, t1=1, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH):
    """The length of an elliptical large_arc segment requires numerical
    integration, and in that case it's simpler to just do a geometric
    approximation, as for cubic bezier curves.

    If scipy's `quad` is importable (tracked by the module-level
    `_quad_available` flag), |derivative| is integrated numerically;
    otherwise the adaptive chord-based `segment_length` fallback is used.
    """
    assert 0 <= t0 <= 1 and 0 <= t1 <= 1
    if _quad_available:
        # Integrate the speed |dz/dt| over [t0, t1].
        return quad(lambda tau: abs(self.derivative(tau)), t0, t1,
                    epsabs=error, limit=1000)[0]
    else:
        # No scipy: adaptive bisection on chord lengths.
        return segment_length(self, t0, t1, self.point(t0), self.point(t1),
                              error, min_depth, 0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reversed(self):
    """returns a copy of the Arc object with its orientation reversed."""
    # Swapping start/end requires flipping the sweep flag so the same
    # elliptical arc is traced in the opposite direction; radius, rotation
    # and large_arc are unchanged.
    return Arc(self.end, self.radius, self.rotation, self.large_arc,
               not self.sweep, self.start)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reversed(self):
    """returns a copy of the Path object with its orientation reversed.

    Each segment is individually reversed and the segment order is
    flipped, so the new path traces the old one back-to-front.
    """
    return Path(*[seg.reversed() for seg in self][::-1])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iscontinuous(self):
    """Checks if a path is continuous with respect to its
    parameterization, i.e. every segment starts exactly where the
    previous one ends.  Empty and single-segment paths are continuous.
    """
    for idx in range(len(self) - 1):
        if self[idx].end != self[idx + 1].start:
            return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isclosed(self):
    """This function determines if a connected path is closed, i.e. ends
    exactly where it starts.  The path must be nonempty and continuous;
    both preconditions are checked with assertions.
    """
    assert len(self) > 0
    assert self.iscontinuous()
    return self.start == self.end
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def d(self, useSandT=False, use_closed_attrib=False):
    """Returns a path d-string for the path object.
    For an explanation of useSandT and use_closed_attrib, see the
    compatibility notes in the README.

    :param useSandT: if True, emit shorthand S/T commands for cubic and
        quadratic segments that are smooth continuations of the previous
        segment.
    :param use_closed_attrib: if True and the path is closed, omit the
        final (closing) segment and emit a trailing 'Z' instead.
    """
    if use_closed_attrib:
        self_closed = self.closed(warning_on=False)
        if self_closed:
            # The closing segment is implied by 'Z'; don't emit it.
            segments = self[:-1]
        else:
            segments = self[:]
    else:
        self_closed = False
        segments = self[:]
    current_pos = None
    parts = []
    previous_segment = None
    end = self[-1].end
    for segment in segments:
        seg_start = segment.start
        # If the start of this segment does not coincide with the end of
        # the last segment or if this segment is actually the close point
        # of a closed path, then we should start a new subpath here.
        if current_pos != seg_start or \
                (self_closed and seg_start == end and use_closed_attrib):
            parts.append('M {},{}'.format(seg_start.real, seg_start.imag))
        if isinstance(segment, Line):
            args = segment.end.real, segment.end.imag
            parts.append('L {},{}'.format(*args))
        elif isinstance(segment, CubicBezier):
            if useSandT and segment.is_smooth_from(previous_segment,
                                                   warning_on=False):
                # Shorthand: control1 is implied (reflection of the
                # previous segment's control2), so only write control2/end.
                args = (segment.control2.real, segment.control2.imag,
                        segment.end.real, segment.end.imag)
                parts.append('S {},{} {},{}'.format(*args))
            else:
                args = (segment.control1.real, segment.control1.imag,
                        segment.control2.real, segment.control2.imag,
                        segment.end.real, segment.end.imag)
                parts.append('C {},{} {},{} {},{}'.format(*args))
        elif isinstance(segment, QuadraticBezier):
            if useSandT and segment.is_smooth_from(previous_segment,
                                                   warning_on=False):
                # Shorthand: the control point is implied by reflection.
                args = segment.end.real, segment.end.imag
                parts.append('T {},{}'.format(*args))
            else:
                args = (segment.control.real, segment.control.imag,
                        segment.end.real, segment.end.imag)
                parts.append('Q {},{} {},{}'.format(*args))
        elif isinstance(segment, Arc):
            # A rx,ry x-axis-rotation large-arc-flag,sweep-flag x,y
            args = (segment.radius.real, segment.radius.imag,
                    segment.rotation,int(segment.large_arc),
                    int(segment.sweep),segment.end.real, segment.end.imag)
            parts.append('A {},{} {} {:d},{:d} {},{}'.format(*args))
        current_pos = segment.end
        previous_segment = segment
    if self_closed:
        parts.append('Z')
    return ' '.join(parts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cropped(self, T0, T1):
    """returns a cropped copy of the path.

    The result covers the global parameter interval [T0, T1].  If T1 < T0
    the crop wraps around the path's start/end, which is only valid for
    closed paths.

    :raises ValueError: if T1 < T0 and the path is not closed.
    """
    assert 0 <= T0 <= 1 and 0 <= T1<= 1
    assert T0 != T1
    assert not (T0 == 1 and T1 == 0)
    # On a closed path, T0 == 1 is the same point as T0 == 0.
    if T0 == 1 and 0 < T1 < 1 and self.isclosed():
        return self.cropped(0, T1)
    if T1 == 1:
        seg1 = self[-1]
        t_seg1 = 1
        i1 = len(self) - 1
    else:
        # Locate the segment containing T1; if the local parameter is
        # (numerically) 0, snap back to the end of the previous segment.
        seg1_idx, t_seg1 = self.T2t(T1)
        seg1 = self[seg1_idx]
        if np.isclose(t_seg1, 0):
            i1 = (self.index(seg1) - 1) % len(self)
            seg1 = self[i1]
            t_seg1 = 1
        else:
            i1 = self.index(seg1)
    if T0 == 0:
        seg0 = self[0]
        t_seg0 = 0
        i0 = 0
    else:
        # Locate the segment containing T0; if the local parameter is
        # (numerically) 1, snap forward to the start of the next segment.
        seg0_idx, t_seg0 = self.T2t(T0)
        seg0 = self[seg0_idx]
        if np.isclose(t_seg0, 1):
            i0 = (self.index(seg0) + 1) % len(self)
            seg0 = self[i0]
            t_seg0 = 0
        else:
            i0 = self.index(seg0)
    # Both endpoints fall in the same segment: a single sub-crop suffices.
    if T0 < T1 and i0 == i1:
        new_path = Path(seg0.cropped(t_seg0, t_seg1))
    else:
        # Start with the (possibly partial) first segment.
        new_path = Path(seg0.cropped(t_seg0, 1))
        # T1<T0 must cross discontinuity case
        if T1 < T0:
            if not self.isclosed():
                raise ValueError("This path is not closed, thus T0 must "
                                 "be less than T1.")
            else:
                # Wrap around: tail segments, then head segments.
                for i in range(i0 + 1, len(self)):
                    new_path.append(self[i])
                for i in range(0, i1):
                    new_path.append(self[i])
        # T0<T1 straight-forward case
        else:
            for i in range(i0 + 1, i1):
                new_path.append(self[i])
        # Finish with the (possibly partial) last segment.
        if t_seg1 != 0:
            new_path.append(seg1.cropped(0, t_seg1))
    return new_path
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split_bezier(bpoints, t):
    """Uses deCasteljau's recursion to split the Bezier curve at t into two
    Bezier curves of the same order.

    At each de Casteljau level the first point joins the left curve and
    the last point joins the right curve; the right curve's points are
    collected innermost-first and reversed at the end.
    """
    left, right = [], []
    level = list(bpoints)
    while len(level) > 1:
        left.append(level[0])
        right.append(level[-1])
        # Next de Casteljau level: pairwise interpolation at t.
        level = [(1 - t)*level[i] + t*level[i + 1]
                 for i in range(len(level) - 1)]
    # The single remaining point (the curve point at t) ends both halves.
    left.append(level[0])
    right.append(level[0])
    right.reverse()
    return left, right
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.