repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
sampsyo/confuse | confuse.py | namespace_to_dict | def namespace_to_dict(obj):
"""If obj is argparse.Namespace or optparse.Values we'll return
a dict representation of it, else return the original object.
Redefine this method if using other parsers.
:param obj: *
:return:
:rtype: dict or *
"""
if isinstance(obj, (argparse.Namespace, optparse.Values)):
return vars(obj)
return obj | python | def namespace_to_dict(obj):
"""If obj is argparse.Namespace or optparse.Values we'll return
a dict representation of it, else return the original object.
Redefine this method if using other parsers.
:param obj: *
:return:
:rtype: dict or *
"""
if isinstance(obj, (argparse.Namespace, optparse.Values)):
return vars(obj)
return obj | [
"def",
"namespace_to_dict",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"argparse",
".",
"Namespace",
",",
"optparse",
".",
"Values",
")",
")",
":",
"return",
"vars",
"(",
"obj",
")",
"return",
"obj"
] | If obj is argparse.Namespace or optparse.Values we'll return
a dict representation of it, else return the original object.
Redefine this method if using other parsers.
:param obj: *
:return:
:rtype: dict or * | [
"If",
"obj",
"is",
"argparse",
".",
"Namespace",
"or",
"optparse",
".",
"Values",
"we",
"ll",
"return",
"a",
"dict",
"representation",
"of",
"it",
"else",
"return",
"the",
"original",
"object",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L73-L85 | train | 199,900 |
sampsyo/confuse | confuse.py | xdg_config_dirs | def xdg_config_dirs():
"""Returns a list of paths taken from the XDG_CONFIG_DIRS
and XDG_CONFIG_HOME environment varibables if they exist
"""
paths = []
if 'XDG_CONFIG_HOME' in os.environ:
paths.append(os.environ['XDG_CONFIG_HOME'])
if 'XDG_CONFIG_DIRS' in os.environ:
paths.extend(os.environ['XDG_CONFIG_DIRS'].split(':'))
else:
paths.append('/etc/xdg')
paths.append('/etc')
return paths | python | def xdg_config_dirs():
"""Returns a list of paths taken from the XDG_CONFIG_DIRS
and XDG_CONFIG_HOME environment varibables if they exist
"""
paths = []
if 'XDG_CONFIG_HOME' in os.environ:
paths.append(os.environ['XDG_CONFIG_HOME'])
if 'XDG_CONFIG_DIRS' in os.environ:
paths.extend(os.environ['XDG_CONFIG_DIRS'].split(':'))
else:
paths.append('/etc/xdg')
paths.append('/etc')
return paths | [
"def",
"xdg_config_dirs",
"(",
")",
":",
"paths",
"=",
"[",
"]",
"if",
"'XDG_CONFIG_HOME'",
"in",
"os",
".",
"environ",
":",
"paths",
".",
"append",
"(",
"os",
".",
"environ",
"[",
"'XDG_CONFIG_HOME'",
"]",
")",
"if",
"'XDG_CONFIG_DIRS'",
"in",
"os",
"."... | Returns a list of paths taken from the XDG_CONFIG_DIRS
and XDG_CONFIG_HOME environment varibables if they exist | [
"Returns",
"a",
"list",
"of",
"paths",
"taken",
"from",
"the",
"XDG_CONFIG_DIRS",
"and",
"XDG_CONFIG_HOME",
"environment",
"varibables",
"if",
"they",
"exist"
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L663-L675 | train | 199,901 |
sampsyo/confuse | confuse.py | config_dirs | def config_dirs():
"""Return a platform-specific list of candidates for user
configuration directories on the system.
The candidates are in order of priority, from highest to lowest. The
last element is the "fallback" location to be used when no
higher-priority config file exists.
"""
paths = []
if platform.system() == 'Darwin':
paths.append(MAC_DIR)
paths.append(UNIX_DIR_FALLBACK)
paths.extend(xdg_config_dirs())
elif platform.system() == 'Windows':
paths.append(WINDOWS_DIR_FALLBACK)
if WINDOWS_DIR_VAR in os.environ:
paths.append(os.environ[WINDOWS_DIR_VAR])
else:
# Assume Unix.
paths.append(UNIX_DIR_FALLBACK)
paths.extend(xdg_config_dirs())
# Expand and deduplicate paths.
out = []
for path in paths:
path = os.path.abspath(os.path.expanduser(path))
if path not in out:
out.append(path)
return out | python | def config_dirs():
"""Return a platform-specific list of candidates for user
configuration directories on the system.
The candidates are in order of priority, from highest to lowest. The
last element is the "fallback" location to be used when no
higher-priority config file exists.
"""
paths = []
if platform.system() == 'Darwin':
paths.append(MAC_DIR)
paths.append(UNIX_DIR_FALLBACK)
paths.extend(xdg_config_dirs())
elif platform.system() == 'Windows':
paths.append(WINDOWS_DIR_FALLBACK)
if WINDOWS_DIR_VAR in os.environ:
paths.append(os.environ[WINDOWS_DIR_VAR])
else:
# Assume Unix.
paths.append(UNIX_DIR_FALLBACK)
paths.extend(xdg_config_dirs())
# Expand and deduplicate paths.
out = []
for path in paths:
path = os.path.abspath(os.path.expanduser(path))
if path not in out:
out.append(path)
return out | [
"def",
"config_dirs",
"(",
")",
":",
"paths",
"=",
"[",
"]",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"'Darwin'",
":",
"paths",
".",
"append",
"(",
"MAC_DIR",
")",
"paths",
".",
"append",
"(",
"UNIX_DIR_FALLBACK",
")",
"paths",
".",
"extend",
... | Return a platform-specific list of candidates for user
configuration directories on the system.
The candidates are in order of priority, from highest to lowest. The
last element is the "fallback" location to be used when no
higher-priority config file exists. | [
"Return",
"a",
"platform",
"-",
"specific",
"list",
"of",
"candidates",
"for",
"user",
"configuration",
"directories",
"on",
"the",
"system",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L678-L709 | train | 199,902 |
sampsyo/confuse | confuse.py | load_yaml | def load_yaml(filename):
"""Read a YAML document from a file. If the file cannot be read or
parsed, a ConfigReadError is raised.
"""
try:
with open(filename, 'rb') as f:
return yaml.load(f, Loader=Loader)
except (IOError, yaml.error.YAMLError) as exc:
raise ConfigReadError(filename, exc) | python | def load_yaml(filename):
"""Read a YAML document from a file. If the file cannot be read or
parsed, a ConfigReadError is raised.
"""
try:
with open(filename, 'rb') as f:
return yaml.load(f, Loader=Loader)
except (IOError, yaml.error.YAMLError) as exc:
raise ConfigReadError(filename, exc) | [
"def",
"load_yaml",
"(",
"filename",
")",
":",
"try",
":",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"return",
"yaml",
".",
"load",
"(",
"f",
",",
"Loader",
"=",
"Loader",
")",
"except",
"(",
"IOError",
",",
"yaml",
".",
... | Read a YAML document from a file. If the file cannot be read or
parsed, a ConfigReadError is raised. | [
"Read",
"a",
"YAML",
"document",
"from",
"a",
"file",
".",
"If",
"the",
"file",
"cannot",
"be",
"read",
"or",
"parsed",
"a",
"ConfigReadError",
"is",
"raised",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L770-L778 | train | 199,903 |
sampsyo/confuse | confuse.py | as_template | def as_template(value):
"""Convert a simple "shorthand" Python value to a `Template`.
"""
if isinstance(value, Template):
# If it's already a Template, pass it through.
return value
elif isinstance(value, abc.Mapping):
# Dictionaries work as templates.
return MappingTemplate(value)
elif value is int:
return Integer()
elif isinstance(value, int):
return Integer(value)
elif isinstance(value, type) and issubclass(value, BASESTRING):
return String()
elif isinstance(value, BASESTRING):
return String(value)
elif isinstance(value, set):
# convert to list to avoid hash related problems
return Choice(list(value))
elif (SUPPORTS_ENUM and isinstance(value, type)
and issubclass(value, enum.Enum)):
return Choice(value)
elif isinstance(value, list):
return OneOf(value)
elif value is float:
return Number()
elif value is None:
return Template()
elif value is dict:
return TypeTemplate(abc.Mapping)
elif value is list:
return TypeTemplate(abc.Sequence)
elif isinstance(value, type):
return TypeTemplate(value)
else:
raise ValueError(u'cannot convert to template: {0!r}'.format(value)) | python | def as_template(value):
"""Convert a simple "shorthand" Python value to a `Template`.
"""
if isinstance(value, Template):
# If it's already a Template, pass it through.
return value
elif isinstance(value, abc.Mapping):
# Dictionaries work as templates.
return MappingTemplate(value)
elif value is int:
return Integer()
elif isinstance(value, int):
return Integer(value)
elif isinstance(value, type) and issubclass(value, BASESTRING):
return String()
elif isinstance(value, BASESTRING):
return String(value)
elif isinstance(value, set):
# convert to list to avoid hash related problems
return Choice(list(value))
elif (SUPPORTS_ENUM and isinstance(value, type)
and issubclass(value, enum.Enum)):
return Choice(value)
elif isinstance(value, list):
return OneOf(value)
elif value is float:
return Number()
elif value is None:
return Template()
elif value is dict:
return TypeTemplate(abc.Mapping)
elif value is list:
return TypeTemplate(abc.Sequence)
elif isinstance(value, type):
return TypeTemplate(value)
else:
raise ValueError(u'cannot convert to template: {0!r}'.format(value)) | [
"def",
"as_template",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Template",
")",
":",
"# If it's already a Template, pass it through.",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"abc",
".",
"Mapping",
")",
":",
"# Dictionar... | Convert a simple "shorthand" Python value to a `Template`. | [
"Convert",
"a",
"simple",
"shorthand",
"Python",
"value",
"to",
"a",
"Template",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L1578-L1614 | train | 199,904 |
sampsyo/confuse | confuse.py | ConfigSource.of | def of(cls, value):
"""Given either a dictionary or a `ConfigSource` object, return
a `ConfigSource` object. This lets a function accept either type
of object as an argument.
"""
if isinstance(value, ConfigSource):
return value
elif isinstance(value, dict):
return ConfigSource(value)
else:
raise TypeError(u'source value must be a dict') | python | def of(cls, value):
"""Given either a dictionary or a `ConfigSource` object, return
a `ConfigSource` object. This lets a function accept either type
of object as an argument.
"""
if isinstance(value, ConfigSource):
return value
elif isinstance(value, dict):
return ConfigSource(value)
else:
raise TypeError(u'source value must be a dict') | [
"def",
"of",
"(",
"cls",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"ConfigSource",
")",
":",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"return",
"ConfigSource",
"(",
"value",
")",
"else",
":",
... | Given either a dictionary or a `ConfigSource` object, return
a `ConfigSource` object. This lets a function accept either type
of object as an argument. | [
"Given",
"either",
"a",
"dictionary",
"or",
"a",
"ConfigSource",
"object",
"return",
"a",
"ConfigSource",
"object",
".",
"This",
"lets",
"a",
"function",
"accept",
"either",
"type",
"of",
"object",
"as",
"an",
"argument",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L156-L166 | train | 199,905 |
sampsyo/confuse | confuse.py | ConfigView._build_namespace_dict | def _build_namespace_dict(cls, obj, dots=False):
"""Recursively replaces all argparse.Namespace and optparse.Values
with dicts and drops any keys with None values.
Additionally, if dots is True, will expand any dot delimited
keys.
:param obj: Namespace, Values, or dict to iterate over. Other
values will simply be returned.
:type obj: argparse.Namespace or optparse.Values or dict or *
:param dots: If True, any properties on obj that contain dots (.)
will be broken down into child dictionaries.
:return: A new dictionary or the value passed if obj was not a
dict, Namespace, or Values.
:rtype: dict or *
"""
# We expect our root object to be a dict, but it may come in as
# a namespace
obj = namespace_to_dict(obj)
# We only deal with dictionaries
if not isinstance(obj, dict):
return obj
# Get keys iterator
keys = obj.keys() if PY3 else obj.iterkeys()
if dots:
# Dots needs sorted keys to prevent parents from
# clobbering children
keys = sorted(list(keys))
output = {}
for key in keys:
value = obj[key]
if value is None: # Avoid unset options.
continue
save_to = output
result = cls._build_namespace_dict(value, dots)
if dots:
# Split keys by dots as this signifies nesting
split = key.split('.')
if len(split) > 1:
# The last index will be the key we assign result to
key = split.pop()
# Build the dict tree if needed and change where
# we're saving to
for child_key in split:
if child_key in save_to and \
isinstance(save_to[child_key], dict):
save_to = save_to[child_key]
else:
# Clobber or create
save_to[child_key] = {}
save_to = save_to[child_key]
# Save
if key in save_to:
save_to[key].update(result)
else:
save_to[key] = result
return output | python | def _build_namespace_dict(cls, obj, dots=False):
"""Recursively replaces all argparse.Namespace and optparse.Values
with dicts and drops any keys with None values.
Additionally, if dots is True, will expand any dot delimited
keys.
:param obj: Namespace, Values, or dict to iterate over. Other
values will simply be returned.
:type obj: argparse.Namespace or optparse.Values or dict or *
:param dots: If True, any properties on obj that contain dots (.)
will be broken down into child dictionaries.
:return: A new dictionary or the value passed if obj was not a
dict, Namespace, or Values.
:rtype: dict or *
"""
# We expect our root object to be a dict, but it may come in as
# a namespace
obj = namespace_to_dict(obj)
# We only deal with dictionaries
if not isinstance(obj, dict):
return obj
# Get keys iterator
keys = obj.keys() if PY3 else obj.iterkeys()
if dots:
# Dots needs sorted keys to prevent parents from
# clobbering children
keys = sorted(list(keys))
output = {}
for key in keys:
value = obj[key]
if value is None: # Avoid unset options.
continue
save_to = output
result = cls._build_namespace_dict(value, dots)
if dots:
# Split keys by dots as this signifies nesting
split = key.split('.')
if len(split) > 1:
# The last index will be the key we assign result to
key = split.pop()
# Build the dict tree if needed and change where
# we're saving to
for child_key in split:
if child_key in save_to and \
isinstance(save_to[child_key], dict):
save_to = save_to[child_key]
else:
# Clobber or create
save_to[child_key] = {}
save_to = save_to[child_key]
# Save
if key in save_to:
save_to[key].update(result)
else:
save_to[key] = result
return output | [
"def",
"_build_namespace_dict",
"(",
"cls",
",",
"obj",
",",
"dots",
"=",
"False",
")",
":",
"# We expect our root object to be a dict, but it may come in as",
"# a namespace",
"obj",
"=",
"namespace_to_dict",
"(",
"obj",
")",
"# We only deal with dictionaries",
"if",
"no... | Recursively replaces all argparse.Namespace and optparse.Values
with dicts and drops any keys with None values.
Additionally, if dots is True, will expand any dot delimited
keys.
:param obj: Namespace, Values, or dict to iterate over. Other
values will simply be returned.
:type obj: argparse.Namespace or optparse.Values or dict or *
:param dots: If True, any properties on obj that contain dots (.)
will be broken down into child dictionaries.
:return: A new dictionary or the value passed if obj was not a
dict, Namespace, or Values.
:rtype: dict or * | [
"Recursively",
"replaces",
"all",
"argparse",
".",
"Namespace",
"and",
"optparse",
".",
"Values",
"with",
"dicts",
"and",
"drops",
"any",
"keys",
"with",
"None",
"values",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L272-L332 | train | 199,906 |
sampsyo/confuse | confuse.py | ConfigView.set_args | def set_args(self, namespace, dots=False):
"""Overlay parsed command-line arguments, generated by a library
like argparse or optparse, onto this view's value.
:param namespace: Dictionary or Namespace to overlay this config with.
Supports nested Dictionaries and Namespaces.
:type namespace: dict or Namespace
:param dots: If True, any properties on namespace that contain dots (.)
will be broken down into child dictionaries.
:Example:
{'foo.bar': 'car'}
# Will be turned into
{'foo': {'bar': 'car'}}
:type dots: bool
"""
self.set(self._build_namespace_dict(namespace, dots)) | python | def set_args(self, namespace, dots=False):
"""Overlay parsed command-line arguments, generated by a library
like argparse or optparse, onto this view's value.
:param namespace: Dictionary or Namespace to overlay this config with.
Supports nested Dictionaries and Namespaces.
:type namespace: dict or Namespace
:param dots: If True, any properties on namespace that contain dots (.)
will be broken down into child dictionaries.
:Example:
{'foo.bar': 'car'}
# Will be turned into
{'foo': {'bar': 'car'}}
:type dots: bool
"""
self.set(self._build_namespace_dict(namespace, dots)) | [
"def",
"set_args",
"(",
"self",
",",
"namespace",
",",
"dots",
"=",
"False",
")",
":",
"self",
".",
"set",
"(",
"self",
".",
"_build_namespace_dict",
"(",
"namespace",
",",
"dots",
")",
")"
] | Overlay parsed command-line arguments, generated by a library
like argparse or optparse, onto this view's value.
:param namespace: Dictionary or Namespace to overlay this config with.
Supports nested Dictionaries and Namespaces.
:type namespace: dict or Namespace
:param dots: If True, any properties on namespace that contain dots (.)
will be broken down into child dictionaries.
:Example:
{'foo.bar': 'car'}
# Will be turned into
{'foo': {'bar': 'car'}}
:type dots: bool | [
"Overlay",
"parsed",
"command",
"-",
"line",
"arguments",
"generated",
"by",
"a",
"library",
"like",
"argparse",
"or",
"optparse",
"onto",
"this",
"view",
"s",
"value",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L334-L350 | train | 199,907 |
sampsyo/confuse | confuse.py | ConfigView.flatten | def flatten(self, redact=False):
"""Create a hierarchy of OrderedDicts containing the data from
this view, recursively reifying all views to get their
represented values.
If `redact` is set, then sensitive values are replaced with
the string "REDACTED".
"""
od = OrderedDict()
for key, view in self.items():
if redact and view.redact:
od[key] = REDACTED_TOMBSTONE
else:
try:
od[key] = view.flatten(redact=redact)
except ConfigTypeError:
od[key] = view.get()
return od | python | def flatten(self, redact=False):
"""Create a hierarchy of OrderedDicts containing the data from
this view, recursively reifying all views to get their
represented values.
If `redact` is set, then sensitive values are replaced with
the string "REDACTED".
"""
od = OrderedDict()
for key, view in self.items():
if redact and view.redact:
od[key] = REDACTED_TOMBSTONE
else:
try:
od[key] = view.flatten(redact=redact)
except ConfigTypeError:
od[key] = view.get()
return od | [
"def",
"flatten",
"(",
"self",
",",
"redact",
"=",
"False",
")",
":",
"od",
"=",
"OrderedDict",
"(",
")",
"for",
"key",
",",
"view",
"in",
"self",
".",
"items",
"(",
")",
":",
"if",
"redact",
"and",
"view",
".",
"redact",
":",
"od",
"[",
"key",
... | Create a hierarchy of OrderedDicts containing the data from
this view, recursively reifying all views to get their
represented values.
If `redact` is set, then sensitive values are replaced with
the string "REDACTED". | [
"Create",
"a",
"hierarchy",
"of",
"OrderedDicts",
"containing",
"the",
"data",
"from",
"this",
"view",
"recursively",
"reifying",
"all",
"views",
"to",
"get",
"their",
"represented",
"values",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L448-L465 | train | 199,908 |
sampsyo/confuse | confuse.py | Dumper.represent_bool | def represent_bool(self, data):
"""Represent bool as 'yes' or 'no' instead of 'true' or 'false'.
"""
if data:
value = u'yes'
else:
value = u'no'
return self.represent_scalar('tag:yaml.org,2002:bool', value) | python | def represent_bool(self, data):
"""Represent bool as 'yes' or 'no' instead of 'true' or 'false'.
"""
if data:
value = u'yes'
else:
value = u'no'
return self.represent_scalar('tag:yaml.org,2002:bool', value) | [
"def",
"represent_bool",
"(",
"self",
",",
"data",
")",
":",
"if",
"data",
":",
"value",
"=",
"u'yes'",
"else",
":",
"value",
"=",
"u'no'",
"return",
"self",
".",
"represent_scalar",
"(",
"'tag:yaml.org,2002:bool'",
",",
"value",
")"
] | Represent bool as 'yes' or 'no' instead of 'true' or 'false'. | [
"Represent",
"bool",
"as",
"yes",
"or",
"no",
"instead",
"of",
"true",
"or",
"false",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L825-L832 | train | 199,909 |
sampsyo/confuse | confuse.py | Configuration._add_default_source | def _add_default_source(self):
"""Add the package's default configuration settings. This looks
for a YAML file located inside the package for the module
`modname` if it was given.
"""
if self.modname:
if self._package_path:
filename = os.path.join(self._package_path, DEFAULT_FILENAME)
if os.path.isfile(filename):
self.add(ConfigSource(load_yaml(filename), filename, True)) | python | def _add_default_source(self):
"""Add the package's default configuration settings. This looks
for a YAML file located inside the package for the module
`modname` if it was given.
"""
if self.modname:
if self._package_path:
filename = os.path.join(self._package_path, DEFAULT_FILENAME)
if os.path.isfile(filename):
self.add(ConfigSource(load_yaml(filename), filename, True)) | [
"def",
"_add_default_source",
"(",
"self",
")",
":",
"if",
"self",
".",
"modname",
":",
"if",
"self",
".",
"_package_path",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_package_path",
",",
"DEFAULT_FILENAME",
")",
"if",
"os",... | Add the package's default configuration settings. This looks
for a YAML file located inside the package for the module
`modname` if it was given. | [
"Add",
"the",
"package",
"s",
"default",
"configuration",
"settings",
".",
"This",
"looks",
"for",
"a",
"YAML",
"file",
"located",
"inside",
"the",
"package",
"for",
"the",
"module",
"modname",
"if",
"it",
"was",
"given",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L924-L933 | train | 199,910 |
sampsyo/confuse | confuse.py | Configuration.read | def read(self, user=True, defaults=True):
"""Find and read the files for this configuration and set them
as the sources for this configuration. To disable either
discovered user configuration files or the in-package defaults,
set `user` or `defaults` to `False`.
"""
if user:
self._add_user_source()
if defaults:
self._add_default_source() | python | def read(self, user=True, defaults=True):
"""Find and read the files for this configuration and set them
as the sources for this configuration. To disable either
discovered user configuration files or the in-package defaults,
set `user` or `defaults` to `False`.
"""
if user:
self._add_user_source()
if defaults:
self._add_default_source() | [
"def",
"read",
"(",
"self",
",",
"user",
"=",
"True",
",",
"defaults",
"=",
"True",
")",
":",
"if",
"user",
":",
"self",
".",
"_add_user_source",
"(",
")",
"if",
"defaults",
":",
"self",
".",
"_add_default_source",
"(",
")"
] | Find and read the files for this configuration and set them
as the sources for this configuration. To disable either
discovered user configuration files or the in-package defaults,
set `user` or `defaults` to `False`. | [
"Find",
"and",
"read",
"the",
"files",
"for",
"this",
"configuration",
"and",
"set",
"them",
"as",
"the",
"sources",
"for",
"this",
"configuration",
".",
"To",
"disable",
"either",
"discovered",
"user",
"configuration",
"files",
"or",
"the",
"in",
"-",
"pack... | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L935-L944 | train | 199,911 |
sampsyo/confuse | confuse.py | Configuration.set_file | def set_file(self, filename):
"""Parses the file as YAML and inserts it into the configuration
sources with highest priority.
"""
filename = os.path.abspath(filename)
self.set(ConfigSource(load_yaml(filename), filename)) | python | def set_file(self, filename):
"""Parses the file as YAML and inserts it into the configuration
sources with highest priority.
"""
filename = os.path.abspath(filename)
self.set(ConfigSource(load_yaml(filename), filename)) | [
"def",
"set_file",
"(",
"self",
",",
"filename",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"filename",
")",
"self",
".",
"set",
"(",
"ConfigSource",
"(",
"load_yaml",
"(",
"filename",
")",
",",
"filename",
")",
")"
] | Parses the file as YAML and inserts it into the configuration
sources with highest priority. | [
"Parses",
"the",
"file",
"as",
"YAML",
"and",
"inserts",
"it",
"into",
"the",
"configuration",
"sources",
"with",
"highest",
"priority",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L982-L987 | train | 199,912 |
sampsyo/confuse | confuse.py | Configuration.dump | def dump(self, full=True, redact=False):
"""Dump the Configuration object to a YAML file.
The order of the keys is determined from the default
configuration file. All keys not in the default configuration
will be appended to the end of the file.
:param filename: The file to dump the configuration to, or None
if the YAML string should be returned instead
:type filename: unicode
:param full: Dump settings that don't differ from the defaults
as well
:param redact: Remove sensitive information (views with the `redact`
flag set) from the output
"""
if full:
out_dict = self.flatten(redact=redact)
else:
# Exclude defaults when flattening.
sources = [s for s in self.sources if not s.default]
temp_root = RootView(sources)
temp_root.redactions = self.redactions
out_dict = temp_root.flatten(redact=redact)
yaml_out = yaml.dump(out_dict, Dumper=Dumper,
default_flow_style=None, indent=4,
width=1000)
# Restore comments to the YAML text.
default_source = None
for source in self.sources:
if source.default:
default_source = source
break
if default_source and default_source.filename:
with open(default_source.filename, 'rb') as fp:
default_data = fp.read()
yaml_out = restore_yaml_comments(yaml_out,
default_data.decode('utf-8'))
return yaml_out | python | def dump(self, full=True, redact=False):
"""Dump the Configuration object to a YAML file.
The order of the keys is determined from the default
configuration file. All keys not in the default configuration
will be appended to the end of the file.
:param filename: The file to dump the configuration to, or None
if the YAML string should be returned instead
:type filename: unicode
:param full: Dump settings that don't differ from the defaults
as well
:param redact: Remove sensitive information (views with the `redact`
flag set) from the output
"""
if full:
out_dict = self.flatten(redact=redact)
else:
# Exclude defaults when flattening.
sources = [s for s in self.sources if not s.default]
temp_root = RootView(sources)
temp_root.redactions = self.redactions
out_dict = temp_root.flatten(redact=redact)
yaml_out = yaml.dump(out_dict, Dumper=Dumper,
default_flow_style=None, indent=4,
width=1000)
# Restore comments to the YAML text.
default_source = None
for source in self.sources:
if source.default:
default_source = source
break
if default_source and default_source.filename:
with open(default_source.filename, 'rb') as fp:
default_data = fp.read()
yaml_out = restore_yaml_comments(yaml_out,
default_data.decode('utf-8'))
return yaml_out | [
"def",
"dump",
"(",
"self",
",",
"full",
"=",
"True",
",",
"redact",
"=",
"False",
")",
":",
"if",
"full",
":",
"out_dict",
"=",
"self",
".",
"flatten",
"(",
"redact",
"=",
"redact",
")",
"else",
":",
"# Exclude defaults when flattening.",
"sources",
"="... | Dump the Configuration object to a YAML file.
The order of the keys is determined from the default
configuration file. All keys not in the default configuration
will be appended to the end of the file.
:param filename: The file to dump the configuration to, or None
if the YAML string should be returned instead
:type filename: unicode
:param full: Dump settings that don't differ from the defaults
as well
:param redact: Remove sensitive information (views with the `redact`
flag set) from the output | [
"Dump",
"the",
"Configuration",
"object",
"to",
"a",
"YAML",
"file",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L989-L1029 | train | 199,913 |
sampsyo/confuse | confuse.py | LazyConfig.clear | def clear(self):
"""Remove all sources from this configuration."""
super(LazyConfig, self).clear()
self._lazy_suffix = []
self._lazy_prefix = [] | python | def clear(self):
"""Remove all sources from this configuration."""
super(LazyConfig, self).clear()
self._lazy_suffix = []
self._lazy_prefix = [] | [
"def",
"clear",
"(",
"self",
")",
":",
"super",
"(",
"LazyConfig",
",",
"self",
")",
".",
"clear",
"(",
")",
"self",
".",
"_lazy_suffix",
"=",
"[",
"]",
"self",
".",
"_lazy_prefix",
"=",
"[",
"]"
] | Remove all sources from this configuration. | [
"Remove",
"all",
"sources",
"from",
"this",
"configuration",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L1069-L1073 | train | 199,914 |
sampsyo/confuse | confuse.py | Template.value | def value(self, view, template=None):
"""Get the value for a `ConfigView`.
May raise a `NotFoundError` if the value is missing (and the
template requires it) or a `ConfigValueError` for invalid values.
"""
if view.exists():
value, _ = view.first()
return self.convert(value, view)
elif self.default is REQUIRED:
# Missing required value. This is an error.
raise NotFoundError(u"{0} not found".format(view.name))
else:
# Missing value, but not required.
return self.default | python | def value(self, view, template=None):
"""Get the value for a `ConfigView`.
May raise a `NotFoundError` if the value is missing (and the
template requires it) or a `ConfigValueError` for invalid values.
"""
if view.exists():
value, _ = view.first()
return self.convert(value, view)
elif self.default is REQUIRED:
# Missing required value. This is an error.
raise NotFoundError(u"{0} not found".format(view.name))
else:
# Missing value, but not required.
return self.default | [
"def",
"value",
"(",
"self",
",",
"view",
",",
"template",
"=",
"None",
")",
":",
"if",
"view",
".",
"exists",
"(",
")",
":",
"value",
",",
"_",
"=",
"view",
".",
"first",
"(",
")",
"return",
"self",
".",
"convert",
"(",
"value",
",",
"view",
"... | Get the value for a `ConfigView`.
May raise a `NotFoundError` if the value is missing (and the
template requires it) or a `ConfigValueError` for invalid values. | [
"Get",
"the",
"value",
"for",
"a",
"ConfigView",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L1108-L1122 | train | 199,915 |
sampsyo/confuse | confuse.py | Template.fail | def fail(self, message, view, type_error=False):
"""Raise an exception indicating that a value cannot be
accepted.
`type_error` indicates whether the error is due to a type
mismatch rather than a malformed value. In this case, a more
specific exception is raised.
"""
exc_class = ConfigTypeError if type_error else ConfigValueError
raise exc_class(
u'{0}: {1}'.format(view.name, message)
) | python | def fail(self, message, view, type_error=False):
"""Raise an exception indicating that a value cannot be
accepted.
`type_error` indicates whether the error is due to a type
mismatch rather than a malformed value. In this case, a more
specific exception is raised.
"""
exc_class = ConfigTypeError if type_error else ConfigValueError
raise exc_class(
u'{0}: {1}'.format(view.name, message)
) | [
"def",
"fail",
"(",
"self",
",",
"message",
",",
"view",
",",
"type_error",
"=",
"False",
")",
":",
"exc_class",
"=",
"ConfigTypeError",
"if",
"type_error",
"else",
"ConfigValueError",
"raise",
"exc_class",
"(",
"u'{0}: {1}'",
".",
"format",
"(",
"view",
"."... | Raise an exception indicating that a value cannot be
accepted.
`type_error` indicates whether the error is due to a type
mismatch rather than a malformed value. In this case, a more
specific exception is raised. | [
"Raise",
"an",
"exception",
"indicating",
"that",
"a",
"value",
"cannot",
"be",
"accepted",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L1134-L1145 | train | 199,916 |
sampsyo/confuse | confuse.py | Integer.convert | def convert(self, value, view):
"""Check that the value is an integer. Floats are rounded.
"""
if isinstance(value, int):
return value
elif isinstance(value, float):
return int(value)
else:
self.fail(u'must be a number', view, True) | python | def convert(self, value, view):
"""Check that the value is an integer. Floats are rounded.
"""
if isinstance(value, int):
return value
elif isinstance(value, float):
return int(value)
else:
self.fail(u'must be a number', view, True) | [
"def",
"convert",
"(",
"self",
",",
"value",
",",
"view",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"float",
")",
":",
"return",
"int",
"(",
"value",
")",
"else",
"... | Check that the value is an integer. Floats are rounded. | [
"Check",
"that",
"the",
"value",
"is",
"an",
"integer",
".",
"Floats",
"are",
"rounded",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L1157-L1165 | train | 199,917 |
sampsyo/confuse | confuse.py | Number.convert | def convert(self, value, view):
"""Check that the value is an int or a float.
"""
if isinstance(value, NUMERIC_TYPES):
return value
else:
self.fail(
u'must be numeric, not {0}'.format(type(value).__name__),
view,
True
) | python | def convert(self, value, view):
"""Check that the value is an int or a float.
"""
if isinstance(value, NUMERIC_TYPES):
return value
else:
self.fail(
u'must be numeric, not {0}'.format(type(value).__name__),
view,
True
) | [
"def",
"convert",
"(",
"self",
",",
"value",
",",
"view",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"NUMERIC_TYPES",
")",
":",
"return",
"value",
"else",
":",
"self",
".",
"fail",
"(",
"u'must be numeric, not {0}'",
".",
"format",
"(",
"type",
"("... | Check that the value is an int or a float. | [
"Check",
"that",
"the",
"value",
"is",
"an",
"int",
"or",
"a",
"float",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L1171-L1181 | train | 199,918 |
sampsyo/confuse | confuse.py | MappingTemplate.value | def value(self, view, template=None):
"""Get a dict with the same keys as the template and values
validated according to the value types.
"""
out = AttrDict()
for key, typ in self.subtemplates.items():
out[key] = typ.value(view[key], self)
return out | python | def value(self, view, template=None):
"""Get a dict with the same keys as the template and values
validated according to the value types.
"""
out = AttrDict()
for key, typ in self.subtemplates.items():
out[key] = typ.value(view[key], self)
return out | [
"def",
"value",
"(",
"self",
",",
"view",
",",
"template",
"=",
"None",
")",
":",
"out",
"=",
"AttrDict",
"(",
")",
"for",
"key",
",",
"typ",
"in",
"self",
".",
"subtemplates",
".",
"items",
"(",
")",
":",
"out",
"[",
"key",
"]",
"=",
"typ",
".... | Get a dict with the same keys as the template and values
validated according to the value types. | [
"Get",
"a",
"dict",
"with",
"the",
"same",
"keys",
"as",
"the",
"template",
"and",
"values",
"validated",
"according",
"to",
"the",
"value",
"types",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L1198-L1205 | train | 199,919 |
sampsyo/confuse | confuse.py | Sequence.value | def value(self, view, template=None):
"""Get a list of items validated against the template.
"""
out = []
for item in view:
out.append(self.subtemplate.value(item, self))
return out | python | def value(self, view, template=None):
"""Get a list of items validated against the template.
"""
out = []
for item in view:
out.append(self.subtemplate.value(item, self))
return out | [
"def",
"value",
"(",
"self",
",",
"view",
",",
"template",
"=",
"None",
")",
":",
"out",
"=",
"[",
"]",
"for",
"item",
"in",
"view",
":",
"out",
".",
"append",
"(",
"self",
".",
"subtemplate",
".",
"value",
"(",
"item",
",",
"self",
")",
")",
"... | Get a list of items validated against the template. | [
"Get",
"a",
"list",
"of",
"items",
"validated",
"against",
"the",
"template",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L1221-L1227 | train | 199,920 |
sampsyo/confuse | confuse.py | String.convert | def convert(self, value, view):
"""Check that the value is a string and matches the pattern.
"""
if isinstance(value, BASESTRING):
if self.pattern and not self.regex.match(value):
self.fail(
u"must match the pattern {0}".format(self.pattern),
view
)
return value
else:
self.fail(u'must be a string', view, True) | python | def convert(self, value, view):
"""Check that the value is a string and matches the pattern.
"""
if isinstance(value, BASESTRING):
if self.pattern and not self.regex.match(value):
self.fail(
u"must match the pattern {0}".format(self.pattern),
view
)
return value
else:
self.fail(u'must be a string', view, True) | [
"def",
"convert",
"(",
"self",
",",
"value",
",",
"view",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"BASESTRING",
")",
":",
"if",
"self",
".",
"pattern",
"and",
"not",
"self",
".",
"regex",
".",
"match",
"(",
"value",
")",
":",
"self",
".",
... | Check that the value is a string and matches the pattern. | [
"Check",
"that",
"the",
"value",
"is",
"a",
"string",
"and",
"matches",
"the",
"pattern",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L1256-L1267 | train | 199,921 |
sampsyo/confuse | confuse.py | OneOf.convert | def convert(self, value, view):
"""Ensure that the value follows at least one template.
"""
is_mapping = isinstance(self.template, MappingTemplate)
for candidate in self.allowed:
try:
if is_mapping:
if isinstance(candidate, Filename) and \
candidate.relative_to:
next_template = candidate.template_with_relatives(
view,
self.template
)
next_template.subtemplates[view.key] = as_template(
candidate
)
else:
next_template = MappingTemplate({view.key: candidate})
return view.parent.get(next_template)[view.key]
else:
return view.get(candidate)
except ConfigTemplateError:
raise
except ConfigError:
pass
except ValueError as exc:
raise ConfigTemplateError(exc)
self.fail(
u'must be one of {0}, not {1}'.format(
repr(self.allowed), repr(value)
),
view
) | python | def convert(self, value, view):
"""Ensure that the value follows at least one template.
"""
is_mapping = isinstance(self.template, MappingTemplate)
for candidate in self.allowed:
try:
if is_mapping:
if isinstance(candidate, Filename) and \
candidate.relative_to:
next_template = candidate.template_with_relatives(
view,
self.template
)
next_template.subtemplates[view.key] = as_template(
candidate
)
else:
next_template = MappingTemplate({view.key: candidate})
return view.parent.get(next_template)[view.key]
else:
return view.get(candidate)
except ConfigTemplateError:
raise
except ConfigError:
pass
except ValueError as exc:
raise ConfigTemplateError(exc)
self.fail(
u'must be one of {0}, not {1}'.format(
repr(self.allowed), repr(value)
),
view
) | [
"def",
"convert",
"(",
"self",
",",
"value",
",",
"view",
")",
":",
"is_mapping",
"=",
"isinstance",
"(",
"self",
".",
"template",
",",
"MappingTemplate",
")",
"for",
"candidate",
"in",
"self",
".",
"allowed",
":",
"try",
":",
"if",
"is_mapping",
":",
... | Ensure that the value follows at least one template. | [
"Ensure",
"that",
"the",
"value",
"follows",
"at",
"least",
"one",
"template",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/confuse.py#L1341-L1377 | train | 199,922 |
sampsyo/confuse | setup.py | CustomDistribution.export_live_eggs | def export_live_eggs(self, env=False):
"""Adds all of the eggs in the current environment to PYTHONPATH."""
path_eggs = [p for p in sys.path if p.endswith('.egg')]
command = self.get_finalized_command("egg_info")
egg_base = path.abspath(command.egg_base)
unique_path_eggs = set(path_eggs + [egg_base])
os.environ['PYTHONPATH'] = ':'.join(unique_path_eggs) | python | def export_live_eggs(self, env=False):
"""Adds all of the eggs in the current environment to PYTHONPATH."""
path_eggs = [p for p in sys.path if p.endswith('.egg')]
command = self.get_finalized_command("egg_info")
egg_base = path.abspath(command.egg_base)
unique_path_eggs = set(path_eggs + [egg_base])
os.environ['PYTHONPATH'] = ':'.join(unique_path_eggs) | [
"def",
"export_live_eggs",
"(",
"self",
",",
"env",
"=",
"False",
")",
":",
"path_eggs",
"=",
"[",
"p",
"for",
"p",
"in",
"sys",
".",
"path",
"if",
"p",
".",
"endswith",
"(",
"'.egg'",
")",
"]",
"command",
"=",
"self",
".",
"get_finalized_command",
"... | Adds all of the eggs in the current environment to PYTHONPATH. | [
"Adds",
"all",
"of",
"the",
"eggs",
"in",
"the",
"current",
"environment",
"to",
"PYTHONPATH",
"."
] | 9ff0992e30470f6822824711950e6dd906e253fb | https://github.com/sampsyo/confuse/blob/9ff0992e30470f6822824711950e6dd906e253fb/setup.py#L21-L30 | train | 199,923 |
doloopwhile/PyExecJS | execjs/_runtimes.py | get_from_environment | def get_from_environment():
'''
Return the JavaScript runtime that is specified in EXECJS_RUNTIME environment variable.
If EXECJS_RUNTIME environment variable is empty or invalid, return None.
'''
name = os.environ.get("EXECJS_RUNTIME", "")
if not name:
return None
try:
return _find_runtime_by_name(name)
except exceptions.RuntimeUnavailableError:
return None | python | def get_from_environment():
'''
Return the JavaScript runtime that is specified in EXECJS_RUNTIME environment variable.
If EXECJS_RUNTIME environment variable is empty or invalid, return None.
'''
name = os.environ.get("EXECJS_RUNTIME", "")
if not name:
return None
try:
return _find_runtime_by_name(name)
except exceptions.RuntimeUnavailableError:
return None | [
"def",
"get_from_environment",
"(",
")",
":",
"name",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"EXECJS_RUNTIME\"",
",",
"\"\"",
")",
"if",
"not",
"name",
":",
"return",
"None",
"try",
":",
"return",
"_find_runtime_by_name",
"(",
"name",
")",
"except",... | Return the JavaScript runtime that is specified in EXECJS_RUNTIME environment variable.
If EXECJS_RUNTIME environment variable is empty or invalid, return None. | [
"Return",
"the",
"JavaScript",
"runtime",
"that",
"is",
"specified",
"in",
"EXECJS_RUNTIME",
"environment",
"variable",
".",
"If",
"EXECJS_RUNTIME",
"environment",
"variable",
"is",
"empty",
"or",
"invalid",
"return",
"None",
"."
] | e300f0a8120c0b7b70eed0758c3c85a9bd1a7b9f | https://github.com/doloopwhile/PyExecJS/blob/e300f0a8120c0b7b70eed0758c3c85a9bd1a7b9f/execjs/_runtimes.py#L30-L42 | train | 199,924 |
cucumber/gherkin-python | gherkin/gherkin_line.py | GherkinLine.split_table_cells | def split_table_cells(self, row):
"""
An iterator returning all the table cells in a row with their positions,
accounting for escaping.
"""
row = iter(row)
col = 0
start_col = col + 1
cell = ''
first_cell = True
while True:
char = next(row, None)
col += 1
if char == '|':
if first_cell:
# First cell (content before the first |) is skipped
first_cell = False
else:
yield (cell, start_col)
cell = ''
start_col = col + 1
elif char == '\\':
char = next(row)
col += 1
if char == 'n':
cell += '\n'
else:
if char not in ['|', '\\']:
cell += '\\'
cell += char
elif char:
cell += char
else:
break | python | def split_table_cells(self, row):
"""
An iterator returning all the table cells in a row with their positions,
accounting for escaping.
"""
row = iter(row)
col = 0
start_col = col + 1
cell = ''
first_cell = True
while True:
char = next(row, None)
col += 1
if char == '|':
if first_cell:
# First cell (content before the first |) is skipped
first_cell = False
else:
yield (cell, start_col)
cell = ''
start_col = col + 1
elif char == '\\':
char = next(row)
col += 1
if char == 'n':
cell += '\n'
else:
if char not in ['|', '\\']:
cell += '\\'
cell += char
elif char:
cell += char
else:
break | [
"def",
"split_table_cells",
"(",
"self",
",",
"row",
")",
":",
"row",
"=",
"iter",
"(",
"row",
")",
"col",
"=",
"0",
"start_col",
"=",
"col",
"+",
"1",
"cell",
"=",
"''",
"first_cell",
"=",
"True",
"while",
"True",
":",
"char",
"=",
"next",
"(",
... | An iterator returning all the table cells in a row with their positions,
accounting for escaping. | [
"An",
"iterator",
"returning",
"all",
"the",
"table",
"cells",
"in",
"a",
"row",
"with",
"their",
"positions",
"accounting",
"for",
"escaping",
"."
] | 40d7ce0b30b40e4589b233947f5339cbc61715a6 | https://github.com/cucumber/gherkin-python/blob/40d7ce0b30b40e4589b233947f5339cbc61715a6/gherkin/gherkin_line.py#L34-L68 | train | 199,925 |
openeemeter/eemeter | eemeter/transform.py | as_freq | def as_freq(data_series, freq, atomic_freq="1 Min", series_type="cumulative"):
"""Resample data to a different frequency.
This method can be used to upsample or downsample meter data. The
assumption it makes to do so is that meter data is constant and averaged
over the given periods. For instance, to convert billing-period data to
daily data, this method first upsamples to the atomic frequency
(1 minute freqency, by default), "spreading" usage evenly across all
minutes in each period. Then it downsamples to hourly frequency and
returns that result. With instantaneous series, the data is copied to all
contiguous time intervals and the mean over `freq` is returned.
**Caveats**:
- This method gives a fair amount of flexibility in
resampling as long as you are OK with the assumption that usage is
constant over the period (this assumption is generally broken in
observed data at large enough frequencies, so this caveat should not be
taken lightly).
Parameters
----------
data_series : :any:`pandas.Series`
Data to resample. Should have a :any:`pandas.DatetimeIndex`.
freq : :any:`str`
The frequency to resample to. This should be given in a form recognized
by the :any:`pandas.Series.resample` method.
atomic_freq : :any:`str`, optional
The "atomic" frequency of the intermediate data form. This can be
adjusted to a higher atomic frequency to increase speed or memory
performance.
series_type : :any:`str`, {'cumulative', ‘instantaneous’},
default 'cumulative'
Type of data sampling. 'cumulative' data can be spread over smaller
time intervals and is aggregated using addition (e.g. meter data).
'instantaneous' data is copied (not spread) over smaller time intervals
and is aggregated by averaging (e.g. weather data).
Returns
-------
resampled_data : :any:`pandas.Series`
Data resampled to the given frequency.
"""
# TODO(philngo): make sure this complies with CalTRACK 2.2.2.1
if not isinstance(data_series, pd.Series):
raise ValueError(
"expected series, got object with class {}".format(data_series.__class__)
)
if data_series.empty:
return data_series
series = remove_duplicates(data_series)
target_freq = pd.Timedelta(atomic_freq)
timedeltas = (series.index[1:] - series.index[:-1]).append(
pd.TimedeltaIndex([pd.NaT])
)
if series_type == "cumulative":
spread_factor = target_freq.total_seconds() / timedeltas.total_seconds()
series_spread = series * spread_factor
atomic_series = series_spread.asfreq(atomic_freq, method="ffill")
resampled = atomic_series.resample(freq).sum()
resampled_with_nans = atomic_series.resample(freq).mean()
resampled = resampled[resampled_with_nans.notnull()].reindex(resampled.index)
elif series_type == "instantaneous":
atomic_series = series.asfreq(atomic_freq, method="ffill")
resampled = atomic_series.resample(freq).mean()
if resampled.index[-1] < series.index[-1]:
# this adds a null at the end using the target frequency
last_index = pd.date_range(resampled.index[-1], freq=freq, periods=2)[1:]
resampled = (
pd.concat([resampled, pd.Series(np.nan, index=last_index)])
.resample(freq)
.mean()
)
return resampled | python | def as_freq(data_series, freq, atomic_freq="1 Min", series_type="cumulative"):
"""Resample data to a different frequency.
This method can be used to upsample or downsample meter data. The
assumption it makes to do so is that meter data is constant and averaged
over the given periods. For instance, to convert billing-period data to
daily data, this method first upsamples to the atomic frequency
(1 minute freqency, by default), "spreading" usage evenly across all
minutes in each period. Then it downsamples to hourly frequency and
returns that result. With instantaneous series, the data is copied to all
contiguous time intervals and the mean over `freq` is returned.
**Caveats**:
- This method gives a fair amount of flexibility in
resampling as long as you are OK with the assumption that usage is
constant over the period (this assumption is generally broken in
observed data at large enough frequencies, so this caveat should not be
taken lightly).
Parameters
----------
data_series : :any:`pandas.Series`
Data to resample. Should have a :any:`pandas.DatetimeIndex`.
freq : :any:`str`
The frequency to resample to. This should be given in a form recognized
by the :any:`pandas.Series.resample` method.
atomic_freq : :any:`str`, optional
The "atomic" frequency of the intermediate data form. This can be
adjusted to a higher atomic frequency to increase speed or memory
performance.
series_type : :any:`str`, {'cumulative', ‘instantaneous’},
default 'cumulative'
Type of data sampling. 'cumulative' data can be spread over smaller
time intervals and is aggregated using addition (e.g. meter data).
'instantaneous' data is copied (not spread) over smaller time intervals
and is aggregated by averaging (e.g. weather data).
Returns
-------
resampled_data : :any:`pandas.Series`
Data resampled to the given frequency.
"""
# TODO(philngo): make sure this complies with CalTRACK 2.2.2.1
if not isinstance(data_series, pd.Series):
raise ValueError(
"expected series, got object with class {}".format(data_series.__class__)
)
if data_series.empty:
return data_series
series = remove_duplicates(data_series)
target_freq = pd.Timedelta(atomic_freq)
timedeltas = (series.index[1:] - series.index[:-1]).append(
pd.TimedeltaIndex([pd.NaT])
)
if series_type == "cumulative":
spread_factor = target_freq.total_seconds() / timedeltas.total_seconds()
series_spread = series * spread_factor
atomic_series = series_spread.asfreq(atomic_freq, method="ffill")
resampled = atomic_series.resample(freq).sum()
resampled_with_nans = atomic_series.resample(freq).mean()
resampled = resampled[resampled_with_nans.notnull()].reindex(resampled.index)
elif series_type == "instantaneous":
atomic_series = series.asfreq(atomic_freq, method="ffill")
resampled = atomic_series.resample(freq).mean()
if resampled.index[-1] < series.index[-1]:
# this adds a null at the end using the target frequency
last_index = pd.date_range(resampled.index[-1], freq=freq, periods=2)[1:]
resampled = (
pd.concat([resampled, pd.Series(np.nan, index=last_index)])
.resample(freq)
.mean()
)
return resampled | [
"def",
"as_freq",
"(",
"data_series",
",",
"freq",
",",
"atomic_freq",
"=",
"\"1 Min\"",
",",
"series_type",
"=",
"\"cumulative\"",
")",
":",
"# TODO(philngo): make sure this complies with CalTRACK 2.2.2.1",
"if",
"not",
"isinstance",
"(",
"data_series",
",",
"pd",
".... | Resample data to a different frequency.
This method can be used to upsample or downsample meter data. The
assumption it makes to do so is that meter data is constant and averaged
over the given periods. For instance, to convert billing-period data to
daily data, this method first upsamples to the atomic frequency
(1 minute freqency, by default), "spreading" usage evenly across all
minutes in each period. Then it downsamples to hourly frequency and
returns that result. With instantaneous series, the data is copied to all
contiguous time intervals and the mean over `freq` is returned.
**Caveats**:
- This method gives a fair amount of flexibility in
resampling as long as you are OK with the assumption that usage is
constant over the period (this assumption is generally broken in
observed data at large enough frequencies, so this caveat should not be
taken lightly).
Parameters
----------
data_series : :any:`pandas.Series`
Data to resample. Should have a :any:`pandas.DatetimeIndex`.
freq : :any:`str`
The frequency to resample to. This should be given in a form recognized
by the :any:`pandas.Series.resample` method.
atomic_freq : :any:`str`, optional
The "atomic" frequency of the intermediate data form. This can be
adjusted to a higher atomic frequency to increase speed or memory
performance.
series_type : :any:`str`, {'cumulative', ‘instantaneous’},
default 'cumulative'
Type of data sampling. 'cumulative' data can be spread over smaller
time intervals and is aggregated using addition (e.g. meter data).
'instantaneous' data is copied (not spread) over smaller time intervals
and is aggregated by averaging (e.g. weather data).
Returns
-------
resampled_data : :any:`pandas.Series`
Data resampled to the given frequency. | [
"Resample",
"data",
"to",
"a",
"different",
"frequency",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/transform.py#L62-L138 | train | 199,926 |
openeemeter/eemeter | eemeter/transform.py | get_baseline_data | def get_baseline_data(
data,
start=None,
end=None,
max_days=365,
allow_billing_period_overshoot=False,
ignore_billing_period_gap_for_day_count=False,
):
""" Filter down to baseline period data.
.. note::
For compliance with CalTRACK, set ``max_days=365`` (section 2.2.1.1).
Parameters
----------
data : :any:`pandas.DataFrame` or :any:`pandas.Series`
The data to filter to baseline data. This data will be filtered down
to an acceptable baseline period according to the dates passed as
`start` and `end`, or the maximum period specified with `max_days`.
start : :any:`datetime.datetime`
A timezone-aware datetime that represents the earliest allowable start
date for the baseline data. The stricter of this or `max_days` is used
to determine the earliest allowable baseline period date.
end : :any:`datetime.datetime`
A timezone-aware datetime that represents the latest allowable end
date for the baseline data, i.e., the latest date for which data is
available before the intervention begins.
max_days : :any:`int`, default 365
The maximum length of the period. Ignored if `end` is not set.
The stricter of this or `start` is used to determine the earliest
allowable baseline period date.
allow_billing_period_overshoot : :any:`bool`, default False
If True, count `max_days` from the end of the last billing data period
that ends before the `end` date, rather than from the exact `end` date.
Otherwise use the exact `end` date as the cutoff.
ignore_billing_period_gap_for_day_count : :any:`bool`, default False
If True, instead of going back `max_days` from either the
`end` date or end of the last billing period before that date (depending
on the value of the `allow_billing_period_overshoot` setting) and
excluding the last period that began before that date, first check to
see if excluding or including that period gets closer to a total of
`max_days` of data.
For example, with `max_days=365`, if an exact 365 period would targeted
Feb 15, but the billing period went from Jan 20 to Feb 20, exclude that
period for a total of ~360 days of data, because that's closer to 365
than ~390 days, which would be the total if that period was included.
If, on the other hand, if that period started Feb 10 and went to Mar 10,
include the period, because ~370 days of data is closer to than ~340.
Returns
-------
baseline_data, warnings : :any:`tuple` of (:any:`pandas.DataFrame` or :any:`pandas.Series`, :any:`list` of :any:`eemeter.EEMeterWarning`)
Data for only the specified baseline period and any associated warnings.
"""
if max_days is not None:
if start is not None:
raise ValueError( # pragma: no cover
"If max_days is set, start cannot be set: start={}, max_days={}.".format(
start, max_days
)
)
start_inf = False
if start is None:
# py datetime min/max are out of range of pd.Timestamp min/max
start_target = pytz.UTC.localize(pd.Timestamp.min)
start_inf = True
else:
start_target = start
end_inf = False
if end is None:
end_limit = pytz.UTC.localize(pd.Timestamp.max)
end_inf = True
else:
end_limit = end
# copying prevents setting on slice warnings
data_before_end_limit = data[:end_limit].copy()
if ignore_billing_period_gap_for_day_count:
end_limit = data_before_end_limit.index.max()
if not end_inf and max_days is not None:
start_target = end_limit - timedelta(days=max_days)
if allow_billing_period_overshoot:
# adjust start limit to get a selection closest to max_days
# also consider ffill for get_loc method - always picks previous
try:
loc = data_before_end_limit.index.get_loc(start_target, method="nearest")
except (KeyError, IndexError): # pragma: no cover
baseline_data = data_before_end_limit
start_limit = start_target
else:
start_limit = data_before_end_limit.index[loc]
baseline_data = data_before_end_limit[start_limit:].copy()
else:
# use hard limit for baseline start
start_limit = start_target
baseline_data = data_before_end_limit[start_limit:].copy()
if baseline_data.dropna().empty:
raise NoBaselineDataError()
baseline_data.iloc[-1] = np.nan
data_end = data.index.max()
data_start = data.index.min()
return (
baseline_data,
_make_baseline_warnings(
end_inf, start_inf, data_start, data_end, start_limit, end_limit
),
) | python | def get_baseline_data(
data,
start=None,
end=None,
max_days=365,
allow_billing_period_overshoot=False,
ignore_billing_period_gap_for_day_count=False,
):
""" Filter down to baseline period data.
.. note::
For compliance with CalTRACK, set ``max_days=365`` (section 2.2.1.1).
Parameters
----------
data : :any:`pandas.DataFrame` or :any:`pandas.Series`
The data to filter to baseline data. This data will be filtered down
to an acceptable baseline period according to the dates passed as
`start` and `end`, or the maximum period specified with `max_days`.
start : :any:`datetime.datetime`
A timezone-aware datetime that represents the earliest allowable start
date for the baseline data. The stricter of this or `max_days` is used
to determine the earliest allowable baseline period date.
end : :any:`datetime.datetime`
A timezone-aware datetime that represents the latest allowable end
date for the baseline data, i.e., the latest date for which data is
available before the intervention begins.
max_days : :any:`int`, default 365
The maximum length of the period. Ignored if `end` is not set.
The stricter of this or `start` is used to determine the earliest
allowable baseline period date.
allow_billing_period_overshoot : :any:`bool`, default False
If True, count `max_days` from the end of the last billing data period
that ends before the `end` date, rather than from the exact `end` date.
Otherwise use the exact `end` date as the cutoff.
ignore_billing_period_gap_for_day_count : :any:`bool`, default False
If True, instead of going back `max_days` from either the
`end` date or end of the last billing period before that date (depending
on the value of the `allow_billing_period_overshoot` setting) and
excluding the last period that began before that date, first check to
see if excluding or including that period gets closer to a total of
`max_days` of data.
For example, with `max_days=365`, if an exact 365 period would targeted
Feb 15, but the billing period went from Jan 20 to Feb 20, exclude that
period for a total of ~360 days of data, because that's closer to 365
than ~390 days, which would be the total if that period was included.
If, on the other hand, if that period started Feb 10 and went to Mar 10,
include the period, because ~370 days of data is closer to than ~340.
Returns
-------
baseline_data, warnings : :any:`tuple` of (:any:`pandas.DataFrame` or :any:`pandas.Series`, :any:`list` of :any:`eemeter.EEMeterWarning`)
Data for only the specified baseline period and any associated warnings.
"""
if max_days is not None:
if start is not None:
raise ValueError( # pragma: no cover
"If max_days is set, start cannot be set: start={}, max_days={}.".format(
start, max_days
)
)
start_inf = False
if start is None:
# py datetime min/max are out of range of pd.Timestamp min/max
start_target = pytz.UTC.localize(pd.Timestamp.min)
start_inf = True
else:
start_target = start
end_inf = False
if end is None:
end_limit = pytz.UTC.localize(pd.Timestamp.max)
end_inf = True
else:
end_limit = end
# copying prevents setting on slice warnings
data_before_end_limit = data[:end_limit].copy()
if ignore_billing_period_gap_for_day_count:
end_limit = data_before_end_limit.index.max()
if not end_inf and max_days is not None:
start_target = end_limit - timedelta(days=max_days)
if allow_billing_period_overshoot:
# adjust start limit to get a selection closest to max_days
# also consider ffill for get_loc method - always picks previous
try:
loc = data_before_end_limit.index.get_loc(start_target, method="nearest")
except (KeyError, IndexError): # pragma: no cover
baseline_data = data_before_end_limit
start_limit = start_target
else:
start_limit = data_before_end_limit.index[loc]
baseline_data = data_before_end_limit[start_limit:].copy()
else:
# use hard limit for baseline start
start_limit = start_target
baseline_data = data_before_end_limit[start_limit:].copy()
if baseline_data.dropna().empty:
raise NoBaselineDataError()
baseline_data.iloc[-1] = np.nan
data_end = data.index.max()
data_start = data.index.min()
return (
baseline_data,
_make_baseline_warnings(
end_inf, start_inf, data_start, data_end, start_limit, end_limit
),
) | [
"def",
"get_baseline_data",
"(",
"data",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"max_days",
"=",
"365",
",",
"allow_billing_period_overshoot",
"=",
"False",
",",
"ignore_billing_period_gap_for_day_count",
"=",
"False",
",",
")",
":",
"if",
"m... | Filter down to baseline period data.
.. note::
For compliance with CalTRACK, set ``max_days=365`` (section 2.2.1.1).
Parameters
----------
data : :any:`pandas.DataFrame` or :any:`pandas.Series`
The data to filter to baseline data. This data will be filtered down
to an acceptable baseline period according to the dates passed as
`start` and `end`, or the maximum period specified with `max_days`.
start : :any:`datetime.datetime`
A timezone-aware datetime that represents the earliest allowable start
date for the baseline data. The stricter of this or `max_days` is used
to determine the earliest allowable baseline period date.
end : :any:`datetime.datetime`
A timezone-aware datetime that represents the latest allowable end
date for the baseline data, i.e., the latest date for which data is
available before the intervention begins.
max_days : :any:`int`, default 365
The maximum length of the period. Ignored if `end` is not set.
The stricter of this or `start` is used to determine the earliest
allowable baseline period date.
allow_billing_period_overshoot : :any:`bool`, default False
If True, count `max_days` from the end of the last billing data period
that ends before the `end` date, rather than from the exact `end` date.
Otherwise use the exact `end` date as the cutoff.
ignore_billing_period_gap_for_day_count : :any:`bool`, default False
If True, instead of going back `max_days` from either the
`end` date or end of the last billing period before that date (depending
on the value of the `allow_billing_period_overshoot` setting) and
excluding the last period that began before that date, first check to
see if excluding or including that period gets closer to a total of
`max_days` of data.
For example, with `max_days=365`, if an exact 365 day period would have targeted
Feb 15, but the billing period went from Jan 20 to Feb 20, exclude that
period for a total of ~360 days of data, because that's closer to 365
than ~390 days, which would be the total if that period was included.
If, on the other hand, that period started Feb 10 and went to Mar 10,
include the period, because ~370 days of data is closer to 365 than ~340.
Returns
-------
baseline_data, warnings : :any:`tuple` of (:any:`pandas.DataFrame` or :any:`pandas.Series`, :any:`list` of :any:`eemeter.EEMeterWarning`)
Data for only the specified baseline period and any associated warnings. | [
"Filter",
"down",
"to",
"baseline",
"period",
"data",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/transform.py#L202-L319 | train | 199,927 |
def get_reporting_data(
    data,
    start=None,
    end=None,
    max_days=365,
    allow_billing_period_overshoot=False,
    ignore_billing_period_gap_for_day_count=False,
):
    """ Filter down to reporting period data.

    Parameters
    ----------
    data : :any:`pandas.DataFrame` or :any:`pandas.Series`
        The data to filter to reporting data. This data will be filtered down
        to an acceptable reporting period according to the dates passed as
        `start` and `end`, or the maximum period specified with `max_days`.
    start : :any:`datetime.datetime`
        A timezone-aware datetime that represents the earliest allowable start
        date for the reporting data, i.e., the earliest date for which data is
        available after the intervention begins.
    end : :any:`datetime.datetime`
        A timezone-aware datetime that represents the latest allowable end
        date for the reporting data. The stricter of this or `max_days` is used
        to determine the latest allowable reporting period date.
    max_days : :any:`int`, default 365
        The maximum length of the period. Ignored if `start` is not set.
        The stricter of this or `end` is used to determine the latest
        allowable reporting period date.
    allow_billing_period_overshoot : :any:`bool`, default False
        If True, count `max_days` from the start of the first billing data period
        that starts after the `start` date, rather than from the exact `start` date.
        Otherwise use the exact `start` date as the cutoff.
    ignore_billing_period_gap_for_day_count : :any:`bool`, default False
        If True, instead of going forward `max_days` from either the
        `start` date or the `start` of the first billing period after that date
        (depending on the value of the `allow_billing_period_overshoot` setting)
        and excluding the first period that ended after that date, first check
        to see if excluding or including that period gets closer to a total of
        `max_days` of data.

        For example, with `max_days=365`, if an exact 365 day period would have
        targeted Feb 15, but the billing period went from Jan 20 to Feb 20,
        include that period for a total of ~370 days of data, because that's
        closer to 365 than ~340 days, which would be the total if that period
        was excluded. If, on the other hand, that period started Feb 10 and
        went to Mar 10, exclude the period, because ~360 days of data is
        closer to 365 than ~390.

    Returns
    -------
    reporting_data, warnings : :any:`tuple` of (:any:`pandas.DataFrame` or :any:`pandas.Series`, :any:`list` of :any:`eemeter.EEMeterWarning`)
        Data for only the specified reporting period and any associated warnings.

    Raises
    ------
    ValueError
        If both `end` and `max_days` are given (they are mutually exclusive
        ways of bounding the period).
    NoReportingDataError
        If no non-NaN data remains after filtering.
    """
    # `end` and `max_days` are mutually exclusive ways to bound the period.
    if max_days is not None:
        if end is not None:
            raise ValueError(  # pragma: no cover
                "If max_days is set, end cannot be set: end={}, max_days={}.".format(
                    end, max_days
                )
            )

    # Track whether each bound was left open so warnings can report it.
    start_inf = False
    if start is None:
        # py datetime min/max are out of range of pd.Timestamp min/max
        start_limit = pytz.UTC.localize(pd.Timestamp.min)
        start_inf = True
    else:
        start_limit = start

    end_inf = False
    if end is None:
        end_target = pytz.UTC.localize(pd.Timestamp.max)
        end_inf = True
    else:
        end_target = end

    # copying prevents setting on slice warnings
    data_after_start_limit = data[start_limit:].copy()

    if ignore_billing_period_gap_for_day_count:
        # Count max_days from the first data point actually present after
        # `start`, ignoring any gap between `start` and that point.
        start_limit = data_after_start_limit.index.min()

    if not start_inf and max_days is not None:
        end_target = start_limit + timedelta(days=max_days)

    if allow_billing_period_overshoot:
        # adjust end limit to get a selection closest to max_days
        # also consider bfill for get_loc method - always picks next
        try:
            loc = data_after_start_limit.index.get_loc(end_target, method="nearest")
        except (KeyError, IndexError):  # pragma: no cover
            # No usable index position; fall back to the unclipped selection.
            reporting_data = data_after_start_limit
            end_limit = end_target
        else:
            end_limit = data_after_start_limit.index[loc]
            reporting_data = data_after_start_limit[:end_limit].copy()
    else:
        # use hard limit for reporting end
        end_limit = end_target
        reporting_data = data_after_start_limit[:end_limit].copy()

    if reporting_data.dropna().empty:
        raise NoReportingDataError()

    # NOTE(review): the final index value appears to mark the end boundary of
    # the selection rather than a complete usage period, so its value is
    # blanked -- confirm against the period-start indexing convention used
    # by callers.
    reporting_data.iloc[-1] = np.nan

    data_end = data.index.max()
    data_start = data.index.min()
    return (
        reporting_data,
        _make_reporting_warnings(
            end_inf, start_inf, data_start, data_end, start_limit, end_limit
        ),
    )
data,
start=None,
end=None,
max_days=365,
allow_billing_period_overshoot=False,
ignore_billing_period_gap_for_day_count=False,
):
""" Filter down to reporting period data.
Parameters
----------
data : :any:`pandas.DataFrame` or :any:`pandas.Series`
The data to filter to reporting data. This data will be filtered down
to an acceptable reporting period according to the dates passed as
`start` and `end`, or the maximum period specified with `max_days`.
start : :any:`datetime.datetime`
A timezone-aware datetime that represents the earliest allowable start
date for the reporting data, i.e., the earliest date for which data is
available after the intervention begins.
end : :any:`datetime.datetime`
A timezone-aware datetime that represents the latest allowable end
date for the reporting data. The stricter of this or `max_days` is used
to determine the latest allowable reporting period date.
max_days : :any:`int`, default 365
The maximum length of the period. Ignored if `start` is not set.
The stricter of this or `end` is used to determine the latest
allowable reporting period date.
allow_billing_period_overshoot : :any:`bool`, default False
If True, count `max_days` from the start of the first billing data period
that starts after the `start` date, rather than from the exact `start` date.
Otherwise use the exact `start` date as the cutoff.
ignore_billing_period_gap_for_day_count : :any:`bool`, default False
If True, instead of going forward `max_days` from either the
`start` date or the `start` of the first billing period after that date
(depending on the value of the `allow_billing_period_overshoot` setting)
and excluding the first period that ended after that date, first check
to see if excluding or including that period gets closer to a total of
`max_days` of data.
For example, with `max_days=365`, if an exact 365 period would targeted
Feb 15, but the billing period went from Jan 20 to Feb 20, include that
period for a total of ~370 days of data, because that's closer to 365
than ~340 days, which would be the total if that period was excluded.
If, on the other hand, if that period started Feb 10 and went to Mar 10,
exclude the period, because ~360 days of data is closer to than ~390.
Returns
-------
reporting_data, warnings : :any:`tuple` of (:any:`pandas.DataFrame` or :any:`pandas.Series`, :any:`list` of :any:`eemeter.EEMeterWarning`)
Data for only the specified reporting period and any associated warnings.
"""
if max_days is not None:
if end is not None:
raise ValueError( # pragma: no cover
"If max_days is set, end cannot be set: end={}, max_days={}.".format(
end, max_days
)
)
start_inf = False
if start is None:
# py datetime min/max are out of range of pd.Timestamp min/max
start_limit = pytz.UTC.localize(pd.Timestamp.min)
start_inf = True
else:
start_limit = start
end_inf = False
if end is None:
end_target = pytz.UTC.localize(pd.Timestamp.max)
end_inf = True
else:
end_target = end
# copying prevents setting on slice warnings
data_after_start_limit = data[start_limit:].copy()
if ignore_billing_period_gap_for_day_count:
start_limit = data_after_start_limit.index.min()
if not start_inf and max_days is not None:
end_target = start_limit + timedelta(days=max_days)
if allow_billing_period_overshoot:
# adjust start limit to get a selection closest to max_days
# also consider bfill for get_loc method - always picks next
try:
loc = data_after_start_limit.index.get_loc(end_target, method="nearest")
except (KeyError, IndexError): # pragma: no cover
reporting_data = data_after_start_limit
end_limit = end_target
else:
end_limit = data_after_start_limit.index[loc]
reporting_data = data_after_start_limit[:end_limit].copy()
else:
# use hard limit for baseline start
end_limit = end_target
reporting_data = data_after_start_limit[:end_limit].copy()
if reporting_data.dropna().empty:
raise NoReportingDataError()
reporting_data.iloc[-1] = np.nan
data_end = data.index.max()
data_start = data.index.min()
return (
reporting_data,
_make_reporting_warnings(
end_inf, start_inf, data_start, data_end, start_limit, end_limit
),
) | [
"def",
"get_reporting_data",
"(",
"data",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"max_days",
"=",
"365",
",",
"allow_billing_period_overshoot",
"=",
"False",
",",
"ignore_billing_period_gap_for_day_count",
"=",
"False",
",",
")",
":",
"if",
"... | Filter down to reporting period data.
Parameters
----------
data : :any:`pandas.DataFrame` or :any:`pandas.Series`
The data to filter to reporting data. This data will be filtered down
to an acceptable reporting period according to the dates passed as
`start` and `end`, or the maximum period specified with `max_days`.
start : :any:`datetime.datetime`
A timezone-aware datetime that represents the earliest allowable start
date for the reporting data, i.e., the earliest date for which data is
available after the intervention begins.
end : :any:`datetime.datetime`
A timezone-aware datetime that represents the latest allowable end
date for the reporting data. The stricter of this or `max_days` is used
to determine the latest allowable reporting period date.
max_days : :any:`int`, default 365
The maximum length of the period. Ignored if `start` is not set.
The stricter of this or `end` is used to determine the latest
allowable reporting period date.
allow_billing_period_overshoot : :any:`bool`, default False
If True, count `max_days` from the start of the first billing data period
that starts after the `start` date, rather than from the exact `start` date.
Otherwise use the exact `start` date as the cutoff.
ignore_billing_period_gap_for_day_count : :any:`bool`, default False
If True, instead of going forward `max_days` from either the
`start` date or the `start` of the first billing period after that date
(depending on the value of the `allow_billing_period_overshoot` setting)
and excluding the first period that ended after that date, first check
to see if excluding or including that period gets closer to a total of
`max_days` of data.
For example, with `max_days=365`, if an exact 365 day period would have targeted
Feb 15, but the billing period went from Jan 20 to Feb 20, include that
period for a total of ~370 days of data, because that's closer to 365
than ~340 days, which would be the total if that period was excluded.
If, on the other hand, that period started Feb 10 and went to Mar 10,
exclude the period, because ~360 days of data is closer to 365 than ~390.
Returns
-------
reporting_data, warnings : :any:`tuple` of (:any:`pandas.DataFrame` or :any:`pandas.Series`, :any:`list` of :any:`eemeter.EEMeterWarning`)
Data for only the specified reporting period and any associated warnings. | [
"Filter",
"down",
"to",
"reporting",
"period",
"data",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/transform.py#L357-L470 | train | 199,928 |
def modeled_savings(
    baseline_model,
    reporting_model,
    result_index,
    temperature_data,
    with_disaggregated=False,
    confidence_level=0.90,
    predict_kwargs=None,
):
    """ Compute modeled savings, i.e., savings in which baseline and reporting
    usage values are based on models. This is appropriate for annualizing or
    weather normalizing models.

    Parameters
    ----------
    baseline_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
        Model to use for predicting pre-intervention usage.
    reporting_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
        Model to use for predicting post-intervention usage.
    result_index : :any:`pandas.DatetimeIndex`
        The dates for which usage should be modeled.
    temperature_data : :any:`pandas.Series`
        Hourly-frequency timeseries of temperature data during the modeled
        period.
    with_disaggregated : :any:`bool`, optional
        If True, calculate modeled disaggregated usage estimates and savings.
    confidence_level : :any:`float`, optional
        The two-tailed confidence level used to calculate the t-statistic used
        in calculation of the error bands. Ignored if not computing error
        bands.
    predict_kwargs : :any:`dict`, optional
        Extra kwargs to pass to the baseline_model.predict and
        reporting_model.predict methods.

    Returns
    -------
    results : :any:`pandas.DataFrame`
        DataFrame with modeled savings, indexed with the result_index. Will
        include the following columns:

        - ``modeled_baseline_usage``
        - ``modeled_reporting_usage``
        - ``modeled_savings``

        If `with_disaggregated` is set to True, the following columns will also
        be in the results DataFrame:

        - ``modeled_baseline_base_load``
        - ``modeled_baseline_cooling_load``
        - ``modeled_baseline_heating_load``
        - ``modeled_reporting_base_load``
        - ``modeled_reporting_cooling_load``
        - ``modeled_reporting_heating_load``
        - ``modeled_base_load_savings``
        - ``modeled_cooling_load_savings``
        - ``modeled_heating_load_savings``

    error_bands : :any:`dict`, optional
        If baseline_model and reporting_model are instances of
        CalTRACKUsagePerDayModelResults, will also return a dictionary of
        FSU and error bands for the aggregated energy savings over the
        normal year period.
    """
    prediction_index = result_index

    if predict_kwargs is None:
        predict_kwargs = {}

    # Disaggregation and error bands are only supported for the CalTRACK
    # usage-per-day model results type; anything else is treated generically.
    model_type = None  # generic
    if isinstance(baseline_model, CalTRACKUsagePerDayModelResults):
        model_type = "usage_per_day"

    if model_type == "usage_per_day" and with_disaggregated:
        predict_kwargs["with_disaggregated"] = True

    def _predicted_usage(model):
        # Run the model over the shared prediction index and unwrap the
        # resulting frame.
        model_prediction = model.predict(
            prediction_index, temperature_data, **predict_kwargs
        )
        predicted_usage = model_prediction.result
        return predicted_usage

    predicted_baseline_usage = _predicted_usage(baseline_model)
    predicted_reporting_usage = _predicted_usage(reporting_model)
    modeled_baseline_usage = predicted_baseline_usage["predicted_usage"].to_frame(
        "modeled_baseline_usage"
    )
    modeled_reporting_usage = predicted_reporting_usage["predicted_usage"].to_frame(
        "modeled_reporting_usage"
    )

    def modeled_savings_func(row):
        # NOTE: `DataFrame.assign` passes the whole joined frame to this
        # callable, so the subtraction below operates column-wise.
        return row.modeled_baseline_usage - row.modeled_reporting_usage

    results = modeled_baseline_usage.join(modeled_reporting_usage).assign(
        modeled_savings=modeled_savings_func
    )

    if model_type == "usage_per_day" and with_disaggregated:
        # Rename the disaggregated prediction columns so baseline and
        # reporting loads can coexist in a single results frame.
        modeled_baseline_usage_disaggregated = predicted_baseline_usage[
            ["base_load", "heating_load", "cooling_load"]
        ].rename(
            columns={
                "base_load": "modeled_baseline_base_load",
                "heating_load": "modeled_baseline_heating_load",
                "cooling_load": "modeled_baseline_cooling_load",
            }
        )

        modeled_reporting_usage_disaggregated = predicted_reporting_usage[
            ["base_load", "heating_load", "cooling_load"]
        ].rename(
            columns={
                "base_load": "modeled_reporting_base_load",
                "heating_load": "modeled_reporting_heating_load",
                "cooling_load": "modeled_reporting_cooling_load",
            }
        )

        def modeled_base_load_savings_func(row):
            return row.modeled_baseline_base_load - row.modeled_reporting_base_load

        def modeled_heating_load_savings_func(row):
            return (
                row.modeled_baseline_heating_load - row.modeled_reporting_heating_load
            )

        def modeled_cooling_load_savings_func(row):
            return (
                row.modeled_baseline_cooling_load - row.modeled_reporting_cooling_load
            )

        results = (
            results.join(modeled_baseline_usage_disaggregated)
            .join(modeled_reporting_usage_disaggregated)
            .assign(
                modeled_base_load_savings=modeled_base_load_savings_func,
                modeled_heating_load_savings=modeled_heating_load_savings_func,
                modeled_cooling_load_savings=modeled_cooling_load_savings_func,
            )
        )

    results = results.dropna().reindex(results.index)  # carry NaNs

    error_bands = None
    if model_type == "usage_per_day":  # has totals_metrics
        error_bands = _compute_error_bands_modeled_savings(
            baseline_model.totals_metrics,
            reporting_model.totals_metrics,
            results,
            baseline_model.interval,
            reporting_model.interval,
            confidence_level,
        )
    return results, error_bands
baseline_model,
reporting_model,
result_index,
temperature_data,
with_disaggregated=False,
confidence_level=0.90,
predict_kwargs=None,
):
""" Compute modeled savings, i.e., savings in which baseline and reporting
usage values are based on models. This is appropriate for annualizing or
weather normalizing models.
Parameters
----------
baseline_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
Model to use for predicting pre-intervention usage.
reporting_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
Model to use for predicting post-intervention usage.
result_index : :any:`pandas.DatetimeIndex`
The dates for which usage should be modeled.
temperature_data : :any:`pandas.Series`
Hourly-frequency timeseries of temperature data during the modeled
period.
with_disaggregated : :any:`bool`, optional
If True, calculate modeled disaggregated usage estimates and savings.
confidence_level : :any:`float`, optional
The two-tailed confidence level used to calculate the t-statistic used
in calculation of the error bands.
Ignored if not computing error bands.
predict_kwargs : :any:`dict`, optional
Extra kwargs to pass to the baseline_model.predict and
reporting_model.predict methods.
Returns
-------
results : :any:`pandas.DataFrame`
DataFrame with modeled savings, indexed with the result_index. Will
include the following columns:
- ``modeled_baseline_usage``
- ``modeled_reporting_usage``
- ``modeled_savings``
If `with_disaggregated` is set to True, the following columns will also
be in the results DataFrame:
- ``modeled_baseline_base_load``
- ``modeled_baseline_cooling_load``
- ``modeled_baseline_heating_load``
- ``modeled_reporting_base_load``
- ``modeled_reporting_cooling_load``
- ``modeled_reporting_heating_load``
- ``modeled_base_load_savings``
- ``modeled_cooling_load_savings``
- ``modeled_heating_load_savings``
error_bands : :any:`dict`, optional
If baseline_model and reporting_model are instances of
CalTRACKUsagePerDayModelResults, will also return a dictionary of
FSU and error bands for the aggregated energy savings over the
normal year period.
"""
prediction_index = result_index
if predict_kwargs is None:
predict_kwargs = {}
model_type = None # generic
if isinstance(baseline_model, CalTRACKUsagePerDayModelResults):
model_type = "usage_per_day"
if model_type == "usage_per_day" and with_disaggregated:
predict_kwargs["with_disaggregated"] = True
def _predicted_usage(model):
model_prediction = model.predict(
prediction_index, temperature_data, **predict_kwargs
)
predicted_usage = model_prediction.result
return predicted_usage
predicted_baseline_usage = _predicted_usage(baseline_model)
predicted_reporting_usage = _predicted_usage(reporting_model)
modeled_baseline_usage = predicted_baseline_usage["predicted_usage"].to_frame(
"modeled_baseline_usage"
)
modeled_reporting_usage = predicted_reporting_usage["predicted_usage"].to_frame(
"modeled_reporting_usage"
)
def modeled_savings_func(row):
return row.modeled_baseline_usage - row.modeled_reporting_usage
results = modeled_baseline_usage.join(modeled_reporting_usage).assign(
modeled_savings=modeled_savings_func
)
if model_type == "usage_per_day" and with_disaggregated:
modeled_baseline_usage_disaggregated = predicted_baseline_usage[
["base_load", "heating_load", "cooling_load"]
].rename(
columns={
"base_load": "modeled_baseline_base_load",
"heating_load": "modeled_baseline_heating_load",
"cooling_load": "modeled_baseline_cooling_load",
}
)
modeled_reporting_usage_disaggregated = predicted_reporting_usage[
["base_load", "heating_load", "cooling_load"]
].rename(
columns={
"base_load": "modeled_reporting_base_load",
"heating_load": "modeled_reporting_heating_load",
"cooling_load": "modeled_reporting_cooling_load",
}
)
def modeled_base_load_savings_func(row):
return row.modeled_baseline_base_load - row.modeled_reporting_base_load
def modeled_heating_load_savings_func(row):
return (
row.modeled_baseline_heating_load - row.modeled_reporting_heating_load
)
def modeled_cooling_load_savings_func(row):
return (
row.modeled_baseline_cooling_load - row.modeled_reporting_cooling_load
)
results = (
results.join(modeled_baseline_usage_disaggregated)
.join(modeled_reporting_usage_disaggregated)
.assign(
modeled_base_load_savings=modeled_base_load_savings_func,
modeled_heating_load_savings=modeled_heating_load_savings_func,
modeled_cooling_load_savings=modeled_cooling_load_savings_func,
)
)
results = results.dropna().reindex(results.index) # carry NaNs
error_bands = None
if model_type == "usage_per_day": # has totals_metrics
error_bands = _compute_error_bands_modeled_savings(
baseline_model.totals_metrics,
reporting_model.totals_metrics,
results,
baseline_model.interval,
reporting_model.interval,
confidence_level,
)
return results, error_bands | [
"def",
"modeled_savings",
"(",
"baseline_model",
",",
"reporting_model",
",",
"result_index",
",",
"temperature_data",
",",
"with_disaggregated",
"=",
"False",
",",
"confidence_level",
"=",
"0.90",
",",
"predict_kwargs",
"=",
"None",
",",
")",
":",
"prediction_index... | Compute modeled savings, i.e., savings in which baseline and reporting
usage values are based on models. This is appropriate for annualizing or
weather normalizing models.
Parameters
----------
baseline_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
Model to use for predicting pre-intervention usage.
reporting_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
Model to use for predicting post-intervention usage.
result_index : :any:`pandas.DatetimeIndex`
The dates for which usage should be modeled.
temperature_data : :any:`pandas.Series`
Hourly-frequency timeseries of temperature data during the modeled
period.
with_disaggregated : :any:`bool`, optional
If True, calculate modeled disaggregated usage estimates and savings.
confidence_level : :any:`float`, optional
The two-tailed confidence level used to calculate the t-statistic used
in calculation of the error bands.
Ignored if not computing error bands.
predict_kwargs : :any:`dict`, optional
Extra kwargs to pass to the baseline_model.predict and
reporting_model.predict methods.
Returns
-------
results : :any:`pandas.DataFrame`
DataFrame with modeled savings, indexed with the result_index. Will
include the following columns:
- ``modeled_baseline_usage``
- ``modeled_reporting_usage``
- ``modeled_savings``
If `with_disaggregated` is set to True, the following columns will also
be in the results DataFrame:
- ``modeled_baseline_base_load``
- ``modeled_baseline_cooling_load``
- ``modeled_baseline_heating_load``
- ``modeled_reporting_base_load``
- ``modeled_reporting_cooling_load``
- ``modeled_reporting_heating_load``
- ``modeled_base_load_savings``
- ``modeled_cooling_load_savings``
- ``modeled_heating_load_savings``
error_bands : :any:`dict`, optional
If baseline_model and reporting_model are instances of
CalTRACKUsagePerDayModelResults, will also return a dictionary of
FSU and error bands for the aggregated energy savings over the
normal year period. | [
"Compute",
"modeled",
"savings",
"i",
".",
"e",
".",
"savings",
"in",
"which",
"baseline",
"and",
"reporting",
"usage",
"values",
"are",
"based",
"on",
"models",
".",
"This",
"is",
"appropriate",
"for",
"annualizing",
"or",
"weather",
"normalizing",
"models",
... | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/derivatives.py#L381-L535 | train | 199,929 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | _caltrack_predict_design_matrix | def _caltrack_predict_design_matrix(
model_type,
model_params,
data,
disaggregated=False,
input_averages=False,
output_averages=False,
):
""" An internal CalTRACK predict method for use with a design matrix of the form
used in model fitting.
Given a set model type, parameters, and daily temperatures, return model
predictions.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
data : :any:`pandas.DataFrame`
Data over which to predict. Assumed to be like the format of the data used
for fitting, although it need only have the columns. If not giving data
with a `pandas.DatetimeIndex` it must have the column `n_days`,
representing the number of days per prediction period (otherwise
inferred from DatetimeIndex).
disaggregated : :any:`bool`, optional
If True, return results as a :any:`pandas.DataFrame` with columns
``'base_load'``, ``'heating_load'``, and ``'cooling_load'``
input_averages : :any:`bool`, optional
If HDD and CDD columns expressed as period totals, select False. If HDD
and CDD columns expressed as period averages, select True. If prediction
period is daily, results should be the same either way. Matters for billing.
output_averages : :any:`bool`, optional
If True, prediction returned as averages (not totals). If False, returned
as totals.
Returns
-------
prediction : :any:`pandas.Series` or :any:`pandas.DataFrame`
Returns results as series unless ``disaggregated=True``.
"""
zeros = pd.Series(0, index=data.index)
ones = zeros + 1
if isinstance(data.index, pd.DatetimeIndex):
days_per_period = day_counts(data.index)
else:
try:
days_per_period = data["n_days"]
except KeyError:
raise ValueError("Data needs DatetimeIndex or an n_days column.")
# TODO(philngo): handle different degree day methods and hourly temperatures
if model_type in ["intercept_only", "hdd_only", "cdd_only", "cdd_hdd"]:
intercept = _get_parameter_or_raise(model_type, model_params, "intercept")
if output_averages == False:
base_load = intercept * days_per_period
else:
base_load = intercept * ones
elif model_type is None:
raise ValueError("Model not valid for prediction: model_type=None")
else:
raise UnrecognizedModelTypeError(
"invalid caltrack model type: {}".format(model_type)
)
if model_type in ["hdd_only", "cdd_hdd"]:
beta_hdd = _get_parameter_or_raise(model_type, model_params, "beta_hdd")
heating_balance_point = _get_parameter_or_raise(
model_type, model_params, "heating_balance_point"
)
hdd_column_name = "hdd_%s" % heating_balance_point
hdd = data[hdd_column_name]
if input_averages == True and output_averages == False:
heating_load = hdd * beta_hdd * days_per_period
elif input_averages == True and output_averages == True:
heating_load = hdd * beta_hdd
elif input_averages == False and output_averages == False:
heating_load = hdd * beta_hdd
else:
heating_load = hdd * beta_hdd / days_per_period
else:
heating_load = zeros
if model_type in ["cdd_only", "cdd_hdd"]:
beta_cdd = _get_parameter_or_raise(model_type, model_params, "beta_cdd")
cooling_balance_point = _get_parameter_or_raise(
model_type, model_params, "cooling_balance_point"
)
cdd_column_name = "cdd_%s" % cooling_balance_point
cdd = data[cdd_column_name]
if input_averages == True and output_averages == False:
cooling_load = cdd * beta_cdd * days_per_period
elif input_averages == True and output_averages == True:
cooling_load = cdd * beta_cdd
elif input_averages == False and output_averages == False:
cooling_load = cdd * beta_cdd
else:
cooling_load = cdd * beta_cdd / days_per_period
else:
cooling_load = zeros
# If any of the rows of input data contained NaNs, restore the NaNs
# Note: If data contains ANY NaNs at all, this declares the entire row a NaN.
# TODO(philngo): Consider making this more nuanced.
def _restore_nans(load):
load = load[data.sum(axis=1, skipna=False).notnull()].reindex(data.index)
return load
base_load = _restore_nans(base_load)
heating_load = _restore_nans(heating_load)
cooling_load = _restore_nans(cooling_load)
if disaggregated:
return pd.DataFrame(
{
"base_load": base_load,
"heating_load": heating_load,
"cooling_load": cooling_load,
}
)
else:
return base_load + heating_load + cooling_load | python | def _caltrack_predict_design_matrix(
model_type,
model_params,
data,
disaggregated=False,
input_averages=False,
output_averages=False,
):
""" An internal CalTRACK predict method for use with a design matrix of the form
used in model fitting.
Given a set model type, parameters, and daily temperatures, return model
predictions.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
data : :any:`pandas.DataFrame`
Data over which to predict. Assumed to be like the format of the data used
for fitting, although it need only have the columns. If not giving data
with a `pandas.DatetimeIndex` it must have the column `n_days`,
representing the number of days per prediction period (otherwise
inferred from DatetimeIndex).
disaggregated : :any:`bool`, optional
If True, return results as a :any:`pandas.DataFrame` with columns
``'base_load'``, ``'heating_load'``, and ``'cooling_load'``
input_averages : :any:`bool`, optional
If HDD and CDD columns expressed as period totals, select False. If HDD
and CDD columns expressed as period averages, select True. If prediction
period is daily, results should be the same either way. Matters for billing.
output_averages : :any:`bool`, optional
If True, prediction returned as averages (not totals). If False, returned
as totals.
Returns
-------
prediction : :any:`pandas.Series` or :any:`pandas.DataFrame`
Returns results as series unless ``disaggregated=True``.
"""
zeros = pd.Series(0, index=data.index)
ones = zeros + 1
if isinstance(data.index, pd.DatetimeIndex):
days_per_period = day_counts(data.index)
else:
try:
days_per_period = data["n_days"]
except KeyError:
raise ValueError("Data needs DatetimeIndex or an n_days column.")
# TODO(philngo): handle different degree day methods and hourly temperatures
if model_type in ["intercept_only", "hdd_only", "cdd_only", "cdd_hdd"]:
intercept = _get_parameter_or_raise(model_type, model_params, "intercept")
if output_averages == False:
base_load = intercept * days_per_period
else:
base_load = intercept * ones
elif model_type is None:
raise ValueError("Model not valid for prediction: model_type=None")
else:
raise UnrecognizedModelTypeError(
"invalid caltrack model type: {}".format(model_type)
)
if model_type in ["hdd_only", "cdd_hdd"]:
beta_hdd = _get_parameter_or_raise(model_type, model_params, "beta_hdd")
heating_balance_point = _get_parameter_or_raise(
model_type, model_params, "heating_balance_point"
)
hdd_column_name = "hdd_%s" % heating_balance_point
hdd = data[hdd_column_name]
if input_averages == True and output_averages == False:
heating_load = hdd * beta_hdd * days_per_period
elif input_averages == True and output_averages == True:
heating_load = hdd * beta_hdd
elif input_averages == False and output_averages == False:
heating_load = hdd * beta_hdd
else:
heating_load = hdd * beta_hdd / days_per_period
else:
heating_load = zeros
if model_type in ["cdd_only", "cdd_hdd"]:
beta_cdd = _get_parameter_or_raise(model_type, model_params, "beta_cdd")
cooling_balance_point = _get_parameter_or_raise(
model_type, model_params, "cooling_balance_point"
)
cdd_column_name = "cdd_%s" % cooling_balance_point
cdd = data[cdd_column_name]
if input_averages == True and output_averages == False:
cooling_load = cdd * beta_cdd * days_per_period
elif input_averages == True and output_averages == True:
cooling_load = cdd * beta_cdd
elif input_averages == False and output_averages == False:
cooling_load = cdd * beta_cdd
else:
cooling_load = cdd * beta_cdd / days_per_period
else:
cooling_load = zeros
# If any of the rows of input data contained NaNs, restore the NaNs
# Note: If data contains ANY NaNs at all, this declares the entire row a NaN.
# TODO(philngo): Consider making this more nuanced.
def _restore_nans(load):
load = load[data.sum(axis=1, skipna=False).notnull()].reindex(data.index)
return load
base_load = _restore_nans(base_load)
heating_load = _restore_nans(heating_load)
cooling_load = _restore_nans(cooling_load)
if disaggregated:
return pd.DataFrame(
{
"base_load": base_load,
"heating_load": heating_load,
"cooling_load": cooling_load,
}
)
else:
return base_load + heating_load + cooling_load | [
"def",
"_caltrack_predict_design_matrix",
"(",
"model_type",
",",
"model_params",
",",
"data",
",",
"disaggregated",
"=",
"False",
",",
"input_averages",
"=",
"False",
",",
"output_averages",
"=",
"False",
",",
")",
":",
"zeros",
"=",
"pd",
".",
"Series",
"(",... | An internal CalTRACK predict method for use with a design matrix of the form
used in model fitting.
Given a set model type, parameters, and daily temperatures, return model
predictions.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
data : :any:`pandas.DataFrame`
Data over which to predict. Assumed to be like the format of the data used
for fitting, although it need only have the columns. If not giving data
with a `pandas.DatetimeIndex` it must have the column `n_days`,
representing the number of days per prediction period (otherwise
inferred from DatetimeIndex).
disaggregated : :any:`bool`, optional
If True, return results as a :any:`pandas.DataFrame` with columns
``'base_load'``, ``'heating_load'``, and ``'cooling_load'``
input_averages : :any:`bool`, optional
If HDD and CDD columns expressed as period totals, select False. If HDD
and CDD columns expressed as period averages, select True. If prediction
period is daily, results should be the same either way. Matters for billing.
output_averages : :any:`bool`, optional
If True, prediction returned as averages (not totals). If False, returned
as totals.
Returns
-------
prediction : :any:`pandas.Series` or :any:`pandas.DataFrame`
Returns results as series unless ``disaggregated=True``. | [
"An",
"internal",
"CalTRACK",
"predict",
"method",
"for",
"use",
"with",
"a",
"design",
"matrix",
"of",
"the",
"form",
"used",
"in",
"model",
"fitting",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L453-L577 | train | 199,930 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | caltrack_usage_per_day_predict | def caltrack_usage_per_day_predict(
model_type,
model_params,
prediction_index,
temperature_data,
degree_day_method="daily",
with_disaggregated=False,
with_design_matrix=False,
):
""" CalTRACK predict method.
Given a model type, parameters, hourly temperatures, a
:any:`pandas.DatetimeIndex` index over which to predict meter usage,
return model predictions as totals for the period (so billing period totals,
daily totals, etc.). Optionally include the computed design matrix or
disaggregated usage in the output dataframe.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
temperature_data : :any:`pandas.DataFrame`
Hourly temperature data to use for prediction. Time period should match
the ``prediction_index`` argument.
prediction_index : :any:`pandas.DatetimeIndex`
Time period over which to predict.
with_disaggregated : :any:`bool`, optional
If True, return results as a :any:`pandas.DataFrame` with columns
``'base_load'``, ``'heating_load'``, and ``'cooling_load'``.
with_design_matrix : :any:`bool`, optional
If True, return results as a :any:`pandas.DataFrame` with columns
``'n_days'``, ``'n_days_dropped'``, ``n_days_kept``, and
``temperature_mean``.
Returns
-------
prediction : :any:`pandas.DataFrame`
Columns are as follows:
- ``predicted_usage``: Predicted usage values computed to match
``prediction_index``.
- ``base_load``: modeled base load (only for ``with_disaggregated=True``).
- ``cooling_load``: modeled cooling load (only for ``with_disaggregated=True``).
- ``heating_load``: modeled heating load (only for ``with_disaggregated=True``).
- ``n_days``: number of days in period (only for ``with_design_matrix=True``).
- ``n_days_dropped``: number of days dropped because of insufficient
data (only for ``with_design_matrix=True``).
- ``n_days_kept``: number of days kept because of sufficient data
(only for ``with_design_matrix=True``).
- ``temperature_mean``: mean temperature during given period.
(only for ``with_design_matrix=True``).
predict_warnings: :any: list of EEMeterWarning if any.
"""
if model_params is None:
raise MissingModelParameterError("model_params is None.")
predict_warnings = []
cooling_balance_points = []
heating_balance_points = []
if "cooling_balance_point" in model_params:
cooling_balance_points.append(model_params["cooling_balance_point"])
if "heating_balance_point" in model_params:
heating_balance_points.append(model_params["heating_balance_point"])
design_matrix = compute_temperature_features(
prediction_index,
temperature_data,
heating_balance_points=heating_balance_points,
cooling_balance_points=cooling_balance_points,
degree_day_method=degree_day_method,
use_mean_daily_values=False,
)
if design_matrix.dropna().empty:
if with_disaggregated:
empty_columns = {
"predicted_usage": [],
"base_load": [],
"heating_load": [],
"cooling_load": [],
}
else:
empty_columns = {"predicted_usage": []}
predict_warnings.append(
EEMeterWarning(
qualified_name=("eemeter.caltrack.compute_temperature_features"),
description=(
"Design matrix empty, compute_temperature_features failed"
),
data={"temperature_data": temperature_data},
)
)
return ModelPrediction(
pd.DataFrame(empty_columns),
design_matrix=pd.DataFrame(),
warnings=predict_warnings,
)
if degree_day_method == "daily":
design_matrix["n_days"] = (
design_matrix.n_days_kept + design_matrix.n_days_dropped
)
else:
design_matrix["n_days"] = (
design_matrix.n_hours_kept + design_matrix.n_hours_dropped
) / 24
results = _caltrack_predict_design_matrix(
model_type,
model_params,
design_matrix,
input_averages=False,
output_averages=False,
).to_frame("predicted_usage")
if with_disaggregated:
disaggregated = _caltrack_predict_design_matrix(
model_type,
model_params,
design_matrix,
disaggregated=True,
input_averages=False,
output_averages=False,
)
results = results.join(disaggregated)
if with_design_matrix:
results = results.join(design_matrix)
return ModelPrediction(
result=results, design_matrix=design_matrix, warnings=predict_warnings
) | python | def caltrack_usage_per_day_predict(
model_type,
model_params,
prediction_index,
temperature_data,
degree_day_method="daily",
with_disaggregated=False,
with_design_matrix=False,
):
""" CalTRACK predict method.
Given a model type, parameters, hourly temperatures, a
:any:`pandas.DatetimeIndex` index over which to predict meter usage,
return model predictions as totals for the period (so billing period totals,
daily totals, etc.). Optionally include the computed design matrix or
disaggregated usage in the output dataframe.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
temperature_data : :any:`pandas.DataFrame`
Hourly temperature data to use for prediction. Time period should match
the ``prediction_index`` argument.
prediction_index : :any:`pandas.DatetimeIndex`
Time period over which to predict.
with_disaggregated : :any:`bool`, optional
If True, return results as a :any:`pandas.DataFrame` with columns
``'base_load'``, ``'heating_load'``, and ``'cooling_load'``.
with_design_matrix : :any:`bool`, optional
If True, return results as a :any:`pandas.DataFrame` with columns
``'n_days'``, ``'n_days_dropped'``, ``n_days_kept``, and
``temperature_mean``.
Returns
-------
prediction : :any:`pandas.DataFrame`
Columns are as follows:
- ``predicted_usage``: Predicted usage values computed to match
``prediction_index``.
- ``base_load``: modeled base load (only for ``with_disaggregated=True``).
- ``cooling_load``: modeled cooling load (only for ``with_disaggregated=True``).
- ``heating_load``: modeled heating load (only for ``with_disaggregated=True``).
- ``n_days``: number of days in period (only for ``with_design_matrix=True``).
- ``n_days_dropped``: number of days dropped because of insufficient
data (only for ``with_design_matrix=True``).
- ``n_days_kept``: number of days kept because of sufficient data
(only for ``with_design_matrix=True``).
- ``temperature_mean``: mean temperature during given period.
(only for ``with_design_matrix=True``).
predict_warnings: :any: list of EEMeterWarning if any.
"""
if model_params is None:
raise MissingModelParameterError("model_params is None.")
predict_warnings = []
cooling_balance_points = []
heating_balance_points = []
if "cooling_balance_point" in model_params:
cooling_balance_points.append(model_params["cooling_balance_point"])
if "heating_balance_point" in model_params:
heating_balance_points.append(model_params["heating_balance_point"])
design_matrix = compute_temperature_features(
prediction_index,
temperature_data,
heating_balance_points=heating_balance_points,
cooling_balance_points=cooling_balance_points,
degree_day_method=degree_day_method,
use_mean_daily_values=False,
)
if design_matrix.dropna().empty:
if with_disaggregated:
empty_columns = {
"predicted_usage": [],
"base_load": [],
"heating_load": [],
"cooling_load": [],
}
else:
empty_columns = {"predicted_usage": []}
predict_warnings.append(
EEMeterWarning(
qualified_name=("eemeter.caltrack.compute_temperature_features"),
description=(
"Design matrix empty, compute_temperature_features failed"
),
data={"temperature_data": temperature_data},
)
)
return ModelPrediction(
pd.DataFrame(empty_columns),
design_matrix=pd.DataFrame(),
warnings=predict_warnings,
)
if degree_day_method == "daily":
design_matrix["n_days"] = (
design_matrix.n_days_kept + design_matrix.n_days_dropped
)
else:
design_matrix["n_days"] = (
design_matrix.n_hours_kept + design_matrix.n_hours_dropped
) / 24
results = _caltrack_predict_design_matrix(
model_type,
model_params,
design_matrix,
input_averages=False,
output_averages=False,
).to_frame("predicted_usage")
if with_disaggregated:
disaggregated = _caltrack_predict_design_matrix(
model_type,
model_params,
design_matrix,
disaggregated=True,
input_averages=False,
output_averages=False,
)
results = results.join(disaggregated)
if with_design_matrix:
results = results.join(design_matrix)
return ModelPrediction(
result=results, design_matrix=design_matrix, warnings=predict_warnings
) | [
"def",
"caltrack_usage_per_day_predict",
"(",
"model_type",
",",
"model_params",
",",
"prediction_index",
",",
"temperature_data",
",",
"degree_day_method",
"=",
"\"daily\"",
",",
"with_disaggregated",
"=",
"False",
",",
"with_design_matrix",
"=",
"False",
",",
")",
"... | CalTRACK predict method.
Given a model type, parameters, hourly temperatures, a
:any:`pandas.DatetimeIndex` index over which to predict meter usage,
return model predictions as totals for the period (so billing period totals,
daily totals, etc.). Optionally include the computed design matrix or
disaggregated usage in the output dataframe.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
temperature_data : :any:`pandas.DataFrame`
Hourly temperature data to use for prediction. Time period should match
the ``prediction_index`` argument.
prediction_index : :any:`pandas.DatetimeIndex`
Time period over which to predict.
with_disaggregated : :any:`bool`, optional
If True, return results as a :any:`pandas.DataFrame` with columns
``'base_load'``, ``'heating_load'``, and ``'cooling_load'``.
with_design_matrix : :any:`bool`, optional
If True, return results as a :any:`pandas.DataFrame` with columns
``'n_days'``, ``'n_days_dropped'``, ``n_days_kept``, and
``temperature_mean``.
Returns
-------
prediction : :any:`pandas.DataFrame`
Columns are as follows:
- ``predicted_usage``: Predicted usage values computed to match
``prediction_index``.
- ``base_load``: modeled base load (only for ``with_disaggregated=True``).
- ``cooling_load``: modeled cooling load (only for ``with_disaggregated=True``).
- ``heating_load``: modeled heating load (only for ``with_disaggregated=True``).
- ``n_days``: number of days in period (only for ``with_design_matrix=True``).
- ``n_days_dropped``: number of days dropped because of insufficient
data (only for ``with_design_matrix=True``).
- ``n_days_kept``: number of days kept because of sufficient data
(only for ``with_design_matrix=True``).
- ``temperature_mean``: mean temperature during given period.
(only for ``with_design_matrix=True``).
predict_warnings: :any: list of EEMeterWarning if any. | [
"CalTRACK",
"predict",
"method",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L580-L715 | train | 199,931 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | get_too_few_non_zero_degree_day_warning | def get_too_few_non_zero_degree_day_warning(
model_type, balance_point, degree_day_type, degree_days, minimum_non_zero
):
""" Return an empty list or a single warning wrapped in a list regarding
non-zero degree days for a set of degree days.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
balance_point : :any:`float`
The balance point in question.
degree_day_type : :any:`str`
The type of degree days (``'cdd'`` or ``'hdd'``).
degree_days : :any:`pandas.Series`
A series of degree day values.
minimum_non_zero : :any:`int`
Minimum allowable number of non-zero degree day values.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
n_non_zero = int((degree_days > 0).sum())
if n_non_zero < minimum_non_zero:
warnings.append(
EEMeterWarning(
qualified_name=(
"eemeter.caltrack_daily.{model_type}.too_few_non_zero_{degree_day_type}".format(
model_type=model_type, degree_day_type=degree_day_type
)
),
description=(
"Number of non-zero daily {degree_day_type} values below accepted minimum."
" Candidate fit not attempted.".format(
degree_day_type=degree_day_type.upper()
)
),
data={
"n_non_zero_{degree_day_type}".format(
degree_day_type=degree_day_type
): n_non_zero,
"minimum_non_zero_{degree_day_type}".format(
degree_day_type=degree_day_type
): minimum_non_zero,
"{degree_day_type}_balance_point".format(
degree_day_type=degree_day_type
): balance_point,
},
)
)
return warnings | python | def get_too_few_non_zero_degree_day_warning(
model_type, balance_point, degree_day_type, degree_days, minimum_non_zero
):
""" Return an empty list or a single warning wrapped in a list regarding
non-zero degree days for a set of degree days.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
balance_point : :any:`float`
The balance point in question.
degree_day_type : :any:`str`
The type of degree days (``'cdd'`` or ``'hdd'``).
degree_days : :any:`pandas.Series`
A series of degree day values.
minimum_non_zero : :any:`int`
Minimum allowable number of non-zero degree day values.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
n_non_zero = int((degree_days > 0).sum())
if n_non_zero < minimum_non_zero:
warnings.append(
EEMeterWarning(
qualified_name=(
"eemeter.caltrack_daily.{model_type}.too_few_non_zero_{degree_day_type}".format(
model_type=model_type, degree_day_type=degree_day_type
)
),
description=(
"Number of non-zero daily {degree_day_type} values below accepted minimum."
" Candidate fit not attempted.".format(
degree_day_type=degree_day_type.upper()
)
),
data={
"n_non_zero_{degree_day_type}".format(
degree_day_type=degree_day_type
): n_non_zero,
"minimum_non_zero_{degree_day_type}".format(
degree_day_type=degree_day_type
): minimum_non_zero,
"{degree_day_type}_balance_point".format(
degree_day_type=degree_day_type
): balance_point,
},
)
)
return warnings | [
"def",
"get_too_few_non_zero_degree_day_warning",
"(",
"model_type",
",",
"balance_point",
",",
"degree_day_type",
",",
"degree_days",
",",
"minimum_non_zero",
")",
":",
"warnings",
"=",
"[",
"]",
"n_non_zero",
"=",
"int",
"(",
"(",
"degree_days",
">",
"0",
")",
... | Return an empty list or a single warning wrapped in a list regarding
non-zero degree days for a set of degree days.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
balance_point : :any:`float`
The balance point in question.
degree_day_type : :any:`str`
The type of degree days (``'cdd'`` or ``'hdd'``).
degree_days : :any:`pandas.Series`
A series of degree day values.
minimum_non_zero : :any:`int`
Minimum allowable number of non-zero degree day values.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning. | [
"Return",
"an",
"empty",
"list",
"or",
"a",
"single",
"warning",
"wrapped",
"in",
"a",
"list",
"regarding",
"non",
"-",
"zero",
"degree",
"days",
"for",
"a",
"set",
"of",
"degree",
"days",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L718-L771 | train | 199,932 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | get_total_degree_day_too_low_warning | def get_total_degree_day_too_low_warning(
model_type,
balance_point,
degree_day_type,
avg_degree_days,
period_days,
minimum_total,
):
""" Return an empty list or a single warning wrapped in a list regarding
the total summed degree day values.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
balance_point : :any:`float`
The balance point in question.
degree_day_type : :any:`str`
The type of degree days (``'cdd'`` or ``'hdd'``).
avg_degree_days : :any:`pandas.Series`
A series of degree day values.
period_days : :any:`pandas.Series`
A series of containing day counts.
minimum_total : :any:`float`
Minimum allowable total sum of degree day values.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
total_degree_days = (avg_degree_days * period_days).sum()
if total_degree_days < minimum_total:
warnings.append(
EEMeterWarning(
qualified_name=(
"eemeter.caltrack_daily.{model_type}.total_{degree_day_type}_too_low".format(
model_type=model_type, degree_day_type=degree_day_type
)
),
description=(
"Total {degree_day_type} below accepted minimum."
" Candidate fit not attempted.".format(
degree_day_type=degree_day_type.upper()
)
),
data={
"total_{degree_day_type}".format(
degree_day_type=degree_day_type
): total_degree_days,
"total_{degree_day_type}_minimum".format(
degree_day_type=degree_day_type
): minimum_total,
"{degree_day_type}_balance_point".format(
degree_day_type=degree_day_type
): balance_point,
},
)
)
return warnings | python | def get_total_degree_day_too_low_warning(
model_type,
balance_point,
degree_day_type,
avg_degree_days,
period_days,
minimum_total,
):
""" Return an empty list or a single warning wrapped in a list regarding
the total summed degree day values.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
balance_point : :any:`float`
The balance point in question.
degree_day_type : :any:`str`
The type of degree days (``'cdd'`` or ``'hdd'``).
avg_degree_days : :any:`pandas.Series`
A series of degree day values.
period_days : :any:`pandas.Series`
A series of containing day counts.
minimum_total : :any:`float`
Minimum allowable total sum of degree day values.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
total_degree_days = (avg_degree_days * period_days).sum()
if total_degree_days < minimum_total:
warnings.append(
EEMeterWarning(
qualified_name=(
"eemeter.caltrack_daily.{model_type}.total_{degree_day_type}_too_low".format(
model_type=model_type, degree_day_type=degree_day_type
)
),
description=(
"Total {degree_day_type} below accepted minimum."
" Candidate fit not attempted.".format(
degree_day_type=degree_day_type.upper()
)
),
data={
"total_{degree_day_type}".format(
degree_day_type=degree_day_type
): total_degree_days,
"total_{degree_day_type}_minimum".format(
degree_day_type=degree_day_type
): minimum_total,
"{degree_day_type}_balance_point".format(
degree_day_type=degree_day_type
): balance_point,
},
)
)
return warnings | [
"def",
"get_total_degree_day_too_low_warning",
"(",
"model_type",
",",
"balance_point",
",",
"degree_day_type",
",",
"avg_degree_days",
",",
"period_days",
",",
"minimum_total",
",",
")",
":",
"warnings",
"=",
"[",
"]",
"total_degree_days",
"=",
"(",
"avg_degree_days"... | Return an empty list or a single warning wrapped in a list regarding
the total summed degree day values.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
balance_point : :any:`float`
The balance point in question.
degree_day_type : :any:`str`
The type of degree days (``'cdd'`` or ``'hdd'``).
avg_degree_days : :any:`pandas.Series`
A series of degree day values.
period_days : :any:`pandas.Series`
A series of containing day counts.
minimum_total : :any:`float`
Minimum allowable total sum of degree day values.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning. | [
"Return",
"an",
"empty",
"list",
"or",
"a",
"single",
"warning",
"wrapped",
"in",
"a",
"list",
"regarding",
"the",
"total",
"summed",
"degree",
"day",
"values",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L774-L835 | train | 199,933 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | get_parameter_negative_warning | def get_parameter_negative_warning(model_type, model_params, parameter):
""" Return an empty list or a single warning wrapped in a list indicating
whether model parameter is negative.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
if model_params.get(parameter, 0) < 0:
warnings.append(
EEMeterWarning(
qualified_name=(
"eemeter.caltrack_daily.{model_type}.{parameter}_negative".format(
model_type=model_type, parameter=parameter
)
),
description=(
"Model fit {parameter} parameter is negative. Candidate model rejected.".format(
parameter=parameter
)
),
data=model_params,
)
)
return warnings | python | def get_parameter_negative_warning(model_type, model_params, parameter):
""" Return an empty list or a single warning wrapped in a list indicating
whether model parameter is negative.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
if model_params.get(parameter, 0) < 0:
warnings.append(
EEMeterWarning(
qualified_name=(
"eemeter.caltrack_daily.{model_type}.{parameter}_negative".format(
model_type=model_type, parameter=parameter
)
),
description=(
"Model fit {parameter} parameter is negative. Candidate model rejected.".format(
parameter=parameter
)
),
data=model_params,
)
)
return warnings | [
"def",
"get_parameter_negative_warning",
"(",
"model_type",
",",
"model_params",
",",
"parameter",
")",
":",
"warnings",
"=",
"[",
"]",
"if",
"model_params",
".",
"get",
"(",
"parameter",
",",
"0",
")",
"<",
"0",
":",
"warnings",
".",
"append",
"(",
"EEMet... | Return an empty list or a single warning wrapped in a list indicating
whether model parameter is negative.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning. | [
"Return",
"an",
"empty",
"list",
"or",
"a",
"single",
"warning",
"wrapped",
"in",
"a",
"list",
"indicating",
"whether",
"model",
"parameter",
"is",
"negative",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L838-L873 | train | 199,934 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | get_parameter_p_value_too_high_warning | def get_parameter_p_value_too_high_warning(
model_type, model_params, parameter, p_value, maximum_p_value
):
""" Return an empty list or a single warning wrapped in a list indicating
whether model parameter p-value is too high.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
p_value : :any:`float`
The p-value of the parameter.
maximum_p_value : :any:`float`
The maximum allowable p-value of the parameter.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
if p_value > maximum_p_value:
data = {
"{}_p_value".format(parameter): p_value,
"{}_maximum_p_value".format(parameter): maximum_p_value,
}
data.update(model_params)
warnings.append(
EEMeterWarning(
qualified_name=(
"eemeter.caltrack_daily.{model_type}.{parameter}_p_value_too_high".format(
model_type=model_type, parameter=parameter
)
),
description=(
"Model fit {parameter} p-value is too high. Candidate model rejected.".format(
parameter=parameter
)
),
data=data,
)
)
return warnings | python | def get_parameter_p_value_too_high_warning(
model_type, model_params, parameter, p_value, maximum_p_value
):
""" Return an empty list or a single warning wrapped in a list indicating
whether model parameter p-value is too high.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
p_value : :any:`float`
The p-value of the parameter.
maximum_p_value : :any:`float`
The maximum allowable p-value of the parameter.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
if p_value > maximum_p_value:
data = {
"{}_p_value".format(parameter): p_value,
"{}_maximum_p_value".format(parameter): maximum_p_value,
}
data.update(model_params)
warnings.append(
EEMeterWarning(
qualified_name=(
"eemeter.caltrack_daily.{model_type}.{parameter}_p_value_too_high".format(
model_type=model_type, parameter=parameter
)
),
description=(
"Model fit {parameter} p-value is too high. Candidate model rejected.".format(
parameter=parameter
)
),
data=data,
)
)
return warnings | [
"def",
"get_parameter_p_value_too_high_warning",
"(",
"model_type",
",",
"model_params",
",",
"parameter",
",",
"p_value",
",",
"maximum_p_value",
")",
":",
"warnings",
"=",
"[",
"]",
"if",
"p_value",
">",
"maximum_p_value",
":",
"data",
"=",
"{",
"\"{}_p_value\""... | Return an empty list or a single warning wrapped in a list indicating
whether model parameter p-value is too high.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
p_value : :any:`float`
The p-value of the parameter.
maximum_p_value : :any:`float`
The maximum allowable p-value of the parameter.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning. | [
"Return",
"an",
"empty",
"list",
"or",
"a",
"single",
"warning",
"wrapped",
"in",
"a",
"list",
"indicating",
"whether",
"model",
"parameter",
"p",
"-",
"value",
"is",
"too",
"high",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L876-L922 | train | 199,935 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | get_fit_failed_candidate_model | def get_fit_failed_candidate_model(model_type, formula):
""" Return a Candidate model that indicates the fitting routine failed.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
formula : :any:`float`
The candidate model formula.
Returns
-------
candidate_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
Candidate model instance with status ``'ERROR'``, and warning with
traceback.
"""
warnings = [
EEMeterWarning(
qualified_name="eemeter.caltrack_daily.{}.model_results".format(model_type),
description=(
"Error encountered in statsmodels.formula.api.ols method. (Empty data?)"
),
data={"traceback": traceback.format_exc()},
)
]
return CalTRACKUsagePerDayCandidateModel(
model_type=model_type, formula=formula, status="ERROR", warnings=warnings
) | python | def get_fit_failed_candidate_model(model_type, formula):
""" Return a Candidate model that indicates the fitting routine failed.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
formula : :any:`float`
The candidate model formula.
Returns
-------
candidate_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
Candidate model instance with status ``'ERROR'``, and warning with
traceback.
"""
warnings = [
EEMeterWarning(
qualified_name="eemeter.caltrack_daily.{}.model_results".format(model_type),
description=(
"Error encountered in statsmodels.formula.api.ols method. (Empty data?)"
),
data={"traceback": traceback.format_exc()},
)
]
return CalTRACKUsagePerDayCandidateModel(
model_type=model_type, formula=formula, status="ERROR", warnings=warnings
) | [
"def",
"get_fit_failed_candidate_model",
"(",
"model_type",
",",
"formula",
")",
":",
"warnings",
"=",
"[",
"EEMeterWarning",
"(",
"qualified_name",
"=",
"\"eemeter.caltrack_daily.{}.model_results\"",
".",
"format",
"(",
"model_type",
")",
",",
"description",
"=",
"("... | Return a Candidate model that indicates the fitting routine failed.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
formula : :any:`float`
The candidate model formula.
Returns
-------
candidate_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
Candidate model instance with status ``'ERROR'``, and warning with
traceback. | [
"Return",
"a",
"Candidate",
"model",
"that",
"indicates",
"the",
"fitting",
"routine",
"failed",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L925-L952 | train | 199,936 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | get_intercept_only_candidate_models | def get_intercept_only_candidate_models(data, weights_col):
""" Return a list of a single candidate intercept-only model.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value``.
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
List containing a single intercept-only candidate model.
"""
model_type = "intercept_only"
formula = "meter_value ~ 1"
if weights_col is None:
weights = 1
else:
weights = data[weights_col]
try:
model = smf.wls(formula=formula, data=data, weights=weights)
except Exception as e:
return [get_fit_failed_candidate_model(model_type, formula)]
result = model.fit()
# CalTrack 3.3.1.3
model_params = {"intercept": result.params["Intercept"]}
model_warnings = []
# CalTrack 3.4.3.2
for parameter in ["intercept"]:
model_warnings.extend(
get_parameter_negative_warning(model_type, model_params, parameter)
)
if len(model_warnings) > 0:
status = "DISQUALIFIED"
else:
status = "QUALIFIED"
return [
CalTRACKUsagePerDayCandidateModel(
model_type=model_type,
formula=formula,
status=status,
warnings=model_warnings,
model_params=model_params,
model=model,
result=result,
r_squared_adj=0,
)
def get_intercept_only_candidate_models(data, weights_col):
    """ Return a list of a single candidate intercept-only model.

    Parameters
    ----------
    data : :any:`pandas.DataFrame`
        A DataFrame containing at least the column ``meter_value``.
        DataFrames of this form can be made using the
        :any:`eemeter.create_caltrack_daily_design_matrix` or
        :any:`eemeter.create_caltrack_billing_design_matrix` methods.
    weights_col : :any:`str` or None
        The name of the column (if any) in ``data`` to use as weights.

    Returns
    -------
    candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
        List containing a single intercept-only candidate model.
    """
    model_type = "intercept_only"
    formula = "meter_value ~ 1"

    # Equal weighting unless an explicit weights column is provided.
    if weights_col is None:
        weights = 1
    else:
        weights = data[weights_col]

    try:
        model = smf.wls(formula=formula, data=data, weights=weights)
    except Exception:  # model construction can fail on empty/degenerate data
        return [get_fit_failed_candidate_model(model_type, formula)]

    result = model.fit()

    # CalTrack 3.3.1.3
    model_params = {"intercept": result.params["Intercept"]}

    model_warnings = []

    # CalTrack 3.4.3.2: a negative intercept disqualifies the model.
    for parameter in ["intercept"]:
        model_warnings.extend(
            get_parameter_negative_warning(model_type, model_params, parameter)
        )

    status = "DISQUALIFIED" if model_warnings else "QUALIFIED"

    return [
        CalTRACKUsagePerDayCandidateModel(
            model_type=model_type,
            formula=formula,
            status=status,
            warnings=model_warnings,
            model_params=model_params,
            model=model,
            result=result,
            r_squared_adj=0,
        )
    ]
"def",
"get_intercept_only_candidate_models",
"(",
"data",
",",
"weights_col",
")",
":",
"model_type",
"=",
"\"intercept_only\"",
"formula",
"=",
"\"meter_value ~ 1\"",
"if",
"weights_col",
"is",
"None",
":",
"weights",
"=",
"1",
"else",
":",
"weights",
"=",
"data... | Return a list of a single candidate intercept-only model.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value``.
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
List containing a single intercept-only candidate model. | [
"Return",
"a",
"list",
"of",
"a",
"single",
"candidate",
"intercept",
"-",
"only",
"model",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L955-L1015 | train | 199,937 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | get_single_cdd_only_candidate_model | def get_single_cdd_only_candidate_model(
data,
minimum_non_zero_cdd,
minimum_total_cdd,
beta_cdd_maximum_p_value,
weights_col,
balance_point,
):
""" Return a single candidate cdd-only model for a particular balance
point.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and
``cdd_<balance_point>``
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_cdd : :any:`int`
Minimum allowable number of non-zero cooling degree day values.
minimum_total_cdd : :any:`float`
Minimum allowable total sum of cooling degree day values.
beta_cdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta cdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
balance_point : :any:`float`
The cooling balance point for this model.
Returns
-------
candidate_model : :any:`CalTRACKUsagePerDayCandidateModel`
A single cdd-only candidate model, with any associated warnings.
"""
model_type = "cdd_only"
cdd_column = "cdd_%s" % balance_point
formula = "meter_value ~ %s" % cdd_column
if weights_col is None:
weights = 1
else:
weights = data[weights_col]
period_days = weights
degree_day_warnings = []
degree_day_warnings.extend(
get_total_degree_day_too_low_warning(
model_type,
balance_point,
"cdd",
data[cdd_column],
period_days,
minimum_total_cdd,
)
)
degree_day_warnings.extend(
get_too_few_non_zero_degree_day_warning(
model_type, balance_point, "cdd", data[cdd_column], minimum_non_zero_cdd
)
)
if len(degree_day_warnings) > 0:
return CalTRACKUsagePerDayCandidateModel(
model_type=model_type,
formula=formula,
status="NOT ATTEMPTED",
warnings=degree_day_warnings,
)
try:
model = smf.wls(formula=formula, data=data, weights=weights)
except Exception as e:
return get_fit_failed_candidate_model(model_type, formula)
result = model.fit()
r_squared_adj = result.rsquared_adj
beta_cdd_p_value = result.pvalues[cdd_column]
# CalTrack 3.3.1.3
model_params = {
"intercept": result.params["Intercept"],
"beta_cdd": result.params[cdd_column],
"cooling_balance_point": balance_point,
}
model_warnings = []
# CalTrack 3.4.3.2
for parameter in ["intercept", "beta_cdd"]:
model_warnings.extend(
get_parameter_negative_warning(model_type, model_params, parameter)
)
model_warnings.extend(
get_parameter_p_value_too_high_warning(
model_type,
model_params,
parameter,
beta_cdd_p_value,
beta_cdd_maximum_p_value,
)
)
if len(model_warnings) > 0:
status = "DISQUALIFIED"
else:
status = "QUALIFIED"
return CalTRACKUsagePerDayCandidateModel(
model_type=model_type,
formula=formula,
status=status,
warnings=model_warnings,
model_params=model_params,
model=model,
result=result,
r_squared_adj=r_squared_adj,
def get_single_cdd_only_candidate_model(
    data,
    minimum_non_zero_cdd,
    minimum_total_cdd,
    beta_cdd_maximum_p_value,
    weights_col,
    balance_point,
):
    """ Return a single candidate cdd-only model for a particular balance
    point.

    Parameters
    ----------
    data : :any:`pandas.DataFrame`
        A DataFrame containing at least the column ``meter_value`` and
        ``cdd_<balance_point>``.
        DataFrames of this form can be made using the
        :any:`eemeter.create_caltrack_daily_design_matrix` or
        :any:`eemeter.create_caltrack_billing_design_matrix` methods.
    minimum_non_zero_cdd : :any:`int`
        Minimum allowable number of non-zero cooling degree day values.
    minimum_total_cdd : :any:`float`
        Minimum allowable total sum of cooling degree day values.
    beta_cdd_maximum_p_value : :any:`float`
        The maximum allowable p-value of the beta cdd parameter.
    weights_col : :any:`str` or None
        The name of the column (if any) in ``data`` to use as weights.
    balance_point : :any:`float`
        The cooling balance point for this model.

    Returns
    -------
    candidate_model : :any:`CalTRACKUsagePerDayCandidateModel`
        A single cdd-only candidate model, with any associated warnings.
    """
    model_type = "cdd_only"
    cdd_column = "cdd_%s" % balance_point
    formula = "meter_value ~ %s" % cdd_column

    if weights_col is None:
        weights = 1
    else:
        weights = data[weights_col]

    # NOTE(review): period length is taken from the weights column (daily
    # weights == days per period); with unit weights this degenerates to 1.
    period_days = weights

    # CalTrack degree-day sufficiency checks; if any fail, the fit is not
    # attempted at all.
    degree_day_warnings = []
    degree_day_warnings.extend(
        get_total_degree_day_too_low_warning(
            model_type,
            balance_point,
            "cdd",
            data[cdd_column],
            period_days,
            minimum_total_cdd,
        )
    )
    degree_day_warnings.extend(
        get_too_few_non_zero_degree_day_warning(
            model_type, balance_point, "cdd", data[cdd_column], minimum_non_zero_cdd
        )
    )

    if len(degree_day_warnings) > 0:
        return CalTRACKUsagePerDayCandidateModel(
            model_type=model_type,
            formula=formula,
            status="NOT ATTEMPTED",
            warnings=degree_day_warnings,
        )

    try:
        model = smf.wls(formula=formula, data=data, weights=weights)
    except Exception:  # model construction can fail on empty/degenerate data
        return get_fit_failed_candidate_model(model_type, formula)

    result = model.fit()
    r_squared_adj = result.rsquared_adj
    beta_cdd_p_value = result.pvalues[cdd_column]

    # CalTrack 3.3.1.3
    model_params = {
        "intercept": result.params["Intercept"],
        "beta_cdd": result.params[cdd_column],
        "cooling_balance_point": balance_point,
    }

    model_warnings = []

    # CalTrack 3.4.3.2
    for parameter in ["intercept", "beta_cdd"]:
        model_warnings.extend(
            get_parameter_negative_warning(model_type, model_params, parameter)
        )
        model_warnings.extend(
            get_parameter_p_value_too_high_warning(
                model_type,
                model_params,
                parameter,
                beta_cdd_p_value,
                beta_cdd_maximum_p_value,
            )
        )

    status = "DISQUALIFIED" if model_warnings else "QUALIFIED"

    return CalTRACKUsagePerDayCandidateModel(
        model_type=model_type,
        formula=formula,
        status=status,
        warnings=model_warnings,
        model_params=model_params,
        model=model,
        result=result,
        r_squared_adj=r_squared_adj,
    )
"def",
"get_single_cdd_only_candidate_model",
"(",
"data",
",",
"minimum_non_zero_cdd",
",",
"minimum_total_cdd",
",",
"beta_cdd_maximum_p_value",
",",
"weights_col",
",",
"balance_point",
",",
")",
":",
"model_type",
"=",
"\"cdd_only\"",
"cdd_column",
"=",
"\"cdd_%s\"",
... | Return a single candidate cdd-only model for a particular balance
point.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and
``cdd_<balance_point>``
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_cdd : :any:`int`
Minimum allowable number of non-zero cooling degree day values.
minimum_total_cdd : :any:`float`
Minimum allowable total sum of cooling degree day values.
beta_cdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta cdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
balance_point : :any:`float`
The cooling balance point for this model.
Returns
-------
candidate_model : :any:`CalTRACKUsagePerDayCandidateModel`
A single cdd-only candidate model, with any associated warnings. | [
"Return",
"a",
"single",
"candidate",
"cdd",
"-",
"only",
"model",
"for",
"a",
"particular",
"balance",
"point",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L1018-L1136 | train | 199,938 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | get_cdd_only_candidate_models | def get_cdd_only_candidate_models(
data, minimum_non_zero_cdd, minimum_total_cdd, beta_cdd_maximum_p_value, weights_col
):
""" Return a list of all possible candidate cdd-only models.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and 1 to n
columns with names of the form ``cdd_<balance_point>``. All columns
with names of this form will be used to fit a candidate model.
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_cdd : :any:`int`
Minimum allowable number of non-zero cooling degree day values.
minimum_total_cdd : :any:`float`
Minimum allowable total sum of cooling degree day values.
beta_cdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta cdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
A list of cdd-only candidate models, with any associated warnings.
"""
balance_points = [int(col[4:]) for col in data.columns if col.startswith("cdd")]
candidate_models = [
get_single_cdd_only_candidate_model(
data,
minimum_non_zero_cdd,
minimum_total_cdd,
beta_cdd_maximum_p_value,
weights_col,
balance_point,
)
for balance_point in balance_points
]
def get_cdd_only_candidate_models(
    data, minimum_non_zero_cdd, minimum_total_cdd, beta_cdd_maximum_p_value, weights_col
):
    """ Return a list of all possible candidate cdd-only models.

    Parameters
    ----------
    data : :any:`pandas.DataFrame`
        A DataFrame containing at least the column ``meter_value`` and 1 to n
        columns with names of the form ``cdd_<balance_point>``. All columns
        with names of this form will be used to fit a candidate model.
        DataFrames of this form can be made using the
        :any:`eemeter.create_caltrack_daily_design_matrix` or
        :any:`eemeter.create_caltrack_billing_design_matrix` methods.
    minimum_non_zero_cdd : :any:`int`
        Minimum allowable number of non-zero cooling degree day values.
    minimum_total_cdd : :any:`float`
        Minimum allowable total sum of cooling degree day values.
    beta_cdd_maximum_p_value : :any:`float`
        The maximum allowable p-value of the beta cdd parameter.
    weights_col : :any:`str` or None
        The name of the column (if any) in ``data`` to use as weights.

    Returns
    -------
    candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
        A list of cdd-only candidate models, with any associated warnings.
    """
    # One candidate model per balance point implied by a "cdd_<bp>" column.
    models = []
    for column in data.columns:
        if not column.startswith("cdd"):
            continue
        balance_point = int(column[4:])
        models.append(
            get_single_cdd_only_candidate_model(
                data,
                minimum_non_zero_cdd,
                minimum_total_cdd,
                beta_cdd_maximum_p_value,
                weights_col,
                balance_point,
            )
        )
    return models
"def",
"get_cdd_only_candidate_models",
"(",
"data",
",",
"minimum_non_zero_cdd",
",",
"minimum_total_cdd",
",",
"beta_cdd_maximum_p_value",
",",
"weights_col",
")",
":",
"balance_points",
"=",
"[",
"int",
"(",
"col",
"[",
"4",
":",
"]",
")",
"for",
"col",
"in",... | Return a list of all possible candidate cdd-only models.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and 1 to n
columns with names of the form ``cdd_<balance_point>``. All columns
with names of this form will be used to fit a candidate model.
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_cdd : :any:`int`
Minimum allowable number of non-zero cooling degree day values.
minimum_total_cdd : :any:`float`
Minimum allowable total sum of cooling degree day values.
beta_cdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta cdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
A list of cdd-only candidate models, with any associated warnings. | [
"Return",
"a",
"list",
"of",
"all",
"possible",
"candidate",
"cdd",
"-",
"only",
"models",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L1139-L1179 | train | 199,939 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | get_single_hdd_only_candidate_model | def get_single_hdd_only_candidate_model(
data,
minimum_non_zero_hdd,
minimum_total_hdd,
beta_hdd_maximum_p_value,
weights_col,
balance_point,
):
""" Return a single candidate hdd-only model for a particular balance
point.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and
``hdd_<balance_point>``
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_hdd : :any:`int`
Minimum allowable number of non-zero heating degree day values.
minimum_total_hdd : :any:`float`
Minimum allowable total sum of heating degree day values.
beta_hdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta hdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
balance_point : :any:`float`
The heating balance point for this model.
Returns
-------
candidate_model : :any:`CalTRACKUsagePerDayCandidateModel`
A single hdd-only candidate model, with any associated warnings.
"""
model_type = "hdd_only"
hdd_column = "hdd_%s" % balance_point
formula = "meter_value ~ %s" % hdd_column
if weights_col is None:
weights = 1
else:
weights = data[weights_col]
period_days = weights
degree_day_warnings = []
degree_day_warnings.extend(
get_total_degree_day_too_low_warning(
model_type,
balance_point,
"hdd",
data[hdd_column],
period_days,
minimum_total_hdd,
)
)
degree_day_warnings.extend(
get_too_few_non_zero_degree_day_warning(
model_type, balance_point, "hdd", data[hdd_column], minimum_non_zero_hdd
)
)
if len(degree_day_warnings) > 0:
return CalTRACKUsagePerDayCandidateModel(
model_type=model_type,
formula=formula,
status="NOT ATTEMPTED",
warnings=degree_day_warnings,
)
try:
model = smf.wls(formula=formula, data=data, weights=weights)
except Exception as e:
return get_fit_failed_candidate_model(model_type, formula)
result = model.fit()
r_squared_adj = result.rsquared_adj
beta_hdd_p_value = result.pvalues[hdd_column]
# CalTrack 3.3.1.3
model_params = {
"intercept": result.params["Intercept"],
"beta_hdd": result.params[hdd_column],
"heating_balance_point": balance_point,
}
model_warnings = []
# CalTrack 3.4.3.2
for parameter in ["intercept", "beta_hdd"]:
model_warnings.extend(
get_parameter_negative_warning(model_type, model_params, parameter)
)
model_warnings.extend(
get_parameter_p_value_too_high_warning(
model_type,
model_params,
parameter,
beta_hdd_p_value,
beta_hdd_maximum_p_value,
)
)
if len(model_warnings) > 0:
status = "DISQUALIFIED"
else:
status = "QUALIFIED"
return CalTRACKUsagePerDayCandidateModel(
model_type=model_type,
formula=formula,
status=status,
warnings=model_warnings,
model_params=model_params,
model=model,
result=result,
r_squared_adj=r_squared_adj,
def get_single_hdd_only_candidate_model(
    data,
    minimum_non_zero_hdd,
    minimum_total_hdd,
    beta_hdd_maximum_p_value,
    weights_col,
    balance_point,
):
    """ Return a single candidate hdd-only model for a particular balance
    point.

    Parameters
    ----------
    data : :any:`pandas.DataFrame`
        A DataFrame containing at least the column ``meter_value`` and
        ``hdd_<balance_point>``.
        DataFrames of this form can be made using the
        :any:`eemeter.create_caltrack_daily_design_matrix` or
        :any:`eemeter.create_caltrack_billing_design_matrix` methods.
    minimum_non_zero_hdd : :any:`int`
        Minimum allowable number of non-zero heating degree day values.
    minimum_total_hdd : :any:`float`
        Minimum allowable total sum of heating degree day values.
    beta_hdd_maximum_p_value : :any:`float`
        The maximum allowable p-value of the beta hdd parameter.
    weights_col : :any:`str` or None
        The name of the column (if any) in ``data`` to use as weights.
    balance_point : :any:`float`
        The heating balance point for this model.

    Returns
    -------
    candidate_model : :any:`CalTRACKUsagePerDayCandidateModel`
        A single hdd-only candidate model, with any associated warnings.
    """
    model_type = "hdd_only"
    hdd_column = "hdd_%s" % balance_point
    formula = "meter_value ~ %s" % hdd_column

    if weights_col is None:
        weights = 1
    else:
        weights = data[weights_col]

    # NOTE(review): period length is taken from the weights column (daily
    # weights == days per period); with unit weights this degenerates to 1.
    period_days = weights

    # CalTrack degree-day sufficiency checks; if any fail, the fit is not
    # attempted at all.
    degree_day_warnings = []
    degree_day_warnings.extend(
        get_total_degree_day_too_low_warning(
            model_type,
            balance_point,
            "hdd",
            data[hdd_column],
            period_days,
            minimum_total_hdd,
        )
    )
    degree_day_warnings.extend(
        get_too_few_non_zero_degree_day_warning(
            model_type, balance_point, "hdd", data[hdd_column], minimum_non_zero_hdd
        )
    )

    if len(degree_day_warnings) > 0:
        return CalTRACKUsagePerDayCandidateModel(
            model_type=model_type,
            formula=formula,
            status="NOT ATTEMPTED",
            warnings=degree_day_warnings,
        )

    try:
        model = smf.wls(formula=formula, data=data, weights=weights)
    except Exception:  # model construction can fail on empty/degenerate data
        return get_fit_failed_candidate_model(model_type, formula)

    result = model.fit()
    r_squared_adj = result.rsquared_adj
    beta_hdd_p_value = result.pvalues[hdd_column]

    # CalTrack 3.3.1.3
    model_params = {
        "intercept": result.params["Intercept"],
        "beta_hdd": result.params[hdd_column],
        "heating_balance_point": balance_point,
    }

    model_warnings = []

    # CalTrack 3.4.3.2
    for parameter in ["intercept", "beta_hdd"]:
        model_warnings.extend(
            get_parameter_negative_warning(model_type, model_params, parameter)
        )
        model_warnings.extend(
            get_parameter_p_value_too_high_warning(
                model_type,
                model_params,
                parameter,
                beta_hdd_p_value,
                beta_hdd_maximum_p_value,
            )
        )

    status = "DISQUALIFIED" if model_warnings else "QUALIFIED"

    return CalTRACKUsagePerDayCandidateModel(
        model_type=model_type,
        formula=formula,
        status=status,
        warnings=model_warnings,
        model_params=model_params,
        model=model,
        result=result,
        r_squared_adj=r_squared_adj,
    )
"def",
"get_single_hdd_only_candidate_model",
"(",
"data",
",",
"minimum_non_zero_hdd",
",",
"minimum_total_hdd",
",",
"beta_hdd_maximum_p_value",
",",
"weights_col",
",",
"balance_point",
",",
")",
":",
"model_type",
"=",
"\"hdd_only\"",
"hdd_column",
"=",
"\"hdd_%s\"",
... | Return a single candidate hdd-only model for a particular balance
point.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and
``hdd_<balance_point>``
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_hdd : :any:`int`
Minimum allowable number of non-zero heating degree day values.
minimum_total_hdd : :any:`float`
Minimum allowable total sum of heating degree day values.
beta_hdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta hdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
balance_point : :any:`float`
The heating balance point for this model.
Returns
-------
candidate_model : :any:`CalTRACKUsagePerDayCandidateModel`
A single hdd-only candidate model, with any associated warnings. | [
"Return",
"a",
"single",
"candidate",
"hdd",
"-",
"only",
"model",
"for",
"a",
"particular",
"balance",
"point",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L1182-L1300 | train | 199,940 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | get_single_cdd_hdd_candidate_model | def get_single_cdd_hdd_candidate_model(
data,
minimum_non_zero_cdd,
minimum_non_zero_hdd,
minimum_total_cdd,
minimum_total_hdd,
beta_cdd_maximum_p_value,
beta_hdd_maximum_p_value,
weights_col,
cooling_balance_point,
heating_balance_point,
):
""" Return and fit a single candidate cdd_hdd model for a particular selection
of cooling balance point and heating balance point
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and
``hdd_<heating_balance_point>`` and ``cdd_<cooling_balance_point>``
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_cdd : :any:`int`
Minimum allowable number of non-zero cooling degree day values.
minimum_non_zero_hdd : :any:`int`
Minimum allowable number of non-zero heating degree day values.
minimum_total_cdd : :any:`float`
Minimum allowable total sum of cooling degree day values.
minimum_total_hdd : :any:`float`
Minimum allowable total sum of heating degree day values.
beta_cdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta cdd parameter.
beta_hdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta hdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
cooling_balance_point : :any:`float`
The cooling balance point for this model.
heating_balance_point : :any:`float`
The heating balance point for this model.
Returns
-------
candidate_model : :any:`CalTRACKUsagePerDayCandidateModel`
A single cdd-hdd candidate model, with any associated warnings.
"""
model_type = "cdd_hdd"
cdd_column = "cdd_%s" % cooling_balance_point
hdd_column = "hdd_%s" % heating_balance_point
formula = "meter_value ~ %s + %s" % (cdd_column, hdd_column)
n_days_column = None
if weights_col is None:
weights = 1
else:
weights = data[weights_col]
period_days = weights
degree_day_warnings = []
degree_day_warnings.extend(
get_total_degree_day_too_low_warning(
model_type,
cooling_balance_point,
"cdd",
data[cdd_column],
period_days,
minimum_total_cdd,
)
)
degree_day_warnings.extend(
get_too_few_non_zero_degree_day_warning(
model_type,
cooling_balance_point,
"cdd",
data[cdd_column],
minimum_non_zero_cdd,
)
)
degree_day_warnings.extend(
get_total_degree_day_too_low_warning(
model_type,
heating_balance_point,
"hdd",
data[hdd_column],
period_days,
minimum_total_hdd,
)
)
degree_day_warnings.extend(
get_too_few_non_zero_degree_day_warning(
model_type,
heating_balance_point,
"hdd",
data[hdd_column],
minimum_non_zero_hdd,
)
)
if len(degree_day_warnings) > 0:
return CalTRACKUsagePerDayCandidateModel(
model_type, formula, "NOT ATTEMPTED", warnings=degree_day_warnings
)
try:
model = smf.wls(formula=formula, data=data, weights=weights)
except Exception as e:
return get_fit_failed_candidate_model(model_type, formula)
result = model.fit()
r_squared_adj = result.rsquared_adj
beta_cdd_p_value = result.pvalues[cdd_column]
beta_hdd_p_value = result.pvalues[hdd_column]
# CalTrack 3.3.1.3
model_params = {
"intercept": result.params["Intercept"],
"beta_cdd": result.params[cdd_column],
"beta_hdd": result.params[hdd_column],
"cooling_balance_point": cooling_balance_point,
"heating_balance_point": heating_balance_point,
}
model_warnings = []
# CalTrack 3.4.3.2
for parameter in ["intercept", "beta_cdd", "beta_hdd"]:
model_warnings.extend(
get_parameter_negative_warning(model_type, model_params, parameter)
)
model_warnings.extend(
get_parameter_p_value_too_high_warning(
model_type,
model_params,
parameter,
beta_cdd_p_value,
beta_cdd_maximum_p_value,
)
)
model_warnings.extend(
get_parameter_p_value_too_high_warning(
model_type,
model_params,
parameter,
beta_hdd_p_value,
beta_hdd_maximum_p_value,
)
)
if len(model_warnings) > 0:
status = "DISQUALIFIED"
else:
status = "QUALIFIED"
return CalTRACKUsagePerDayCandidateModel(
model_type=model_type,
formula=formula,
status=status,
warnings=model_warnings,
model_params=model_params,
model=model,
result=result,
r_squared_adj=r_squared_adj,
def get_single_cdd_hdd_candidate_model(
    data,
    minimum_non_zero_cdd,
    minimum_non_zero_hdd,
    minimum_total_cdd,
    minimum_total_hdd,
    beta_cdd_maximum_p_value,
    beta_hdd_maximum_p_value,
    weights_col,
    cooling_balance_point,
    heating_balance_point,
):
    """ Return and fit a single candidate cdd_hdd model for a particular
    selection of cooling balance point and heating balance point.

    Parameters
    ----------
    data : :any:`pandas.DataFrame`
        A DataFrame containing at least the column ``meter_value`` and
        ``hdd_<heating_balance_point>`` and ``cdd_<cooling_balance_point>``.
        DataFrames of this form can be made using the
        :any:`eemeter.create_caltrack_daily_design_matrix` or
        :any:`eemeter.create_caltrack_billing_design_matrix` methods.
    minimum_non_zero_cdd : :any:`int`
        Minimum allowable number of non-zero cooling degree day values.
    minimum_non_zero_hdd : :any:`int`
        Minimum allowable number of non-zero heating degree day values.
    minimum_total_cdd : :any:`float`
        Minimum allowable total sum of cooling degree day values.
    minimum_total_hdd : :any:`float`
        Minimum allowable total sum of heating degree day values.
    beta_cdd_maximum_p_value : :any:`float`
        The maximum allowable p-value of the beta cdd parameter.
    beta_hdd_maximum_p_value : :any:`float`
        The maximum allowable p-value of the beta hdd parameter.
    weights_col : :any:`str` or None
        The name of the column (if any) in ``data`` to use as weights.
    cooling_balance_point : :any:`float`
        The cooling balance point for this model.
    heating_balance_point : :any:`float`
        The heating balance point for this model.

    Returns
    -------
    candidate_model : :any:`CalTRACKUsagePerDayCandidateModel`
        A single cdd-hdd candidate model, with any associated warnings.
    """
    model_type = "cdd_hdd"
    cdd_column = "cdd_%s" % cooling_balance_point
    hdd_column = "hdd_%s" % heating_balance_point
    formula = "meter_value ~ %s + %s" % (cdd_column, hdd_column)

    if weights_col is None:
        weights = 1
    else:
        weights = data[weights_col]

    # NOTE(review): period length is taken from the weights column (daily
    # weights == days per period); with unit weights this degenerates to 1.
    period_days = weights

    # CalTrack degree-day sufficiency checks for both the cooling and heating
    # terms; if any fail, the fit is not attempted at all.
    degree_day_warnings = []
    degree_day_warnings.extend(
        get_total_degree_day_too_low_warning(
            model_type,
            cooling_balance_point,
            "cdd",
            data[cdd_column],
            period_days,
            minimum_total_cdd,
        )
    )
    degree_day_warnings.extend(
        get_too_few_non_zero_degree_day_warning(
            model_type,
            cooling_balance_point,
            "cdd",
            data[cdd_column],
            minimum_non_zero_cdd,
        )
    )
    degree_day_warnings.extend(
        get_total_degree_day_too_low_warning(
            model_type,
            heating_balance_point,
            "hdd",
            data[hdd_column],
            period_days,
            minimum_total_hdd,
        )
    )
    degree_day_warnings.extend(
        get_too_few_non_zero_degree_day_warning(
            model_type,
            heating_balance_point,
            "hdd",
            data[hdd_column],
            minimum_non_zero_hdd,
        )
    )

    if len(degree_day_warnings) > 0:
        return CalTRACKUsagePerDayCandidateModel(
            model_type, formula, "NOT ATTEMPTED", warnings=degree_day_warnings
        )

    try:
        model = smf.wls(formula=formula, data=data, weights=weights)
    except Exception:  # model construction can fail on empty/degenerate data
        return get_fit_failed_candidate_model(model_type, formula)

    result = model.fit()
    r_squared_adj = result.rsquared_adj
    beta_cdd_p_value = result.pvalues[cdd_column]
    beta_hdd_p_value = result.pvalues[hdd_column]

    # CalTrack 3.3.1.3
    model_params = {
        "intercept": result.params["Intercept"],
        "beta_cdd": result.params[cdd_column],
        "beta_hdd": result.params[hdd_column],
        "cooling_balance_point": cooling_balance_point,
        "heating_balance_point": heating_balance_point,
    }

    model_warnings = []

    # CalTrack 3.4.3.2
    # NOTE(review): both the cdd and hdd p-value checks are applied to every
    # parameter, matching the original behavior — confirm this is intended
    # (the intercept is checked against the beta p-values as well).
    for parameter in ["intercept", "beta_cdd", "beta_hdd"]:
        model_warnings.extend(
            get_parameter_negative_warning(model_type, model_params, parameter)
        )
        model_warnings.extend(
            get_parameter_p_value_too_high_warning(
                model_type,
                model_params,
                parameter,
                beta_cdd_p_value,
                beta_cdd_maximum_p_value,
            )
        )
        model_warnings.extend(
            get_parameter_p_value_too_high_warning(
                model_type,
                model_params,
                parameter,
                beta_hdd_p_value,
                beta_hdd_maximum_p_value,
            )
        )

    status = "DISQUALIFIED" if model_warnings else "QUALIFIED"

    return CalTRACKUsagePerDayCandidateModel(
        model_type=model_type,
        formula=formula,
        status=status,
        warnings=model_warnings,
        model_params=model_params,
        model=model,
        result=result,
        r_squared_adj=r_squared_adj,
    )
"def",
"get_single_cdd_hdd_candidate_model",
"(",
"data",
",",
"minimum_non_zero_cdd",
",",
"minimum_non_zero_hdd",
",",
"minimum_total_cdd",
",",
"minimum_total_hdd",
",",
"beta_cdd_maximum_p_value",
",",
"beta_hdd_maximum_p_value",
",",
"weights_col",
",",
"cooling_balance_po... | Return and fit a single candidate cdd_hdd model for a particular selection
of cooling balance point and heating balance point
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and
``hdd_<heating_balance_point>`` and ``cdd_<cooling_balance_point>``
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_cdd : :any:`int`
Minimum allowable number of non-zero cooling degree day values.
minimum_non_zero_hdd : :any:`int`
Minimum allowable number of non-zero heating degree day values.
minimum_total_cdd : :any:`float`
Minimum allowable total sum of cooling degree day values.
minimum_total_hdd : :any:`float`
Minimum allowable total sum of heating degree day values.
beta_cdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta cdd parameter.
beta_hdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta hdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
cooling_balance_point : :any:`float`
The cooling balance point for this model.
heating_balance_point : :any:`float`
The heating balance point for this model.
Returns
-------
candidate_model : :any:`CalTRACKUsagePerDayCandidateModel`
A single cdd-hdd candidate model, with any associated warnings. | [
"Return",
"and",
"fit",
"a",
"single",
"candidate",
"cdd_hdd",
"model",
"for",
"a",
"particular",
"selection",
"of",
"cooling",
"balance",
"point",
"and",
"heating",
"balance",
"point"
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L1347-L1511 | train | 199,941 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | get_cdd_hdd_candidate_models | def get_cdd_hdd_candidate_models(
data,
minimum_non_zero_cdd,
minimum_non_zero_hdd,
minimum_total_cdd,
minimum_total_hdd,
beta_cdd_maximum_p_value,
beta_hdd_maximum_p_value,
weights_col,
):
""" Return a list of candidate cdd_hdd models for a particular selection
of cooling balance point and heating balance point
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and 1 to n
columns each of the form ``hdd_<heating_balance_point>``
and ``cdd_<cooling_balance_point>``. DataFrames of this form can be
made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_cdd : :any:`int`
Minimum allowable number of non-zero cooling degree day values.
minimum_non_zero_hdd : :any:`int`
Minimum allowable number of non-zero heating degree day values.
minimum_total_cdd : :any:`float`
Minimum allowable total sum of cooling degree day values.
minimum_total_hdd : :any:`float`
Minimum allowable total sum of heating degree day values.
beta_cdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta cdd parameter.
beta_hdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta hdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
A list of cdd_hdd candidate models, with any associated warnings.
"""
cooling_balance_points = [
int(col[4:]) for col in data.columns if col.startswith("cdd")
]
heating_balance_points = [
int(col[4:]) for col in data.columns if col.startswith("hdd")
]
# CalTrack 3.2.2.1
candidate_models = [
get_single_cdd_hdd_candidate_model(
data,
minimum_non_zero_cdd,
minimum_non_zero_hdd,
minimum_total_cdd,
minimum_total_hdd,
beta_cdd_maximum_p_value,
beta_hdd_maximum_p_value,
weights_col,
cooling_balance_point,
heating_balance_point,
)
for cooling_balance_point in cooling_balance_points
for heating_balance_point in heating_balance_points
if heating_balance_point <= cooling_balance_point
]
return candidate_models | python | def get_cdd_hdd_candidate_models(
data,
minimum_non_zero_cdd,
minimum_non_zero_hdd,
minimum_total_cdd,
minimum_total_hdd,
beta_cdd_maximum_p_value,
beta_hdd_maximum_p_value,
weights_col,
):
""" Return a list of candidate cdd_hdd models for a particular selection
of cooling balance point and heating balance point
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and 1 to n
columns each of the form ``hdd_<heating_balance_point>``
and ``cdd_<cooling_balance_point>``. DataFrames of this form can be
made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_cdd : :any:`int`
Minimum allowable number of non-zero cooling degree day values.
minimum_non_zero_hdd : :any:`int`
Minimum allowable number of non-zero heating degree day values.
minimum_total_cdd : :any:`float`
Minimum allowable total sum of cooling degree day values.
minimum_total_hdd : :any:`float`
Minimum allowable total sum of heating degree day values.
beta_cdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta cdd parameter.
beta_hdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta hdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
A list of cdd_hdd candidate models, with any associated warnings.
"""
cooling_balance_points = [
int(col[4:]) for col in data.columns if col.startswith("cdd")
]
heating_balance_points = [
int(col[4:]) for col in data.columns if col.startswith("hdd")
]
# CalTrack 3.2.2.1
candidate_models = [
get_single_cdd_hdd_candidate_model(
data,
minimum_non_zero_cdd,
minimum_non_zero_hdd,
minimum_total_cdd,
minimum_total_hdd,
beta_cdd_maximum_p_value,
beta_hdd_maximum_p_value,
weights_col,
cooling_balance_point,
heating_balance_point,
)
for cooling_balance_point in cooling_balance_points
for heating_balance_point in heating_balance_points
if heating_balance_point <= cooling_balance_point
]
return candidate_models | [
"def",
"get_cdd_hdd_candidate_models",
"(",
"data",
",",
"minimum_non_zero_cdd",
",",
"minimum_non_zero_hdd",
",",
"minimum_total_cdd",
",",
"minimum_total_hdd",
",",
"beta_cdd_maximum_p_value",
",",
"beta_hdd_maximum_p_value",
",",
"weights_col",
",",
")",
":",
"cooling_ba... | Return a list of candidate cdd_hdd models for a particular selection
of cooling balance point and heating balance point
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and 1 to n
columns each of the form ``hdd_<heating_balance_point>``
and ``cdd_<cooling_balance_point>``. DataFrames of this form can be
made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_cdd : :any:`int`
Minimum allowable number of non-zero cooling degree day values.
minimum_non_zero_hdd : :any:`int`
Minimum allowable number of non-zero heating degree day values.
minimum_total_cdd : :any:`float`
Minimum allowable total sum of cooling degree day values.
minimum_total_hdd : :any:`float`
Minimum allowable total sum of heating degree day values.
beta_cdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta cdd parameter.
beta_hdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta hdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
A list of cdd_hdd candidate models, with any associated warnings. | [
"Return",
"a",
"list",
"of",
"candidate",
"cdd_hdd",
"models",
"for",
"a",
"particular",
"selection",
"of",
"cooling",
"balance",
"point",
"and",
"heating",
"balance",
"point"
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L1514-L1582 | train | 199,942 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | select_best_candidate | def select_best_candidate(candidate_models):
""" Select and return the best candidate model based on r-squared and
qualification.
Parameters
----------
candidate_models : :any:`list` of :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
Candidate models to select from.
Returns
-------
(best_candidate, warnings) : :any:`tuple` of :any:`eemeter.CalTRACKUsagePerDayCandidateModel` or :any:`None` and :any:`list` of `eemeter.EEMeterWarning`
Return the candidate model with highest r-squared or None if none meet
the requirements, and a list of warnings about this selection (or lack
of selection).
"""
best_r_squared_adj = -np.inf
best_candidate = None
# CalTrack 3.4.3.3
for candidate in candidate_models:
if (
candidate.status == "QUALIFIED"
and candidate.r_squared_adj > best_r_squared_adj
):
best_candidate = candidate
best_r_squared_adj = candidate.r_squared_adj
if best_candidate is None:
warnings = [
EEMeterWarning(
qualified_name="eemeter.caltrack_daily.select_best_candidate.no_candidates",
description="No qualified model candidates available.",
data={
"status_count:{}".format(status): count
for status, count in Counter(
[c.status for c in candidate_models]
).items()
},
)
]
return None, warnings
return best_candidate, [] | python | def select_best_candidate(candidate_models):
""" Select and return the best candidate model based on r-squared and
qualification.
Parameters
----------
candidate_models : :any:`list` of :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
Candidate models to select from.
Returns
-------
(best_candidate, warnings) : :any:`tuple` of :any:`eemeter.CalTRACKUsagePerDayCandidateModel` or :any:`None` and :any:`list` of `eemeter.EEMeterWarning`
Return the candidate model with highest r-squared or None if none meet
the requirements, and a list of warnings about this selection (or lack
of selection).
"""
best_r_squared_adj = -np.inf
best_candidate = None
# CalTrack 3.4.3.3
for candidate in candidate_models:
if (
candidate.status == "QUALIFIED"
and candidate.r_squared_adj > best_r_squared_adj
):
best_candidate = candidate
best_r_squared_adj = candidate.r_squared_adj
if best_candidate is None:
warnings = [
EEMeterWarning(
qualified_name="eemeter.caltrack_daily.select_best_candidate.no_candidates",
description="No qualified model candidates available.",
data={
"status_count:{}".format(status): count
for status, count in Counter(
[c.status for c in candidate_models]
).items()
},
)
]
return None, warnings
return best_candidate, [] | [
"def",
"select_best_candidate",
"(",
"candidate_models",
")",
":",
"best_r_squared_adj",
"=",
"-",
"np",
".",
"inf",
"best_candidate",
"=",
"None",
"# CalTrack 3.4.3.3",
"for",
"candidate",
"in",
"candidate_models",
":",
"if",
"(",
"candidate",
".",
"status",
"=="... | Select and return the best candidate model based on r-squared and
qualification.
Parameters
----------
candidate_models : :any:`list` of :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
Candidate models to select from.
Returns
-------
(best_candidate, warnings) : :any:`tuple` of :any:`eemeter.CalTRACKUsagePerDayCandidateModel` or :any:`None` and :any:`list` of `eemeter.EEMeterWarning`
Return the candidate model with highest r-squared or None if none meet
the requirements, and a list of warnings about this selection (or lack
of selection). | [
"Select",
"and",
"return",
"the",
"best",
"candidate",
"model",
"based",
"on",
"r",
"-",
"squared",
"and",
"qualification",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L1585-L1628 | train | 199,943 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | fit_caltrack_usage_per_day_model | def fit_caltrack_usage_per_day_model(
data,
fit_cdd=True,
use_billing_presets=False,
minimum_non_zero_cdd=10,
minimum_non_zero_hdd=10,
minimum_total_cdd=20,
minimum_total_hdd=20,
beta_cdd_maximum_p_value=1,
beta_hdd_maximum_p_value=1,
weights_col=None,
fit_intercept_only=True,
fit_cdd_only=True,
fit_hdd_only=True,
fit_cdd_hdd=True,
):
""" CalTRACK daily and billing methods using a usage-per-day modeling
strategy.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and 1 to n
columns each of the form ``hdd_<heating_balance_point>``
and ``cdd_<cooling_balance_point>``. DataFrames of this form can be
made using the :any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
Should have a :any:`pandas.DatetimeIndex`.
fit_cdd : :any:`bool`, optional
If True, fit CDD models unless overridden by ``fit_cdd_only`` or
``fit_cdd_hdd`` flags. Should be set to ``False`` for gas meter data.
use_billing_presets : :any:`bool`, optional
Use presets appropriate for billing models. Otherwise defaults are
appropriate for daily models.
minimum_non_zero_cdd : :any:`int`, optional
Minimum allowable number of non-zero cooling degree day values.
minimum_non_zero_hdd : :any:`int`, optional
Minimum allowable number of non-zero heating degree day values.
minimum_total_cdd : :any:`float`, optional
Minimum allowable total sum of cooling degree day values.
minimum_total_hdd : :any:`float`, optional
Minimum allowable total sum of heating degree day values.
beta_cdd_maximum_p_value : :any:`float`, optional
The maximum allowable p-value of the beta cdd parameter. The default
value is the most permissive possible (i.e., 1). This is here
for backwards compatibility with CalTRACK 1.0 methods.
beta_hdd_maximum_p_value : :any:`float`, optional
The maximum allowable p-value of the beta hdd parameter. The default
value is the most permissive possible (i.e., 1). This is here
for backwards compatibility with CalTRACK 1.0 methods.
weights_col : :any:`str` or None, optional
The name of the column (if any) in ``data`` to use as weights. Weight
must be the number of days of data in the period.
fit_intercept_only : :any:`bool`, optional
If True, fit and consider intercept_only model candidates.
fit_cdd_only : :any:`bool`, optional
If True, fit and consider cdd_only model candidates. Ignored if
``fit_cdd=False``.
fit_hdd_only : :any:`bool`, optional
If True, fit and consider hdd_only model candidates.
fit_cdd_hdd : :any:`bool`, optional
If True, fit and consider cdd_hdd model candidates. Ignored if
``fit_cdd=False``.
Returns
-------
model_results : :any:`eemeter.CalTRACKUsagePerDayModelResults`
Results of running CalTRACK daily method. See :any:`eemeter.CalTRACKUsagePerDayModelResults`
for more details.
"""
if use_billing_presets:
# CalTrack 3.2.2.2.1
minimum_non_zero_cdd = 0
minimum_non_zero_hdd = 0
# CalTrack 3.2.2.2.2
minimum_total_cdd = 20
minimum_total_hdd = 20
# CalTrack 3.4.2
if weights_col is None:
raise ValueError(
"If using billing presets, the weights_col argument must be specified."
)
interval = "billing"
else:
interval = "daily"
# cleans data to fully NaN rows that have missing temp or meter data
data = overwrite_partial_rows_with_nan(data)
if data.dropna().empty:
return CalTRACKUsagePerDayModelResults(
status="NO DATA",
method_name="caltrack_usage_per_day",
warnings=[
EEMeterWarning(
qualified_name="eemeter.caltrack_usage_per_day.no_data",
description=("No data available. Cannot fit model."),
data={},
)
],
)
# collect all candidate results, then validate all at once
# CalTrack 3.4.3.1
candidates = []
if fit_intercept_only:
candidates.extend(
get_intercept_only_candidate_models(data, weights_col=weights_col)
)
if fit_hdd_only:
candidates.extend(
get_hdd_only_candidate_models(
data=data,
minimum_non_zero_hdd=minimum_non_zero_hdd,
minimum_total_hdd=minimum_total_hdd,
beta_hdd_maximum_p_value=beta_hdd_maximum_p_value,
weights_col=weights_col,
)
)
# cdd models ignored for gas
if fit_cdd:
if fit_cdd_only:
candidates.extend(
get_cdd_only_candidate_models(
data=data,
minimum_non_zero_cdd=minimum_non_zero_cdd,
minimum_total_cdd=minimum_total_cdd,
beta_cdd_maximum_p_value=beta_cdd_maximum_p_value,
weights_col=weights_col,
)
)
if fit_cdd_hdd:
candidates.extend(
get_cdd_hdd_candidate_models(
data=data,
minimum_non_zero_cdd=minimum_non_zero_cdd,
minimum_non_zero_hdd=minimum_non_zero_hdd,
minimum_total_cdd=minimum_total_cdd,
minimum_total_hdd=minimum_total_hdd,
beta_cdd_maximum_p_value=beta_cdd_maximum_p_value,
beta_hdd_maximum_p_value=beta_hdd_maximum_p_value,
weights_col=weights_col,
)
)
# find best candidate result
best_candidate, candidate_warnings = select_best_candidate(candidates)
warnings = candidate_warnings
if best_candidate is None:
status = "NO MODEL"
r_squared_adj = None
else:
status = "SUCCESS"
r_squared_adj = best_candidate.r_squared_adj
model_result = CalTRACKUsagePerDayModelResults(
status=status,
method_name="caltrack_usage_per_day",
interval=interval,
model=best_candidate,
candidates=candidates,
r_squared_adj=r_squared_adj,
warnings=warnings,
settings={
"fit_cdd": fit_cdd,
"minimum_non_zero_cdd": minimum_non_zero_cdd,
"minimum_non_zero_hdd": minimum_non_zero_hdd,
"minimum_total_cdd": minimum_total_cdd,
"minimum_total_hdd": minimum_total_hdd,
"beta_cdd_maximum_p_value": beta_cdd_maximum_p_value,
"beta_hdd_maximum_p_value": beta_hdd_maximum_p_value,
},
)
if best_candidate is not None:
if best_candidate.model_type in ["cdd_hdd"]:
num_parameters = 2
elif best_candidate.model_type in ["hdd_only", "cdd_only"]:
num_parameters = 1
else:
num_parameters = 0
predicted_avgs = _caltrack_predict_design_matrix(
best_candidate.model_type,
best_candidate.model_params,
data,
input_averages=True,
output_averages=True,
)
model_result.avgs_metrics = ModelMetrics(
data.meter_value, predicted_avgs, num_parameters
)
predicted_totals = _caltrack_predict_design_matrix(
best_candidate.model_type,
best_candidate.model_params,
data,
input_averages=True,
output_averages=False,
)
days_per_period = day_counts(data.index)
data_totals = data.meter_value * days_per_period
model_result.totals_metrics = ModelMetrics(
data_totals, predicted_totals, num_parameters
)
return model_result | python | def fit_caltrack_usage_per_day_model(
data,
fit_cdd=True,
use_billing_presets=False,
minimum_non_zero_cdd=10,
minimum_non_zero_hdd=10,
minimum_total_cdd=20,
minimum_total_hdd=20,
beta_cdd_maximum_p_value=1,
beta_hdd_maximum_p_value=1,
weights_col=None,
fit_intercept_only=True,
fit_cdd_only=True,
fit_hdd_only=True,
fit_cdd_hdd=True,
):
""" CalTRACK daily and billing methods using a usage-per-day modeling
strategy.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and 1 to n
columns each of the form ``hdd_<heating_balance_point>``
and ``cdd_<cooling_balance_point>``. DataFrames of this form can be
made using the :any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
Should have a :any:`pandas.DatetimeIndex`.
fit_cdd : :any:`bool`, optional
If True, fit CDD models unless overridden by ``fit_cdd_only`` or
``fit_cdd_hdd`` flags. Should be set to ``False`` for gas meter data.
use_billing_presets : :any:`bool`, optional
Use presets appropriate for billing models. Otherwise defaults are
appropriate for daily models.
minimum_non_zero_cdd : :any:`int`, optional
Minimum allowable number of non-zero cooling degree day values.
minimum_non_zero_hdd : :any:`int`, optional
Minimum allowable number of non-zero heating degree day values.
minimum_total_cdd : :any:`float`, optional
Minimum allowable total sum of cooling degree day values.
minimum_total_hdd : :any:`float`, optional
Minimum allowable total sum of heating degree day values.
beta_cdd_maximum_p_value : :any:`float`, optional
The maximum allowable p-value of the beta cdd parameter. The default
value is the most permissive possible (i.e., 1). This is here
for backwards compatibility with CalTRACK 1.0 methods.
beta_hdd_maximum_p_value : :any:`float`, optional
The maximum allowable p-value of the beta hdd parameter. The default
value is the most permissive possible (i.e., 1). This is here
for backwards compatibility with CalTRACK 1.0 methods.
weights_col : :any:`str` or None, optional
The name of the column (if any) in ``data`` to use as weights. Weight
must be the number of days of data in the period.
fit_intercept_only : :any:`bool`, optional
If True, fit and consider intercept_only model candidates.
fit_cdd_only : :any:`bool`, optional
If True, fit and consider cdd_only model candidates. Ignored if
``fit_cdd=False``.
fit_hdd_only : :any:`bool`, optional
If True, fit and consider hdd_only model candidates.
fit_cdd_hdd : :any:`bool`, optional
If True, fit and consider cdd_hdd model candidates. Ignored if
``fit_cdd=False``.
Returns
-------
model_results : :any:`eemeter.CalTRACKUsagePerDayModelResults`
Results of running CalTRACK daily method. See :any:`eemeter.CalTRACKUsagePerDayModelResults`
for more details.
"""
if use_billing_presets:
# CalTrack 3.2.2.2.1
minimum_non_zero_cdd = 0
minimum_non_zero_hdd = 0
# CalTrack 3.2.2.2.2
minimum_total_cdd = 20
minimum_total_hdd = 20
# CalTrack 3.4.2
if weights_col is None:
raise ValueError(
"If using billing presets, the weights_col argument must be specified."
)
interval = "billing"
else:
interval = "daily"
# cleans data to fully NaN rows that have missing temp or meter data
data = overwrite_partial_rows_with_nan(data)
if data.dropna().empty:
return CalTRACKUsagePerDayModelResults(
status="NO DATA",
method_name="caltrack_usage_per_day",
warnings=[
EEMeterWarning(
qualified_name="eemeter.caltrack_usage_per_day.no_data",
description=("No data available. Cannot fit model."),
data={},
)
],
)
# collect all candidate results, then validate all at once
# CalTrack 3.4.3.1
candidates = []
if fit_intercept_only:
candidates.extend(
get_intercept_only_candidate_models(data, weights_col=weights_col)
)
if fit_hdd_only:
candidates.extend(
get_hdd_only_candidate_models(
data=data,
minimum_non_zero_hdd=minimum_non_zero_hdd,
minimum_total_hdd=minimum_total_hdd,
beta_hdd_maximum_p_value=beta_hdd_maximum_p_value,
weights_col=weights_col,
)
)
# cdd models ignored for gas
if fit_cdd:
if fit_cdd_only:
candidates.extend(
get_cdd_only_candidate_models(
data=data,
minimum_non_zero_cdd=minimum_non_zero_cdd,
minimum_total_cdd=minimum_total_cdd,
beta_cdd_maximum_p_value=beta_cdd_maximum_p_value,
weights_col=weights_col,
)
)
if fit_cdd_hdd:
candidates.extend(
get_cdd_hdd_candidate_models(
data=data,
minimum_non_zero_cdd=minimum_non_zero_cdd,
minimum_non_zero_hdd=minimum_non_zero_hdd,
minimum_total_cdd=minimum_total_cdd,
minimum_total_hdd=minimum_total_hdd,
beta_cdd_maximum_p_value=beta_cdd_maximum_p_value,
beta_hdd_maximum_p_value=beta_hdd_maximum_p_value,
weights_col=weights_col,
)
)
# find best candidate result
best_candidate, candidate_warnings = select_best_candidate(candidates)
warnings = candidate_warnings
if best_candidate is None:
status = "NO MODEL"
r_squared_adj = None
else:
status = "SUCCESS"
r_squared_adj = best_candidate.r_squared_adj
model_result = CalTRACKUsagePerDayModelResults(
status=status,
method_name="caltrack_usage_per_day",
interval=interval,
model=best_candidate,
candidates=candidates,
r_squared_adj=r_squared_adj,
warnings=warnings,
settings={
"fit_cdd": fit_cdd,
"minimum_non_zero_cdd": minimum_non_zero_cdd,
"minimum_non_zero_hdd": minimum_non_zero_hdd,
"minimum_total_cdd": minimum_total_cdd,
"minimum_total_hdd": minimum_total_hdd,
"beta_cdd_maximum_p_value": beta_cdd_maximum_p_value,
"beta_hdd_maximum_p_value": beta_hdd_maximum_p_value,
},
)
if best_candidate is not None:
if best_candidate.model_type in ["cdd_hdd"]:
num_parameters = 2
elif best_candidate.model_type in ["hdd_only", "cdd_only"]:
num_parameters = 1
else:
num_parameters = 0
predicted_avgs = _caltrack_predict_design_matrix(
best_candidate.model_type,
best_candidate.model_params,
data,
input_averages=True,
output_averages=True,
)
model_result.avgs_metrics = ModelMetrics(
data.meter_value, predicted_avgs, num_parameters
)
predicted_totals = _caltrack_predict_design_matrix(
best_candidate.model_type,
best_candidate.model_params,
data,
input_averages=True,
output_averages=False,
)
days_per_period = day_counts(data.index)
data_totals = data.meter_value * days_per_period
model_result.totals_metrics = ModelMetrics(
data_totals, predicted_totals, num_parameters
)
return model_result | [
"def",
"fit_caltrack_usage_per_day_model",
"(",
"data",
",",
"fit_cdd",
"=",
"True",
",",
"use_billing_presets",
"=",
"False",
",",
"minimum_non_zero_cdd",
"=",
"10",
",",
"minimum_non_zero_hdd",
"=",
"10",
",",
"minimum_total_cdd",
"=",
"20",
",",
"minimum_total_hd... | CalTRACK daily and billing methods using a usage-per-day modeling
strategy.
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and 1 to n
columns each of the form ``hdd_<heating_balance_point>``
and ``cdd_<cooling_balance_point>``. DataFrames of this form can be
made using the :any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
Should have a :any:`pandas.DatetimeIndex`.
fit_cdd : :any:`bool`, optional
If True, fit CDD models unless overridden by ``fit_cdd_only`` or
``fit_cdd_hdd`` flags. Should be set to ``False`` for gas meter data.
use_billing_presets : :any:`bool`, optional
Use presets appropriate for billing models. Otherwise defaults are
appropriate for daily models.
minimum_non_zero_cdd : :any:`int`, optional
Minimum allowable number of non-zero cooling degree day values.
minimum_non_zero_hdd : :any:`int`, optional
Minimum allowable number of non-zero heating degree day values.
minimum_total_cdd : :any:`float`, optional
Minimum allowable total sum of cooling degree day values.
minimum_total_hdd : :any:`float`, optional
Minimum allowable total sum of heating degree day values.
beta_cdd_maximum_p_value : :any:`float`, optional
The maximum allowable p-value of the beta cdd parameter. The default
value is the most permissive possible (i.e., 1). This is here
for backwards compatibility with CalTRACK 1.0 methods.
beta_hdd_maximum_p_value : :any:`float`, optional
The maximum allowable p-value of the beta hdd parameter. The default
value is the most permissive possible (i.e., 1). This is here
for backwards compatibility with CalTRACK 1.0 methods.
weights_col : :any:`str` or None, optional
The name of the column (if any) in ``data`` to use as weights. Weight
must be the number of days of data in the period.
fit_intercept_only : :any:`bool`, optional
If True, fit and consider intercept_only model candidates.
fit_cdd_only : :any:`bool`, optional
If True, fit and consider cdd_only model candidates. Ignored if
``fit_cdd=False``.
fit_hdd_only : :any:`bool`, optional
If True, fit and consider hdd_only model candidates.
fit_cdd_hdd : :any:`bool`, optional
If True, fit and consider cdd_hdd model candidates. Ignored if
``fit_cdd=False``.
Returns
-------
model_results : :any:`eemeter.CalTRACKUsagePerDayModelResults`
Results of running CalTRACK daily method. See :any:`eemeter.CalTRACKUsagePerDayModelResults`
for more details. | [
"CalTRACK",
"daily",
"and",
"billing",
"methods",
"using",
"a",
"usage",
"-",
"per",
"-",
"day",
"modeling",
"strategy",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L1631-L1844 | train | 199,944 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | plot_caltrack_candidate | def plot_caltrack_candidate(
candidate,
best=False,
ax=None,
title=None,
figsize=None,
temp_range=None,
alpha=None,
**kwargs
):
""" Plot a CalTRACK candidate model.
Parameters
----------
candidate : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
A candidate model with a predict function.
best : :any:`bool`, optional
Whether this is the best candidate or not.
ax : :any:`matplotlib.axes.Axes`, optional
Existing axes to plot on.
title : :any:`str`, optional
Chart title.
figsize : :any:`tuple`, optional
(width, height) of chart.
temp_range : :any:`tuple`, optional
(min, max) temperatures to plot model.
alpha : :any:`float` between 0 and 1, optional
Transparency, 0 fully transparent, 1 fully opaque.
**kwargs
Keyword arguments for :any:`matplotlib.axes.Axes.plot`
Returns
-------
ax : :any:`matplotlib.axes.Axes`
Matplotlib axes.
"""
try:
import matplotlib.pyplot as plt
except ImportError: # pragma: no cover
raise ImportError("matplotlib is required for plotting.")
if figsize is None:
figsize = (10, 4)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if candidate.status == "QUALIFIED":
color = "C2"
elif candidate.status == "DISQUALIFIED":
color = "C3"
else:
return
if best:
color = "C1"
alpha = 1
temp_min, temp_max = (30, 90) if temp_range is None else temp_range
temps = np.arange(temp_min, temp_max)
data = {"n_days": np.ones(temps.shape)}
prediction_index = pd.date_range(
"2017-01-01T00:00:00Z", periods=len(temps), freq="D"
)
temps_hourly = pd.Series(temps, index=prediction_index).resample("H").ffill()
prediction = candidate.predict(
prediction_index, temps_hourly, "daily"
).result.predicted_usage
plot_kwargs = {"color": color, "alpha": alpha or 0.3}
plot_kwargs.update(kwargs)
ax.plot(temps, prediction, **plot_kwargs)
if title is not None:
ax.set_title(title)
return ax | python | def plot_caltrack_candidate(
candidate,
best=False,
ax=None,
title=None,
figsize=None,
temp_range=None,
alpha=None,
**kwargs
):
""" Plot a CalTRACK candidate model.
Parameters
----------
candidate : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
A candidate model with a predict function.
best : :any:`bool`, optional
Whether this is the best candidate or not.
ax : :any:`matplotlib.axes.Axes`, optional
Existing axes to plot on.
title : :any:`str`, optional
Chart title.
figsize : :any:`tuple`, optional
(width, height) of chart.
temp_range : :any:`tuple`, optional
(min, max) temperatures to plot model.
alpha : :any:`float` between 0 and 1, optional
Transparency, 0 fully transparent, 1 fully opaque.
**kwargs
Keyword arguments for :any:`matplotlib.axes.Axes.plot`
Returns
-------
ax : :any:`matplotlib.axes.Axes`
Matplotlib axes.
"""
try:
import matplotlib.pyplot as plt
except ImportError: # pragma: no cover
raise ImportError("matplotlib is required for plotting.")
if figsize is None:
figsize = (10, 4)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if candidate.status == "QUALIFIED":
color = "C2"
elif candidate.status == "DISQUALIFIED":
color = "C3"
else:
return
if best:
color = "C1"
alpha = 1
temp_min, temp_max = (30, 90) if temp_range is None else temp_range
temps = np.arange(temp_min, temp_max)
data = {"n_days": np.ones(temps.shape)}
prediction_index = pd.date_range(
"2017-01-01T00:00:00Z", periods=len(temps), freq="D"
)
temps_hourly = pd.Series(temps, index=prediction_index).resample("H").ffill()
prediction = candidate.predict(
prediction_index, temps_hourly, "daily"
).result.predicted_usage
plot_kwargs = {"color": color, "alpha": alpha or 0.3}
plot_kwargs.update(kwargs)
ax.plot(temps, prediction, **plot_kwargs)
if title is not None:
ax.set_title(title)
return ax | [
"def",
"plot_caltrack_candidate",
"(",
"candidate",
",",
"best",
"=",
"False",
",",
"ax",
"=",
"None",
",",
"title",
"=",
"None",
",",
"figsize",
"=",
"None",
",",
"temp_range",
"=",
"None",
",",
"alpha",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":... | Plot a CalTRACK candidate model.
Parameters
----------
candidate : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
A candidate model with a predict function.
best : :any:`bool`, optional
Whether this is the best candidate or not.
ax : :any:`matplotlib.axes.Axes`, optional
Existing axes to plot on.
title : :any:`str`, optional
Chart title.
figsize : :any:`tuple`, optional
(width, height) of chart.
temp_range : :any:`tuple`, optional
(min, max) temperatures to plot model.
alpha : :any:`float` between 0 and 1, optional
Transparency, 0 fully transparent, 1 fully opaque.
**kwargs
Keyword arguments for :any:`matplotlib.axes.Axes.plot`
Returns
-------
ax : :any:`matplotlib.axes.Axes`
Matplotlib axes. | [
"Plot",
"a",
"CalTRACK",
"candidate",
"model",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L2125-L2207 | train | 199,945 |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | CalTRACKUsagePerDayModelResults.plot | def plot(
self,
ax=None,
title=None,
figsize=None,
with_candidates=False,
candidate_alpha=None,
temp_range=None,
):
""" Plot a model fit.
Parameters
----------
ax : :any:`matplotlib.axes.Axes`, optional
Existing axes to plot on.
title : :any:`str`, optional
Chart title.
figsize : :any:`tuple`, optional
(width, height) of chart.
with_candidates : :any:`bool`
If True, also plot candidate models.
candidate_alpha : :any:`float` between 0 and 1
Transparency at which to plot candidate models. 0 fully transparent,
1 fully opaque.
Returns
-------
ax : :any:`matplotlib.axes.Axes`
Matplotlib axes.
"""
try:
import matplotlib.pyplot as plt
except ImportError: # pragma: no cover
raise ImportError("matplotlib is required for plotting.")
if figsize is None:
figsize = (10, 4)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if temp_range is None:
temp_range = (20, 90)
if with_candidates:
for candidate in self.candidates:
candidate.plot(ax=ax, temp_range=temp_range, alpha=candidate_alpha)
self.model.plot(ax=ax, best=True, temp_range=temp_range)
if title is not None:
ax.set_title(title)
return ax | python | def plot(
self,
ax=None,
title=None,
figsize=None,
with_candidates=False,
candidate_alpha=None,
temp_range=None,
):
""" Plot a model fit.
Parameters
----------
ax : :any:`matplotlib.axes.Axes`, optional
Existing axes to plot on.
title : :any:`str`, optional
Chart title.
figsize : :any:`tuple`, optional
(width, height) of chart.
with_candidates : :any:`bool`
If True, also plot candidate models.
candidate_alpha : :any:`float` between 0 and 1
Transparency at which to plot candidate models. 0 fully transparent,
1 fully opaque.
Returns
-------
ax : :any:`matplotlib.axes.Axes`
Matplotlib axes.
"""
try:
import matplotlib.pyplot as plt
except ImportError: # pragma: no cover
raise ImportError("matplotlib is required for plotting.")
if figsize is None:
figsize = (10, 4)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if temp_range is None:
temp_range = (20, 90)
if with_candidates:
for candidate in self.candidates:
candidate.plot(ax=ax, temp_range=temp_range, alpha=candidate_alpha)
self.model.plot(ax=ax, best=True, temp_range=temp_range)
if title is not None:
ax.set_title(title)
return ax | [
"def",
"plot",
"(",
"self",
",",
"ax",
"=",
"None",
",",
"title",
"=",
"None",
",",
"figsize",
"=",
"None",
",",
"with_candidates",
"=",
"False",
",",
"candidate_alpha",
"=",
"None",
",",
"temp_range",
"=",
"None",
",",
")",
":",
"try",
":",
"import"... | Plot a model fit.
Parameters
----------
ax : :any:`matplotlib.axes.Axes`, optional
Existing axes to plot on.
title : :any:`str`, optional
Chart title.
figsize : :any:`tuple`, optional
(width, height) of chart.
with_candidates : :any:`bool`
If True, also plot candidate models.
candidate_alpha : :any:`float` between 0 and 1
Transparency at which to plot candidate models. 0 fully transparent,
1 fully opaque.
Returns
-------
ax : :any:`matplotlib.axes.Axes`
Matplotlib axes. | [
"Plot",
"a",
"model",
"fit",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L204-L256 | train | 199,946 |
openeemeter/eemeter | eemeter/visualization.py | plot_time_series | def plot_time_series(meter_data, temperature_data, **kwargs):
""" Plot meter and temperature data in dual-axes time series.
Parameters
----------
meter_data : :any:`pandas.DataFrame`
A :any:`pandas.DatetimeIndex`-indexed DataFrame of meter data with the column ``value``.
temperature_data : :any:`pandas.Series`
A :any:`pandas.DatetimeIndex`-indexed Series of temperature data.
**kwargs
Arbitrary keyword arguments to pass to
:any:`plt.subplots <matplotlib.pyplot.subplots>`
Returns
-------
axes : :any:`tuple` of :any:`matplotlib.axes.Axes`
Tuple of ``(ax_meter_data, ax_temperature_data)``.
"""
# TODO(philngo): include image in docs.
try:
import matplotlib.pyplot as plt
except ImportError: # pragma: no cover
raise ImportError("matplotlib is required for plotting.")
default_kwargs = {"figsize": (16, 4)}
default_kwargs.update(kwargs)
fig, ax1 = plt.subplots(**default_kwargs)
ax1.plot(
meter_data.index,
meter_data.value,
color="C0",
label="Energy Use",
drawstyle="steps-post",
)
ax1.set_ylabel("Energy Use")
ax2 = ax1.twinx()
ax2.plot(
temperature_data.index,
temperature_data,
color="C1",
label="Temperature",
alpha=0.8,
)
ax2.set_ylabel("Temperature")
fig.legend()
return ax1, ax2 | python | def plot_time_series(meter_data, temperature_data, **kwargs):
""" Plot meter and temperature data in dual-axes time series.
Parameters
----------
meter_data : :any:`pandas.DataFrame`
A :any:`pandas.DatetimeIndex`-indexed DataFrame of meter data with the column ``value``.
temperature_data : :any:`pandas.Series`
A :any:`pandas.DatetimeIndex`-indexed Series of temperature data.
**kwargs
Arbitrary keyword arguments to pass to
:any:`plt.subplots <matplotlib.pyplot.subplots>`
Returns
-------
axes : :any:`tuple` of :any:`matplotlib.axes.Axes`
Tuple of ``(ax_meter_data, ax_temperature_data)``.
"""
# TODO(philngo): include image in docs.
try:
import matplotlib.pyplot as plt
except ImportError: # pragma: no cover
raise ImportError("matplotlib is required for plotting.")
default_kwargs = {"figsize": (16, 4)}
default_kwargs.update(kwargs)
fig, ax1 = plt.subplots(**default_kwargs)
ax1.plot(
meter_data.index,
meter_data.value,
color="C0",
label="Energy Use",
drawstyle="steps-post",
)
ax1.set_ylabel("Energy Use")
ax2 = ax1.twinx()
ax2.plot(
temperature_data.index,
temperature_data,
color="C1",
label="Temperature",
alpha=0.8,
)
ax2.set_ylabel("Temperature")
fig.legend()
return ax1, ax2 | [
"def",
"plot_time_series",
"(",
"meter_data",
",",
"temperature_data",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO(philngo): include image in docs.",
"try",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"except",
"ImportError",
":",
"# pragma: no cover",
"... | Plot meter and temperature data in dual-axes time series.
Parameters
----------
meter_data : :any:`pandas.DataFrame`
A :any:`pandas.DatetimeIndex`-indexed DataFrame of meter data with the column ``value``.
temperature_data : :any:`pandas.Series`
A :any:`pandas.DatetimeIndex`-indexed Series of temperature data.
**kwargs
Arbitrary keyword arguments to pass to
:any:`plt.subplots <matplotlib.pyplot.subplots>`
Returns
-------
axes : :any:`tuple` of :any:`matplotlib.axes.Axes`
Tuple of ``(ax_meter_data, ax_temperature_data)``. | [
"Plot",
"meter",
"and",
"temperature",
"data",
"in",
"dual",
"-",
"axes",
"time",
"series",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/visualization.py#L33-L82 | train | 199,947 |
openeemeter/eemeter | eemeter/visualization.py | plot_energy_signature | def plot_energy_signature(
meter_data,
temperature_data,
temp_col=None,
ax=None,
title=None,
figsize=None,
**kwargs
):
""" Plot meter and temperature data in energy signature.
Parameters
----------
meter_data : :any:`pandas.DataFrame`
A :any:`pandas.DatetimeIndex`-indexed DataFrame of meter data with the column ``value``.
temperature_data : :any:`pandas.Series`
A :any:`pandas.DatetimeIndex`-indexed Series of temperature data.
temp_col : :any:`str`, default ``'temperature_mean'``
The name of the temperature column.
ax : :any:`matplotlib.axes.Axes`
The axis on which to plot.
title : :any:`str`, optional
Chart title.
figsize : :any:`tuple`, optional
(width, height) of chart.
**kwargs
Arbitrary keyword arguments to pass to
:any:`matplotlib.axes.Axes.scatter`.
Returns
-------
ax : :any:`matplotlib.axes.Axes`
Matplotlib axes.
"""
try:
import matplotlib.pyplot as plt
except ImportError: # pragma: no cover
raise ImportError("matplotlib is required for plotting.")
# format data
temperature_mean = compute_temperature_features(meter_data.index, temperature_data)
usage_per_day = compute_usage_per_day_feature(meter_data, series_name="meter_value")
df = merge_features([usage_per_day, temperature_mean.temperature_mean])
if figsize is None:
figsize = (10, 4)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if temp_col is None:
temp_col = "temperature_mean"
ax.scatter(df[temp_col], df.meter_value, **kwargs)
ax.set_xlabel("Temperature")
ax.set_ylabel("Energy Use per Day")
if title is not None:
ax.set_title(title)
return ax | python | def plot_energy_signature(
meter_data,
temperature_data,
temp_col=None,
ax=None,
title=None,
figsize=None,
**kwargs
):
""" Plot meter and temperature data in energy signature.
Parameters
----------
meter_data : :any:`pandas.DataFrame`
A :any:`pandas.DatetimeIndex`-indexed DataFrame of meter data with the column ``value``.
temperature_data : :any:`pandas.Series`
A :any:`pandas.DatetimeIndex`-indexed Series of temperature data.
temp_col : :any:`str`, default ``'temperature_mean'``
The name of the temperature column.
ax : :any:`matplotlib.axes.Axes`
The axis on which to plot.
title : :any:`str`, optional
Chart title.
figsize : :any:`tuple`, optional
(width, height) of chart.
**kwargs
Arbitrary keyword arguments to pass to
:any:`matplotlib.axes.Axes.scatter`.
Returns
-------
ax : :any:`matplotlib.axes.Axes`
Matplotlib axes.
"""
try:
import matplotlib.pyplot as plt
except ImportError: # pragma: no cover
raise ImportError("matplotlib is required for plotting.")
# format data
temperature_mean = compute_temperature_features(meter_data.index, temperature_data)
usage_per_day = compute_usage_per_day_feature(meter_data, series_name="meter_value")
df = merge_features([usage_per_day, temperature_mean.temperature_mean])
if figsize is None:
figsize = (10, 4)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if temp_col is None:
temp_col = "temperature_mean"
ax.scatter(df[temp_col], df.meter_value, **kwargs)
ax.set_xlabel("Temperature")
ax.set_ylabel("Energy Use per Day")
if title is not None:
ax.set_title(title)
return ax | [
"def",
"plot_energy_signature",
"(",
"meter_data",
",",
"temperature_data",
",",
"temp_col",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"title",
"=",
"None",
",",
"figsize",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"import",
"matplotlib... | Plot meter and temperature data in energy signature.
Parameters
----------
meter_data : :any:`pandas.DataFrame`
A :any:`pandas.DatetimeIndex`-indexed DataFrame of meter data with the column ``value``.
temperature_data : :any:`pandas.Series`
A :any:`pandas.DatetimeIndex`-indexed Series of temperature data.
temp_col : :any:`str`, default ``'temperature_mean'``
The name of the temperature column.
ax : :any:`matplotlib.axes.Axes`
The axis on which to plot.
title : :any:`str`, optional
Chart title.
figsize : :any:`tuple`, optional
(width, height) of chart.
**kwargs
Arbitrary keyword arguments to pass to
:any:`matplotlib.axes.Axes.scatter`.
Returns
-------
ax : :any:`matplotlib.axes.Axes`
Matplotlib axes. | [
"Plot",
"meter",
"and",
"temperature",
"data",
"in",
"energy",
"signature",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/visualization.py#L85-L145 | train | 199,948 |
openeemeter/eemeter | eemeter/io.py | meter_data_from_csv | def meter_data_from_csv(
filepath_or_buffer,
tz=None,
start_col="start",
value_col="value",
gzipped=False,
freq=None,
**kwargs
):
""" Load meter data from a CSV file.
Default format::
start,value
2017-01-01T00:00:00+00:00,0.31
2017-01-02T00:00:00+00:00,0.4
2017-01-03T00:00:00+00:00,0.58
Parameters
----------
filepath_or_buffer : :any:`str` or file-handle
File path or object.
tz : :any:`str`, optional
E.g., ``'UTC'`` or ``'US/Pacific'``
start_col : :any:`str`, optional, default ``'start'``
Date period start column.
value_col : :any:`str`, optional, default ``'value'``
Value column, can be in any unit.
gzipped : :any:`bool`, optional
Whether file is gzipped.
freq : :any:`str`, optional
If given, apply frequency to data using :any:`pandas.DataFrame.resample`.
**kwargs
Extra keyword arguments to pass to :any:`pandas.read_csv`, such as
``sep='|'``.
"""
read_csv_kwargs = {
"usecols": [start_col, value_col],
"dtype": {value_col: np.float64},
"parse_dates": [start_col],
"index_col": start_col,
}
if gzipped:
read_csv_kwargs.update({"compression": "gzip"})
# allow passing extra kwargs
read_csv_kwargs.update(kwargs)
df = pd.read_csv(filepath_or_buffer, **read_csv_kwargs).tz_localize("UTC")
if tz is not None:
df = df.tz_convert(tz)
if freq == "hourly":
df = df.resample("H").sum()
elif freq == "daily":
df = df.resample("D").sum()
return df | python | def meter_data_from_csv(
filepath_or_buffer,
tz=None,
start_col="start",
value_col="value",
gzipped=False,
freq=None,
**kwargs
):
""" Load meter data from a CSV file.
Default format::
start,value
2017-01-01T00:00:00+00:00,0.31
2017-01-02T00:00:00+00:00,0.4
2017-01-03T00:00:00+00:00,0.58
Parameters
----------
filepath_or_buffer : :any:`str` or file-handle
File path or object.
tz : :any:`str`, optional
E.g., ``'UTC'`` or ``'US/Pacific'``
start_col : :any:`str`, optional, default ``'start'``
Date period start column.
value_col : :any:`str`, optional, default ``'value'``
Value column, can be in any unit.
gzipped : :any:`bool`, optional
Whether file is gzipped.
freq : :any:`str`, optional
If given, apply frequency to data using :any:`pandas.DataFrame.resample`.
**kwargs
Extra keyword arguments to pass to :any:`pandas.read_csv`, such as
``sep='|'``.
"""
read_csv_kwargs = {
"usecols": [start_col, value_col],
"dtype": {value_col: np.float64},
"parse_dates": [start_col],
"index_col": start_col,
}
if gzipped:
read_csv_kwargs.update({"compression": "gzip"})
# allow passing extra kwargs
read_csv_kwargs.update(kwargs)
df = pd.read_csv(filepath_or_buffer, **read_csv_kwargs).tz_localize("UTC")
if tz is not None:
df = df.tz_convert(tz)
if freq == "hourly":
df = df.resample("H").sum()
elif freq == "daily":
df = df.resample("D").sum()
return df | [
"def",
"meter_data_from_csv",
"(",
"filepath_or_buffer",
",",
"tz",
"=",
"None",
",",
"start_col",
"=",
"\"start\"",
",",
"value_col",
"=",
"\"value\"",
",",
"gzipped",
"=",
"False",
",",
"freq",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"read_csv_kw... | Load meter data from a CSV file.
Default format::
start,value
2017-01-01T00:00:00+00:00,0.31
2017-01-02T00:00:00+00:00,0.4
2017-01-03T00:00:00+00:00,0.58
Parameters
----------
filepath_or_buffer : :any:`str` or file-handle
File path or object.
tz : :any:`str`, optional
E.g., ``'UTC'`` or ``'US/Pacific'``
start_col : :any:`str`, optional, default ``'start'``
Date period start column.
value_col : :any:`str`, optional, default ``'value'``
Value column, can be in any unit.
gzipped : :any:`bool`, optional
Whether file is gzipped.
freq : :any:`str`, optional
If given, apply frequency to data using :any:`pandas.DataFrame.resample`.
**kwargs
Extra keyword arguments to pass to :any:`pandas.read_csv`, such as
``sep='|'``. | [
"Load",
"meter",
"data",
"from",
"a",
"CSV",
"file",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/io.py#L33-L92 | train | 199,949 |
openeemeter/eemeter | eemeter/io.py | temperature_data_from_csv | def temperature_data_from_csv(
filepath_or_buffer,
tz=None,
date_col="dt",
temp_col="tempF",
gzipped=False,
freq=None,
**kwargs
):
""" Load temperature data from a CSV file.
Default format::
dt,tempF
2017-01-01T00:00:00+00:00,21
2017-01-01T01:00:00+00:00,22.5
2017-01-01T02:00:00+00:00,23.5
Parameters
----------
filepath_or_buffer : :any:`str` or file-handle
File path or object.
tz : :any:`str`, optional
E.g., ``'UTC'`` or ``'US/Pacific'``
date_col : :any:`str`, optional, default ``'dt'``
Date period start column.
temp_col : :any:`str`, optional, default ``'tempF'``
Temperature column.
gzipped : :any:`bool`, optional
Whether file is gzipped.
freq : :any:`str`, optional
If given, apply frequency to data using :any:`pandas.Series.resample`.
**kwargs
Extra keyword arguments to pass to :any:`pandas.read_csv`, such as
``sep='|'``.
"""
read_csv_kwargs = {
"usecols": [date_col, temp_col],
"dtype": {temp_col: np.float64},
"parse_dates": [date_col],
"index_col": date_col,
}
if gzipped:
read_csv_kwargs.update({"compression": "gzip"})
# allow passing extra kwargs
read_csv_kwargs.update(kwargs)
if tz is None:
tz = "UTC"
df = pd.read_csv(filepath_or_buffer, **read_csv_kwargs).tz_localize(tz)
if freq == "hourly":
df = df.resample("H").sum()
return df[temp_col] | python | def temperature_data_from_csv(
filepath_or_buffer,
tz=None,
date_col="dt",
temp_col="tempF",
gzipped=False,
freq=None,
**kwargs
):
""" Load temperature data from a CSV file.
Default format::
dt,tempF
2017-01-01T00:00:00+00:00,21
2017-01-01T01:00:00+00:00,22.5
2017-01-01T02:00:00+00:00,23.5
Parameters
----------
filepath_or_buffer : :any:`str` or file-handle
File path or object.
tz : :any:`str`, optional
E.g., ``'UTC'`` or ``'US/Pacific'``
date_col : :any:`str`, optional, default ``'dt'``
Date period start column.
temp_col : :any:`str`, optional, default ``'tempF'``
Temperature column.
gzipped : :any:`bool`, optional
Whether file is gzipped.
freq : :any:`str`, optional
If given, apply frequency to data using :any:`pandas.Series.resample`.
**kwargs
Extra keyword arguments to pass to :any:`pandas.read_csv`, such as
``sep='|'``.
"""
read_csv_kwargs = {
"usecols": [date_col, temp_col],
"dtype": {temp_col: np.float64},
"parse_dates": [date_col],
"index_col": date_col,
}
if gzipped:
read_csv_kwargs.update({"compression": "gzip"})
# allow passing extra kwargs
read_csv_kwargs.update(kwargs)
if tz is None:
tz = "UTC"
df = pd.read_csv(filepath_or_buffer, **read_csv_kwargs).tz_localize(tz)
if freq == "hourly":
df = df.resample("H").sum()
return df[temp_col] | [
"def",
"temperature_data_from_csv",
"(",
"filepath_or_buffer",
",",
"tz",
"=",
"None",
",",
"date_col",
"=",
"\"dt\"",
",",
"temp_col",
"=",
"\"tempF\"",
",",
"gzipped",
"=",
"False",
",",
"freq",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"read_csv_k... | Load temperature data from a CSV file.
Default format::
dt,tempF
2017-01-01T00:00:00+00:00,21
2017-01-01T01:00:00+00:00,22.5
2017-01-01T02:00:00+00:00,23.5
Parameters
----------
filepath_or_buffer : :any:`str` or file-handle
File path or object.
tz : :any:`str`, optional
E.g., ``'UTC'`` or ``'US/Pacific'``
date_col : :any:`str`, optional, default ``'dt'``
Date period start column.
temp_col : :any:`str`, optional, default ``'tempF'``
Temperature column.
gzipped : :any:`bool`, optional
Whether file is gzipped.
freq : :any:`str`, optional
If given, apply frequency to data using :any:`pandas.Series.resample`.
**kwargs
Extra keyword arguments to pass to :any:`pandas.read_csv`, such as
``sep='|'``. | [
"Load",
"temperature",
"data",
"from",
"a",
"CSV",
"file",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/io.py#L95-L152 | train | 199,950 |
openeemeter/eemeter | eemeter/io.py | meter_data_from_json | def meter_data_from_json(data, orient="list"):
""" Load meter data from json.
Default format::
[
['2017-01-01T00:00:00+00:00', 3.5],
['2017-02-01T00:00:00+00:00', 0.4],
['2017-03-01T00:00:00+00:00', 0.46],
]
Parameters
----------
data : :any:`list`
List elements are each a rows of data.
Returns
-------
df : :any:`pandas.DataFrame`
DataFrame with a single column (``'value'``) and a
:any:`pandas.DatetimeIndex`.
"""
if orient == "list":
df = pd.DataFrame(data, columns=["start", "value"])
df["start"] = pd.DatetimeIndex(df.start).tz_localize("UTC")
df = df.set_index("start")
return df
else:
raise ValueError("orientation not recognized.") | python | def meter_data_from_json(data, orient="list"):
""" Load meter data from json.
Default format::
[
['2017-01-01T00:00:00+00:00', 3.5],
['2017-02-01T00:00:00+00:00', 0.4],
['2017-03-01T00:00:00+00:00', 0.46],
]
Parameters
----------
data : :any:`list`
List elements are each a rows of data.
Returns
-------
df : :any:`pandas.DataFrame`
DataFrame with a single column (``'value'``) and a
:any:`pandas.DatetimeIndex`.
"""
if orient == "list":
df = pd.DataFrame(data, columns=["start", "value"])
df["start"] = pd.DatetimeIndex(df.start).tz_localize("UTC")
df = df.set_index("start")
return df
else:
raise ValueError("orientation not recognized.") | [
"def",
"meter_data_from_json",
"(",
"data",
",",
"orient",
"=",
"\"list\"",
")",
":",
"if",
"orient",
"==",
"\"list\"",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
",",
"columns",
"=",
"[",
"\"start\"",
",",
"\"value\"",
"]",
")",
"df",
"[",
... | Load meter data from json.
Default format::
[
['2017-01-01T00:00:00+00:00', 3.5],
['2017-02-01T00:00:00+00:00', 0.4],
['2017-03-01T00:00:00+00:00', 0.46],
]
Parameters
----------
data : :any:`list`
List elements are each a rows of data.
Returns
-------
df : :any:`pandas.DataFrame`
DataFrame with a single column (``'value'``) and a
:any:`pandas.DatetimeIndex`. | [
"Load",
"meter",
"data",
"from",
"json",
"."
] | e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0 | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/io.py#L155-L183 | train | 199,951 |
jwkvam/bowtie | bowtie/pager.py | Pager.notify | def notify(self):
"""Notify the client.
The function passed to ``App.respond`` will get called.
"""
if flask.has_request_context():
emit(_NAME + str(self._uuid))
else:
sio = flask.current_app.extensions['socketio']
sio.emit(_NAME + str(self._uuid))
eventlet.sleep() | python | def notify(self):
"""Notify the client.
The function passed to ``App.respond`` will get called.
"""
if flask.has_request_context():
emit(_NAME + str(self._uuid))
else:
sio = flask.current_app.extensions['socketio']
sio.emit(_NAME + str(self._uuid))
eventlet.sleep() | [
"def",
"notify",
"(",
"self",
")",
":",
"if",
"flask",
".",
"has_request_context",
"(",
")",
":",
"emit",
"(",
"_NAME",
"+",
"str",
"(",
"self",
".",
"_uuid",
")",
")",
"else",
":",
"sio",
"=",
"flask",
".",
"current_app",
".",
"extensions",
"[",
"... | Notify the client.
The function passed to ``App.respond`` will get called. | [
"Notify",
"the",
"client",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/pager.py#L26-L36 | train | 199,952 |
jwkvam/bowtie | bowtie/_cache.py | validate | def validate(key):
"""Check that the key is a string or bytestring.
That's the only valid type of key.
"""
if not isinstance(key, (str, bytes)):
raise KeyError('Key must be of type str or bytes, found type {}'.format(type(key))) | python | def validate(key):
"""Check that the key is a string or bytestring.
That's the only valid type of key.
"""
if not isinstance(key, (str, bytes)):
raise KeyError('Key must be of type str or bytes, found type {}'.format(type(key))) | [
"def",
"validate",
"(",
"key",
")",
":",
"if",
"not",
"isinstance",
"(",
"key",
",",
"(",
"str",
",",
"bytes",
")",
")",
":",
"raise",
"KeyError",
"(",
"'Key must be of type str or bytes, found type {}'",
".",
"format",
"(",
"type",
"(",
"key",
")",
")",
... | Check that the key is a string or bytestring.
That's the only valid type of key. | [
"Check",
"that",
"the",
"key",
"is",
"a",
"string",
"or",
"bytestring",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_cache.py#L12-L18 | train | 199,953 |
jwkvam/bowtie | bowtie/_component.py | pack | def pack(x: Any) -> bytes:
"""Encode ``x`` into msgpack with additional encoders."""
try:
return msgpack.packb(x, default=encoders)
except TypeError as exc:
message = ('Serialization error, check the data passed to a do_ command. '
'Cannot serialize this object:\n') + str(exc)[16:]
raise SerializationError(message) | python | def pack(x: Any) -> bytes:
"""Encode ``x`` into msgpack with additional encoders."""
try:
return msgpack.packb(x, default=encoders)
except TypeError as exc:
message = ('Serialization error, check the data passed to a do_ command. '
'Cannot serialize this object:\n') + str(exc)[16:]
raise SerializationError(message) | [
"def",
"pack",
"(",
"x",
":",
"Any",
")",
"->",
"bytes",
":",
"try",
":",
"return",
"msgpack",
".",
"packb",
"(",
"x",
",",
"default",
"=",
"encoders",
")",
"except",
"TypeError",
"as",
"exc",
":",
"message",
"=",
"(",
"'Serialization error, check the da... | Encode ``x`` into msgpack with additional encoders. | [
"Encode",
"x",
"into",
"msgpack",
"with",
"additional",
"encoders",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_component.py#L150-L157 | train | 199,954 |
jwkvam/bowtie | bowtie/_component.py | make_event | def make_event(event: Callable) -> Callable:
"""Create an event from a method signature."""
@property # type: ignore
@wraps(event)
def actualevent(self): # pylint: disable=missing-docstring
name = event.__name__[3:]
try:
# the getter post processing function
# is preserved with an underscore
getter = event(self).__name__
except AttributeError:
getter = None
return Event(name, self._uuid, getter) # pylint: disable=protected-access
return actualevent | python | def make_event(event: Callable) -> Callable:
"""Create an event from a method signature."""
@property # type: ignore
@wraps(event)
def actualevent(self): # pylint: disable=missing-docstring
name = event.__name__[3:]
try:
# the getter post processing function
# is preserved with an underscore
getter = event(self).__name__
except AttributeError:
getter = None
return Event(name, self._uuid, getter) # pylint: disable=protected-access
return actualevent | [
"def",
"make_event",
"(",
"event",
":",
"Callable",
")",
"->",
"Callable",
":",
"@",
"property",
"# type: ignore",
"@",
"wraps",
"(",
"event",
")",
"def",
"actualevent",
"(",
"self",
")",
":",
"# pylint: disable=missing-docstring",
"name",
"=",
"event",
".",
... | Create an event from a method signature. | [
"Create",
"an",
"event",
"from",
"a",
"method",
"signature",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_component.py#L165-L179 | train | 199,955 |
jwkvam/bowtie | bowtie/_component.py | Component._insert | def _insert(wrap: str, tag: Optional[str]) -> str:
"""Insert the component tag into the wrapper html.
This ignores other tags already created like ``{socket}``.
https://stackoverflow.com/a/11284026/744520
"""
if tag is None:
raise ValueError('tag cannot be None')
formatter = string.Formatter()
mapping = FormatDict(component=tag)
return formatter.vformat(wrap, (), mapping) | python | def _insert(wrap: str, tag: Optional[str]) -> str:
"""Insert the component tag into the wrapper html.
This ignores other tags already created like ``{socket}``.
https://stackoverflow.com/a/11284026/744520
"""
if tag is None:
raise ValueError('tag cannot be None')
formatter = string.Formatter()
mapping = FormatDict(component=tag)
return formatter.vformat(wrap, (), mapping) | [
"def",
"_insert",
"(",
"wrap",
":",
"str",
",",
"tag",
":",
"Optional",
"[",
"str",
"]",
")",
"->",
"str",
":",
"if",
"tag",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'tag cannot be None'",
")",
"formatter",
"=",
"string",
".",
"Formatter",
"(",
... | Insert the component tag into the wrapper html.
This ignores other tags already created like ``{socket}``.
https://stackoverflow.com/a/11284026/744520 | [
"Insert",
"the",
"component",
"tag",
"into",
"the",
"wrapper",
"html",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_component.py#L315-L326 | train | 199,956 |
jwkvam/bowtie | bowtie/control.py | Dropdown.do_options | def do_options(self, labels, values):
"""Replace the drop down fields.
Parameters
----------
labels : array-like
List of strings which will be visible to the user.
values : array-like
List of values associated with the labels that are hidden from the user.
Returns
-------
None
"""
return [dict(label=l, value=v) for l, v in zip(labels, values)] | python | def do_options(self, labels, values):
"""Replace the drop down fields.
Parameters
----------
labels : array-like
List of strings which will be visible to the user.
values : array-like
List of values associated with the labels that are hidden from the user.
Returns
-------
None
"""
return [dict(label=l, value=v) for l, v in zip(labels, values)] | [
"def",
"do_options",
"(",
"self",
",",
"labels",
",",
"values",
")",
":",
"return",
"[",
"dict",
"(",
"label",
"=",
"l",
",",
"value",
"=",
"v",
")",
"for",
"l",
",",
"v",
"in",
"zip",
"(",
"labels",
",",
"values",
")",
"]"
] | Replace the drop down fields.
Parameters
----------
labels : array-like
List of strings which will be visible to the user.
values : array-like
List of values associated with the labels that are hidden from the user.
Returns
-------
None | [
"Replace",
"the",
"drop",
"down",
"fields",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/control.py#L148-L163 | train | 199,957 |
jwkvam/bowtie | bowtie/control.py | Checkbox.do_options | def do_options(self, labels: Sequence[str],
values: Sequence[Union[str, int]]) -> Sequence[Dict]:
"""Replace the checkbox options.
Parameters
----------
labels : array-like
List of strings which will be visible to the user.
values : array-like
List of values associated with the labels that are hidden from the user.
Returns
-------
None
"""
return [{'label': label, 'value': value} for label, value in zip(labels, values)] | python | def do_options(self, labels: Sequence[str],
values: Sequence[Union[str, int]]) -> Sequence[Dict]:
"""Replace the checkbox options.
Parameters
----------
labels : array-like
List of strings which will be visible to the user.
values : array-like
List of values associated with the labels that are hidden from the user.
Returns
-------
None
"""
return [{'label': label, 'value': value} for label, value in zip(labels, values)] | [
"def",
"do_options",
"(",
"self",
",",
"labels",
":",
"Sequence",
"[",
"str",
"]",
",",
"values",
":",
"Sequence",
"[",
"Union",
"[",
"str",
",",
"int",
"]",
"]",
")",
"->",
"Sequence",
"[",
"Dict",
"]",
":",
"return",
"[",
"{",
"'label'",
":",
"... | Replace the checkbox options.
Parameters
----------
labels : array-like
List of strings which will be visible to the user.
values : array-like
List of values associated with the labels that are hidden from the user.
Returns
-------
None | [
"Replace",
"the",
"checkbox",
"options",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/control.py#L887-L903 | train | 199,958 |
jwkvam/bowtie | bowtie/control.py | Radio.do_options | def do_options(self, labels, values):
"""Replace the radio button options.
Parameters
----------
labels : Sequence
List of strings which will be visible to the user.
values : Sequence
List of values associated with the labels that are hidden from the user.
Returns
-------
None
"""
return [{'label': label, 'value': value} for label, value in zip(labels, values)] | python | def do_options(self, labels, values):
"""Replace the radio button options.
Parameters
----------
labels : Sequence
List of strings which will be visible to the user.
values : Sequence
List of values associated with the labels that are hidden from the user.
Returns
-------
None
"""
return [{'label': label, 'value': value} for label, value in zip(labels, values)] | [
"def",
"do_options",
"(",
"self",
",",
"labels",
",",
"values",
")",
":",
"return",
"[",
"{",
"'label'",
":",
"label",
",",
"'value'",
":",
"value",
"}",
"for",
"label",
",",
"value",
"in",
"zip",
"(",
"labels",
",",
"values",
")",
"]"
] | Replace the radio button options.
Parameters
----------
labels : Sequence
List of strings which will be visible to the user.
values : Sequence
List of values associated with the labels that are hidden from the user.
Returns
-------
None | [
"Replace",
"the",
"radio",
"button",
"options",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/control.py#L988-L1003 | train | 199,959 |
jwkvam/bowtie | bowtie/_app.py | node_version | def node_version():
"""Get node version."""
version = check_output(('node', '--version'))
return tuple(int(x) for x in version.strip()[1:].split(b'.')) | python | def node_version():
"""Get node version."""
version = check_output(('node', '--version'))
return tuple(int(x) for x in version.strip()[1:].split(b'.')) | [
"def",
"node_version",
"(",
")",
":",
"version",
"=",
"check_output",
"(",
"(",
"'node'",
",",
"'--version'",
")",
")",
"return",
"tuple",
"(",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"version",
".",
"strip",
"(",
")",
"[",
"1",
":",
"]",
".",
"s... | Get node version. | [
"Get",
"node",
"version",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L1084-L1087 | train | 199,960 |
jwkvam/bowtie | bowtie/_app.py | Scheduler.run | def run(self):
"""Invoke the function repeatedly on a timer."""
ret = eventlet.spawn(self.context(self.func))
eventlet.sleep(self.seconds)
try:
ret.wait()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
self.thread = eventlet.spawn(self.run) | python | def run(self):
"""Invoke the function repeatedly on a timer."""
ret = eventlet.spawn(self.context(self.func))
eventlet.sleep(self.seconds)
try:
ret.wait()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
self.thread = eventlet.spawn(self.run) | [
"def",
"run",
"(",
"self",
")",
":",
"ret",
"=",
"eventlet",
".",
"spawn",
"(",
"self",
".",
"context",
"(",
"self",
".",
"func",
")",
")",
"eventlet",
".",
"sleep",
"(",
"self",
".",
"seconds",
")",
"try",
":",
"ret",
".",
"wait",
"(",
")",
"e... | Invoke the function repeatedly on a timer. | [
"Invoke",
"the",
"function",
"repeatedly",
"on",
"a",
"timer",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L67-L75 | train | 199,961 |
jwkvam/bowtie | bowtie/_app.py | Span.overlap | def overlap(self, other: 'Span'):
"""Detect if two spans overlap."""
return not (
# if one rectangle is left of other
other.column_end <= self.column_start
or self.column_end <= other.column_start
# if one rectangle is above other
or other.row_end <= self.row_start
or self.row_end <= other.row_start
) | python | def overlap(self, other: 'Span'):
"""Detect if two spans overlap."""
return not (
# if one rectangle is left of other
other.column_end <= self.column_start
or self.column_end <= other.column_start
# if one rectangle is above other
or other.row_end <= self.row_start
or self.row_end <= other.row_start
) | [
"def",
"overlap",
"(",
"self",
",",
"other",
":",
"'Span'",
")",
":",
"return",
"not",
"(",
"# if one rectangle is left of other",
"other",
".",
"column_end",
"<=",
"self",
".",
"column_start",
"or",
"self",
".",
"column_end",
"<=",
"other",
".",
"column_start... | Detect if two spans overlap. | [
"Detect",
"if",
"two",
"spans",
"overlap",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L149-L158 | train | 199,962 |
jwkvam/bowtie | bowtie/_app.py | Span.cells | def cells(self) -> Generator[Tuple[int, int], None, None]:
"""Generate cells in span."""
yield from itertools.product(
range(self.row_start, self.row_end),
range(self.column_start, self.column_end)
) | python | def cells(self) -> Generator[Tuple[int, int], None, None]:
"""Generate cells in span."""
yield from itertools.product(
range(self.row_start, self.row_end),
range(self.column_start, self.column_end)
) | [
"def",
"cells",
"(",
"self",
")",
"->",
"Generator",
"[",
"Tuple",
"[",
"int",
",",
"int",
"]",
",",
"None",
",",
"None",
"]",
":",
"yield",
"from",
"itertools",
".",
"product",
"(",
"range",
"(",
"self",
".",
"row_start",
",",
"self",
".",
"row_en... | Generate cells in span. | [
"Generate",
"cells",
"in",
"span",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L161-L166 | train | 199,963 |
jwkvam/bowtie | bowtie/_app.py | Size.pixels | def pixels(self, value: float) -> 'Size':
"""Set the size in pixels."""
raise_not_number(value)
self.maximum = '{}px'.format(value)
return self | python | def pixels(self, value: float) -> 'Size':
"""Set the size in pixels."""
raise_not_number(value)
self.maximum = '{}px'.format(value)
return self | [
"def",
"pixels",
"(",
"self",
",",
"value",
":",
"float",
")",
"->",
"'Size'",
":",
"raise_not_number",
"(",
"value",
")",
"self",
".",
"maximum",
"=",
"'{}px'",
".",
"format",
"(",
"value",
")",
"return",
"self"
] | Set the size in pixels. | [
"Set",
"the",
"size",
"in",
"pixels",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L210-L214 | train | 199,964 |
jwkvam/bowtie | bowtie/_app.py | Size.min_pixels | def min_pixels(self, value: float) -> 'Size':
"""Set the minimum size in pixels."""
raise_not_number(value)
self.minimum = '{}px'.format(value)
return self | python | def min_pixels(self, value: float) -> 'Size':
"""Set the minimum size in pixels."""
raise_not_number(value)
self.minimum = '{}px'.format(value)
return self | [
"def",
"min_pixels",
"(",
"self",
",",
"value",
":",
"float",
")",
"->",
"'Size'",
":",
"raise_not_number",
"(",
"value",
")",
"self",
".",
"minimum",
"=",
"'{}px'",
".",
"format",
"(",
"value",
")",
"return",
"self"
] | Set the minimum size in pixels. | [
"Set",
"the",
"minimum",
"size",
"in",
"pixels",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L216-L220 | train | 199,965 |
jwkvam/bowtie | bowtie/_app.py | Size.ems | def ems(self, value: float) -> 'Size':
"""Set the size in ems."""
raise_not_number(value)
self.maximum = '{}em'.format(value)
return self | python | def ems(self, value: float) -> 'Size':
"""Set the size in ems."""
raise_not_number(value)
self.maximum = '{}em'.format(value)
return self | [
"def",
"ems",
"(",
"self",
",",
"value",
":",
"float",
")",
"->",
"'Size'",
":",
"raise_not_number",
"(",
"value",
")",
"self",
".",
"maximum",
"=",
"'{}em'",
".",
"format",
"(",
"value",
")",
"return",
"self"
] | Set the size in ems. | [
"Set",
"the",
"size",
"in",
"ems",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L222-L226 | train | 199,966 |
jwkvam/bowtie | bowtie/_app.py | Size.min_ems | def min_ems(self, value: float) -> 'Size':
"""Set the minimum size in ems."""
raise_not_number(value)
self.minimum = '{}em'.format(value)
return self | python | def min_ems(self, value: float) -> 'Size':
"""Set the minimum size in ems."""
raise_not_number(value)
self.minimum = '{}em'.format(value)
return self | [
"def",
"min_ems",
"(",
"self",
",",
"value",
":",
"float",
")",
"->",
"'Size'",
":",
"raise_not_number",
"(",
"value",
")",
"self",
".",
"minimum",
"=",
"'{}em'",
".",
"format",
"(",
"value",
")",
"return",
"self"
] | Set the minimum size in ems. | [
"Set",
"the",
"minimum",
"size",
"in",
"ems",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L228-L232 | train | 199,967 |
jwkvam/bowtie | bowtie/_app.py | Size.fraction | def fraction(self, value: float) -> 'Size':
"""Set the fraction of free space to use."""
raise_not_number(value)
self.maximum = '{}fr'.format(value)
return self | python | def fraction(self, value: float) -> 'Size':
"""Set the fraction of free space to use."""
raise_not_number(value)
self.maximum = '{}fr'.format(value)
return self | [
"def",
"fraction",
"(",
"self",
",",
"value",
":",
"float",
")",
"->",
"'Size'",
":",
"raise_not_number",
"(",
"value",
")",
"self",
".",
"maximum",
"=",
"'{}fr'",
".",
"format",
"(",
"value",
")",
"return",
"self"
] | Set the fraction of free space to use. | [
"Set",
"the",
"fraction",
"of",
"free",
"space",
"to",
"use",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L234-L238 | train | 199,968 |
jwkvam/bowtie | bowtie/_app.py | Size.percent | def percent(self, value: float) -> 'Size':
"""Set the percentage of free space to use."""
raise_not_number(value)
self.maximum = '{}%'.format(value)
return self | python | def percent(self, value: float) -> 'Size':
"""Set the percentage of free space to use."""
raise_not_number(value)
self.maximum = '{}%'.format(value)
return self | [
"def",
"percent",
"(",
"self",
",",
"value",
":",
"float",
")",
"->",
"'Size'",
":",
"raise_not_number",
"(",
"value",
")",
"self",
".",
"maximum",
"=",
"'{}%'",
".",
"format",
"(",
"value",
")",
"return",
"self"
] | Set the percentage of free space to use. | [
"Set",
"the",
"percentage",
"of",
"free",
"space",
"to",
"use",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L240-L244 | train | 199,969 |
jwkvam/bowtie | bowtie/_app.py | Size.min_percent | def min_percent(self, value: float) -> 'Size':
"""Set the minimum percentage of free space to use."""
raise_not_number(value)
self.minimum = '{}%'.format(value)
return self | python | def min_percent(self, value: float) -> 'Size':
"""Set the minimum percentage of free space to use."""
raise_not_number(value)
self.minimum = '{}%'.format(value)
return self | [
"def",
"min_percent",
"(",
"self",
",",
"value",
":",
"float",
")",
"->",
"'Size'",
":",
"raise_not_number",
"(",
"value",
")",
"self",
".",
"minimum",
"=",
"'{}%'",
".",
"format",
"(",
"value",
")",
"return",
"self"
] | Set the minimum percentage of free space to use. | [
"Set",
"the",
"minimum",
"percentage",
"of",
"free",
"space",
"to",
"use",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L246-L250 | train | 199,970 |
jwkvam/bowtie | bowtie/_app.py | Gap.pixels | def pixels(self, value: int) -> 'Gap':
"""Set the margin in pixels."""
raise_not_number(value)
self.gap = '{}px'.format(value)
return self | python | def pixels(self, value: int) -> 'Gap':
"""Set the margin in pixels."""
raise_not_number(value)
self.gap = '{}px'.format(value)
return self | [
"def",
"pixels",
"(",
"self",
",",
"value",
":",
"int",
")",
"->",
"'Gap'",
":",
"raise_not_number",
"(",
"value",
")",
"self",
".",
"gap",
"=",
"'{}px'",
".",
"format",
"(",
"value",
")",
"return",
"self"
] | Set the margin in pixels. | [
"Set",
"the",
"margin",
"in",
"pixels",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L279-L283 | train | 199,971 |
jwkvam/bowtie | bowtie/_app.py | Gap.ems | def ems(self, value: int) -> 'Gap':
"""Set the margin in ems."""
raise_not_number(value)
self.gap = '{}em'.format(value)
return self | python | def ems(self, value: int) -> 'Gap':
"""Set the margin in ems."""
raise_not_number(value)
self.gap = '{}em'.format(value)
return self | [
"def",
"ems",
"(",
"self",
",",
"value",
":",
"int",
")",
"->",
"'Gap'",
":",
"raise_not_number",
"(",
"value",
")",
"self",
".",
"gap",
"=",
"'{}em'",
".",
"format",
"(",
"value",
")",
"return",
"self"
] | Set the margin in ems. | [
"Set",
"the",
"margin",
"in",
"ems",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L285-L289 | train | 199,972 |
jwkvam/bowtie | bowtie/_app.py | Gap.percent | def percent(self, value) -> 'Gap':
"""Set the margin as a percentage."""
raise_not_number(value)
self.gap = '{}%'.format(value)
return self | python | def percent(self, value) -> 'Gap':
"""Set the margin as a percentage."""
raise_not_number(value)
self.gap = '{}%'.format(value)
return self | [
"def",
"percent",
"(",
"self",
",",
"value",
")",
"->",
"'Gap'",
":",
"raise_not_number",
"(",
"value",
")",
"self",
".",
"gap",
"=",
"'{}%'",
".",
"format",
"(",
"value",
")",
"return",
"self"
] | Set the margin as a percentage. | [
"Set",
"the",
"margin",
"as",
"a",
"percentage",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L291-L295 | train | 199,973 |
jwkvam/bowtie | bowtie/_app.py | View.add | def add(self, component: Union[Component, Sequence[Component]]) -> None:
"""Add a widget to the grid in the next available cell.
Searches over columns then rows for available cells.
Parameters
----------
components : bowtie._Component
A Bowtie widget instance.
"""
try:
self[Span(*self._available_cell())] = component
except NoUnusedCellsError:
span = list(self._spans.keys())[-1]
self._spans[span] += component | python | def add(self, component: Union[Component, Sequence[Component]]) -> None:
"""Add a widget to the grid in the next available cell.
Searches over columns then rows for available cells.
Parameters
----------
components : bowtie._Component
A Bowtie widget instance.
"""
try:
self[Span(*self._available_cell())] = component
except NoUnusedCellsError:
span = list(self._spans.keys())[-1]
self._spans[span] += component | [
"def",
"add",
"(",
"self",
",",
"component",
":",
"Union",
"[",
"Component",
",",
"Sequence",
"[",
"Component",
"]",
"]",
")",
"->",
"None",
":",
"try",
":",
"self",
"[",
"Span",
"(",
"*",
"self",
".",
"_available_cell",
"(",
")",
")",
"]",
"=",
... | Add a widget to the grid in the next available cell.
Searches over columns then rows for available cells.
Parameters
----------
components : bowtie._Component
A Bowtie widget instance. | [
"Add",
"a",
"widget",
"to",
"the",
"grid",
"in",
"the",
"next",
"available",
"cell",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L526-L541 | train | 199,974 |
jwkvam/bowtie | bowtie/_app.py | View._available_cell | def _available_cell(self) -> Tuple[int, int]:
"""Find next available cell first by row then column.
First, construct a set containing all cells.
Then iterate over the spans and remove occupied cells.
"""
cells = set(itertools.product(range(len(self.rows)), range(len(self.columns))))
for span in self._spans:
for cell in span.cells:
cells.remove(cell)
if not cells:
raise NoUnusedCellsError('No available cells')
return min(cells) | python | def _available_cell(self) -> Tuple[int, int]:
"""Find next available cell first by row then column.
First, construct a set containing all cells.
Then iterate over the spans and remove occupied cells.
"""
cells = set(itertools.product(range(len(self.rows)), range(len(self.columns))))
for span in self._spans:
for cell in span.cells:
cells.remove(cell)
if not cells:
raise NoUnusedCellsError('No available cells')
return min(cells) | [
"def",
"_available_cell",
"(",
"self",
")",
"->",
"Tuple",
"[",
"int",
",",
"int",
"]",
":",
"cells",
"=",
"set",
"(",
"itertools",
".",
"product",
"(",
"range",
"(",
"len",
"(",
"self",
".",
"rows",
")",
")",
",",
"range",
"(",
"len",
"(",
"self... | Find next available cell first by row then column.
First, construct a set containing all cells.
Then iterate over the spans and remove occupied cells. | [
"Find",
"next",
"available",
"cell",
"first",
"by",
"row",
"then",
"column",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L543-L556 | train | 199,975 |
jwkvam/bowtie | bowtie/_app.py | View.add_sidebar | def add_sidebar(self, component: Component) -> None:
"""Add a widget to the sidebar.
Parameters
----------
component : bowtie._Component
Add this component to the sidebar, it will be appended to the end.
"""
if not self.sidebar:
raise NoSidebarError('Set `sidebar=True` if you want to use the sidebar.')
if not isinstance(component, Component):
raise ValueError('component must be Component type, found {}'.format(component))
# self._track_widget(widget)
self._controllers.append(component) | python | def add_sidebar(self, component: Component) -> None:
"""Add a widget to the sidebar.
Parameters
----------
component : bowtie._Component
Add this component to the sidebar, it will be appended to the end.
"""
if not self.sidebar:
raise NoSidebarError('Set `sidebar=True` if you want to use the sidebar.')
if not isinstance(component, Component):
raise ValueError('component must be Component type, found {}'.format(component))
# self._track_widget(widget)
self._controllers.append(component) | [
"def",
"add_sidebar",
"(",
"self",
",",
"component",
":",
"Component",
")",
"->",
"None",
":",
"if",
"not",
"self",
".",
"sidebar",
":",
"raise",
"NoSidebarError",
"(",
"'Set `sidebar=True` if you want to use the sidebar.'",
")",
"if",
"not",
"isinstance",
"(",
... | Add a widget to the sidebar.
Parameters
----------
component : bowtie._Component
Add this component to the sidebar, it will be appended to the end. | [
"Add",
"a",
"widget",
"to",
"the",
"sidebar",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L558-L573 | train | 199,976 |
jwkvam/bowtie | bowtie/_app.py | App.add_route | def add_route(self, view: View, path: str, exact: bool = True) -> None:
"""Add a view to the app.
Parameters
----------
view : View
path : str
exact : bool, optional
"""
if path[0] != '/':
path = '/' + path
for route in self._routes:
assert path != route.path, 'Cannot use the same path twice'
self._routes.append(Route(view=view, path=path, exact=exact))
self.app.add_url_rule(
path, path[1:], lambda: render_template('bowtie.html', title=self.title)
) | python | def add_route(self, view: View, path: str, exact: bool = True) -> None:
"""Add a view to the app.
Parameters
----------
view : View
path : str
exact : bool, optional
"""
if path[0] != '/':
path = '/' + path
for route in self._routes:
assert path != route.path, 'Cannot use the same path twice'
self._routes.append(Route(view=view, path=path, exact=exact))
self.app.add_url_rule(
path, path[1:], lambda: render_template('bowtie.html', title=self.title)
) | [
"def",
"add_route",
"(",
"self",
",",
"view",
":",
"View",
",",
"path",
":",
"str",
",",
"exact",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"if",
"path",
"[",
"0",
"]",
"!=",
"'/'",
":",
"path",
"=",
"'/'",
"+",
"path",
"for",
"route",
... | Add a view to the app.
Parameters
----------
view : View
path : str
exact : bool, optional | [
"Add",
"a",
"view",
"to",
"the",
"app",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L720-L738 | train | 199,977 |
jwkvam/bowtie | bowtie/_app.py | App.subscribe | def subscribe(self, *events: Union[Event, Pager]) -> Callable:
"""Call a function in response to an event.
If more than one event is given, `func` will be given
as many arguments as there are events.
If the pager calls notify, the decorated function will be called.
Parameters
----------
*event : event or pager
Bowtie event, must have at least one.
Examples
--------
Subscribing a function to multiple events.
>>> from bowtie.control import Dropdown, Slider
>>> app = App()
>>> dd = Dropdown()
>>> slide = Slider()
>>> @app.subscribe(dd.on_change, slide.on_change)
... def callback(dd_item, slide_value):
... pass
>>> @app.subscribe(dd.on_change)
... @app.subscribe(slide.on_change)
... def callback2(value):
... pass
Using the pager to run a callback function.
>>> from bowtie.pager import Pager
>>> app = App()
>>> pager = Pager()
>>> @app.subscribe(pager)
... def callback():
... pass
>>> def scheduledtask():
... pager.notify()
"""
try:
first_event = events[0]
except IndexError:
raise IndexError('Must subscribe to at least one event.')
if len(events) != len(set(events)):
raise ValueError(
'Subscribed to the same event multiple times. All events must be unique.'
)
if len(events) > 1:
# check if we are using any non stateful events
for event in events:
if isinstance(event, Pager):
raise NotStatefulEvent('Pagers must be subscribed by itself.')
if event.getter is None:
raise NotStatefulEvent(
f'{event.uuid}.on_{event.name} is not a stateful event. '
'It must be used alone.'
)
def decorator(func: Callable) -> Callable:
"""Handle three types of events: pages, uploads, and normal events."""
if isinstance(first_event, Pager):
self._pages[first_event] = func
elif first_event.name == 'upload':
if first_event.uuid in self._uploads:
warnings.warn(
('Overwriting function "{func1}" with function '
'"{func2}" for upload object "{obj}".').format(
func1=self._uploads[first_event.uuid],
func2=func.__name__,
obj=COMPONENT_REGISTRY[first_event.uuid]
), Warning)
self._uploads[first_event.uuid] = func
else:
for event in events:
# need to have `events` here to maintain order of arguments
# not sure how to deal with mypy typing errors on events so ignoring
self._subscriptions[event].append((events, func)) # type: ignore
return func
return decorator | python | def subscribe(self, *events: Union[Event, Pager]) -> Callable:
"""Call a function in response to an event.
If more than one event is given, `func` will be given
as many arguments as there are events.
If the pager calls notify, the decorated function will be called.
Parameters
----------
*event : event or pager
Bowtie event, must have at least one.
Examples
--------
Subscribing a function to multiple events.
>>> from bowtie.control import Dropdown, Slider
>>> app = App()
>>> dd = Dropdown()
>>> slide = Slider()
>>> @app.subscribe(dd.on_change, slide.on_change)
... def callback(dd_item, slide_value):
... pass
>>> @app.subscribe(dd.on_change)
... @app.subscribe(slide.on_change)
... def callback2(value):
... pass
Using the pager to run a callback function.
>>> from bowtie.pager import Pager
>>> app = App()
>>> pager = Pager()
>>> @app.subscribe(pager)
... def callback():
... pass
>>> def scheduledtask():
... pager.notify()
"""
try:
first_event = events[0]
except IndexError:
raise IndexError('Must subscribe to at least one event.')
if len(events) != len(set(events)):
raise ValueError(
'Subscribed to the same event multiple times. All events must be unique.'
)
if len(events) > 1:
# check if we are using any non stateful events
for event in events:
if isinstance(event, Pager):
raise NotStatefulEvent('Pagers must be subscribed by itself.')
if event.getter is None:
raise NotStatefulEvent(
f'{event.uuid}.on_{event.name} is not a stateful event. '
'It must be used alone.'
)
def decorator(func: Callable) -> Callable:
"""Handle three types of events: pages, uploads, and normal events."""
if isinstance(first_event, Pager):
self._pages[first_event] = func
elif first_event.name == 'upload':
if first_event.uuid in self._uploads:
warnings.warn(
('Overwriting function "{func1}" with function '
'"{func2}" for upload object "{obj}".').format(
func1=self._uploads[first_event.uuid],
func2=func.__name__,
obj=COMPONENT_REGISTRY[first_event.uuid]
), Warning)
self._uploads[first_event.uuid] = func
else:
for event in events:
# need to have `events` here to maintain order of arguments
# not sure how to deal with mypy typing errors on events so ignoring
self._subscriptions[event].append((events, func)) # type: ignore
return func
return decorator | [
"def",
"subscribe",
"(",
"self",
",",
"*",
"events",
":",
"Union",
"[",
"Event",
",",
"Pager",
"]",
")",
"->",
"Callable",
":",
"try",
":",
"first_event",
"=",
"events",
"[",
"0",
"]",
"except",
"IndexError",
":",
"raise",
"IndexError",
"(",
"'Must sub... | Call a function in response to an event.
If more than one event is given, `func` will be given
as many arguments as there are events.
If the pager calls notify, the decorated function will be called.
Parameters
----------
*event : event or pager
Bowtie event, must have at least one.
Examples
--------
Subscribing a function to multiple events.
>>> from bowtie.control import Dropdown, Slider
>>> app = App()
>>> dd = Dropdown()
>>> slide = Slider()
>>> @app.subscribe(dd.on_change, slide.on_change)
... def callback(dd_item, slide_value):
... pass
>>> @app.subscribe(dd.on_change)
... @app.subscribe(slide.on_change)
... def callback2(value):
... pass
Using the pager to run a callback function.
>>> from bowtie.pager import Pager
>>> app = App()
>>> pager = Pager()
>>> @app.subscribe(pager)
... def callback():
... pass
>>> def scheduledtask():
... pager.notify() | [
"Call",
"a",
"function",
"in",
"response",
"to",
"an",
"event",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L740-L822 | train | 199,978 |
jwkvam/bowtie | bowtie/_app.py | App.schedule | def schedule(self, seconds: float):
"""Call a function periodically.
Parameters
----------
seconds : float
Minimum interval of function calls.
func : callable
Function to be called.
"""
def wrap(func: Callable):
self._schedules.append(Scheduler(self.app, seconds, func))
return wrap | python | def schedule(self, seconds: float):
"""Call a function periodically.
Parameters
----------
seconds : float
Minimum interval of function calls.
func : callable
Function to be called.
"""
def wrap(func: Callable):
self._schedules.append(Scheduler(self.app, seconds, func))
return wrap | [
"def",
"schedule",
"(",
"self",
",",
"seconds",
":",
"float",
")",
":",
"def",
"wrap",
"(",
"func",
":",
"Callable",
")",
":",
"self",
".",
"_schedules",
".",
"append",
"(",
"Scheduler",
"(",
"self",
".",
"app",
",",
"seconds",
",",
"func",
")",
")... | Call a function periodically.
Parameters
----------
seconds : float
Minimum interval of function calls.
func : callable
Function to be called. | [
"Call",
"a",
"function",
"periodically",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L836-L849 | train | 199,979 |
jwkvam/bowtie | bowtie/_app.py | App._build | def _build(self, notebook: Optional[str] = None) -> None:
"""Compile the Bowtie application."""
if node_version() < _MIN_NODE_VERSION:
raise WebpackError(
f'Webpack requires at least version {_MIN_NODE_VERSION} of Node, '
f'found version {node_version}.'
)
packages = self._write_templates()
for filename in ['package.json', 'webpack.prod.js', 'webpack.dev.js']:
if not (self._build_dir / filename).is_file():
sourcefile = self._package_dir / 'src' / filename
shutil.copy(sourcefile, self._build_dir)
if self._run(['yarn', '--ignore-engines', 'install'], notebook=notebook) > 1:
raise YarnError('Error installing node packages')
if packages:
installed = self._installed_packages()
new_packages = [x for x in packages if x.split('@')[0] not in installed]
if new_packages:
retval = self._run(
['yarn', '--ignore-engines', 'add'] + new_packages, notebook=notebook
)
if retval > 1:
raise YarnError('Error installing node packages')
elif retval == 1:
print('Yarn error but trying to continue build')
retval = self._run([_WEBPACK, '--config', 'webpack.dev.js'], notebook=notebook)
if retval != 0:
raise WebpackError('Error building with webpack') | python | def _build(self, notebook: Optional[str] = None) -> None:
"""Compile the Bowtie application."""
if node_version() < _MIN_NODE_VERSION:
raise WebpackError(
f'Webpack requires at least version {_MIN_NODE_VERSION} of Node, '
f'found version {node_version}.'
)
packages = self._write_templates()
for filename in ['package.json', 'webpack.prod.js', 'webpack.dev.js']:
if not (self._build_dir / filename).is_file():
sourcefile = self._package_dir / 'src' / filename
shutil.copy(sourcefile, self._build_dir)
if self._run(['yarn', '--ignore-engines', 'install'], notebook=notebook) > 1:
raise YarnError('Error installing node packages')
if packages:
installed = self._installed_packages()
new_packages = [x for x in packages if x.split('@')[0] not in installed]
if new_packages:
retval = self._run(
['yarn', '--ignore-engines', 'add'] + new_packages, notebook=notebook
)
if retval > 1:
raise YarnError('Error installing node packages')
elif retval == 1:
print('Yarn error but trying to continue build')
retval = self._run([_WEBPACK, '--config', 'webpack.dev.js'], notebook=notebook)
if retval != 0:
raise WebpackError('Error building with webpack') | [
"def",
"_build",
"(",
"self",
",",
"notebook",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"None",
":",
"if",
"node_version",
"(",
")",
"<",
"_MIN_NODE_VERSION",
":",
"raise",
"WebpackError",
"(",
"f'Webpack requires at least version {_MIN_NODE_VER... | Compile the Bowtie application. | [
"Compile",
"the",
"Bowtie",
"application",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L917-L949 | train | 199,980 |
jwkvam/bowtie | bowtie/_app.py | App._installed_packages | def _installed_packages(self) -> Generator[str, None, None]:
"""Extract installed packages as list from `package.json`."""
with (self._build_dir / 'package.json').open('r') as f:
packages = json.load(f)
yield from packages['dependencies'].keys() | python | def _installed_packages(self) -> Generator[str, None, None]:
"""Extract installed packages as list from `package.json`."""
with (self._build_dir / 'package.json').open('r') as f:
packages = json.load(f)
yield from packages['dependencies'].keys() | [
"def",
"_installed_packages",
"(",
"self",
")",
"->",
"Generator",
"[",
"str",
",",
"None",
",",
"None",
"]",
":",
"with",
"(",
"self",
".",
"_build_dir",
"/",
"'package.json'",
")",
".",
"open",
"(",
"'r'",
")",
"as",
"f",
":",
"packages",
"=",
"jso... | Extract installed packages as list from `package.json`. | [
"Extract",
"installed",
"packages",
"as",
"list",
"from",
"package",
".",
"json",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L1059-L1063 | train | 199,981 |
jwkvam/bowtie | bowtie/_app.py | App._create_jspath | def _create_jspath(self) -> Path:
"""Create the source directory for the build."""
src = self._build_dir / 'bowtiejs'
os.makedirs(src, exist_ok=True)
return src | python | def _create_jspath(self) -> Path:
"""Create the source directory for the build."""
src = self._build_dir / 'bowtiejs'
os.makedirs(src, exist_ok=True)
return src | [
"def",
"_create_jspath",
"(",
"self",
")",
"->",
"Path",
":",
"src",
"=",
"self",
".",
"_build_dir",
"/",
"'bowtiejs'",
"os",
".",
"makedirs",
"(",
"src",
",",
"exist_ok",
"=",
"True",
")",
"return",
"src"
] | Create the source directory for the build. | [
"Create",
"the",
"source",
"directory",
"for",
"the",
"build",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L1065-L1069 | train | 199,982 |
jwkvam/bowtie | bowtie/_app.py | App._run | def _run(self, command: List[str], notebook: Optional[str] = None) -> int:
"""Run command from terminal and notebook and view output from subprocess."""
if notebook is None:
return Popen(command, cwd=self._build_dir).wait()
cmd = Popen(command, cwd=self._build_dir, stdout=PIPE, stderr=STDOUT)
while True:
line = cmd.stdout.readline()
if line == b'' and cmd.poll() is not None:
return cmd.poll()
print(line.decode('utf-8'), end='')
raise Exception() | python | def _run(self, command: List[str], notebook: Optional[str] = None) -> int:
"""Run command from terminal and notebook and view output from subprocess."""
if notebook is None:
return Popen(command, cwd=self._build_dir).wait()
cmd = Popen(command, cwd=self._build_dir, stdout=PIPE, stderr=STDOUT)
while True:
line = cmd.stdout.readline()
if line == b'' and cmd.poll() is not None:
return cmd.poll()
print(line.decode('utf-8'), end='')
raise Exception() | [
"def",
"_run",
"(",
"self",
",",
"command",
":",
"List",
"[",
"str",
"]",
",",
"notebook",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"int",
":",
"if",
"notebook",
"is",
"None",
":",
"return",
"Popen",
"(",
"command",
",",
"cwd",
"... | Run command from terminal and notebook and view output from subprocess. | [
"Run",
"command",
"from",
"terminal",
"and",
"notebook",
"and",
"view",
"output",
"from",
"subprocess",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L1071-L1081 | train | 199,983 |
jwkvam/bowtie | bowtie/auth.py | BasicAuth.before_request | def before_request(self) -> Optional[Response]:
"""Determine if a user is allowed to view this route."""
auth = request.authorization
if not auth or not self._check_auth(auth.username, auth.password):
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'}
)
session['logged_in'] = auth.username
# pylint wants this return statement
return None | python | def before_request(self) -> Optional[Response]:
"""Determine if a user is allowed to view this route."""
auth = request.authorization
if not auth or not self._check_auth(auth.username, auth.password):
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'}
)
session['logged_in'] = auth.username
# pylint wants this return statement
return None | [
"def",
"before_request",
"(",
"self",
")",
"->",
"Optional",
"[",
"Response",
"]",
":",
"auth",
"=",
"request",
".",
"authorization",
"if",
"not",
"auth",
"or",
"not",
"self",
".",
"_check_auth",
"(",
"auth",
".",
"username",
",",
"auth",
".",
"password"... | Determine if a user is allowed to view this route. | [
"Determine",
"if",
"a",
"user",
"is",
"allowed",
"to",
"view",
"this",
"route",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/auth.py#L85-L96 | train | 199,984 |
jwkvam/bowtie | bowtie/_magic.py | load_notebook | def load_notebook(fullname: str):
"""Import a notebook as a module."""
shell = InteractiveShell.instance()
path = fullname
# load the notebook object
with open(path, 'r', encoding='utf-8') as f:
notebook = read(f, 4)
# create the module and add it to sys.modules
mod = types.ModuleType(fullname)
mod.__file__ = path
# mod.__loader__ = self
mod.__dict__['get_ipython'] = get_ipython
sys.modules[fullname] = mod
# extra work to ensure that magics that would affect the user_ns
# actually affect the notebook module's ns
save_user_ns = shell.user_ns
shell.user_ns = mod.__dict__
try:
for cell in notebook.cells:
if cell.cell_type == 'code':
try:
# only run valid python code
ast.parse(cell.source)
except SyntaxError:
continue
try:
# pylint: disable=exec-used
exec(cell.source, mod.__dict__)
except NameError:
print(cell.source)
raise
finally:
shell.user_ns = save_user_ns
return mod | python | def load_notebook(fullname: str):
"""Import a notebook as a module."""
shell = InteractiveShell.instance()
path = fullname
# load the notebook object
with open(path, 'r', encoding='utf-8') as f:
notebook = read(f, 4)
# create the module and add it to sys.modules
mod = types.ModuleType(fullname)
mod.__file__ = path
# mod.__loader__ = self
mod.__dict__['get_ipython'] = get_ipython
sys.modules[fullname] = mod
# extra work to ensure that magics that would affect the user_ns
# actually affect the notebook module's ns
save_user_ns = shell.user_ns
shell.user_ns = mod.__dict__
try:
for cell in notebook.cells:
if cell.cell_type == 'code':
try:
# only run valid python code
ast.parse(cell.source)
except SyntaxError:
continue
try:
# pylint: disable=exec-used
exec(cell.source, mod.__dict__)
except NameError:
print(cell.source)
raise
finally:
shell.user_ns = save_user_ns
return mod | [
"def",
"load_notebook",
"(",
"fullname",
":",
"str",
")",
":",
"shell",
"=",
"InteractiveShell",
".",
"instance",
"(",
")",
"path",
"=",
"fullname",
"# load the notebook object",
"with",
"open",
"(",
"path",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
... | Import a notebook as a module. | [
"Import",
"a",
"notebook",
"as",
"a",
"module",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_magic.py#L50-L87 | train | 199,985 |
jwkvam/bowtie | bowtie/feedback/message.py | _message | def _message(status, content):
"""Send message interface.
Parameters
----------
status : str
The type of message
content : str
"""
event = f'message.{status}'
if flask.has_request_context():
emit(event, dict(data=pack(content)))
else:
sio = flask.current_app.extensions['socketio']
sio.emit(event, dict(data=pack(content)))
eventlet.sleep() | python | def _message(status, content):
"""Send message interface.
Parameters
----------
status : str
The type of message
content : str
"""
event = f'message.{status}'
if flask.has_request_context():
emit(event, dict(data=pack(content)))
else:
sio = flask.current_app.extensions['socketio']
sio.emit(event, dict(data=pack(content)))
eventlet.sleep() | [
"def",
"_message",
"(",
"status",
",",
"content",
")",
":",
"event",
"=",
"f'message.{status}'",
"if",
"flask",
".",
"has_request_context",
"(",
")",
":",
"emit",
"(",
"event",
",",
"dict",
"(",
"data",
"=",
"pack",
"(",
"content",
")",
")",
")",
"else... | Send message interface.
Parameters
----------
status : str
The type of message
content : str | [
"Send",
"message",
"interface",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/feedback/message.py#L16-L32 | train | 199,986 |
jwkvam/bowtie | bowtie/visual.py | Table._make_columns | def _make_columns(columns: List[Union[int, str]]) -> List[Dict]:
"""Transform list of columns into AntTable format."""
return [dict(title=str(c),
dataIndex=str(c),
key=str(c))
for c in columns] | python | def _make_columns(columns: List[Union[int, str]]) -> List[Dict]:
"""Transform list of columns into AntTable format."""
return [dict(title=str(c),
dataIndex=str(c),
key=str(c))
for c in columns] | [
"def",
"_make_columns",
"(",
"columns",
":",
"List",
"[",
"Union",
"[",
"int",
",",
"str",
"]",
"]",
")",
"->",
"List",
"[",
"Dict",
"]",
":",
"return",
"[",
"dict",
"(",
"title",
"=",
"str",
"(",
"c",
")",
",",
"dataIndex",
"=",
"str",
"(",
"c... | Transform list of columns into AntTable format. | [
"Transform",
"list",
"of",
"columns",
"into",
"AntTable",
"format",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/visual.py#L67-L72 | train | 199,987 |
jwkvam/bowtie | bowtie/visual.py | Table._make_data | def _make_data(data) -> Tuple[List[Dict], List[Dict]]:
"""Transform table data into JSON."""
jsdata = []
for idx, row in data.iterrows():
row.index = row.index.astype(str)
rdict = row.to_dict()
rdict.update(dict(key=str(idx)))
jsdata.append(rdict)
return jsdata, Table._make_columns(data.columns) | python | def _make_data(data) -> Tuple[List[Dict], List[Dict]]:
"""Transform table data into JSON."""
jsdata = []
for idx, row in data.iterrows():
row.index = row.index.astype(str)
rdict = row.to_dict()
rdict.update(dict(key=str(idx)))
jsdata.append(rdict)
return jsdata, Table._make_columns(data.columns) | [
"def",
"_make_data",
"(",
"data",
")",
"->",
"Tuple",
"[",
"List",
"[",
"Dict",
"]",
",",
"List",
"[",
"Dict",
"]",
"]",
":",
"jsdata",
"=",
"[",
"]",
"for",
"idx",
",",
"row",
"in",
"data",
".",
"iterrows",
"(",
")",
":",
"row",
".",
"index",
... | Transform table data into JSON. | [
"Transform",
"table",
"data",
"into",
"JSON",
"."
] | c494850671ac805bf186fbf2bdb07d2a34ae876d | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/visual.py#L75-L84 | train | 199,988 |
CleanCut/green | green/loader.py | toParallelTargets | def toParallelTargets(suite, targets):
"""
Produce a list of targets which should be tested in parallel.
For the most part this will be a list of test modules. The exception is
when a dotted name representing something more granular than a module
was input (like an individal test case or test method)
"""
targets = filter(lambda x: x != '.', targets)
# First, convert the suite to a proto test list - proto tests nicely
# parse things like the fully dotted name of the test and the
# finest-grained module it belongs to, which simplifies our job.
proto_test_list = toProtoTestList(suite)
# Extract a list of the modules that all of the discovered tests are in
modules = set([x.module for x in proto_test_list])
# Get the list of user-specified targets that are NOT modules
non_module_targets = []
for target in targets:
if not list(filter(None, [target in x for x in modules])):
non_module_targets.append(target)
# Main loop -- iterating through all loaded test methods
parallel_targets = []
for test in proto_test_list:
found = False
for target in non_module_targets:
# target is a dotted name of either a test case or test method
# here test.dotted name is always a dotted name of a method
if (target in test.dotted_name):
if target not in parallel_targets:
# Explicitly specified targets get their own entry to
# run parallel to everything else
parallel_targets.append(target)
found = True
break
if found:
continue
# This test does not appear to be part of a specified target, so
# its entire module must have been discovered, so just add the
# whole module to the list if we haven't already.
if test.module not in parallel_targets:
parallel_targets.append(test.module)
return parallel_targets | python | def toParallelTargets(suite, targets):
"""
Produce a list of targets which should be tested in parallel.
For the most part this will be a list of test modules. The exception is
when a dotted name representing something more granular than a module
was input (like an individal test case or test method)
"""
targets = filter(lambda x: x != '.', targets)
# First, convert the suite to a proto test list - proto tests nicely
# parse things like the fully dotted name of the test and the
# finest-grained module it belongs to, which simplifies our job.
proto_test_list = toProtoTestList(suite)
# Extract a list of the modules that all of the discovered tests are in
modules = set([x.module for x in proto_test_list])
# Get the list of user-specified targets that are NOT modules
non_module_targets = []
for target in targets:
if not list(filter(None, [target in x for x in modules])):
non_module_targets.append(target)
# Main loop -- iterating through all loaded test methods
parallel_targets = []
for test in proto_test_list:
found = False
for target in non_module_targets:
# target is a dotted name of either a test case or test method
# here test.dotted name is always a dotted name of a method
if (target in test.dotted_name):
if target not in parallel_targets:
# Explicitly specified targets get their own entry to
# run parallel to everything else
parallel_targets.append(target)
found = True
break
if found:
continue
# This test does not appear to be part of a specified target, so
# its entire module must have been discovered, so just add the
# whole module to the list if we haven't already.
if test.module not in parallel_targets:
parallel_targets.append(test.module)
return parallel_targets | [
"def",
"toParallelTargets",
"(",
"suite",
",",
"targets",
")",
":",
"targets",
"=",
"filter",
"(",
"lambda",
"x",
":",
"x",
"!=",
"'.'",
",",
"targets",
")",
"# First, convert the suite to a proto test list - proto tests nicely",
"# parse things like the fully dotted name... | Produce a list of targets which should be tested in parallel.
For the most part this will be a list of test modules. The exception is
when a dotted name representing something more granular than a module
was input (like an individal test case or test method) | [
"Produce",
"a",
"list",
"of",
"targets",
"which",
"should",
"be",
"tested",
"in",
"parallel",
"."
] | 6434515302472363b7d10135be76ed8cd3934d80 | https://github.com/CleanCut/green/blob/6434515302472363b7d10135be76ed8cd3934d80/green/loader.py#L333-L375 | train | 199,989 |
CleanCut/green | green/config.py | getConfig | def getConfig(filepath=None): # pragma: no cover
"""
Get the Green config file settings.
All available config files are read. If settings are in multiple configs,
the last value encountered wins. Values specified on the command-line take
precedence over all config file settings.
Returns: A ConfigParser object.
"""
parser = configparser.ConfigParser()
filepaths = []
# Lowest priority goes first in the list
home = os.getenv("HOME")
if home:
default_filepath = os.path.join(home, ".green")
if os.path.isfile(default_filepath):
filepaths.append(default_filepath)
# Low priority
env_filepath = os.getenv("GREEN_CONFIG")
if env_filepath and os.path.isfile(env_filepath):
filepaths.append(env_filepath)
# Medium priority
for cfg_file in ("setup.cfg", ".green"):
cwd_filepath = os.path.join(os.getcwd(), cfg_file)
if os.path.isfile(cwd_filepath):
filepaths.append(cwd_filepath)
# High priority
if filepath and os.path.isfile(filepath):
filepaths.append(filepath)
if filepaths:
global files_loaded
files_loaded = filepaths
# Python 3 has parser.read_file(iterator) while Python2 has
# parser.readfp(obj_with_readline)
read_func = getattr(parser, 'read_file', getattr(parser, 'readfp'))
for filepath in filepaths:
# Users are expected to put a [green] section
# only if they use setup.cfg
if filepath.endswith('setup.cfg'):
with open(filepath) as f:
read_func(f)
else:
read_func(ConfigFile(filepath))
return parser | python | def getConfig(filepath=None): # pragma: no cover
"""
Get the Green config file settings.
All available config files are read. If settings are in multiple configs,
the last value encountered wins. Values specified on the command-line take
precedence over all config file settings.
Returns: A ConfigParser object.
"""
parser = configparser.ConfigParser()
filepaths = []
# Lowest priority goes first in the list
home = os.getenv("HOME")
if home:
default_filepath = os.path.join(home, ".green")
if os.path.isfile(default_filepath):
filepaths.append(default_filepath)
# Low priority
env_filepath = os.getenv("GREEN_CONFIG")
if env_filepath and os.path.isfile(env_filepath):
filepaths.append(env_filepath)
# Medium priority
for cfg_file in ("setup.cfg", ".green"):
cwd_filepath = os.path.join(os.getcwd(), cfg_file)
if os.path.isfile(cwd_filepath):
filepaths.append(cwd_filepath)
# High priority
if filepath and os.path.isfile(filepath):
filepaths.append(filepath)
if filepaths:
global files_loaded
files_loaded = filepaths
# Python 3 has parser.read_file(iterator) while Python2 has
# parser.readfp(obj_with_readline)
read_func = getattr(parser, 'read_file', getattr(parser, 'readfp'))
for filepath in filepaths:
# Users are expected to put a [green] section
# only if they use setup.cfg
if filepath.endswith('setup.cfg'):
with open(filepath) as f:
read_func(f)
else:
read_func(ConfigFile(filepath))
return parser | [
"def",
"getConfig",
"(",
"filepath",
"=",
"None",
")",
":",
"# pragma: no cover",
"parser",
"=",
"configparser",
".",
"ConfigParser",
"(",
")",
"filepaths",
"=",
"[",
"]",
"# Lowest priority goes first in the list",
"home",
"=",
"os",
".",
"getenv",
"(",
"\"HOME... | Get the Green config file settings.
All available config files are read. If settings are in multiple configs,
the last value encountered wins. Values specified on the command-line take
precedence over all config file settings.
Returns: A ConfigParser object. | [
"Get",
"the",
"Green",
"config",
"file",
"settings",
"."
] | 6434515302472363b7d10135be76ed8cd3934d80 | https://github.com/CleanCut/green/blob/6434515302472363b7d10135be76ed8cd3934d80/green/config.py#L366-L416 | train | 199,990 |
CleanCut/green | green/output.py | debug | def debug(message, level=1):
"""
So we can tune how much debug output we get when we turn it on.
"""
if level <= debug_level:
logging.debug(' ' * (level - 1) * 2 + str(message)) | python | def debug(message, level=1):
"""
So we can tune how much debug output we get when we turn it on.
"""
if level <= debug_level:
logging.debug(' ' * (level - 1) * 2 + str(message)) | [
"def",
"debug",
"(",
"message",
",",
"level",
"=",
"1",
")",
":",
"if",
"level",
"<=",
"debug_level",
":",
"logging",
".",
"debug",
"(",
"' '",
"*",
"(",
"level",
"-",
"1",
")",
"*",
"2",
"+",
"str",
"(",
"message",
")",
")"
] | So we can tune how much debug output we get when we turn it on. | [
"So",
"we",
"can",
"tune",
"how",
"much",
"debug",
"output",
"we",
"get",
"when",
"we",
"turn",
"it",
"on",
"."
] | 6434515302472363b7d10135be76ed8cd3934d80 | https://github.com/CleanCut/green/blob/6434515302472363b7d10135be76ed8cd3934d80/green/output.py#L22-L27 | train | 199,991 |
CleanCut/green | green/runner.py | run | def run(suite, stream, args, testing=False):
"""
Run the given test case or test suite with the specified arguments.
Any args.stream passed in will be wrapped in a GreenStream
"""
if not issubclass(GreenStream, type(stream)):
stream = GreenStream(stream, disable_windows=args.disable_windows,
disable_unidecode=args.disable_unidecode)
result = GreenTestResult(args, stream)
# Note: Catching SIGINT isn't supported by Python on windows (python
# "WONTFIX" issue 18040)
installHandler()
registerResult(result)
with warnings.catch_warnings():
if args.warnings: # pragma: no cover
# if args.warnings is set, use it to filter all the warnings
warnings.simplefilter(args.warnings)
# if the filter is 'default' or 'always', special-case the
# warnings from the deprecated unittest methods to show them
# no more than once per module, because they can be fairly
# noisy. The -Wd and -Wa flags can be used to bypass this
# only when args.warnings is None.
if args.warnings in ['default', 'always']:
warnings.filterwarnings('module',
category=DeprecationWarning,
message='Please use assert\w+ instead.')
result.startTestRun()
pool = LoggingDaemonlessPool(processes=args.processes or None,
initializer=InitializerOrFinalizer(args.initializer),
finalizer=InitializerOrFinalizer(args.finalizer))
manager = multiprocessing.Manager()
targets = [(target, manager.Queue())
for target in toParallelTargets(suite, args.targets)]
if targets:
for index, (target, queue) in enumerate(targets):
if args.run_coverage:
coverage_number = index + 1
else:
coverage_number = None
debug("Sending {} to runner {}".format(target, poolRunner))
pool.apply_async(
poolRunner,
(target, queue, coverage_number, args.omit_patterns, args.cov_config_file))
pool.close()
for target, queue in targets:
abort = False
while True:
msg = queue.get()
# Sentinel value, we're done
if not msg:
break
else:
# Result guaranteed after this message, we're
# currently waiting on this test, so print out
# the white 'processing...' version of the output
result.startTest(msg)
proto_test_result = queue.get()
result.addProtoTestResult(proto_test_result)
if result.shouldStop:
abort = True
break
if abort:
break
pool.close()
pool.join()
result.stopTestRun()
removeResult(result)
return result | python | def run(suite, stream, args, testing=False):
"""
Run the given test case or test suite with the specified arguments.
Any args.stream passed in will be wrapped in a GreenStream
"""
if not issubclass(GreenStream, type(stream)):
stream = GreenStream(stream, disable_windows=args.disable_windows,
disable_unidecode=args.disable_unidecode)
result = GreenTestResult(args, stream)
# Note: Catching SIGINT isn't supported by Python on windows (python
# "WONTFIX" issue 18040)
installHandler()
registerResult(result)
with warnings.catch_warnings():
if args.warnings: # pragma: no cover
# if args.warnings is set, use it to filter all the warnings
warnings.simplefilter(args.warnings)
# if the filter is 'default' or 'always', special-case the
# warnings from the deprecated unittest methods to show them
# no more than once per module, because they can be fairly
# noisy. The -Wd and -Wa flags can be used to bypass this
# only when args.warnings is None.
if args.warnings in ['default', 'always']:
warnings.filterwarnings('module',
category=DeprecationWarning,
message='Please use assert\w+ instead.')
result.startTestRun()
pool = LoggingDaemonlessPool(processes=args.processes or None,
initializer=InitializerOrFinalizer(args.initializer),
finalizer=InitializerOrFinalizer(args.finalizer))
manager = multiprocessing.Manager()
targets = [(target, manager.Queue())
for target in toParallelTargets(suite, args.targets)]
if targets:
for index, (target, queue) in enumerate(targets):
if args.run_coverage:
coverage_number = index + 1
else:
coverage_number = None
debug("Sending {} to runner {}".format(target, poolRunner))
pool.apply_async(
poolRunner,
(target, queue, coverage_number, args.omit_patterns, args.cov_config_file))
pool.close()
for target, queue in targets:
abort = False
while True:
msg = queue.get()
# Sentinel value, we're done
if not msg:
break
else:
# Result guaranteed after this message, we're
# currently waiting on this test, so print out
# the white 'processing...' version of the output
result.startTest(msg)
proto_test_result = queue.get()
result.addProtoTestResult(proto_test_result)
if result.shouldStop:
abort = True
break
if abort:
break
pool.close()
pool.join()
result.stopTestRun()
removeResult(result)
return result | [
"def",
"run",
"(",
"suite",
",",
"stream",
",",
"args",
",",
"testing",
"=",
"False",
")",
":",
"if",
"not",
"issubclass",
"(",
"GreenStream",
",",
"type",
"(",
"stream",
")",
")",
":",
"stream",
"=",
"GreenStream",
"(",
"stream",
",",
"disable_windows... | Run the given test case or test suite with the specified arguments.
Any args.stream passed in will be wrapped in a GreenStream | [
"Run",
"the",
"given",
"test",
"case",
"or",
"test",
"suite",
"with",
"the",
"specified",
"arguments",
"."
] | 6434515302472363b7d10135be76ed8cd3934d80 | https://github.com/CleanCut/green/blob/6434515302472363b7d10135be76ed8cd3934d80/green/runner.py#L52-L132 | train | 199,992 |
locuslab/qpth | qpth/solvers/pdipm/batch.py | solve_kkt_ir | def solve_kkt_ir(Q, D, G, A, rx, rs, rz, ry, niter=1):
"""Inefficient iterative refinement."""
nineq, nz, neq, nBatch = get_sizes(G, A)
eps = 1e-7
Q_tilde = Q + eps * torch.eye(nz).type_as(Q).repeat(nBatch, 1, 1)
D_tilde = D + eps * torch.eye(nineq).type_as(Q).repeat(nBatch, 1, 1)
dx, ds, dz, dy = factor_solve_kkt_reg(
Q_tilde, D_tilde, G, A, rx, rs, rz, ry, eps)
res = kkt_resid_reg(Q, D, G, A, eps,
dx, ds, dz, dy, rx, rs, rz, ry)
resx, ress, resz, resy = res
res = resx
for k in range(niter):
ddx, dds, ddz, ddy = factor_solve_kkt_reg(Q_tilde, D_tilde, G, A, -resx, -ress, -resz,
-resy if resy is not None else None,
eps)
dx, ds, dz, dy = [v + dv if v is not None else None
for v, dv in zip((dx, ds, dz, dy), (ddx, dds, ddz, ddy))]
res = kkt_resid_reg(Q, D, G, A, eps,
dx, ds, dz, dy, rx, rs, rz, ry)
resx, ress, resz, resy = res
# res = torch.cat(resx)
res = resx
return dx, ds, dz, dy | python | def solve_kkt_ir(Q, D, G, A, rx, rs, rz, ry, niter=1):
"""Inefficient iterative refinement."""
nineq, nz, neq, nBatch = get_sizes(G, A)
eps = 1e-7
Q_tilde = Q + eps * torch.eye(nz).type_as(Q).repeat(nBatch, 1, 1)
D_tilde = D + eps * torch.eye(nineq).type_as(Q).repeat(nBatch, 1, 1)
dx, ds, dz, dy = factor_solve_kkt_reg(
Q_tilde, D_tilde, G, A, rx, rs, rz, ry, eps)
res = kkt_resid_reg(Q, D, G, A, eps,
dx, ds, dz, dy, rx, rs, rz, ry)
resx, ress, resz, resy = res
res = resx
for k in range(niter):
ddx, dds, ddz, ddy = factor_solve_kkt_reg(Q_tilde, D_tilde, G, A, -resx, -ress, -resz,
-resy if resy is not None else None,
eps)
dx, ds, dz, dy = [v + dv if v is not None else None
for v, dv in zip((dx, ds, dz, dy), (ddx, dds, ddz, ddy))]
res = kkt_resid_reg(Q, D, G, A, eps,
dx, ds, dz, dy, rx, rs, rz, ry)
resx, ress, resz, resy = res
# res = torch.cat(resx)
res = resx
return dx, ds, dz, dy | [
"def",
"solve_kkt_ir",
"(",
"Q",
",",
"D",
",",
"G",
",",
"A",
",",
"rx",
",",
"rs",
",",
"rz",
",",
"ry",
",",
"niter",
"=",
"1",
")",
":",
"nineq",
",",
"nz",
",",
"neq",
",",
"nBatch",
"=",
"get_sizes",
"(",
"G",
",",
"A",
")",
"eps",
... | Inefficient iterative refinement. | [
"Inefficient",
"iterative",
"refinement",
"."
] | cc4d49808d6b4a8e87a8b75beffbb7f4ba81b2e5 | https://github.com/locuslab/qpth/blob/cc4d49808d6b4a8e87a8b75beffbb7f4ba81b2e5/qpth/solvers/pdipm/batch.py#L249-L275 | train | 199,993 |
pnpnpn/timeout-decorator | timeout_decorator/timeout_decorator.py | timeout | def timeout(seconds=None, use_signals=True, timeout_exception=TimeoutError, exception_message=None):
"""Add a timeout parameter to a function and return it.
:param seconds: optional time limit in seconds or fractions of a second. If None is passed, no timeout is applied.
This adds some flexibility to the usage: you can disable timing out depending on the settings.
:type seconds: float
:param use_signals: flag indicating whether signals should be used for timing function out or the multiprocessing
When using multiprocessing, timeout granularity is limited to 10ths of a second.
:type use_signals: bool
:raises: TimeoutError if time limit is reached
It is illegal to pass anything other than a function as the first
parameter. The function is wrapped and returned to the caller.
"""
def decorate(function):
if not seconds:
return function
if use_signals:
def handler(signum, frame):
_raise_exception(timeout_exception, exception_message)
@wraps(function)
def new_function(*args, **kwargs):
new_seconds = kwargs.pop('timeout', seconds)
if new_seconds:
old = signal.signal(signal.SIGALRM, handler)
signal.setitimer(signal.ITIMER_REAL, new_seconds)
try:
return function(*args, **kwargs)
finally:
if new_seconds:
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, old)
return new_function
else:
@wraps(function)
def new_function(*args, **kwargs):
timeout_wrapper = _Timeout(function, timeout_exception, exception_message, seconds)
return timeout_wrapper(*args, **kwargs)
return new_function
return decorate | python | def timeout(seconds=None, use_signals=True, timeout_exception=TimeoutError, exception_message=None):
"""Add a timeout parameter to a function and return it.
:param seconds: optional time limit in seconds or fractions of a second. If None is passed, no timeout is applied.
This adds some flexibility to the usage: you can disable timing out depending on the settings.
:type seconds: float
:param use_signals: flag indicating whether signals should be used for timing function out or the multiprocessing
When using multiprocessing, timeout granularity is limited to 10ths of a second.
:type use_signals: bool
:raises: TimeoutError if time limit is reached
It is illegal to pass anything other than a function as the first
parameter. The function is wrapped and returned to the caller.
"""
def decorate(function):
if not seconds:
return function
if use_signals:
def handler(signum, frame):
_raise_exception(timeout_exception, exception_message)
@wraps(function)
def new_function(*args, **kwargs):
new_seconds = kwargs.pop('timeout', seconds)
if new_seconds:
old = signal.signal(signal.SIGALRM, handler)
signal.setitimer(signal.ITIMER_REAL, new_seconds)
try:
return function(*args, **kwargs)
finally:
if new_seconds:
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, old)
return new_function
else:
@wraps(function)
def new_function(*args, **kwargs):
timeout_wrapper = _Timeout(function, timeout_exception, exception_message, seconds)
return timeout_wrapper(*args, **kwargs)
return new_function
return decorate | [
"def",
"timeout",
"(",
"seconds",
"=",
"None",
",",
"use_signals",
"=",
"True",
",",
"timeout_exception",
"=",
"TimeoutError",
",",
"exception_message",
"=",
"None",
")",
":",
"def",
"decorate",
"(",
"function",
")",
":",
"if",
"not",
"seconds",
":",
"retu... | Add a timeout parameter to a function and return it.
:param seconds: optional time limit in seconds or fractions of a second. If None is passed, no timeout is applied.
This adds some flexibility to the usage: you can disable timing out depending on the settings.
:type seconds: float
:param use_signals: flag indicating whether signals should be used for timing function out or the multiprocessing
When using multiprocessing, timeout granularity is limited to 10ths of a second.
:type use_signals: bool
:raises: TimeoutError if time limit is reached
It is illegal to pass anything other than a function as the first
parameter. The function is wrapped and returned to the caller. | [
"Add",
"a",
"timeout",
"parameter",
"to",
"a",
"function",
"and",
"return",
"it",
"."
] | 3c4bad7f66e1109ccdcb79c2cb62cd669b7666d8 | https://github.com/pnpnpn/timeout-decorator/blob/3c4bad7f66e1109ccdcb79c2cb62cd669b7666d8/timeout_decorator/timeout_decorator.py#L50-L94 | train | 199,994 |
pnpnpn/timeout-decorator | timeout_decorator/timeout_decorator.py | _target | def _target(queue, function, *args, **kwargs):
"""Run a function with arguments and return output via a queue.
This is a helper function for the Process created in _Timeout. It runs
the function with positional arguments and keyword arguments and then
returns the function's output by way of a queue. If an exception gets
raised, it is returned to _Timeout to be raised by the value property.
"""
try:
queue.put((True, function(*args, **kwargs)))
except:
queue.put((False, sys.exc_info()[1])) | python | def _target(queue, function, *args, **kwargs):
"""Run a function with arguments and return output via a queue.
This is a helper function for the Process created in _Timeout. It runs
the function with positional arguments and keyword arguments and then
returns the function's output by way of a queue. If an exception gets
raised, it is returned to _Timeout to be raised by the value property.
"""
try:
queue.put((True, function(*args, **kwargs)))
except:
queue.put((False, sys.exc_info()[1])) | [
"def",
"_target",
"(",
"queue",
",",
"function",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"queue",
".",
"put",
"(",
"(",
"True",
",",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
")",
"except",
":",
... | Run a function with arguments and return output via a queue.
This is a helper function for the Process created in _Timeout. It runs
the function with positional arguments and keyword arguments and then
returns the function's output by way of a queue. If an exception gets
raised, it is returned to _Timeout to be raised by the value property. | [
"Run",
"a",
"function",
"with",
"arguments",
"and",
"return",
"output",
"via",
"a",
"queue",
"."
] | 3c4bad7f66e1109ccdcb79c2cb62cd669b7666d8 | https://github.com/pnpnpn/timeout-decorator/blob/3c4bad7f66e1109ccdcb79c2cb62cd669b7666d8/timeout_decorator/timeout_decorator.py#L97-L108 | train | 199,995 |
pnpnpn/timeout-decorator | timeout_decorator/timeout_decorator.py | _Timeout.cancel | def cancel(self):
"""Terminate any possible execution of the embedded function."""
if self.__process.is_alive():
self.__process.terminate()
_raise_exception(self.__timeout_exception, self.__exception_message) | python | def cancel(self):
"""Terminate any possible execution of the embedded function."""
if self.__process.is_alive():
self.__process.terminate()
_raise_exception(self.__timeout_exception, self.__exception_message) | [
"def",
"cancel",
"(",
"self",
")",
":",
"if",
"self",
".",
"__process",
".",
"is_alive",
"(",
")",
":",
"self",
".",
"__process",
".",
"terminate",
"(",
")",
"_raise_exception",
"(",
"self",
".",
"__timeout_exception",
",",
"self",
".",
"__exception_messag... | Terminate any possible execution of the embedded function. | [
"Terminate",
"any",
"possible",
"execution",
"of",
"the",
"embedded",
"function",
"."
] | 3c4bad7f66e1109ccdcb79c2cb62cd669b7666d8 | https://github.com/pnpnpn/timeout-decorator/blob/3c4bad7f66e1109ccdcb79c2cb62cd669b7666d8/timeout_decorator/timeout_decorator.py#L152-L157 | train | 199,996 |
pnpnpn/timeout-decorator | timeout_decorator/timeout_decorator.py | _Timeout.ready | def ready(self):
"""Read-only property indicating status of "value" property."""
if self.__timeout < time.time():
self.cancel()
return self.__queue.full() and not self.__queue.empty() | python | def ready(self):
"""Read-only property indicating status of "value" property."""
if self.__timeout < time.time():
self.cancel()
return self.__queue.full() and not self.__queue.empty() | [
"def",
"ready",
"(",
"self",
")",
":",
"if",
"self",
".",
"__timeout",
"<",
"time",
".",
"time",
"(",
")",
":",
"self",
".",
"cancel",
"(",
")",
"return",
"self",
".",
"__queue",
".",
"full",
"(",
")",
"and",
"not",
"self",
".",
"__queue",
".",
... | Read-only property indicating status of "value" property. | [
"Read",
"-",
"only",
"property",
"indicating",
"status",
"of",
"value",
"property",
"."
] | 3c4bad7f66e1109ccdcb79c2cb62cd669b7666d8 | https://github.com/pnpnpn/timeout-decorator/blob/3c4bad7f66e1109ccdcb79c2cb62cd669b7666d8/timeout_decorator/timeout_decorator.py#L160-L164 | train | 199,997 |
pnpnpn/timeout-decorator | timeout_decorator/timeout_decorator.py | _Timeout.value | def value(self):
"""Read-only property containing data returned from function."""
if self.ready is True:
flag, load = self.__queue.get()
if flag:
return load
raise load | python | def value(self):
"""Read-only property containing data returned from function."""
if self.ready is True:
flag, load = self.__queue.get()
if flag:
return load
raise load | [
"def",
"value",
"(",
"self",
")",
":",
"if",
"self",
".",
"ready",
"is",
"True",
":",
"flag",
",",
"load",
"=",
"self",
".",
"__queue",
".",
"get",
"(",
")",
"if",
"flag",
":",
"return",
"load",
"raise",
"load"
] | Read-only property containing data returned from function. | [
"Read",
"-",
"only",
"property",
"containing",
"data",
"returned",
"from",
"function",
"."
] | 3c4bad7f66e1109ccdcb79c2cb62cd669b7666d8 | https://github.com/pnpnpn/timeout-decorator/blob/3c4bad7f66e1109ccdcb79c2cb62cd669b7666d8/timeout_decorator/timeout_decorator.py#L167-L173 | train | 199,998 |
mathandy/svgpathtools | svgpathtools/path.py | bbox2path | def bbox2path(xmin, xmax, ymin, ymax):
"""Converts a bounding box 4-tuple to a Path object."""
b = Line(xmin + 1j*ymin, xmax + 1j*ymin)
t = Line(xmin + 1j*ymax, xmax + 1j*ymax)
r = Line(xmax + 1j*ymin, xmax + 1j*ymax)
l = Line(xmin + 1j*ymin, xmin + 1j*ymax)
return Path(b, r, t.reversed(), l.reversed()) | python | def bbox2path(xmin, xmax, ymin, ymax):
"""Converts a bounding box 4-tuple to a Path object."""
b = Line(xmin + 1j*ymin, xmax + 1j*ymin)
t = Line(xmin + 1j*ymax, xmax + 1j*ymax)
r = Line(xmax + 1j*ymin, xmax + 1j*ymax)
l = Line(xmin + 1j*ymin, xmin + 1j*ymax)
return Path(b, r, t.reversed(), l.reversed()) | [
"def",
"bbox2path",
"(",
"xmin",
",",
"xmax",
",",
"ymin",
",",
"ymax",
")",
":",
"b",
"=",
"Line",
"(",
"xmin",
"+",
"1j",
"*",
"ymin",
",",
"xmax",
"+",
"1j",
"*",
"ymin",
")",
"t",
"=",
"Line",
"(",
"xmin",
"+",
"1j",
"*",
"ymax",
",",
"... | Converts a bounding box 4-tuple to a Path object. | [
"Converts",
"a",
"bounding",
"box",
"4",
"-",
"tuple",
"to",
"a",
"Path",
"object",
"."
] | fd7348a1dfd88b65ea61da02325c6605aedf8c4f | https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/path.py#L92-L98 | train | 199,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.