repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
ChrisBeaumont/smother | smother/python.py | PythonFile.context_range | python | def context_range(self, context):
if not context.startswith(self.prefix):
context = self.prefix + '.' + context
lo = hi = None
for idx, line_context in enumerate(self.lines, 1):
# context is hierarchical -- context spans itself
# and any suffix.
if line_context.startswith(context):
lo = lo or idx
hi = idx
if lo is None:
raise ValueError("Context %s does not exist in file %s" %
(context, self.filename))
return lo, hi + 1 | Return the 1-offset, right-open range of lines spanned by
a particular context name.
Parameters
----------
context : str
Raises
------
ValueError, if context is not present in the file. | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/python.py#L179-L208 | null | class PythonFile(object):
"""
A file of python source.
"""
def __init__(self, filename, source=None, prefix=None):
"""
Parameters
----------
filename : str
The path to the file
source : str (optional)
The contents of the file. Will be read from `filename`
if not provided.
prefix : str (optional)
Name to give to the outermost context in the file.
If not provided, will be the "." form of filename
(ie a/b/c.py -> a.b.c)
"""
self.filename = filename
if prefix is None:
self.prefix = self._module_name(filename)
else:
self.prefix = prefix
if source is None:
with open(filename) as infile:
self.source = infile.read()
else:
self.source = source
try:
self.ast = parse(self.source)
except SyntaxError:
raise InvalidPythonFile(self.filename)
visitor = Visitor(prefix=self.prefix)
visitor.visit(self.ast)
self.lines = visitor.lines
@staticmethod
def _module_name(filename):
"""
Try to find a module name for a file path
by stripping off a prefix found in sys.modules.
"""
absfile = os.path.abspath(filename)
match = filename
for base in [''] + sys.path:
base = os.path.abspath(base)
if absfile.startswith(base):
match = absfile[len(base):]
break
return SUFFIX_RE.sub('', match).lstrip('/').replace('/', '.')
@classmethod
def from_modulename(cls, module_name):
"""
Build a PythonFile given a dotted module name like a.b.c
"""
# XXX make this more robust (pyc files? zip archives? etc)
slug = module_name.replace('.', '/')
paths = [slug + '.py', slug + '/__init__.py']
# always search from current directory
for base in [''] + sys.path:
for path in paths:
fullpath = os.path.join(base, path)
if os.path.exists(fullpath):
return cls(fullpath, prefix=module_name)
else:
raise ValueError("Module not found: %s" % module_name)
@property
def line_count(self):
return len(self.lines)
def context(self, line):
"""
Return the context for a given 1-offset line number.
"""
# XXX due to a limitation in Visitor,
# non-python code after the last python code
# in a file is not added to self.lines, so we
# have to guard against IndexErrors.
idx = line - 1
if idx >= len(self.lines):
return self.prefix
return self.lines[idx]
|
ChrisBeaumont/smother | smother/python.py | PythonFile.context | python | def context(self, line):
# XXX due to a limitation in Visitor,
# non-python code after the last python code
# in a file is not added to self.lines, so we
# have to guard against IndexErrors.
idx = line - 1
if idx >= len(self.lines):
return self.prefix
return self.lines[idx] | Return the context for a given 1-offset line number. | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/python.py#L210-L221 | null | class PythonFile(object):
"""
A file of python source.
"""
def __init__(self, filename, source=None, prefix=None):
"""
Parameters
----------
filename : str
The path to the file
source : str (optional)
The contents of the file. Will be read from `filename`
if not provided.
prefix : str (optional)
Name to give to the outermost context in the file.
If not provided, will be the "." form of filename
(ie a/b/c.py -> a.b.c)
"""
self.filename = filename
if prefix is None:
self.prefix = self._module_name(filename)
else:
self.prefix = prefix
if source is None:
with open(filename) as infile:
self.source = infile.read()
else:
self.source = source
try:
self.ast = parse(self.source)
except SyntaxError:
raise InvalidPythonFile(self.filename)
visitor = Visitor(prefix=self.prefix)
visitor.visit(self.ast)
self.lines = visitor.lines
@staticmethod
def _module_name(filename):
"""
Try to find a module name for a file path
by stripping off a prefix found in sys.modules.
"""
absfile = os.path.abspath(filename)
match = filename
for base in [''] + sys.path:
base = os.path.abspath(base)
if absfile.startswith(base):
match = absfile[len(base):]
break
return SUFFIX_RE.sub('', match).lstrip('/').replace('/', '.')
@classmethod
def from_modulename(cls, module_name):
"""
Build a PythonFile given a dotted module name like a.b.c
"""
# XXX make this more robust (pyc files? zip archives? etc)
slug = module_name.replace('.', '/')
paths = [slug + '.py', slug + '/__init__.py']
# always search from current directory
for base in [''] + sys.path:
for path in paths:
fullpath = os.path.join(base, path)
if os.path.exists(fullpath):
return cls(fullpath, prefix=module_name)
else:
raise ValueError("Module not found: %s" % module_name)
@property
def line_count(self):
return len(self.lines)
def context_range(self, context):
"""
Return the 1-offset, right-open range of lines spanned by
a particular context name.
Parameters
----------
context : str
Raises
------
ValueError, if context is not present in the file.
"""
if not context.startswith(self.prefix):
context = self.prefix + '.' + context
lo = hi = None
for idx, line_context in enumerate(self.lines, 1):
# context is hierarchical -- context spans itself
# and any suffix.
if line_context.startswith(context):
lo = lo or idx
hi = idx
if lo is None:
raise ValueError("Context %s does not exist in file %s" %
(context, self.filename))
return lo, hi + 1
|
ChrisBeaumont/smother | smother/control.py | Smother.write | python | def write(self, file_or_path, append=False, timeout=10):
if isinstance(file_or_path, six.string_types):
if self.coverage:
file_or_path = get_smother_filename(
file_or_path, self.coverage.config.parallel)
outfile = Lock(
file_or_path, mode='a+',
timeout=timeout,
fail_when_locked=False
)
else:
outfile = noclose(file_or_path)
with outfile as fh:
if append:
fh.seek(0)
try:
other = Smother.load(fh)
except ValueError: # no smother data
pass
else:
self |= other
fh.seek(0)
fh.truncate() # required to overwrite data in a+ mode
json.dump(self.data, fh) | Write Smother results to a file.
Parameters
----------
fiile_or_path : str
Path to write report to
append : bool
If True, read an existing smother report from `outpath`
and combine it with this file before writing.
timeout : int
Time in seconds to wait to acquire a file lock, before
raising an error.
Note
----
Append mode is atomic when file_or_path is a path,
and can be safely run in a multithreaded or
multiprocess test environment.
When using `parallel_mode`, file_or_path is given a unique
suffix based on the machine name and process id. | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/control.py#L87-L137 | [
"def get_smother_filename(base_name, parallel_mode):\n if parallel_mode:\n suffix = \"%s.%s.%06d\" % (\n socket.gethostname(), os.getpid(),\n random.randint(0, 999999)\n )\n base_name += \".\" + suffix\n return base_name\n",
"def load(cls, file_or_path):\n if isinstance(file_or_path, six.string_types):\n infile = open(file_or_path)\n else:\n infile = noclose(file_or_path)\n\n with infile as fh:\n data = json.load(fh)\n\n result = cls()\n result.data = data\n return result\n"
] | class Smother(object):
def __init__(self, coverage=None):
self.coverage = coverage
self.data = {}
self.aliases = create_path_aliases_from_coverage(self.coverage)
def start(self):
self.coverage.collector.reset()
self.coverage.start()
def save_context(self, label):
self.data[label] = {
key: sorted(map(int, val.keys()))
for key, val in self.coverage.collector.data.items()
}
def write_coverage(self):
# coverage won't write data if it hasn't been started.
self.coverage.start()
self.coverage.stop()
data = {}
for cover in six.itervalues(self.data):
for path, lines in six.iteritems(cover):
data.setdefault(path, {}).update(
{line: None for line in lines}
)
self.coverage.collector.data = data
self.coverage.save()
@classmethod
def load(cls, file_or_path):
if isinstance(file_or_path, six.string_types):
infile = open(file_or_path)
else:
infile = noclose(file_or_path)
with infile as fh:
data = json.load(fh)
result = cls()
result.data = data
return result
@classmethod
def convert_to_relative_paths(cls, smother_obj):
data = defaultdict(lambda: dict())
set_relative_directory()
for ctx, cover in smother_obj.data.items():
for src, lines in cover.items():
src = relative_filename(src)
data[ctx][src] = lines
result = cls()
result.data = dict(data)
return result
def __ior__(self, other):
for ctx, cover in other.data.items():
for src, lines in cover.items():
src = self.aliases.map(src)
old = self.data.setdefault(ctx, {}).setdefault(src, [])
self.data[ctx][src] = sorted(set(old + lines))
return self
def query_context(self, regions, file_factory=PythonFile):
"""
Return which set of test contexts intersect a set of code regions.
Parameters
----------
regions: A sequence of Intervals
file_factory: Callable (optional, default PythonFile)
A callable that takes a filename and
returns a PythonFile object.
Returns
-------
A QueryResult
"""
result = set()
for region in regions:
try:
pf = file_factory(region.filename)
except InvalidPythonFile:
continue
# region and/or coverage report may use paths
# relative to this directory. Ensure we find a match
# if they use different conventions.
paths = {
os.path.abspath(region.filename),
os.path.relpath(region.filename)
}
for test_context, hits in six.iteritems(self.data):
if test_context in result:
continue
for path in paths:
if region.intersects(pf, hits.get(path, [])):
result.add(test_context)
return QueryResult(result)
def _invert(self):
"""
Invert coverage data from {test_context: {file: line}}
to {file: {test_context: line}}
"""
result = defaultdict(dict)
for test_context, src_context in six.iteritems(self.data):
for src, lines in six.iteritems(src_context):
result[src][test_context] = lines
return result
def iter_records(self, semantic=False, sort=True):
inverted = self._invert()
for src, coverage in six.iteritems(inverted):
if semantic:
try:
pf = PythonFile(src)
except IOError:
continue
source2test = defaultdict(set)
for test_context, lines in six.iteritems(coverage):
for line in lines:
if semantic:
# coverage line count is 1-based
src_context = pf.context(line)
else:
src_context = "{}:{}".format(src, line)
source2test[src_context].add(test_context)
for src_context in sorted(source2test) if sort else source2test:
test_contexts = source2test[src_context]
if sort:
test_contexts = sorted(test_contexts)
for test_context in test_contexts:
yield src_context, test_context
|
ChrisBeaumont/smother | smother/control.py | Smother.query_context | python | def query_context(self, regions, file_factory=PythonFile):
result = set()
for region in regions:
try:
pf = file_factory(region.filename)
except InvalidPythonFile:
continue
# region and/or coverage report may use paths
# relative to this directory. Ensure we find a match
# if they use different conventions.
paths = {
os.path.abspath(region.filename),
os.path.relpath(region.filename)
}
for test_context, hits in six.iteritems(self.data):
if test_context in result:
continue
for path in paths:
if region.intersects(pf, hits.get(path, [])):
result.add(test_context)
return QueryResult(result) | Return which set of test contexts intersect a set of code regions.
Parameters
----------
regions: A sequence of Intervals
file_factory: Callable (optional, default PythonFile)
A callable that takes a filename and
returns a PythonFile object.
Returns
-------
A QueryResult | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/control.py#L174-L214 | null | class Smother(object):
def __init__(self, coverage=None):
self.coverage = coverage
self.data = {}
self.aliases = create_path_aliases_from_coverage(self.coverage)
def start(self):
self.coverage.collector.reset()
self.coverage.start()
def save_context(self, label):
self.data[label] = {
key: sorted(map(int, val.keys()))
for key, val in self.coverage.collector.data.items()
}
def write_coverage(self):
# coverage won't write data if it hasn't been started.
self.coverage.start()
self.coverage.stop()
data = {}
for cover in six.itervalues(self.data):
for path, lines in six.iteritems(cover):
data.setdefault(path, {}).update(
{line: None for line in lines}
)
self.coverage.collector.data = data
self.coverage.save()
def write(self, file_or_path, append=False, timeout=10):
"""
Write Smother results to a file.
Parameters
----------
fiile_or_path : str
Path to write report to
append : bool
If True, read an existing smother report from `outpath`
and combine it with this file before writing.
timeout : int
Time in seconds to wait to acquire a file lock, before
raising an error.
Note
----
Append mode is atomic when file_or_path is a path,
and can be safely run in a multithreaded or
multiprocess test environment.
When using `parallel_mode`, file_or_path is given a unique
suffix based on the machine name and process id.
"""
if isinstance(file_or_path, six.string_types):
if self.coverage:
file_or_path = get_smother_filename(
file_or_path, self.coverage.config.parallel)
outfile = Lock(
file_or_path, mode='a+',
timeout=timeout,
fail_when_locked=False
)
else:
outfile = noclose(file_or_path)
with outfile as fh:
if append:
fh.seek(0)
try:
other = Smother.load(fh)
except ValueError: # no smother data
pass
else:
self |= other
fh.seek(0)
fh.truncate() # required to overwrite data in a+ mode
json.dump(self.data, fh)
@classmethod
def load(cls, file_or_path):
if isinstance(file_or_path, six.string_types):
infile = open(file_or_path)
else:
infile = noclose(file_or_path)
with infile as fh:
data = json.load(fh)
result = cls()
result.data = data
return result
@classmethod
def convert_to_relative_paths(cls, smother_obj):
data = defaultdict(lambda: dict())
set_relative_directory()
for ctx, cover in smother_obj.data.items():
for src, lines in cover.items():
src = relative_filename(src)
data[ctx][src] = lines
result = cls()
result.data = dict(data)
return result
def __ior__(self, other):
for ctx, cover in other.data.items():
for src, lines in cover.items():
src = self.aliases.map(src)
old = self.data.setdefault(ctx, {}).setdefault(src, [])
self.data[ctx][src] = sorted(set(old + lines))
return self
def _invert(self):
"""
Invert coverage data from {test_context: {file: line}}
to {file: {test_context: line}}
"""
result = defaultdict(dict)
for test_context, src_context in six.iteritems(self.data):
for src, lines in six.iteritems(src_context):
result[src][test_context] = lines
return result
def iter_records(self, semantic=False, sort=True):
inverted = self._invert()
for src, coverage in six.iteritems(inverted):
if semantic:
try:
pf = PythonFile(src)
except IOError:
continue
source2test = defaultdict(set)
for test_context, lines in six.iteritems(coverage):
for line in lines:
if semantic:
# coverage line count is 1-based
src_context = pf.context(line)
else:
src_context = "{}:{}".format(src, line)
source2test[src_context].add(test_context)
for src_context in sorted(source2test) if sort else source2test:
test_contexts = source2test[src_context]
if sort:
test_contexts = sorted(test_contexts)
for test_context in test_contexts:
yield src_context, test_context
|
ChrisBeaumont/smother | smother/control.py | Smother._invert | python | def _invert(self):
result = defaultdict(dict)
for test_context, src_context in six.iteritems(self.data):
for src, lines in six.iteritems(src_context):
result[src][test_context] = lines
return result | Invert coverage data from {test_context: {file: line}}
to {file: {test_context: line}} | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/control.py#L216-L225 | null | class Smother(object):
def __init__(self, coverage=None):
self.coverage = coverage
self.data = {}
self.aliases = create_path_aliases_from_coverage(self.coverage)
def start(self):
self.coverage.collector.reset()
self.coverage.start()
def save_context(self, label):
self.data[label] = {
key: sorted(map(int, val.keys()))
for key, val in self.coverage.collector.data.items()
}
def write_coverage(self):
# coverage won't write data if it hasn't been started.
self.coverage.start()
self.coverage.stop()
data = {}
for cover in six.itervalues(self.data):
for path, lines in six.iteritems(cover):
data.setdefault(path, {}).update(
{line: None for line in lines}
)
self.coverage.collector.data = data
self.coverage.save()
def write(self, file_or_path, append=False, timeout=10):
"""
Write Smother results to a file.
Parameters
----------
fiile_or_path : str
Path to write report to
append : bool
If True, read an existing smother report from `outpath`
and combine it with this file before writing.
timeout : int
Time in seconds to wait to acquire a file lock, before
raising an error.
Note
----
Append mode is atomic when file_or_path is a path,
and can be safely run in a multithreaded or
multiprocess test environment.
When using `parallel_mode`, file_or_path is given a unique
suffix based on the machine name and process id.
"""
if isinstance(file_or_path, six.string_types):
if self.coverage:
file_or_path = get_smother_filename(
file_or_path, self.coverage.config.parallel)
outfile = Lock(
file_or_path, mode='a+',
timeout=timeout,
fail_when_locked=False
)
else:
outfile = noclose(file_or_path)
with outfile as fh:
if append:
fh.seek(0)
try:
other = Smother.load(fh)
except ValueError: # no smother data
pass
else:
self |= other
fh.seek(0)
fh.truncate() # required to overwrite data in a+ mode
json.dump(self.data, fh)
@classmethod
def load(cls, file_or_path):
if isinstance(file_or_path, six.string_types):
infile = open(file_or_path)
else:
infile = noclose(file_or_path)
with infile as fh:
data = json.load(fh)
result = cls()
result.data = data
return result
@classmethod
def convert_to_relative_paths(cls, smother_obj):
data = defaultdict(lambda: dict())
set_relative_directory()
for ctx, cover in smother_obj.data.items():
for src, lines in cover.items():
src = relative_filename(src)
data[ctx][src] = lines
result = cls()
result.data = dict(data)
return result
def __ior__(self, other):
for ctx, cover in other.data.items():
for src, lines in cover.items():
src = self.aliases.map(src)
old = self.data.setdefault(ctx, {}).setdefault(src, [])
self.data[ctx][src] = sorted(set(old + lines))
return self
def query_context(self, regions, file_factory=PythonFile):
"""
Return which set of test contexts intersect a set of code regions.
Parameters
----------
regions: A sequence of Intervals
file_factory: Callable (optional, default PythonFile)
A callable that takes a filename and
returns a PythonFile object.
Returns
-------
A QueryResult
"""
result = set()
for region in regions:
try:
pf = file_factory(region.filename)
except InvalidPythonFile:
continue
# region and/or coverage report may use paths
# relative to this directory. Ensure we find a match
# if they use different conventions.
paths = {
os.path.abspath(region.filename),
os.path.relpath(region.filename)
}
for test_context, hits in six.iteritems(self.data):
if test_context in result:
continue
for path in paths:
if region.intersects(pf, hits.get(path, [])):
result.add(test_context)
return QueryResult(result)
def iter_records(self, semantic=False, sort=True):
inverted = self._invert()
for src, coverage in six.iteritems(inverted):
if semantic:
try:
pf = PythonFile(src)
except IOError:
continue
source2test = defaultdict(set)
for test_context, lines in six.iteritems(coverage):
for line in lines:
if semantic:
# coverage line count is 1-based
src_context = pf.context(line)
else:
src_context = "{}:{}".format(src, line)
source2test[src_context].add(test_context)
for src_context in sorted(source2test) if sort else source2test:
test_contexts = source2test[src_context]
if sort:
test_contexts = sorted(test_contexts)
for test_context in test_contexts:
yield src_context, test_context
|
ChrisBeaumont/smother | smother/interval.py | parse_intervals | python | def parse_intervals(path, as_context=False):
def _regions_from_range():
if as_context:
ctxs = list(set(pf.lines[start - 1: stop - 1]))
return [
ContextInterval(filename, ctx)
for ctx in ctxs
]
else:
return [LineInterval(filename, start, stop)]
if ':' in path:
path, subpath = path.split(':')
else:
subpath = ''
pf = PythonFile.from_modulename(path)
filename = pf.filename
rng = NUMBER_RE.match(subpath)
if rng: # specified a line or line range
start, stop = map(int, rng.groups(0))
stop = stop or start + 1
return _regions_from_range()
elif not subpath: # asked for entire module
if as_context:
return [ContextInterval(filename, pf.prefix)]
start, stop = 1, pf.line_count + 1
return _regions_from_range()
else: # specified a context name
context = pf.prefix + ':' + subpath
if context not in pf.lines:
raise ValueError("%s is not a valid context for %s"
% (context, pf.prefix))
if as_context:
return [ContextInterval(filename, context)]
else:
start, stop = pf.context_range(context)
return [LineInterval(filename, start, stop)] | Parse path strings into a collection of Intervals.
`path` is a string describing a region in a file. It's format is
dotted.module.name:[line | start-stop | context]
`dotted.module.name` is a python module
`line` is a single line number in the module (1-offset)
`start-stop` is a right-open interval of line numbers
`context` is a '.' delimited, nested name of a class or function.
For example FooClass.method_a.inner_method
identifies the innermost function in code like
class FooClass:
def method_a(self):
def inner_method():
pass
Parameters
----------
path : str
Region description (see above)
as_context : bool (optional, default=False)
If `True`, return `ContextInterval`s instead of `LineInterval`s.
If `path` provides a line number or range, the result will include
all contexts that intersect this line range.
Returns
-------
list of `Interval`s | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/interval.py#L55-L127 | [
"def from_modulename(cls, module_name):\n \"\"\"\n Build a PythonFile given a dotted module name like a.b.c\n \"\"\"\n # XXX make this more robust (pyc files? zip archives? etc)\n slug = module_name.replace('.', '/')\n paths = [slug + '.py', slug + '/__init__.py']\n\n # always search from current directory\n for base in [''] + sys.path:\n for path in paths:\n fullpath = os.path.join(base, path)\n if os.path.exists(fullpath):\n return cls(fullpath, prefix=module_name)\n",
"def context_range(self, context):\n \"\"\"\n Return the 1-offset, right-open range of lines spanned by\n a particular context name.\n\n Parameters\n ----------\n context : str\n\n Raises\n ------\n ValueError, if context is not present in the file.\n \"\"\"\n if not context.startswith(self.prefix):\n context = self.prefix + '.' + context\n\n lo = hi = None\n for idx, line_context in enumerate(self.lines, 1):\n\n # context is hierarchical -- context spans itself\n # and any suffix.\n if line_context.startswith(context):\n lo = lo or idx\n hi = idx\n\n if lo is None:\n raise ValueError(\"Context %s does not exist in file %s\" %\n (context, self.filename))\n\n return lo, hi + 1\n",
"def _regions_from_range():\n if as_context:\n ctxs = list(set(pf.lines[start - 1: stop - 1]))\n return [\n ContextInterval(filename, ctx)\n for ctx in ctxs\n ]\n else:\n return [LineInterval(filename, start, stop)]\n"
] | """
Datastructures to represent code regions.
"""
import re
from collections import namedtuple
from smother.python import PythonFile
NUMBER_RE = re.compile('([0-9]+)(?:-([0-9]+))?')
class Interval(object):
"""
Abstract base class to represent a region of code.
"""
def intersects(self, python_file, lines):
"""
Test whether a `PythonFile` and list of line numbers
intersects the given Interval.
"""
raise NotImplementedError()
class LineInterval(namedtuple('LineInterval', 'filename start stop'),
Interval):
"""
Interval defined by a right-open interval of 1-offset line numbers.
"""
def intersects(self, python_file, lines):
assert python_file.filename == self.filename
for line in lines:
if (line >= self.start) and (line < self.stop):
return True
return False
class ContextInterval(namedtuple('ContextInterval', 'filename context'),
Interval):
"""
Interval defined by a `context` identifier within a file.
"""
def intersects(self, python_file, lines):
assert python_file.filename == self.filename
for line in lines:
if python_file.context(line) == self.context:
return True
return False
|
ChrisBeaumont/smother | smother/git.py | execute | python | def execute(cmd):
proc = Popen(cmd, stdout=PIPE)
stdout, _ = proc.communicate()
if proc.returncode != 0:
raise CalledProcessError(proc.returncode, " ".join(cmd))
return stdout.decode('utf8') | Run a shell command and return stdout | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/git.py#L11-L18 | null | from subprocess import CalledProcessError
from subprocess import PIPE
from subprocess import Popen
from unidiff import PatchSet
from smother.diff import DiffReporter
from smother.python import PythonFile
def git_diff(ref=None):
cmd = [
'git',
'-c', 'diff.mnemonicprefix=no',
'diff',
ref,
'--no-color',
'--no-ext-diff'
]
data = execute(list(filter(None, cmd)))
return PatchSet(data.splitlines())
def git_show(ref, path):
ref = ref or ''
cmd = [
'git',
'show',
"{}:{}".format(ref, path),
]
return execute(cmd)
class GitDiffReporter(DiffReporter):
def __init__(self, ref='HEAD', diff=None):
self.ref = ref
self._patch_set = diff or git_diff(ref)
@property
def patch_set(self):
return self._patch_set
def old_file(self, path):
if path == '/dev/null':
return
# git diff may prefix the path with a/
if path.startswith('a/'):
filename = path[2:]
else:
filename = path
source = git_show(self.ref, filename)
return PythonFile(filename, source=source)
def new_file(self, path):
if path == '/dev/null':
return
# git diff may prefix the path with b/
if path.startswith('b/'):
filename = path[2:]
else:
filename = path
return PythonFile(filename)
|
ChrisBeaumont/smother | smother/cli.py | cli | python | def cli(ctx, report, semantic, rcfile):
ctx.obj = {
'report': report,
'semantic': semantic,
'rcfile': rcfile,
} | Query or manipulate smother reports | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/cli.py#L27-L35 | null | import csv as _csv
import os
import click
import coverage
from smother.control import Smother
from smother.git import GitDiffReporter
from smother.interval import parse_intervals
@click.group()
@click.option('--report', '-r', default='.smother', help='Smother report file')
@click.option(
'--semantic', '-s',
help='Map coverage to semantic blocks (functions and classes) '
'instead of individual line numbers.',
is_flag=True,
)
@click.option(
'--rcfile',
default=True,
help='Coverage config file'
)
@click.version_option()
@click.pass_context
def _report_from_regions(regions, opts, **kwargs):
report_file = opts['report']
smother = Smother.load(report_file)
result = smother.query_context(regions, **kwargs)
result.report()
@cli.command()
@click.argument("path")
@click.pass_context
def lookup(ctx, path):
"""
Determine which tests intersect a source interval.
"""
regions = parse_intervals(path, as_context=ctx.obj['semantic'])
_report_from_regions(regions, ctx.obj)
@cli.command()
@click.argument("branch", default="")
@click.pass_context
def diff(ctx, branch):
"""
Determine which tests intersect a git diff.
"""
diff = GitDiffReporter(branch)
regions = diff.changed_intervals()
_report_from_regions(regions, ctx.obj, file_factory=diff.old_file)
@cli.command()
@click.argument('src', nargs=-1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
@click.pass_context
def combine(ctx, src, dst):
"""
Combine several smother reports.
"""
c = coverage.Coverage(config_file=ctx.obj['rcfile'])
result = Smother(c)
for infile in src:
result |= Smother.load(infile)
result.write(dst)
@cli.command()
@click.argument('src', nargs=1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
def convert_to_relative_paths(src, dst):
"""
Converts all file paths in a smother report to relative paths, relative
to the current directory.
"""
result = Smother.convert_to_relative_paths(Smother.load(src))
result.write(dst)
@cli.command()
@click.argument('dst', type=click.File('w'))
@click.pass_context
def csv(ctx, dst):
"""
Flatten a coverage file into a CSV
of source_context, testname
"""
sm = Smother.load(ctx.obj['report'])
semantic = ctx.obj['semantic']
writer = _csv.writer(dst, lineterminator='\n')
dst.write("source_context, test_context\n")
writer.writerows(sm.iter_records(semantic=semantic))
@cli.command()
@click.pass_context
def erase(ctx):
"""
Erase the existing smother report.
"""
if os.path.exists(ctx.obj['report']):
os.remove(ctx.obj['report'])
@cli.command()
@click.pass_context
def to_coverage(ctx):
"""
Produce a .coverage file from a smother file
"""
sm = Smother.load(ctx.obj['report'])
sm.coverage = coverage.coverage()
sm.write_coverage()
|
ChrisBeaumont/smother | smother/cli.py | lookup | python | def lookup(ctx, path):
regions = parse_intervals(path, as_context=ctx.obj['semantic'])
_report_from_regions(regions, ctx.obj) | Determine which tests intersect a source interval. | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/cli.py#L48-L53 | [
"def parse_intervals(path, as_context=False):\n \"\"\"\n Parse path strings into a collection of Intervals.\n\n `path` is a string describing a region in a file. It's format is\n\n dotted.module.name:[line | start-stop | context]\n\n `dotted.module.name` is a python module\n `line` is a single line number in the module (1-offset)\n `start-stop` is a right-open interval of line numbers\n `context` is a '.' delimited, nested name of a class or function.\n For example FooClass.method_a.inner_method\n\n identifies the innermost function in code like\n\n class FooClass:\n def method_a(self):\n def inner_method():\n pass\n\n Parameters\n ----------\n path : str\n Region description (see above)\n as_context : bool (optional, default=False)\n If `True`, return `ContextInterval`s instead of `LineInterval`s.\n If `path` provides a line number or range, the result will include\n all contexts that intersect this line range.\n\n Returns\n -------\n list of `Interval`s\n \"\"\"\n\n def _regions_from_range():\n if as_context:\n ctxs = list(set(pf.lines[start - 1: stop - 1]))\n return [\n ContextInterval(filename, ctx)\n for ctx in ctxs\n ]\n else:\n return [LineInterval(filename, start, stop)]\n\n if ':' in path:\n path, subpath = path.split(':')\n else:\n subpath = ''\n\n pf = PythonFile.from_modulename(path)\n filename = pf.filename\n rng = NUMBER_RE.match(subpath)\n\n if rng: # specified a line or line range\n start, stop = map(int, rng.groups(0))\n stop = stop or start + 1\n return _regions_from_range()\n elif not subpath: # asked for entire module\n if as_context:\n return [ContextInterval(filename, pf.prefix)]\n start, stop = 1, pf.line_count + 1\n return _regions_from_range()\n else: # specified a context name\n context = pf.prefix + ':' + subpath\n if context not in pf.lines:\n raise ValueError(\"%s is not a valid context for %s\"\n % (context, pf.prefix))\n if as_context:\n return [ContextInterval(filename, context)]\n else:\n start, stop = pf.context_range(context)\n 
return [LineInterval(filename, start, stop)]\n",
"def _report_from_regions(regions, opts, **kwargs):\n report_file = opts['report']\n smother = Smother.load(report_file)\n result = smother.query_context(regions, **kwargs)\n result.report()\n"
] | import csv as _csv
import os
import click
import coverage
from smother.control import Smother
from smother.git import GitDiffReporter
from smother.interval import parse_intervals
@click.group()
@click.option('--report', '-r', default='.smother', help='Smother report file')
@click.option(
'--semantic', '-s',
help='Map coverage to semantic blocks (functions and classes) '
'instead of individual line numbers.',
is_flag=True,
)
@click.option(
'--rcfile',
default=True,
help='Coverage config file'
)
@click.version_option()
@click.pass_context
def cli(ctx, report, semantic, rcfile):
"""
Query or manipulate smother reports
"""
ctx.obj = {
'report': report,
'semantic': semantic,
'rcfile': rcfile,
}
def _report_from_regions(regions, opts, **kwargs):
report_file = opts['report']
smother = Smother.load(report_file)
result = smother.query_context(regions, **kwargs)
result.report()
@cli.command()
@click.argument("path")
@click.pass_context
@cli.command()
@click.argument("branch", default="")
@click.pass_context
def diff(ctx, branch):
"""
Determine which tests intersect a git diff.
"""
diff = GitDiffReporter(branch)
regions = diff.changed_intervals()
_report_from_regions(regions, ctx.obj, file_factory=diff.old_file)
@cli.command()
@click.argument('src', nargs=-1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
@click.pass_context
def combine(ctx, src, dst):
"""
Combine several smother reports.
"""
c = coverage.Coverage(config_file=ctx.obj['rcfile'])
result = Smother(c)
for infile in src:
result |= Smother.load(infile)
result.write(dst)
@cli.command()
@click.argument('src', nargs=1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
def convert_to_relative_paths(src, dst):
"""
Converts all file paths in a smother report to relative paths, relative
to the current directory.
"""
result = Smother.convert_to_relative_paths(Smother.load(src))
result.write(dst)
@cli.command()
@click.argument('dst', type=click.File('w'))
@click.pass_context
def csv(ctx, dst):
"""
Flatten a coverage file into a CSV
of source_context, testname
"""
sm = Smother.load(ctx.obj['report'])
semantic = ctx.obj['semantic']
writer = _csv.writer(dst, lineterminator='\n')
dst.write("source_context, test_context\n")
writer.writerows(sm.iter_records(semantic=semantic))
@cli.command()
@click.pass_context
def erase(ctx):
"""
Erase the existing smother report.
"""
if os.path.exists(ctx.obj['report']):
os.remove(ctx.obj['report'])
@cli.command()
@click.pass_context
def to_coverage(ctx):
"""
Produce a .coverage file from a smother file
"""
sm = Smother.load(ctx.obj['report'])
sm.coverage = coverage.coverage()
sm.write_coverage()
|
ChrisBeaumont/smother | smother/cli.py | diff | python | def diff(ctx, branch):
diff = GitDiffReporter(branch)
regions = diff.changed_intervals()
_report_from_regions(regions, ctx.obj, file_factory=diff.old_file) | Determine which tests intersect a git diff. | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/cli.py#L59-L65 | [
"def _report_from_regions(regions, opts, **kwargs):\n report_file = opts['report']\n smother = Smother.load(report_file)\n result = smother.query_context(regions, **kwargs)\n result.report()\n",
"def changed_intervals(self):\n return parse_intervals(self)\n"
] | import csv as _csv
import os
import click
import coverage
from smother.control import Smother
from smother.git import GitDiffReporter
from smother.interval import parse_intervals
@click.group()
@click.option('--report', '-r', default='.smother', help='Smother report file')
@click.option(
'--semantic', '-s',
help='Map coverage to semantic blocks (functions and classes) '
'instead of individual line numbers.',
is_flag=True,
)
@click.option(
'--rcfile',
default=True,
help='Coverage config file'
)
@click.version_option()
@click.pass_context
def cli(ctx, report, semantic, rcfile):
"""
Query or manipulate smother reports
"""
ctx.obj = {
'report': report,
'semantic': semantic,
'rcfile': rcfile,
}
def _report_from_regions(regions, opts, **kwargs):
report_file = opts['report']
smother = Smother.load(report_file)
result = smother.query_context(regions, **kwargs)
result.report()
@cli.command()
@click.argument("path")
@click.pass_context
def lookup(ctx, path):
"""
Determine which tests intersect a source interval.
"""
regions = parse_intervals(path, as_context=ctx.obj['semantic'])
_report_from_regions(regions, ctx.obj)
@cli.command()
@click.argument("branch", default="")
@click.pass_context
@cli.command()
@click.argument('src', nargs=-1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
@click.pass_context
def combine(ctx, src, dst):
"""
Combine several smother reports.
"""
c = coverage.Coverage(config_file=ctx.obj['rcfile'])
result = Smother(c)
for infile in src:
result |= Smother.load(infile)
result.write(dst)
@cli.command()
@click.argument('src', nargs=1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
def convert_to_relative_paths(src, dst):
"""
Converts all file paths in a smother report to relative paths, relative
to the current directory.
"""
result = Smother.convert_to_relative_paths(Smother.load(src))
result.write(dst)
@cli.command()
@click.argument('dst', type=click.File('w'))
@click.pass_context
def csv(ctx, dst):
"""
Flatten a coverage file into a CSV
of source_context, testname
"""
sm = Smother.load(ctx.obj['report'])
semantic = ctx.obj['semantic']
writer = _csv.writer(dst, lineterminator='\n')
dst.write("source_context, test_context\n")
writer.writerows(sm.iter_records(semantic=semantic))
@cli.command()
@click.pass_context
def erase(ctx):
"""
Erase the existing smother report.
"""
if os.path.exists(ctx.obj['report']):
os.remove(ctx.obj['report'])
@cli.command()
@click.pass_context
def to_coverage(ctx):
"""
Produce a .coverage file from a smother file
"""
sm = Smother.load(ctx.obj['report'])
sm.coverage = coverage.coverage()
sm.write_coverage()
|
ChrisBeaumont/smother | smother/cli.py | combine | python | def combine(ctx, src, dst):
c = coverage.Coverage(config_file=ctx.obj['rcfile'])
result = Smother(c)
for infile in src:
result |= Smother.load(infile)
result.write(dst) | Combine several smother reports. | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/cli.py#L72-L82 | [
"def write(self, file_or_path, append=False, timeout=10):\n \"\"\"\n Write Smother results to a file.\n\n Parameters\n ----------\n fiile_or_path : str\n Path to write report to\n append : bool\n If True, read an existing smother report from `outpath`\n and combine it with this file before writing.\n timeout : int\n Time in seconds to wait to acquire a file lock, before\n raising an error.\n\n Note\n ----\n Append mode is atomic when file_or_path is a path,\n and can be safely run in a multithreaded or\n multiprocess test environment.\n\n When using `parallel_mode`, file_or_path is given a unique\n suffix based on the machine name and process id.\n \"\"\"\n if isinstance(file_or_path, six.string_types):\n if self.coverage:\n file_or_path = get_smother_filename(\n file_or_path, self.coverage.config.parallel)\n\n outfile = Lock(\n file_or_path, mode='a+',\n timeout=timeout,\n fail_when_locked=False\n )\n else:\n outfile = noclose(file_or_path)\n\n with outfile as fh:\n\n if append:\n fh.seek(0)\n try:\n other = Smother.load(fh)\n except ValueError: # no smother data\n pass\n else:\n self |= other\n\n fh.seek(0)\n fh.truncate() # required to overwrite data in a+ mode\n json.dump(self.data, fh)\n",
"def load(cls, file_or_path):\n if isinstance(file_or_path, six.string_types):\n infile = open(file_or_path)\n else:\n infile = noclose(file_or_path)\n\n with infile as fh:\n data = json.load(fh)\n\n result = cls()\n result.data = data\n return result\n"
] | import csv as _csv
import os
import click
import coverage
from smother.control import Smother
from smother.git import GitDiffReporter
from smother.interval import parse_intervals
@click.group()
@click.option('--report', '-r', default='.smother', help='Smother report file')
@click.option(
'--semantic', '-s',
help='Map coverage to semantic blocks (functions and classes) '
'instead of individual line numbers.',
is_flag=True,
)
@click.option(
'--rcfile',
default=True,
help='Coverage config file'
)
@click.version_option()
@click.pass_context
def cli(ctx, report, semantic, rcfile):
"""
Query or manipulate smother reports
"""
ctx.obj = {
'report': report,
'semantic': semantic,
'rcfile': rcfile,
}
def _report_from_regions(regions, opts, **kwargs):
report_file = opts['report']
smother = Smother.load(report_file)
result = smother.query_context(regions, **kwargs)
result.report()
@cli.command()
@click.argument("path")
@click.pass_context
def lookup(ctx, path):
"""
Determine which tests intersect a source interval.
"""
regions = parse_intervals(path, as_context=ctx.obj['semantic'])
_report_from_regions(regions, ctx.obj)
@cli.command()
@click.argument("branch", default="")
@click.pass_context
def diff(ctx, branch):
"""
Determine which tests intersect a git diff.
"""
diff = GitDiffReporter(branch)
regions = diff.changed_intervals()
_report_from_regions(regions, ctx.obj, file_factory=diff.old_file)
@cli.command()
@click.argument('src', nargs=-1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
@click.pass_context
@cli.command()
@click.argument('src', nargs=1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
def convert_to_relative_paths(src, dst):
"""
Converts all file paths in a smother report to relative paths, relative
to the current directory.
"""
result = Smother.convert_to_relative_paths(Smother.load(src))
result.write(dst)
@cli.command()
@click.argument('dst', type=click.File('w'))
@click.pass_context
def csv(ctx, dst):
"""
Flatten a coverage file into a CSV
of source_context, testname
"""
sm = Smother.load(ctx.obj['report'])
semantic = ctx.obj['semantic']
writer = _csv.writer(dst, lineterminator='\n')
dst.write("source_context, test_context\n")
writer.writerows(sm.iter_records(semantic=semantic))
@cli.command()
@click.pass_context
def erase(ctx):
"""
Erase the existing smother report.
"""
if os.path.exists(ctx.obj['report']):
os.remove(ctx.obj['report'])
@cli.command()
@click.pass_context
def to_coverage(ctx):
"""
Produce a .coverage file from a smother file
"""
sm = Smother.load(ctx.obj['report'])
sm.coverage = coverage.coverage()
sm.write_coverage()
|
ChrisBeaumont/smother | smother/cli.py | convert_to_relative_paths | python | def convert_to_relative_paths(src, dst):
result = Smother.convert_to_relative_paths(Smother.load(src))
result.write(dst) | Converts all file paths in a smother report to relative paths, relative
to the current directory. | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/cli.py#L88-L94 | [
"def write(self, file_or_path, append=False, timeout=10):\n \"\"\"\n Write Smother results to a file.\n\n Parameters\n ----------\n fiile_or_path : str\n Path to write report to\n append : bool\n If True, read an existing smother report from `outpath`\n and combine it with this file before writing.\n timeout : int\n Time in seconds to wait to acquire a file lock, before\n raising an error.\n\n Note\n ----\n Append mode is atomic when file_or_path is a path,\n and can be safely run in a multithreaded or\n multiprocess test environment.\n\n When using `parallel_mode`, file_or_path is given a unique\n suffix based on the machine name and process id.\n \"\"\"\n if isinstance(file_or_path, six.string_types):\n if self.coverage:\n file_or_path = get_smother_filename(\n file_or_path, self.coverage.config.parallel)\n\n outfile = Lock(\n file_or_path, mode='a+',\n timeout=timeout,\n fail_when_locked=False\n )\n else:\n outfile = noclose(file_or_path)\n\n with outfile as fh:\n\n if append:\n fh.seek(0)\n try:\n other = Smother.load(fh)\n except ValueError: # no smother data\n pass\n else:\n self |= other\n\n fh.seek(0)\n fh.truncate() # required to overwrite data in a+ mode\n json.dump(self.data, fh)\n",
"def load(cls, file_or_path):\n if isinstance(file_or_path, six.string_types):\n infile = open(file_or_path)\n else:\n infile = noclose(file_or_path)\n\n with infile as fh:\n data = json.load(fh)\n\n result = cls()\n result.data = data\n return result\n",
"def convert_to_relative_paths(cls, smother_obj):\n data = defaultdict(lambda: dict())\n set_relative_directory()\n for ctx, cover in smother_obj.data.items():\n for src, lines in cover.items():\n src = relative_filename(src)\n data[ctx][src] = lines\n\n result = cls()\n result.data = dict(data)\n return result\n"
] | import csv as _csv
import os
import click
import coverage
from smother.control import Smother
from smother.git import GitDiffReporter
from smother.interval import parse_intervals
@click.group()
@click.option('--report', '-r', default='.smother', help='Smother report file')
@click.option(
'--semantic', '-s',
help='Map coverage to semantic blocks (functions and classes) '
'instead of individual line numbers.',
is_flag=True,
)
@click.option(
'--rcfile',
default=True,
help='Coverage config file'
)
@click.version_option()
@click.pass_context
def cli(ctx, report, semantic, rcfile):
"""
Query or manipulate smother reports
"""
ctx.obj = {
'report': report,
'semantic': semantic,
'rcfile': rcfile,
}
def _report_from_regions(regions, opts, **kwargs):
report_file = opts['report']
smother = Smother.load(report_file)
result = smother.query_context(regions, **kwargs)
result.report()
@cli.command()
@click.argument("path")
@click.pass_context
def lookup(ctx, path):
"""
Determine which tests intersect a source interval.
"""
regions = parse_intervals(path, as_context=ctx.obj['semantic'])
_report_from_regions(regions, ctx.obj)
@cli.command()
@click.argument("branch", default="")
@click.pass_context
def diff(ctx, branch):
"""
Determine which tests intersect a git diff.
"""
diff = GitDiffReporter(branch)
regions = diff.changed_intervals()
_report_from_regions(regions, ctx.obj, file_factory=diff.old_file)
@cli.command()
@click.argument('src', nargs=-1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
@click.pass_context
def combine(ctx, src, dst):
"""
Combine several smother reports.
"""
c = coverage.Coverage(config_file=ctx.obj['rcfile'])
result = Smother(c)
for infile in src:
result |= Smother.load(infile)
result.write(dst)
@cli.command()
@click.argument('src', nargs=1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
@cli.command()
@click.argument('dst', type=click.File('w'))
@click.pass_context
def csv(ctx, dst):
"""
Flatten a coverage file into a CSV
of source_context, testname
"""
sm = Smother.load(ctx.obj['report'])
semantic = ctx.obj['semantic']
writer = _csv.writer(dst, lineterminator='\n')
dst.write("source_context, test_context\n")
writer.writerows(sm.iter_records(semantic=semantic))
@cli.command()
@click.pass_context
def erase(ctx):
"""
Erase the existing smother report.
"""
if os.path.exists(ctx.obj['report']):
os.remove(ctx.obj['report'])
@cli.command()
@click.pass_context
def to_coverage(ctx):
"""
Produce a .coverage file from a smother file
"""
sm = Smother.load(ctx.obj['report'])
sm.coverage = coverage.coverage()
sm.write_coverage()
|
ChrisBeaumont/smother | smother/cli.py | csv | python | def csv(ctx, dst):
sm = Smother.load(ctx.obj['report'])
semantic = ctx.obj['semantic']
writer = _csv.writer(dst, lineterminator='\n')
dst.write("source_context, test_context\n")
writer.writerows(sm.iter_records(semantic=semantic)) | Flatten a coverage file into a CSV
of source_context, testname | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/cli.py#L100-L109 | [
"def load(cls, file_or_path):\n if isinstance(file_or_path, six.string_types):\n infile = open(file_or_path)\n else:\n infile = noclose(file_or_path)\n\n with infile as fh:\n data = json.load(fh)\n\n result = cls()\n result.data = data\n return result\n",
"def iter_records(self, semantic=False, sort=True):\n\n inverted = self._invert()\n for src, coverage in six.iteritems(inverted):\n if semantic:\n try:\n pf = PythonFile(src)\n except IOError:\n continue\n\n source2test = defaultdict(set)\n for test_context, lines in six.iteritems(coverage):\n for line in lines:\n if semantic:\n # coverage line count is 1-based\n src_context = pf.context(line)\n else:\n src_context = \"{}:{}\".format(src, line)\n source2test[src_context].add(test_context)\n\n for src_context in sorted(source2test) if sort else source2test:\n test_contexts = source2test[src_context]\n if sort:\n test_contexts = sorted(test_contexts)\n for test_context in test_contexts:\n yield src_context, test_context\n"
] | import csv as _csv
import os
import click
import coverage
from smother.control import Smother
from smother.git import GitDiffReporter
from smother.interval import parse_intervals
@click.group()
@click.option('--report', '-r', default='.smother', help='Smother report file')
@click.option(
'--semantic', '-s',
help='Map coverage to semantic blocks (functions and classes) '
'instead of individual line numbers.',
is_flag=True,
)
@click.option(
'--rcfile',
default=True,
help='Coverage config file'
)
@click.version_option()
@click.pass_context
def cli(ctx, report, semantic, rcfile):
"""
Query or manipulate smother reports
"""
ctx.obj = {
'report': report,
'semantic': semantic,
'rcfile': rcfile,
}
def _report_from_regions(regions, opts, **kwargs):
report_file = opts['report']
smother = Smother.load(report_file)
result = smother.query_context(regions, **kwargs)
result.report()
@cli.command()
@click.argument("path")
@click.pass_context
def lookup(ctx, path):
"""
Determine which tests intersect a source interval.
"""
regions = parse_intervals(path, as_context=ctx.obj['semantic'])
_report_from_regions(regions, ctx.obj)
@cli.command()
@click.argument("branch", default="")
@click.pass_context
def diff(ctx, branch):
"""
Determine which tests intersect a git diff.
"""
diff = GitDiffReporter(branch)
regions = diff.changed_intervals()
_report_from_regions(regions, ctx.obj, file_factory=diff.old_file)
@cli.command()
@click.argument('src', nargs=-1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
@click.pass_context
def combine(ctx, src, dst):
"""
Combine several smother reports.
"""
c = coverage.Coverage(config_file=ctx.obj['rcfile'])
result = Smother(c)
for infile in src:
result |= Smother.load(infile)
result.write(dst)
@cli.command()
@click.argument('src', nargs=1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
def convert_to_relative_paths(src, dst):
"""
Converts all file paths in a smother report to relative paths, relative
to the current directory.
"""
result = Smother.convert_to_relative_paths(Smother.load(src))
result.write(dst)
@cli.command()
@click.argument('dst', type=click.File('w'))
@click.pass_context
@cli.command()
@click.pass_context
def erase(ctx):
"""
Erase the existing smother report.
"""
if os.path.exists(ctx.obj['report']):
os.remove(ctx.obj['report'])
@cli.command()
@click.pass_context
def to_coverage(ctx):
"""
Produce a .coverage file from a smother file
"""
sm = Smother.load(ctx.obj['report'])
sm.coverage = coverage.coverage()
sm.write_coverage()
|
ChrisBeaumont/smother | smother/cli.py | erase | python | def erase(ctx):
if os.path.exists(ctx.obj['report']):
os.remove(ctx.obj['report']) | Erase the existing smother report. | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/cli.py#L114-L119 | null | import csv as _csv
import os
import click
import coverage
from smother.control import Smother
from smother.git import GitDiffReporter
from smother.interval import parse_intervals
@click.group()
@click.option('--report', '-r', default='.smother', help='Smother report file')
@click.option(
'--semantic', '-s',
help='Map coverage to semantic blocks (functions and classes) '
'instead of individual line numbers.',
is_flag=True,
)
@click.option(
'--rcfile',
default=True,
help='Coverage config file'
)
@click.version_option()
@click.pass_context
def cli(ctx, report, semantic, rcfile):
"""
Query or manipulate smother reports
"""
ctx.obj = {
'report': report,
'semantic': semantic,
'rcfile': rcfile,
}
def _report_from_regions(regions, opts, **kwargs):
report_file = opts['report']
smother = Smother.load(report_file)
result = smother.query_context(regions, **kwargs)
result.report()
@cli.command()
@click.argument("path")
@click.pass_context
def lookup(ctx, path):
"""
Determine which tests intersect a source interval.
"""
regions = parse_intervals(path, as_context=ctx.obj['semantic'])
_report_from_regions(regions, ctx.obj)
@cli.command()
@click.argument("branch", default="")
@click.pass_context
def diff(ctx, branch):
"""
Determine which tests intersect a git diff.
"""
diff = GitDiffReporter(branch)
regions = diff.changed_intervals()
_report_from_regions(regions, ctx.obj, file_factory=diff.old_file)
@cli.command()
@click.argument('src', nargs=-1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
@click.pass_context
def combine(ctx, src, dst):
"""
Combine several smother reports.
"""
c = coverage.Coverage(config_file=ctx.obj['rcfile'])
result = Smother(c)
for infile in src:
result |= Smother.load(infile)
result.write(dst)
@cli.command()
@click.argument('src', nargs=1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
def convert_to_relative_paths(src, dst):
"""
Converts all file paths in a smother report to relative paths, relative
to the current directory.
"""
result = Smother.convert_to_relative_paths(Smother.load(src))
result.write(dst)
@cli.command()
@click.argument('dst', type=click.File('w'))
@click.pass_context
def csv(ctx, dst):
"""
Flatten a coverage file into a CSV
of source_context, testname
"""
sm = Smother.load(ctx.obj['report'])
semantic = ctx.obj['semantic']
writer = _csv.writer(dst, lineterminator='\n')
dst.write("source_context, test_context\n")
writer.writerows(sm.iter_records(semantic=semantic))
@cli.command()
@click.pass_context
@cli.command()
@click.pass_context
def to_coverage(ctx):
"""
Produce a .coverage file from a smother file
"""
sm = Smother.load(ctx.obj['report'])
sm.coverage = coverage.coverage()
sm.write_coverage()
|
ChrisBeaumont/smother | smother/cli.py | to_coverage | python | def to_coverage(ctx):
sm = Smother.load(ctx.obj['report'])
sm.coverage = coverage.coverage()
sm.write_coverage() | Produce a .coverage file from a smother file | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/cli.py#L124-L130 | [
"def write_coverage(self):\n # coverage won't write data if it hasn't been started.\n self.coverage.start()\n self.coverage.stop()\n data = {}\n for cover in six.itervalues(self.data):\n for path, lines in six.iteritems(cover):\n data.setdefault(path, {}).update(\n {line: None for line in lines}\n )\n\n self.coverage.collector.data = data\n self.coverage.save()\n",
"def load(cls, file_or_path):\n if isinstance(file_or_path, six.string_types):\n infile = open(file_or_path)\n else:\n infile = noclose(file_or_path)\n\n with infile as fh:\n data = json.load(fh)\n\n result = cls()\n result.data = data\n return result\n"
] | import csv as _csv
import os
import click
import coverage
from smother.control import Smother
from smother.git import GitDiffReporter
from smother.interval import parse_intervals
@click.group()
@click.option('--report', '-r', default='.smother', help='Smother report file')
@click.option(
'--semantic', '-s',
help='Map coverage to semantic blocks (functions and classes) '
'instead of individual line numbers.',
is_flag=True,
)
@click.option(
'--rcfile',
default=True,
help='Coverage config file'
)
@click.version_option()
@click.pass_context
def cli(ctx, report, semantic, rcfile):
"""
Query or manipulate smother reports
"""
ctx.obj = {
'report': report,
'semantic': semantic,
'rcfile': rcfile,
}
def _report_from_regions(regions, opts, **kwargs):
report_file = opts['report']
smother = Smother.load(report_file)
result = smother.query_context(regions, **kwargs)
result.report()
@cli.command()
@click.argument("path")
@click.pass_context
def lookup(ctx, path):
"""
Determine which tests intersect a source interval.
"""
regions = parse_intervals(path, as_context=ctx.obj['semantic'])
_report_from_regions(regions, ctx.obj)
@cli.command()
@click.argument("branch", default="")
@click.pass_context
def diff(ctx, branch):
"""
Determine which tests intersect a git diff.
"""
diff = GitDiffReporter(branch)
regions = diff.changed_intervals()
_report_from_regions(regions, ctx.obj, file_factory=diff.old_file)
@cli.command()
@click.argument('src', nargs=-1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
@click.pass_context
def combine(ctx, src, dst):
"""
Combine several smother reports.
"""
c = coverage.Coverage(config_file=ctx.obj['rcfile'])
result = Smother(c)
for infile in src:
result |= Smother.load(infile)
result.write(dst)
@cli.command()
@click.argument('src', nargs=1, type=click.File())
@click.argument('dst', nargs=1, type=click.Path())
def convert_to_relative_paths(src, dst):
"""
Converts all file paths in a smother report to relative paths, relative
to the current directory.
"""
result = Smother.convert_to_relative_paths(Smother.load(src))
result.write(dst)
@cli.command()
@click.argument('dst', type=click.File('w'))
@click.pass_context
def csv(ctx, dst):
"""
Flatten a coverage file into a CSV
of source_context, testname
"""
sm = Smother.load(ctx.obj['report'])
semantic = ctx.obj['semantic']
writer = _csv.writer(dst, lineterminator='\n')
dst.write("source_context, test_context\n")
writer.writerows(sm.iter_records(semantic=semantic))
@cli.command()
@click.pass_context
def erase(ctx):
"""
Erase the existing smother report.
"""
if os.path.exists(ctx.obj['report']):
os.remove(ctx.obj['report'])
@cli.command()
@click.pass_context
|
spotify/gordon-gcp | src/gordon_gcp/schema/validate.py | MessageValidator.validate | python | def validate(self, message, schema_name):
err = None
try:
jsonschema.validate(message, self.schemas[schema_name])
except KeyError:
msg = (f'Schema "{schema_name}" was not found (available: '
f'{", ".join(self.schemas.keys())})')
err = {'msg': msg}
except jsonschema.ValidationError as e:
msg = (f'Given message was not valid against the schema '
f'"{schema_name}": {e.message}')
err = {'msg': msg}
if err:
logging.error(**err)
raise exceptions.InvalidMessageError(err['msg']) | Validate a message given a schema.
Args:
message (dict): Loaded JSON of pulled message from Google
PubSub.
schema_name (str): Name of schema to validate ``message``
against. ``schema_name`` will be used to look up
schema from :py:attr:`.MessageValidator.schemas` dict
Raises:
InvalidMessageError: if message is invalid against the
given schema.
InvalidMessageError: if given schema name can not be found. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/schema/validate.py#L111-L141 | null | class MessageValidator:
"""Load packaged JSON schemas and validate a given JSON message.
Attributes:
schemas (dict): schema name based on filename mapped to its
loaded JSON schema.
Raises:
GCPGordonError: if unable to find or load schemas.
"""
HERE = os.path.dirname(__file__)
SCHEMA_DIR = 'schemas'
def __init__(self):
self.schemas = self._load_schemas()
def _load_schemas(self):
schema_path = pathlib.Path(self.HERE, self.SCHEMA_DIR).absolute()
schema_path_contents = schema_path.glob('*.json')
schemas = {}
for schema_file in schema_path_contents:
schema_name = schema_file.name.split('.')[0]
try:
with open(schema_file, 'r') as f:
schemas[schema_name] = json.load(f)
logging.info(f'Successfully loaded schema "{schema_name}".')
except (FileNotFoundError, json.JSONDecodeError) as e:
msg = f'Error loading schema "{schema_name}": {e}.'
logging.error(msg, exc_info=e)
raise exceptions.GCPGordonError(msg)
if not schemas:
msg = 'Unable to load any schemas.'
logging.error(msg)
raise exceptions.GCPGordonError(msg)
return schemas
|
spotify/gordon-gcp | src/gordon_gcp/clients/http.py | AIOConnection.valid_token_set | python | async def valid_token_set(self):
is_valid = False
if self._auth_client.token:
# Account for a token near expiration
now = datetime.datetime.utcnow()
skew = datetime.timedelta(seconds=60)
if self._auth_client.expiry > (now + skew):
is_valid = True
return is_valid | Check for validity of token, and refresh if none or expired. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/http.py#L72-L82 | null | class AIOConnection:
"""Async HTTP client to Google APIs with service-account-based auth.
Args:
auth_client (.GAuthClient): client to manage authentication for
HTTP API requests.
session (aiohttp.ClientSession): (optional) ``aiohttp`` HTTP
session to use for sending requests. Defaults to the session
object attached to :obj:`auth_client` if not provided.
"""
def __init__(self, auth_client=None, session=None):
self._auth_client = auth_client
self._session = session or auth_client._session
async def request(self, method, url, params=None, headers=None,
data=None, json=None, token_refresh_attempts=2,
**kwargs):
"""Make an asynchronous HTTP request.
Args:
method (str): HTTP method to use for the request.
url (str): URL to be requested.
params (dict): (optional) Query parameters for the request.
Defaults to ``None``.
headers (dict): (optional) HTTP headers to send with the
request. Headers pass through to the request will
include :attr:`DEFAULT_REQUEST_HEADERS`.
data (obj): (optional) A dictionary, bytes, or file-like
object to send in the body of the request.
json (obj): (optional) Any json compatible python
object.
NOTE: json and body parameters cannot be used at the same time.
token_refresh_attempts (int): (optional) Number of attempts a token
refresh should be performed.
Returns:
(str) HTTP response body.
Raises:
:exc:`.GCPHTTPError`: if any exception occurred,
specifically a :exc:`.GCPHTTPResponseError`, if the
exception is associated with a response status code.
"""
if all([data, json]):
msg = ('"data" and "json" request parameters can not be used '
'at the same time')
logging.warn(msg)
raise exceptions.GCPHTTPError(msg)
req_headers = headers or {}
req_headers.update(_utils.DEFAULT_REQUEST_HEADERS)
req_kwargs = {
'params': params,
'headers': req_headers,
}
if data:
req_kwargs['data'] = data
if json:
req_kwargs['json'] = json
if token_refresh_attempts:
if not await self.valid_token_set():
await self._auth_client.refresh_token()
token_refresh_attempts -= 1
req_headers.update(
{'Authorization': f'Bearer {self._auth_client.token}'}
)
request_id = kwargs.get('request_id', uuid.uuid4())
logging.debug(_utils.REQ_LOG_FMT.format(
request_id=request_id,
method=method.upper(),
url=url,
kwargs=req_kwargs))
try:
async with self._session.request(method, url, **req_kwargs) as resp:
log_kw = {
'request_id': request_id,
'method': method.upper(),
'url': resp.url,
'status': resp.status,
'reason': resp.reason
}
logging.debug(_utils.RESP_LOG_FMT.format(**log_kw))
if resp.status in REFRESH_STATUS_CODES:
logging.warning(
f'[{request_id}] HTTP Status Code {resp.status}'
f' returned requesting {resp.url}: {resp.reason}')
if token_refresh_attempts:
logging.info(
f'[{request_id}] Attempting request to {resp.url} '
'again.')
return await self.request(
method, url,
token_refresh_attempts=token_refresh_attempts,
request_id=request_id,
**req_kwargs)
logging.warning(
f'[{request_id}] Max attempts refreshing auth token '
f'exhausted while requesting {resp.url}')
resp.raise_for_status()
return await resp.text()
except aiohttp.ClientResponseError as e:
# bad HTTP status; avoid leaky abstractions and wrap HTTP errors
# with our own
msg = f'[{request_id}] HTTP error response from {resp.url}: {e}'
logging.error(msg, exc_info=e)
raise exceptions.GCPHTTPResponseError(msg, resp.status)
except exceptions.GCPHTTPResponseError as e:
# from recursive call
raise e
except Exception as e:
msg = f'[{request_id}] Request call failed: {e}'
logging.error(msg, exc_info=e)
raise exceptions.GCPHTTPError(msg)
async def get_json(self, url, json_callback=None, **kwargs):
"""Get a URL and return its JSON response.
Args:
url (str): URL to be requested.
json_callback (func): Custom JSON loader function. Defaults
to :meth:`json.loads`.
kwargs (dict): Additional arguments to pass through to the
request.
Returns:
response body returned by :func:`json_callback` function.
"""
if not json_callback:
json_callback = json.loads
response = await self.request(method='get', url=url, **kwargs)
return json_callback(response)
async def get_all(self, url, params=None):
"""Aggregate data from all pages of an API query.
Args:
url (str): Google API endpoint URL.
params (dict): (optional) URL query parameters.
Returns:
list: Parsed JSON query response results.
"""
if not params:
params = {}
items = []
next_page_token = None
while True:
if next_page_token:
params['pageToken'] = next_page_token
response = await self.get_json(url, params=params)
items.append(response)
next_page_token = response.get('nextPageToken')
if not next_page_token:
break
return items
|
spotify/gordon-gcp | src/gordon_gcp/clients/http.py | AIOConnection.request | python | async def request(self, method, url, params=None, headers=None,
data=None, json=None, token_refresh_attempts=2,
**kwargs):
if all([data, json]):
msg = ('"data" and "json" request parameters can not be used '
'at the same time')
logging.warn(msg)
raise exceptions.GCPHTTPError(msg)
req_headers = headers or {}
req_headers.update(_utils.DEFAULT_REQUEST_HEADERS)
req_kwargs = {
'params': params,
'headers': req_headers,
}
if data:
req_kwargs['data'] = data
if json:
req_kwargs['json'] = json
if token_refresh_attempts:
if not await self.valid_token_set():
await self._auth_client.refresh_token()
token_refresh_attempts -= 1
req_headers.update(
{'Authorization': f'Bearer {self._auth_client.token}'}
)
request_id = kwargs.get('request_id', uuid.uuid4())
logging.debug(_utils.REQ_LOG_FMT.format(
request_id=request_id,
method=method.upper(),
url=url,
kwargs=req_kwargs))
try:
async with self._session.request(method, url, **req_kwargs) as resp:
log_kw = {
'request_id': request_id,
'method': method.upper(),
'url': resp.url,
'status': resp.status,
'reason': resp.reason
}
logging.debug(_utils.RESP_LOG_FMT.format(**log_kw))
if resp.status in REFRESH_STATUS_CODES:
logging.warning(
f'[{request_id}] HTTP Status Code {resp.status}'
f' returned requesting {resp.url}: {resp.reason}')
if token_refresh_attempts:
logging.info(
f'[{request_id}] Attempting request to {resp.url} '
'again.')
return await self.request(
method, url,
token_refresh_attempts=token_refresh_attempts,
request_id=request_id,
**req_kwargs)
logging.warning(
f'[{request_id}] Max attempts refreshing auth token '
f'exhausted while requesting {resp.url}')
resp.raise_for_status()
return await resp.text()
except aiohttp.ClientResponseError as e:
# bad HTTP status; avoid leaky abstractions and wrap HTTP errors
# with our own
msg = f'[{request_id}] HTTP error response from {resp.url}: {e}'
logging.error(msg, exc_info=e)
raise exceptions.GCPHTTPResponseError(msg, resp.status)
except exceptions.GCPHTTPResponseError as e:
# from recursive call
raise e
except Exception as e:
msg = f'[{request_id}] Request call failed: {e}'
logging.error(msg, exc_info=e)
raise exceptions.GCPHTTPError(msg) | Make an asynchronous HTTP request.
Args:
method (str): HTTP method to use for the request.
url (str): URL to be requested.
params (dict): (optional) Query parameters for the request.
Defaults to ``None``.
headers (dict): (optional) HTTP headers to send with the
request. Headers pass through to the request will
include :attr:`DEFAULT_REQUEST_HEADERS`.
data (obj): (optional) A dictionary, bytes, or file-like
object to send in the body of the request.
json (obj): (optional) Any json compatible python
object.
NOTE: json and body parameters cannot be used at the same time.
token_refresh_attempts (int): (optional) Number of attempts a token
refresh should be performed.
Returns:
(str) HTTP response body.
Raises:
:exc:`.GCPHTTPError`: if any exception occurred,
specifically a :exc:`.GCPHTTPResponseError`, if the
exception is associated with a response status code. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/http.py#L84-L189 | [
"async def valid_token_set(self):\n \"\"\"Check for validity of token, and refresh if none or expired.\"\"\"\n is_valid = False\n\n if self._auth_client.token:\n # Account for a token near expiration\n now = datetime.datetime.utcnow()\n skew = datetime.timedelta(seconds=60)\n if self._auth_client.expiry > (now + skew):\n is_valid = True\n return is_valid\n"
] | class AIOConnection:
"""Async HTTP client to Google APIs with service-account-based auth.
Args:
auth_client (.GAuthClient): client to manage authentication for
HTTP API requests.
session (aiohttp.ClientSession): (optional) ``aiohttp`` HTTP
session to use for sending requests. Defaults to the session
object attached to :obj:`auth_client` if not provided.
"""
def __init__(self, auth_client=None, session=None):
self._auth_client = auth_client
self._session = session or auth_client._session
async def valid_token_set(self):
"""Check for validity of token, and refresh if none or expired."""
is_valid = False
if self._auth_client.token:
# Account for a token near expiration
now = datetime.datetime.utcnow()
skew = datetime.timedelta(seconds=60)
if self._auth_client.expiry > (now + skew):
is_valid = True
return is_valid
async def get_json(self, url, json_callback=None, **kwargs):
"""Get a URL and return its JSON response.
Args:
url (str): URL to be requested.
json_callback (func): Custom JSON loader function. Defaults
to :meth:`json.loads`.
kwargs (dict): Additional arguments to pass through to the
request.
Returns:
response body returned by :func:`json_callback` function.
"""
if not json_callback:
json_callback = json.loads
response = await self.request(method='get', url=url, **kwargs)
return json_callback(response)
async def get_all(self, url, params=None):
"""Aggregate data from all pages of an API query.
Args:
url (str): Google API endpoint URL.
params (dict): (optional) URL query parameters.
Returns:
list: Parsed JSON query response results.
"""
if not params:
params = {}
items = []
next_page_token = None
while True:
if next_page_token:
params['pageToken'] = next_page_token
response = await self.get_json(url, params=params)
items.append(response)
next_page_token = response.get('nextPageToken')
if not next_page_token:
break
return items
|
spotify/gordon-gcp | src/gordon_gcp/clients/http.py | AIOConnection.get_json | python | async def get_json(self, url, json_callback=None, **kwargs):
if not json_callback:
json_callback = json.loads
response = await self.request(method='get', url=url, **kwargs)
return json_callback(response) | Get a URL and return its JSON response.
Args:
url (str): URL to be requested.
json_callback (func): Custom JSON loader function. Defaults
to :meth:`json.loads`.
kwargs (dict): Additional arguments to pass through to the
request.
Returns:
response body returned by :func:`json_callback` function. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/http.py#L191-L206 | [
"async def request(self, method, url, params=None, headers=None,\n data=None, json=None, token_refresh_attempts=2,\n **kwargs):\n \"\"\"Make an asynchronous HTTP request.\n\n Args:\n method (str): HTTP method to use for the request.\n url (str): URL to be requested.\n params (dict): (optional) Query parameters for the request.\n Defaults to ``None``.\n headers (dict): (optional) HTTP headers to send with the\n request. Headers pass through to the request will\n include :attr:`DEFAULT_REQUEST_HEADERS`.\n data (obj): (optional) A dictionary, bytes, or file-like\n object to send in the body of the request.\n json (obj): (optional) Any json compatible python\n object.\n NOTE: json and body parameters cannot be used at the same time.\n token_refresh_attempts (int): (optional) Number of attempts a token\n refresh should be performed.\n Returns:\n (str) HTTP response body.\n Raises:\n :exc:`.GCPHTTPError`: if any exception occurred,\n specifically a :exc:`.GCPHTTPResponseError`, if the\n exception is associated with a response status code.\n\n \"\"\"\n if all([data, json]):\n msg = ('\"data\" and \"json\" request parameters can not be used '\n 'at the same time')\n logging.warn(msg)\n raise exceptions.GCPHTTPError(msg)\n\n req_headers = headers or {}\n req_headers.update(_utils.DEFAULT_REQUEST_HEADERS)\n req_kwargs = {\n 'params': params,\n 'headers': req_headers,\n }\n\n if data:\n req_kwargs['data'] = data\n if json:\n req_kwargs['json'] = json\n\n if token_refresh_attempts:\n if not await self.valid_token_set():\n await self._auth_client.refresh_token()\n token_refresh_attempts -= 1\n\n req_headers.update(\n {'Authorization': f'Bearer {self._auth_client.token}'}\n )\n\n request_id = kwargs.get('request_id', uuid.uuid4())\n logging.debug(_utils.REQ_LOG_FMT.format(\n request_id=request_id,\n method=method.upper(),\n url=url,\n kwargs=req_kwargs))\n try:\n async with self._session.request(method, url, **req_kwargs) as resp:\n log_kw = {\n 'request_id': request_id,\n 
'method': method.upper(),\n 'url': resp.url,\n 'status': resp.status,\n 'reason': resp.reason\n }\n logging.debug(_utils.RESP_LOG_FMT.format(**log_kw))\n\n if resp.status in REFRESH_STATUS_CODES:\n logging.warning(\n f'[{request_id}] HTTP Status Code {resp.status}'\n f' returned requesting {resp.url}: {resp.reason}')\n if token_refresh_attempts:\n logging.info(\n f'[{request_id}] Attempting request to {resp.url} '\n 'again.')\n return await self.request(\n method, url,\n token_refresh_attempts=token_refresh_attempts,\n request_id=request_id,\n **req_kwargs)\n\n logging.warning(\n f'[{request_id}] Max attempts refreshing auth token '\n f'exhausted while requesting {resp.url}')\n\n resp.raise_for_status()\n\n return await resp.text()\n except aiohttp.ClientResponseError as e:\n # bad HTTP status; avoid leaky abstractions and wrap HTTP errors\n # with our own\n msg = f'[{request_id}] HTTP error response from {resp.url}: {e}'\n logging.error(msg, exc_info=e)\n raise exceptions.GCPHTTPResponseError(msg, resp.status)\n except exceptions.GCPHTTPResponseError as e:\n # from recursive call\n raise e\n except Exception as e:\n msg = f'[{request_id}] Request call failed: {e}'\n logging.error(msg, exc_info=e)\n raise exceptions.GCPHTTPError(msg)\n"
] | class AIOConnection:
"""Async HTTP client to Google APIs with service-account-based auth.
Args:
auth_client (.GAuthClient): client to manage authentication for
HTTP API requests.
session (aiohttp.ClientSession): (optional) ``aiohttp`` HTTP
session to use for sending requests. Defaults to the session
object attached to :obj:`auth_client` if not provided.
"""
def __init__(self, auth_client=None, session=None):
self._auth_client = auth_client
self._session = session or auth_client._session
async def valid_token_set(self):
"""Check for validity of token, and refresh if none or expired."""
is_valid = False
if self._auth_client.token:
# Account for a token near expiration
now = datetime.datetime.utcnow()
skew = datetime.timedelta(seconds=60)
if self._auth_client.expiry > (now + skew):
is_valid = True
return is_valid
async def request(self, method, url, params=None, headers=None,
data=None, json=None, token_refresh_attempts=2,
**kwargs):
"""Make an asynchronous HTTP request.
Args:
method (str): HTTP method to use for the request.
url (str): URL to be requested.
params (dict): (optional) Query parameters for the request.
Defaults to ``None``.
headers (dict): (optional) HTTP headers to send with the
request. Headers pass through to the request will
include :attr:`DEFAULT_REQUEST_HEADERS`.
data (obj): (optional) A dictionary, bytes, or file-like
object to send in the body of the request.
json (obj): (optional) Any json compatible python
object.
NOTE: json and body parameters cannot be used at the same time.
token_refresh_attempts (int): (optional) Number of attempts a token
refresh should be performed.
Returns:
(str) HTTP response body.
Raises:
:exc:`.GCPHTTPError`: if any exception occurred,
specifically a :exc:`.GCPHTTPResponseError`, if the
exception is associated with a response status code.
"""
if all([data, json]):
msg = ('"data" and "json" request parameters can not be used '
'at the same time')
logging.warn(msg)
raise exceptions.GCPHTTPError(msg)
req_headers = headers or {}
req_headers.update(_utils.DEFAULT_REQUEST_HEADERS)
req_kwargs = {
'params': params,
'headers': req_headers,
}
if data:
req_kwargs['data'] = data
if json:
req_kwargs['json'] = json
if token_refresh_attempts:
if not await self.valid_token_set():
await self._auth_client.refresh_token()
token_refresh_attempts -= 1
req_headers.update(
{'Authorization': f'Bearer {self._auth_client.token}'}
)
request_id = kwargs.get('request_id', uuid.uuid4())
logging.debug(_utils.REQ_LOG_FMT.format(
request_id=request_id,
method=method.upper(),
url=url,
kwargs=req_kwargs))
try:
async with self._session.request(method, url, **req_kwargs) as resp:
log_kw = {
'request_id': request_id,
'method': method.upper(),
'url': resp.url,
'status': resp.status,
'reason': resp.reason
}
logging.debug(_utils.RESP_LOG_FMT.format(**log_kw))
if resp.status in REFRESH_STATUS_CODES:
logging.warning(
f'[{request_id}] HTTP Status Code {resp.status}'
f' returned requesting {resp.url}: {resp.reason}')
if token_refresh_attempts:
logging.info(
f'[{request_id}] Attempting request to {resp.url} '
'again.')
return await self.request(
method, url,
token_refresh_attempts=token_refresh_attempts,
request_id=request_id,
**req_kwargs)
logging.warning(
f'[{request_id}] Max attempts refreshing auth token '
f'exhausted while requesting {resp.url}')
resp.raise_for_status()
return await resp.text()
except aiohttp.ClientResponseError as e:
# bad HTTP status; avoid leaky abstractions and wrap HTTP errors
# with our own
msg = f'[{request_id}] HTTP error response from {resp.url}: {e}'
logging.error(msg, exc_info=e)
raise exceptions.GCPHTTPResponseError(msg, resp.status)
except exceptions.GCPHTTPResponseError as e:
# from recursive call
raise e
except Exception as e:
msg = f'[{request_id}] Request call failed: {e}'
logging.error(msg, exc_info=e)
raise exceptions.GCPHTTPError(msg)
async def get_all(self, url, params=None):
"""Aggregate data from all pages of an API query.
Args:
url (str): Google API endpoint URL.
params (dict): (optional) URL query parameters.
Returns:
list: Parsed JSON query response results.
"""
if not params:
params = {}
items = []
next_page_token = None
while True:
if next_page_token:
params['pageToken'] = next_page_token
response = await self.get_json(url, params=params)
items.append(response)
next_page_token = response.get('nextPageToken')
if not next_page_token:
break
return items
|
spotify/gordon-gcp | src/gordon_gcp/clients/http.py | AIOConnection.get_all | python | async def get_all(self, url, params=None):
if not params:
params = {}
items = []
next_page_token = None
while True:
if next_page_token:
params['pageToken'] = next_page_token
response = await self.get_json(url, params=params)
items.append(response)
next_page_token = response.get('nextPageToken')
if not next_page_token:
break
return items | Aggregate data from all pages of an API query.
Args:
url (str): Google API endpoint URL.
params (dict): (optional) URL query parameters.
Returns:
list: Parsed JSON query response results. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/http.py#L208-L232 | [
"async def get_json(self, url, json_callback=None, **kwargs):\n \"\"\"Get a URL and return its JSON response.\n\n Args:\n url (str): URL to be requested.\n json_callback (func): Custom JSON loader function. Defaults\n to :meth:`json.loads`.\n kwargs (dict): Additional arguments to pass through to the\n request.\n Returns:\n response body returned by :func:`json_callback` function.\n \"\"\"\n if not json_callback:\n json_callback = json.loads\n response = await self.request(method='get', url=url, **kwargs)\n return json_callback(response)\n"
] | class AIOConnection:
"""Async HTTP client to Google APIs with service-account-based auth.
Args:
auth_client (.GAuthClient): client to manage authentication for
HTTP API requests.
session (aiohttp.ClientSession): (optional) ``aiohttp`` HTTP
session to use for sending requests. Defaults to the session
object attached to :obj:`auth_client` if not provided.
"""
def __init__(self, auth_client=None, session=None):
self._auth_client = auth_client
self._session = session or auth_client._session
async def valid_token_set(self):
"""Check for validity of token, and refresh if none or expired."""
is_valid = False
if self._auth_client.token:
# Account for a token near expiration
now = datetime.datetime.utcnow()
skew = datetime.timedelta(seconds=60)
if self._auth_client.expiry > (now + skew):
is_valid = True
return is_valid
async def request(self, method, url, params=None, headers=None,
data=None, json=None, token_refresh_attempts=2,
**kwargs):
"""Make an asynchronous HTTP request.
Args:
method (str): HTTP method to use for the request.
url (str): URL to be requested.
params (dict): (optional) Query parameters for the request.
Defaults to ``None``.
headers (dict): (optional) HTTP headers to send with the
request. Headers pass through to the request will
include :attr:`DEFAULT_REQUEST_HEADERS`.
data (obj): (optional) A dictionary, bytes, or file-like
object to send in the body of the request.
json (obj): (optional) Any json compatible python
object.
NOTE: json and body parameters cannot be used at the same time.
token_refresh_attempts (int): (optional) Number of attempts a token
refresh should be performed.
Returns:
(str) HTTP response body.
Raises:
:exc:`.GCPHTTPError`: if any exception occurred,
specifically a :exc:`.GCPHTTPResponseError`, if the
exception is associated with a response status code.
"""
if all([data, json]):
msg = ('"data" and "json" request parameters can not be used '
'at the same time')
logging.warn(msg)
raise exceptions.GCPHTTPError(msg)
req_headers = headers or {}
req_headers.update(_utils.DEFAULT_REQUEST_HEADERS)
req_kwargs = {
'params': params,
'headers': req_headers,
}
if data:
req_kwargs['data'] = data
if json:
req_kwargs['json'] = json
if token_refresh_attempts:
if not await self.valid_token_set():
await self._auth_client.refresh_token()
token_refresh_attempts -= 1
req_headers.update(
{'Authorization': f'Bearer {self._auth_client.token}'}
)
request_id = kwargs.get('request_id', uuid.uuid4())
logging.debug(_utils.REQ_LOG_FMT.format(
request_id=request_id,
method=method.upper(),
url=url,
kwargs=req_kwargs))
try:
async with self._session.request(method, url, **req_kwargs) as resp:
log_kw = {
'request_id': request_id,
'method': method.upper(),
'url': resp.url,
'status': resp.status,
'reason': resp.reason
}
logging.debug(_utils.RESP_LOG_FMT.format(**log_kw))
if resp.status in REFRESH_STATUS_CODES:
logging.warning(
f'[{request_id}] HTTP Status Code {resp.status}'
f' returned requesting {resp.url}: {resp.reason}')
if token_refresh_attempts:
logging.info(
f'[{request_id}] Attempting request to {resp.url} '
'again.')
return await self.request(
method, url,
token_refresh_attempts=token_refresh_attempts,
request_id=request_id,
**req_kwargs)
logging.warning(
f'[{request_id}] Max attempts refreshing auth token '
f'exhausted while requesting {resp.url}')
resp.raise_for_status()
return await resp.text()
except aiohttp.ClientResponseError as e:
# bad HTTP status; avoid leaky abstractions and wrap HTTP errors
# with our own
msg = f'[{request_id}] HTTP error response from {resp.url}: {e}'
logging.error(msg, exc_info=e)
raise exceptions.GCPHTTPResponseError(msg, resp.status)
except exceptions.GCPHTTPResponseError as e:
# from recursive call
raise e
except Exception as e:
msg = f'[{request_id}] Request call failed: {e}'
logging.error(msg, exc_info=e)
raise exceptions.GCPHTTPError(msg)
async def get_json(self, url, json_callback=None, **kwargs):
"""Get a URL and return its JSON response.
Args:
url (str): URL to be requested.
json_callback (func): Custom JSON loader function. Defaults
to :meth:`json.loads`.
kwargs (dict): Additional arguments to pass through to the
request.
Returns:
response body returned by :func:`json_callback` function.
"""
if not json_callback:
json_callback = json.loads
response = await self.request(method='get', url=url, **kwargs)
return json_callback(response)
|
spotify/gordon-gcp | src/gordon_gcp/clients/gce.py | GCEClient.list_instances | python | async def list_instances(self,
project,
page_size=100,
instance_filter=None):
url = (f'{self.BASE_URL}{self.api_version}/projects/{project}'
'/aggregated/instances')
params = {'maxResults': page_size}
if instance_filter:
params['filter'] = instance_filter
responses = await self.list_all(url, params)
instances = self._parse_rsps_for_instances(responses)
return instances | Fetch all instances in a GCE project.
You can find the endpoint documentation `here <https://cloud.
google.com/compute/docs/reference/latest/instances/
aggregatedList>`__.
Args:
project (str): unique, user-provided project ID.
page_size (int): hint for the client to only retrieve up to
this number of results per API call.
instance_filter (str): endpoint-specific filter string used
to retrieve a subset of instances. This is passed
directly to the endpoint's "filter" URL query parameter.
Returns:
list(dicts): data of all instances in the given
:obj:`project` | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/gce.py#L89-L118 | [
"async def list_all(self, url, params):\n \"\"\"Aggregate data from all pages of an API query.\n\n Args:\n url (str): Google API endpoint URL.\n params (dict): URL query parameters.\n Returns:\n list: parsed query response results.\n \"\"\"\n items = []\n next_page_token = None\n\n while True:\n if next_page_token:\n params['pageToken'] = next_page_token\n response = await self.get_json(url, params=params)\n\n items.append(response)\n next_page_token = response.get('nextPageToken')\n if not next_page_token:\n break\n return items\n",
"def _parse_rsps_for_instances(self, responses):\n instances = []\n for response in responses:\n for zone in response.get('items', {}).values():\n instances.extend(self._filter_zone_instances(zone))\n return instances\n"
] | class GCEClient(http.AIOConnection, _utils.GPaginatorMixin):
"""Async client to interact with Google Cloud Compute API.
Attributes:
BASE_URL (str): base compute endpoint URL.
Args:
auth_client (.GAuthClient): client to manage authentication for
HTTP API requests.
session (aiohttp.ClientSession): (optional) ``aiohttp`` HTTP
session to use for sending requests. Defaults to the
session object attached to :obj:`auth_client` if not
provided.
api_version (str): version of API endpoint to send requests to.
blacklisted_tags (list): Do not collect an instance if it has
been tagged with any of these.
blacklisted_metadata (list): Do not collect an instance if its
metadata key:val matches a {key:val} dict in this list.
"""
BASE_URL = 'https://www.googleapis.com/compute/'
def __init__(self,
auth_client=None,
session=None,
api_version='v1',
blacklisted_tags=None,
blacklisted_metadata=None):
super().__init__(auth_client=auth_client, session=session)
self.api_version = api_version
self.blacklisted_tags = blacklisted_tags or []
self.blacklisted_metadata = blacklisted_metadata or []
def _parse_rsps_for_instances(self, responses):
instances = []
for response in responses:
for zone in response.get('items', {}).values():
instances.extend(self._filter_zone_instances(zone))
return instances
def _filter_zone_instances(self, zone):
instances = []
for instance in zone.get('instances', []):
if not any([
self._blacklisted_by_tag(instance),
self._blacklisted_by_metadata(instance)
]):
instances.append(instance)
return instances
def _blacklisted_by_tag(self, instance):
instance_tags = instance.get('tags', {}).get('items', [])
for tag in instance_tags:
if tag in self.blacklisted_tags:
msg = (f'Instance "{instance["name"]}" filtered out for '
f'blacklisted tag: "{tag}"')
logging.debug(msg)
return True
return False
def _blacklisted_by_metadata(self, instance):
# NOTE: Both key and value are used when comparing the instance and
# blacklist metadata.
instance_metadata = instance.get('metadata', {}).get('items', [])
for metadata in instance_metadata:
for bl_meta in self.blacklisted_metadata:
if bl_meta.get(metadata['key']) == metadata['value']:
msg = (f'Instance "{instance["name"]}" filtered out for '
f'blacklisted metadata: "{bl_meta}"')
logging.debug(msg)
return True
return False
|
spotify/gordon-gcp | src/gordon_gcp/schema/parse.py | MessageParser.parse | python | def parse(self, message, schema):
func = {
'audit-log': self._parse_audit_log_msg,
'event': self._parse_event_msg,
}[schema]
return func(message) | Parse message according to schema.
`message` should already be validated against the given schema.
See :ref:`schemadef` for more information.
Args:
message (dict): message data to parse.
schema (str): valid message schema.
Returns:
(dict): parsed message | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/schema/parse.py#L69-L85 | null | class MessageParser:
"""Parse a message provided a given GCP schema."""
ACTION_MAPPER = {
'v1.compute.instances.insert': 'additions',
'v1.compute.instances.delete': 'deletions',
}
def _parse_audit_log_msg(self, message):
# TODO (lynn): what about first vs last messages?
payload = message['protoPayload']
data = {
'action': self.ACTION_MAPPER[payload['methodName']],
'resourceName': payload['resourceName'],
'resourceRecords': [],
'timestamp': message['timestamp'],
}
return data
def _parse_event_msg(self, message):
# TODO (lynn): potentially should update schema to require a
# list of records rather than shoehorning it here
message['resourceRecords'] = [message['resourceRecords']]
return message
|
spotify/gordon-gcp | src/gordon_gcp/plugins/janitor/__init__.py | get_gpubsub_publisher | python | def get_gpubsub_publisher(config, metrics, changes_channel, **kw):
builder = gpubsub_publisher.GPubsubPublisherBuilder(
config, metrics, changes_channel, **kw)
return builder.build_publisher() | Get a GPubsubPublisher client.
A factory function that validates configuration, creates an auth
and pubsub API client, and returns a Google Pub/Sub Publisher
provider.
Args:
config (dict): Google Cloud Pub/Sub-related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
changes_channel (asyncio.Queue): Queue to publish message to
make corrections to Cloud DNS.
kw (dict): Additional keyword arguments to pass to the
Publisher.
Returns:
A :class:`GPubsubPublisher` instance. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/plugins/janitor/__init__.py#L34-L53 | [
"def build_publisher(self):\n self._validate_config()\n auth_client = self._init_auth()\n pubsub_client = self._init_client(auth_client)\n return GPubsubPublisher(\n self.config, self.metrics, pubsub_client, self.changes_channel,\n **self.kwargs)\n"
] | # -*- coding: utf-8 -*-
#
# Copyright 2018 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gordon_gcp.plugins.janitor import authority
from gordon_gcp.plugins.janitor import gpubsub_publisher
from gordon_gcp.plugins.janitor import reconciler
# Mainly for easier documentation reading
from gordon_gcp.plugins.janitor.authority import * # noqa: F401, F403
from gordon_gcp.plugins.janitor.gpubsub_publisher import * # noqa: F401, F403
from gordon_gcp.plugins.janitor.reconciler import * # noqa: F401, F403
__all__ = (
authority.__all__ + # noqa: F405
gpubsub_publisher.__all__ + # noqa: F405
reconciler.__all__ + # noqa: F405
('get_gpubsub_publisher', 'get_reconciler', 'get_authority')
)
def get_reconciler(config, metrics, rrset_channel, changes_channel, **kw):
"""Get a GDNSReconciler client.
A factory function that validates configuration, creates an auth
and :class:`GDNSClient` instance, and returns a GDNSReconciler
provider.
Args:
config (dict): Google Cloud Pub/Sub-related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
rrset_channel (asyncio.Queue): Queue from which to consume
record set messages to validate.
changes_channel (asyncio.Queue): Queue to publish message to
make corrections to Cloud DNS.
kw (dict): Additional keyword arguments to pass to the
Reconciler.
Returns:
A :class:`GDNSReconciler` instance.
"""
builder = reconciler.GDNSReconcilerBuilder(
config, metrics, rrset_channel, changes_channel, **kw)
return builder.build_reconciler()
def get_authority(config, metrics, rrset_channel, **kwargs):
"""Get a GCEAuthority client.
A factory function that validates configuration and creates a
proper GCEAuthority.
Args:
config (dict): GCEAuthority related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
rrset_channel (asyncio.Queue): Queue used for sending messages
to the reconciler plugin.
kw (dict): Additional keyword arguments to pass to the
Authority.
Returns:
A :class:`GCEAuthority` instance.
"""
builder = authority.GCEAuthorityBuilder(
config, metrics, rrset_channel, **kwargs)
return builder.build_authority()
|
spotify/gordon-gcp | src/gordon_gcp/plugins/janitor/__init__.py | get_reconciler | python | def get_reconciler(config, metrics, rrset_channel, changes_channel, **kw):
builder = reconciler.GDNSReconcilerBuilder(
config, metrics, rrset_channel, changes_channel, **kw)
return builder.build_reconciler() | Get a GDNSReconciler client.
A factory function that validates configuration, creates an auth
and :class:`GDNSClient` instance, and returns a GDNSReconciler
provider.
Args:
config (dict): Google Cloud Pub/Sub-related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
rrset_channel (asyncio.Queue): Queue from which to consume
record set messages to validate.
changes_channel (asyncio.Queue): Queue to publish message to
make corrections to Cloud DNS.
kw (dict): Additional keyword arguments to pass to the
Reconciler.
Returns:
A :class:`GDNSReconciler` instance. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/plugins/janitor/__init__.py#L56-L77 | [
"def build_reconciler(self):\n self._validate_config()\n auth_client = self._init_auth()\n dns_client = self._init_client(auth_client)\n return GDNSReconciler(\n self.config, self.metrics, dns_client, self.rrset_channel,\n self.changes_channel, **self.kwargs)\n"
] | # -*- coding: utf-8 -*-
#
# Copyright 2018 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gordon_gcp.plugins.janitor import authority
from gordon_gcp.plugins.janitor import gpubsub_publisher
from gordon_gcp.plugins.janitor import reconciler
# Mainly for easier documentation reading
from gordon_gcp.plugins.janitor.authority import * # noqa: F401, F403
from gordon_gcp.plugins.janitor.gpubsub_publisher import * # noqa: F401, F403
from gordon_gcp.plugins.janitor.reconciler import * # noqa: F401, F403
__all__ = (
authority.__all__ + # noqa: F405
gpubsub_publisher.__all__ + # noqa: F405
reconciler.__all__ + # noqa: F405
('get_gpubsub_publisher', 'get_reconciler', 'get_authority')
)
def get_gpubsub_publisher(config, metrics, changes_channel, **kw):
"""Get a GPubsubPublisher client.
A factory function that validates configuration, creates an auth
and pubsub API client, and returns a Google Pub/Sub Publisher
provider.
Args:
config (dict): Google Cloud Pub/Sub-related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
changes_channel (asyncio.Queue): Queue to publish message to
make corrections to Cloud DNS.
kw (dict): Additional keyword arguments to pass to the
Publisher.
Returns:
A :class:`GPubsubPublisher` instance.
"""
builder = gpubsub_publisher.GPubsubPublisherBuilder(
config, metrics, changes_channel, **kw)
return builder.build_publisher()
def get_authority(config, metrics, rrset_channel, **kwargs):
"""Get a GCEAuthority client.
A factory function that validates configuration and creates a
proper GCEAuthority.
Args:
config (dict): GCEAuthority related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
rrset_channel (asyncio.Queue): Queue used for sending messages
to the reconciler plugin.
kw (dict): Additional keyword arguments to pass to the
Authority.
Returns:
A :class:`GCEAuthority` instance.
"""
builder = authority.GCEAuthorityBuilder(
config, metrics, rrset_channel, **kwargs)
return builder.build_authority()
|
spotify/gordon-gcp | src/gordon_gcp/plugins/janitor/__init__.py | get_authority | python | def get_authority(config, metrics, rrset_channel, **kwargs):
builder = authority.GCEAuthorityBuilder(
config, metrics, rrset_channel, **kwargs)
return builder.build_authority() | Get a GCEAuthority client.
A factory function that validates configuration and creates a
proper GCEAuthority.
Args:
config (dict): GCEAuthority related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
rrset_channel (asyncio.Queue): Queue used for sending messages
to the reconciler plugin.
kw (dict): Additional keyword arguments to pass to the
Authority.
Returns:
A :class:`GCEAuthority` instance. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/plugins/janitor/__init__.py#L80-L98 | [
"def build_authority(self):\n self._validate_config()\n keyfile_path = self.config['keyfile']\n scopes = self.config.get('scopes')\n self.session = aiohttp.ClientSession()\n crm_client = self._get_crm_client(keyfile_path, scopes)\n gce_client = self._get_gce_client(keyfile_path, scopes)\n\n return GCEAuthority(self.config, self.metrics, crm_client, gce_client,\n self.rrset_channel, **self.kwargs)\n"
] | # -*- coding: utf-8 -*-
#
# Copyright 2018 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gordon_gcp.plugins.janitor import authority
from gordon_gcp.plugins.janitor import gpubsub_publisher
from gordon_gcp.plugins.janitor import reconciler
# Mainly for easier documentation reading
from gordon_gcp.plugins.janitor.authority import * # noqa: F401, F403
from gordon_gcp.plugins.janitor.gpubsub_publisher import * # noqa: F401, F403
from gordon_gcp.plugins.janitor.reconciler import * # noqa: F401, F403
__all__ = (
authority.__all__ + # noqa: F405
gpubsub_publisher.__all__ + # noqa: F405
reconciler.__all__ + # noqa: F405
('get_gpubsub_publisher', 'get_reconciler', 'get_authority')
)
def get_gpubsub_publisher(config, metrics, changes_channel, **kw):
"""Get a GPubsubPublisher client.
A factory function that validates configuration, creates an auth
and pubsub API client, and returns a Google Pub/Sub Publisher
provider.
Args:
config (dict): Google Cloud Pub/Sub-related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
changes_channel (asyncio.Queue): Queue to publish message to
make corrections to Cloud DNS.
kw (dict): Additional keyword arguments to pass to the
Publisher.
Returns:
A :class:`GPubsubPublisher` instance.
"""
builder = gpubsub_publisher.GPubsubPublisherBuilder(
config, metrics, changes_channel, **kw)
return builder.build_publisher()
def get_reconciler(config, metrics, rrset_channel, changes_channel, **kw):
"""Get a GDNSReconciler client.
A factory function that validates configuration, creates an auth
and :class:`GDNSClient` instance, and returns a GDNSReconciler
provider.
Args:
config (dict): Google Cloud Pub/Sub-related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
rrset_channel (asyncio.Queue): Queue from which to consume
record set messages to validate.
changes_channel (asyncio.Queue): Queue to publish message to
make corrections to Cloud DNS.
kw (dict): Additional keyword arguments to pass to the
Reconciler.
Returns:
A :class:`GDNSReconciler` instance.
"""
builder = reconciler.GDNSReconcilerBuilder(
config, metrics, rrset_channel, changes_channel, **kw)
return builder.build_reconciler()
|
spotify/gordon-gcp | src/gordon_gcp/clients/auth.py | GAuthClient.refresh_token | python | async def refresh_token(self):
url, headers, body = self._setup_token_request()
request_id = uuid.uuid4()
logging.debug(_utils.REQ_LOG_FMT.format(
request_id=request_id, method='POST', url=url, kwargs=None))
async with self._session.post(url, headers=headers, data=body) as resp:
log_kw = {
'request_id': request_id,
'method': 'POST',
'url': resp.url,
'status': resp.status,
'reason': resp.reason,
}
logging.debug(_utils.RESP_LOG_FMT.format(**log_kw))
# avoid leaky abstractions and wrap http errors with our own
try:
resp.raise_for_status()
except aiohttp.ClientResponseError as e:
msg = f'[{request_id}] Issue connecting to {resp.url}: {e}'
logging.error(msg, exc_info=e)
raise exceptions.GCPHTTPResponseError(msg, resp.status)
response = await resp.json()
try:
self.token = response['access_token']
except KeyError:
msg = '[{request_id}] No access token in response.'
logging.error(msg)
raise exceptions.GCPAuthError(msg)
self.expiry = _client._parse_expiry(response) | Refresh oauth access token attached to this HTTP session.
Raises:
:exc:`.GCPAuthError`: if no token was found in the
response.
:exc:`.GCPHTTPError`: if any exception occurred,
specifically a :exc:`.GCPHTTPResponseError`, if the
exception is associated with a response status code. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/auth.py#L168-L208 | [
"def _setup_token_request(self):\n url = self.creds._token_uri\n\n headers = _utils.DEFAULT_REQUEST_HEADERS.copy()\n headers.update(\n {'Content-type': 'application/x-www-form-urlencoded'}\n )\n body = self._setup_request_body()\n body = urllib.parse.urlencode(body)\n return url, headers, bytes(body.encode('utf-8'))\n"
] | class GAuthClient:
"""Async client to authenticate against Google Cloud APIs.
Attributes:
SCOPE_TMPL_URL (str): template URL for Google auth scopes.
DEFAULT_SCOPE (str): default scope if not provided.
JWT_GRANT_TYPE (str): grant type header value when
requesting/refreshing an access token.
Args:
keyfile (str): path to service account (SA) keyfile.
scopes (list): (optional) scopes with which to authorize the SA.
Default is ``'cloud-platform'``.
session (aiohttp.ClientSession): (optional) ``aiohttp`` HTTP
session to use for sending requests.
loop: (optional) asyncio event loop to use for HTTP requests.
NOTE: if :obj:`session` is given, then :obj:`loop` will be
ignored. Otherwise, :obj:`loop` will be used to create a
session, if provided.
"""
SCOPE_TMPL_URL = 'https://www.googleapis.com/auth/{scope}'
DEFAULT_SCOPE = 'cloud-platform'
JWT_GRANT_TYPE = 'urn:ietf:params:oauth:grant-type:jwt-bearer'
def __init__(self, keyfile=None, scopes=None, session=None, loop=None):
self._keydata = self._load_keyfile(keyfile)
self.scopes = self._set_scopes(scopes)
self.creds = self._load_credentials()
self._session = self._set_session(session, loop)
self.token = None
self.expiry = None # UTC time
def _load_keyfile(self, keyfile):
if not keyfile:
return None
try:
with open(keyfile, 'r') as f:
return json.load(f)
except FileNotFoundError as e:
msg = f'Keyfile {keyfile} was not found.'
logging.error(msg, exc_info=e)
raise exceptions.GCPGordonError(msg)
except json.JSONDecodeError as e:
msg = f'Keyfile {keyfile} is not valid JSON.'
logging.error(msg, exc_info=e)
raise exceptions.GCPGordonError(msg)
def _set_scopes(self, scopes):
if not scopes:
scopes = [self.DEFAULT_SCOPE]
return [self.SCOPE_TMPL_URL.format(scope=s) for s in scopes]
def _load_credentials(self):
# load credentials with two options:
# 1. using key data 2. using Application Default Credentials
if self._keydata:
return service_account.Credentials.from_service_account_info(
self._keydata, scopes=self.scopes)
credentials, _ = gauth.default(
scopes=['https://www.googleapis.com/auth/userinfo.email'])
return credentials
def _set_session(self, session, loop):
if session is not None:
return session
if not loop:
loop = asyncio.get_event_loop()
session = aiohttp.ClientSession(loop=loop)
return session
def _setup_token_request(self):
url = self.creds._token_uri
headers = _utils.DEFAULT_REQUEST_HEADERS.copy()
headers.update(
{'Content-type': 'application/x-www-form-urlencoded'}
)
body = self._setup_request_body()
body = urllib.parse.urlencode(body)
return url, headers, bytes(body.encode('utf-8'))
def _setup_request_body(self):
if self._keydata:
return {
'assertion': self.creds._make_authorization_grant_assertion(),
'grant_type': self.JWT_GRANT_TYPE,
}
return {
'refresh_token': self.creds._refresh_token,
'client_id': self.creds._client_id,
'client_secret': self.creds._client_secret,
'grant_type': 'refresh_token'
}
|
spotify/gordon-gcp | src/gordon_gcp/clients/gdns.py | GDNSClient.get_managed_zone | python | def get_managed_zone(self, zone):
if zone.endswith('.in-addr.arpa.'):
return self.reverse_prefix + '-'.join(zone.split('.')[-5:-3])
return self.forward_prefix + '-'.join(zone.split('.')[:-1]) | Get the GDNS managed zone name for a DNS zone.
Google uses custom string names with specific `requirements
<https://cloud.google.com/dns/api/v1/managedZones#resource>`_
for storing records. The scheme implemented here chooses a
managed zone name which removes the trailing dot and replaces
other dots with dashes, and in the case of reverse records,
uses only the two most significant octets, prepended with
'reverse'. At least two octets are required for reverse DNS zones.
Example:
get_managed_zone('example.com.') = 'example-com'
get_managed_zone('20.10.in-addr.arpa.) = 'reverse-20-10'
get_managed_zone('30.20.10.in-addr.arpa.) = 'reverse-20-10'
get_managed_zone('40.30.20.10.in-addr.arpa.) = 'reverse-20-10'
Args:
zone (str): DNS zone.
Returns:
str of managed zone name. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/gdns.py#L81-L106 | null | class GDNSClient(http.AIOConnection):
"""Async HTTP client to interact with Google Cloud DNS API.
Attributes:
BASE_URL (str): base call url for the DNS API
Args:
project (str): Google project ID that hosts the managed DNS.
auth_client (.GAuthClient): client to manage authentication for
HTTP API requests.
api_version (str): DNS API endpoint version. Defaults to ``v1``.
session (aiohttp.ClientSession): (optional) ``aiohttp`` HTTP
session to use for sending requests. Defaults to the session
object attached to :obj:`auth_client` if not provided.
"""
BASE_URL = 'https://www.googleapis.com/dns'
# see https://cloud.google.com/dns/api/v1/changes#resource
DNS_CHANGES_DONE = 'done'
REVERSE_PREFIX = 'reverse-'
def __init__(self, project=None, auth_client=None, api_version='v1',
session=None, default_zone_prefix=None):
super().__init__(auth_client=auth_client, session=session)
self.project = project
self._base_url = f'{self.BASE_URL}/{api_version}/projects/{project}'
prefix = f'{default_zone_prefix}-' if default_zone_prefix else ''
self.forward_prefix = f'{prefix}'
self.reverse_prefix = f'{prefix}{self.REVERSE_PREFIX}'
async def get_records_for_zone(self, dns_zone, params=None):
"""Get all resource record sets for a managed zone, using the DNS zone.
Args:
dns_zone (str): Desired DNS zone to query.
params (dict): (optional) Additional query parameters for HTTP
requests to the GDNS API.
Returns:
list of dicts representing rrsets.
"""
managed_zone = self.get_managed_zone(dns_zone)
url = f'{self._base_url}/managedZones/{managed_zone}/rrsets'
if not params:
params = {}
if 'fields' not in params:
# Get only the fields we care about
params['fields'] = ('rrsets/name,rrsets/kind,rrsets/rrdatas,'
'rrsets/type,rrsets/ttl,nextPageToken')
next_page_token = None
records = []
while True:
if next_page_token:
params['pageToken'] = next_page_token
response = await self.get_json(url, params=params)
records.extend(response['rrsets'])
next_page_token = response.get('nextPageToken')
if not next_page_token:
break
logging.info(f'Found {len(records)} rrsets for zone "{dns_zone}".')
return records
async def is_change_done(self, zone, change_id):
"""Check if a DNS change has completed.
Args:
zone (str): DNS zone of the change.
change_id (str): Identifier of the change.
Returns:
Boolean
"""
zone_id = self.get_managed_zone(zone)
url = f'{self._base_url}/managedZones/{zone_id}/changes/{change_id}'
resp = await self.get_json(url)
return resp['status'] == self.DNS_CHANGES_DONE
async def publish_changes(self, zone, changes):
"""Post changes to a zone.
Args:
zone (str): DNS zone of the change.
changes (dict): JSON compatible dict of a `Change
<https://cloud.google.com/dns/api/v1/changes>`_.
Returns:
string identifier of the change.
"""
zone_id = self.get_managed_zone(zone)
url = f'{self._base_url}/managedZones/{zone_id}/changes'
resp = await self.request('post', url, json=changes)
return json.loads(resp)['id']
|
spotify/gordon-gcp | src/gordon_gcp/clients/gdns.py | GDNSClient.get_records_for_zone | python | async def get_records_for_zone(self, dns_zone, params=None):
managed_zone = self.get_managed_zone(dns_zone)
url = f'{self._base_url}/managedZones/{managed_zone}/rrsets'
if not params:
params = {}
if 'fields' not in params:
# Get only the fields we care about
params['fields'] = ('rrsets/name,rrsets/kind,rrsets/rrdatas,'
'rrsets/type,rrsets/ttl,nextPageToken')
next_page_token = None
records = []
while True:
if next_page_token:
params['pageToken'] = next_page_token
response = await self.get_json(url, params=params)
records.extend(response['rrsets'])
next_page_token = response.get('nextPageToken')
if not next_page_token:
break
logging.info(f'Found {len(records)} rrsets for zone "{dns_zone}".')
return records | Get all resource record sets for a managed zone, using the DNS zone.
Args:
dns_zone (str): Desired DNS zone to query.
params (dict): (optional) Additional query parameters for HTTP
requests to the GDNS API.
Returns:
list of dicts representing rrsets. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/gdns.py#L108-L141 | [
"def get_managed_zone(self, zone):\n \"\"\"Get the GDNS managed zone name for a DNS zone.\n\n Google uses custom string names with specific `requirements\n <https://cloud.google.com/dns/api/v1/managedZones#resource>`_\n for storing records. The scheme implemented here chooses a\n managed zone name which removes the trailing dot and replaces\n other dots with dashes, and in the case of reverse records,\n uses only the two most significant octets, prepended with\n 'reverse'. At least two octets are required for reverse DNS zones.\n\n Example:\n get_managed_zone('example.com.') = 'example-com'\n get_managed_zone('20.10.in-addr.arpa.) = 'reverse-20-10'\n get_managed_zone('30.20.10.in-addr.arpa.) = 'reverse-20-10'\n get_managed_zone('40.30.20.10.in-addr.arpa.) = 'reverse-20-10'\n\n Args:\n zone (str): DNS zone.\n Returns:\n str of managed zone name.\n\n \"\"\"\n if zone.endswith('.in-addr.arpa.'):\n return self.reverse_prefix + '-'.join(zone.split('.')[-5:-3])\n return self.forward_prefix + '-'.join(zone.split('.')[:-1])\n",
"async def get_json(self, url, json_callback=None, **kwargs):\n \"\"\"Get a URL and return its JSON response.\n\n Args:\n url (str): URL to be requested.\n json_callback (func): Custom JSON loader function. Defaults\n to :meth:`json.loads`.\n kwargs (dict): Additional arguments to pass through to the\n request.\n Returns:\n response body returned by :func:`json_callback` function.\n \"\"\"\n if not json_callback:\n json_callback = json.loads\n response = await self.request(method='get', url=url, **kwargs)\n return json_callback(response)\n"
] | class GDNSClient(http.AIOConnection):
"""Async HTTP client to interact with Google Cloud DNS API.
Attributes:
BASE_URL (str): base call url for the DNS API
Args:
project (str): Google project ID that hosts the managed DNS.
auth_client (.GAuthClient): client to manage authentication for
HTTP API requests.
api_version (str): DNS API endpoint version. Defaults to ``v1``.
session (aiohttp.ClientSession): (optional) ``aiohttp`` HTTP
session to use for sending requests. Defaults to the session
object attached to :obj:`auth_client` if not provided.
"""
BASE_URL = 'https://www.googleapis.com/dns'
# see https://cloud.google.com/dns/api/v1/changes#resource
DNS_CHANGES_DONE = 'done'
REVERSE_PREFIX = 'reverse-'
def __init__(self, project=None, auth_client=None, api_version='v1',
session=None, default_zone_prefix=None):
super().__init__(auth_client=auth_client, session=session)
self.project = project
self._base_url = f'{self.BASE_URL}/{api_version}/projects/{project}'
prefix = f'{default_zone_prefix}-' if default_zone_prefix else ''
self.forward_prefix = f'{prefix}'
self.reverse_prefix = f'{prefix}{self.REVERSE_PREFIX}'
def get_managed_zone(self, zone):
"""Get the GDNS managed zone name for a DNS zone.
Google uses custom string names with specific `requirements
<https://cloud.google.com/dns/api/v1/managedZones#resource>`_
for storing records. The scheme implemented here chooses a
managed zone name which removes the trailing dot and replaces
other dots with dashes, and in the case of reverse records,
uses only the two most significant octets, prepended with
'reverse'. At least two octets are required for reverse DNS zones.
Example:
get_managed_zone('example.com.') = 'example-com'
get_managed_zone('20.10.in-addr.arpa.) = 'reverse-20-10'
get_managed_zone('30.20.10.in-addr.arpa.) = 'reverse-20-10'
get_managed_zone('40.30.20.10.in-addr.arpa.) = 'reverse-20-10'
Args:
zone (str): DNS zone.
Returns:
str of managed zone name.
"""
if zone.endswith('.in-addr.arpa.'):
return self.reverse_prefix + '-'.join(zone.split('.')[-5:-3])
return self.forward_prefix + '-'.join(zone.split('.')[:-1])
async def is_change_done(self, zone, change_id):
"""Check if a DNS change has completed.
Args:
zone (str): DNS zone of the change.
change_id (str): Identifier of the change.
Returns:
Boolean
"""
zone_id = self.get_managed_zone(zone)
url = f'{self._base_url}/managedZones/{zone_id}/changes/{change_id}'
resp = await self.get_json(url)
return resp['status'] == self.DNS_CHANGES_DONE
async def publish_changes(self, zone, changes):
"""Post changes to a zone.
Args:
zone (str): DNS zone of the change.
changes (dict): JSON compatible dict of a `Change
<https://cloud.google.com/dns/api/v1/changes>`_.
Returns:
string identifier of the change.
"""
zone_id = self.get_managed_zone(zone)
url = f'{self._base_url}/managedZones/{zone_id}/changes'
resp = await self.request('post', url, json=changes)
return json.loads(resp)['id']
|
spotify/gordon-gcp | src/gordon_gcp/clients/gdns.py | GDNSClient.is_change_done | python | async def is_change_done(self, zone, change_id):
zone_id = self.get_managed_zone(zone)
url = f'{self._base_url}/managedZones/{zone_id}/changes/{change_id}'
resp = await self.get_json(url)
return resp['status'] == self.DNS_CHANGES_DONE | Check if a DNS change has completed.
Args:
zone (str): DNS zone of the change.
change_id (str): Identifier of the change.
Returns:
Boolean | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/gdns.py#L143-L155 | [
"def get_managed_zone(self, zone):\n \"\"\"Get the GDNS managed zone name for a DNS zone.\n\n Google uses custom string names with specific `requirements\n <https://cloud.google.com/dns/api/v1/managedZones#resource>`_\n for storing records. The scheme implemented here chooses a\n managed zone name which removes the trailing dot and replaces\n other dots with dashes, and in the case of reverse records,\n uses only the two most significant octets, prepended with\n 'reverse'. At least two octets are required for reverse DNS zones.\n\n Example:\n get_managed_zone('example.com.') = 'example-com'\n get_managed_zone('20.10.in-addr.arpa.) = 'reverse-20-10'\n get_managed_zone('30.20.10.in-addr.arpa.) = 'reverse-20-10'\n get_managed_zone('40.30.20.10.in-addr.arpa.) = 'reverse-20-10'\n\n Args:\n zone (str): DNS zone.\n Returns:\n str of managed zone name.\n\n \"\"\"\n if zone.endswith('.in-addr.arpa.'):\n return self.reverse_prefix + '-'.join(zone.split('.')[-5:-3])\n return self.forward_prefix + '-'.join(zone.split('.')[:-1])\n",
"async def get_json(self, url, json_callback=None, **kwargs):\n \"\"\"Get a URL and return its JSON response.\n\n Args:\n url (str): URL to be requested.\n json_callback (func): Custom JSON loader function. Defaults\n to :meth:`json.loads`.\n kwargs (dict): Additional arguments to pass through to the\n request.\n Returns:\n response body returned by :func:`json_callback` function.\n \"\"\"\n if not json_callback:\n json_callback = json.loads\n response = await self.request(method='get', url=url, **kwargs)\n return json_callback(response)\n"
] | class GDNSClient(http.AIOConnection):
"""Async HTTP client to interact with Google Cloud DNS API.
Attributes:
BASE_URL (str): base call url for the DNS API
Args:
project (str): Google project ID that hosts the managed DNS.
auth_client (.GAuthClient): client to manage authentication for
HTTP API requests.
api_version (str): DNS API endpoint version. Defaults to ``v1``.
session (aiohttp.ClientSession): (optional) ``aiohttp`` HTTP
session to use for sending requests. Defaults to the session
object attached to :obj:`auth_client` if not provided.
"""
BASE_URL = 'https://www.googleapis.com/dns'
# see https://cloud.google.com/dns/api/v1/changes#resource
DNS_CHANGES_DONE = 'done'
REVERSE_PREFIX = 'reverse-'
def __init__(self, project=None, auth_client=None, api_version='v1',
session=None, default_zone_prefix=None):
super().__init__(auth_client=auth_client, session=session)
self.project = project
self._base_url = f'{self.BASE_URL}/{api_version}/projects/{project}'
prefix = f'{default_zone_prefix}-' if default_zone_prefix else ''
self.forward_prefix = f'{prefix}'
self.reverse_prefix = f'{prefix}{self.REVERSE_PREFIX}'
def get_managed_zone(self, zone):
"""Get the GDNS managed zone name for a DNS zone.
Google uses custom string names with specific `requirements
<https://cloud.google.com/dns/api/v1/managedZones#resource>`_
for storing records. The scheme implemented here chooses a
managed zone name which removes the trailing dot and replaces
other dots with dashes, and in the case of reverse records,
uses only the two most significant octets, prepended with
'reverse'. At least two octets are required for reverse DNS zones.
Example:
get_managed_zone('example.com.') = 'example-com'
get_managed_zone('20.10.in-addr.arpa.) = 'reverse-20-10'
get_managed_zone('30.20.10.in-addr.arpa.) = 'reverse-20-10'
get_managed_zone('40.30.20.10.in-addr.arpa.) = 'reverse-20-10'
Args:
zone (str): DNS zone.
Returns:
str of managed zone name.
"""
if zone.endswith('.in-addr.arpa.'):
return self.reverse_prefix + '-'.join(zone.split('.')[-5:-3])
return self.forward_prefix + '-'.join(zone.split('.')[:-1])
async def get_records_for_zone(self, dns_zone, params=None):
"""Get all resource record sets for a managed zone, using the DNS zone.
Args:
dns_zone (str): Desired DNS zone to query.
params (dict): (optional) Additional query parameters for HTTP
requests to the GDNS API.
Returns:
list of dicts representing rrsets.
"""
managed_zone = self.get_managed_zone(dns_zone)
url = f'{self._base_url}/managedZones/{managed_zone}/rrsets'
if not params:
params = {}
if 'fields' not in params:
# Get only the fields we care about
params['fields'] = ('rrsets/name,rrsets/kind,rrsets/rrdatas,'
'rrsets/type,rrsets/ttl,nextPageToken')
next_page_token = None
records = []
while True:
if next_page_token:
params['pageToken'] = next_page_token
response = await self.get_json(url, params=params)
records.extend(response['rrsets'])
next_page_token = response.get('nextPageToken')
if not next_page_token:
break
logging.info(f'Found {len(records)} rrsets for zone "{dns_zone}".')
return records
async def publish_changes(self, zone, changes):
"""Post changes to a zone.
Args:
zone (str): DNS zone of the change.
changes (dict): JSON compatible dict of a `Change
<https://cloud.google.com/dns/api/v1/changes>`_.
Returns:
string identifier of the change.
"""
zone_id = self.get_managed_zone(zone)
url = f'{self._base_url}/managedZones/{zone_id}/changes'
resp = await self.request('post', url, json=changes)
return json.loads(resp)['id']
|
spotify/gordon-gcp | src/gordon_gcp/clients/gdns.py | GDNSClient.publish_changes | python | async def publish_changes(self, zone, changes):
zone_id = self.get_managed_zone(zone)
url = f'{self._base_url}/managedZones/{zone_id}/changes'
resp = await self.request('post', url, json=changes)
return json.loads(resp)['id'] | Post changes to a zone.
Args:
zone (str): DNS zone of the change.
changes (dict): JSON compatible dict of a `Change
<https://cloud.google.com/dns/api/v1/changes>`_.
Returns:
string identifier of the change. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/gdns.py#L157-L170 | [
"def get_managed_zone(self, zone):\n \"\"\"Get the GDNS managed zone name for a DNS zone.\n\n Google uses custom string names with specific `requirements\n <https://cloud.google.com/dns/api/v1/managedZones#resource>`_\n for storing records. The scheme implemented here chooses a\n managed zone name which removes the trailing dot and replaces\n other dots with dashes, and in the case of reverse records,\n uses only the two most significant octets, prepended with\n 'reverse'. At least two octets are required for reverse DNS zones.\n\n Example:\n get_managed_zone('example.com.') = 'example-com'\n get_managed_zone('20.10.in-addr.arpa.) = 'reverse-20-10'\n get_managed_zone('30.20.10.in-addr.arpa.) = 'reverse-20-10'\n get_managed_zone('40.30.20.10.in-addr.arpa.) = 'reverse-20-10'\n\n Args:\n zone (str): DNS zone.\n Returns:\n str of managed zone name.\n\n \"\"\"\n if zone.endswith('.in-addr.arpa.'):\n return self.reverse_prefix + '-'.join(zone.split('.')[-5:-3])\n return self.forward_prefix + '-'.join(zone.split('.')[:-1])\n",
"async def request(self, method, url, params=None, headers=None,\n data=None, json=None, token_refresh_attempts=2,\n **kwargs):\n \"\"\"Make an asynchronous HTTP request.\n\n Args:\n method (str): HTTP method to use for the request.\n url (str): URL to be requested.\n params (dict): (optional) Query parameters for the request.\n Defaults to ``None``.\n headers (dict): (optional) HTTP headers to send with the\n request. Headers pass through to the request will\n include :attr:`DEFAULT_REQUEST_HEADERS`.\n data (obj): (optional) A dictionary, bytes, or file-like\n object to send in the body of the request.\n json (obj): (optional) Any json compatible python\n object.\n NOTE: json and body parameters cannot be used at the same time.\n token_refresh_attempts (int): (optional) Number of attempts a token\n refresh should be performed.\n Returns:\n (str) HTTP response body.\n Raises:\n :exc:`.GCPHTTPError`: if any exception occurred,\n specifically a :exc:`.GCPHTTPResponseError`, if the\n exception is associated with a response status code.\n\n \"\"\"\n if all([data, json]):\n msg = ('\"data\" and \"json\" request parameters can not be used '\n 'at the same time')\n logging.warn(msg)\n raise exceptions.GCPHTTPError(msg)\n\n req_headers = headers or {}\n req_headers.update(_utils.DEFAULT_REQUEST_HEADERS)\n req_kwargs = {\n 'params': params,\n 'headers': req_headers,\n }\n\n if data:\n req_kwargs['data'] = data\n if json:\n req_kwargs['json'] = json\n\n if token_refresh_attempts:\n if not await self.valid_token_set():\n await self._auth_client.refresh_token()\n token_refresh_attempts -= 1\n\n req_headers.update(\n {'Authorization': f'Bearer {self._auth_client.token}'}\n )\n\n request_id = kwargs.get('request_id', uuid.uuid4())\n logging.debug(_utils.REQ_LOG_FMT.format(\n request_id=request_id,\n method=method.upper(),\n url=url,\n kwargs=req_kwargs))\n try:\n async with self._session.request(method, url, **req_kwargs) as resp:\n log_kw = {\n 'request_id': request_id,\n 
'method': method.upper(),\n 'url': resp.url,\n 'status': resp.status,\n 'reason': resp.reason\n }\n logging.debug(_utils.RESP_LOG_FMT.format(**log_kw))\n\n if resp.status in REFRESH_STATUS_CODES:\n logging.warning(\n f'[{request_id}] HTTP Status Code {resp.status}'\n f' returned requesting {resp.url}: {resp.reason}')\n if token_refresh_attempts:\n logging.info(\n f'[{request_id}] Attempting request to {resp.url} '\n 'again.')\n return await self.request(\n method, url,\n token_refresh_attempts=token_refresh_attempts,\n request_id=request_id,\n **req_kwargs)\n\n logging.warning(\n f'[{request_id}] Max attempts refreshing auth token '\n f'exhausted while requesting {resp.url}')\n\n resp.raise_for_status()\n\n return await resp.text()\n except aiohttp.ClientResponseError as e:\n # bad HTTP status; avoid leaky abstractions and wrap HTTP errors\n # with our own\n msg = f'[{request_id}] HTTP error response from {resp.url}: {e}'\n logging.error(msg, exc_info=e)\n raise exceptions.GCPHTTPResponseError(msg, resp.status)\n except exceptions.GCPHTTPResponseError as e:\n # from recursive call\n raise e\n except Exception as e:\n msg = f'[{request_id}] Request call failed: {e}'\n logging.error(msg, exc_info=e)\n raise exceptions.GCPHTTPError(msg)\n"
class GDNSClient(http.AIOConnection):
    """Async HTTP client for the Google Cloud DNS API.

    Attributes:
        BASE_URL (str): base call url for the DNS API

    Args:
        project (str): Google project ID that hosts the managed DNS.
        auth_client (.GAuthClient): client to manage authentication for
            HTTP API requests.
        api_version (str): DNS API endpoint version. Defaults to ``v1``.
        session (aiohttp.ClientSession): (optional) ``aiohttp`` HTTP
            session used for sending requests. Defaults to the session
            attached to :obj:`auth_client` if not provided.
    """
    BASE_URL = 'https://www.googleapis.com/dns'
    # see https://cloud.google.com/dns/api/v1/changes#resource
    DNS_CHANGES_DONE = 'done'
    REVERSE_PREFIX = 'reverse-'

    def __init__(self, project=None, auth_client=None, api_version='v1',
                 session=None, default_zone_prefix=None):
        super().__init__(auth_client=auth_client, session=session)
        self.project = project
        self._base_url = f'{self.BASE_URL}/{api_version}/projects/{project}'
        prefix = f'{default_zone_prefix}-' if default_zone_prefix else ''
        self.forward_prefix = prefix
        self.reverse_prefix = prefix + self.REVERSE_PREFIX

    def get_managed_zone(self, zone):
        """Map a DNS zone to its Google-managed zone name.

        Google managed-zone names have naming restrictions, so the
        trailing root label is dropped and remaining dots become dashes;
        reverse (``in-addr.arpa``) zones use only the two most
        significant octets, prefixed with the reverse prefix.

        Example:
            get_managed_zone('example.com.') = 'example-com'
            get_managed_zone('30.20.10.in-addr.arpa.') = 'reverse-20-10'

        Args:
            zone (str): DNS zone.

        Returns:
            str of managed zone name.
        """
        labels = zone.split('.')
        if zone.endswith('.in-addr.arpa.'):
            # Only the two most significant octets identify the managed
            # zone; at least two octets are required for reverse zones.
            return self.reverse_prefix + '-'.join(labels[-5:-3])
        # Drop the trailing root label ('') and dash-join the rest.
        return self.forward_prefix + '-'.join(labels[:-1])

    async def get_records_for_zone(self, dns_zone, params=None):
        """Fetch every resource record set of a managed zone.

        Follows ``nextPageToken`` pagination until the zone is exhausted.

        Args:
            dns_zone (str): desired DNS zone to query.
            params (dict): (optional) additional query parameters for
                HTTP requests to the GDNS API.

        Returns:
            list of dicts representing rrsets.
        """
        zone_id = self.get_managed_zone(dns_zone)
        url = f'{self._base_url}/managedZones/{zone_id}/rrsets'
        params = params or {}
        # Restrict the payload to the fields we actually consume.
        params.setdefault('fields', 'rrsets/name,rrsets/kind,rrsets/rrdatas,'
                                    'rrsets/type,rrsets/ttl,nextPageToken')
        records = []
        page_token = None
        while True:
            if page_token:
                params['pageToken'] = page_token
            page = await self.get_json(url, params=params)
            records.extend(page['rrsets'])
            page_token = page.get('nextPageToken')
            if not page_token:
                break
        logging.info(f'Found {len(records)} rrsets for zone "{dns_zone}".')
        return records

    async def is_change_done(self, zone, change_id):
        """Check whether a DNS change has completed.

        Args:
            zone (str): DNS zone of the change.
            change_id (str): identifier of the change.

        Returns:
            Boolean
        """
        managed_zone = self.get_managed_zone(zone)
        url = (f'{self._base_url}/managedZones/{managed_zone}'
               f'/changes/{change_id}')
        status = await self.get_json(url)
        return status['status'] == self.DNS_CHANGES_DONE
|
spotify/gordon-gcp | src/gordon_gcp/plugins/service/__init__.py | get_event_consumer | python | def get_event_consumer(config, success_channel, error_channel, metrics,
**kwargs):
builder = event_consumer.GPSEventConsumerBuilder(
config, success_channel, error_channel, metrics, **kwargs)
return builder.build_event_consumer() | Get a GPSEventConsumer client.
A factory function that validates configuration, creates schema
validator and parser clients, creates an auth and a pubsub client,
and returns an event consumer (:interface:`gordon.interfaces.
IRunnable` and :interface:`gordon.interfaces.IMessageHandler`)
provider.
Args:
config (dict): Google Cloud Pub/Sub-related configuration.
success_channel (asyncio.Queue): Queue to place a successfully
consumed message to be further handled by the ``gordon``
core system.
error_channel (asyncio.Queue): Queue to place a message met
with errors to be further handled by the ``gordon`` core
system.
metrics (obj): :interface:`IMetricRelay` implementation.
kwargs (dict): Additional keyword arguments to pass to the
event consumer.
Returns:
A :class:`GPSEventConsumer` instance. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/plugins/service/__init__.py#L34-L60 | [
"def build_event_consumer(self):\n self._validate_config()\n validator = validate.MessageValidator()\n parser = parse.MessageParser()\n auth_client = self._init_auth()\n subscriber, flow_control = self._init_subscriber_client(auth_client)\n if not self.kwargs.get('loop'):\n self.kwargs['loop'] = asyncio.get_event_loop()\n\n return GPSEventConsumer(\n self.config, self.success_channel, self.error_channel,\n self.metrics, subscriber, flow_control, validator, parser,\n **self.kwargs\n )\n"
] | # -*- coding: utf-8 -*-
#
# Copyright 2018 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gordon_gcp.plugins.service import enricher
from gordon_gcp.plugins.service import event_consumer
from gordon_gcp.plugins.service import gdns_publisher
# Mainly for easier documentation reading
from gordon_gcp.plugins.service.enricher import * # noqa: F401,F403
from gordon_gcp.plugins.service.event_consumer import * # noqa: F401,F403
from gordon_gcp.plugins.service.gdns_publisher import * # noqa: F401,F403
__all__ = (
enricher.__all__ + # noqa: F405
event_consumer.__all__ + # noqa: F405
gdns_publisher.__all__ + # noqa: F405
('get_event_consumer', 'get_enricher', 'get_gdns_publisher')
)
def get_enricher(config, metrics, **kwargs):
"""Get a GCEEnricher client.
A factory function that validates configuration and returns an
enricher client (:interface:`gordon.interfaces.IMessageHandler`)
provider.
Args:
config (dict): Google Compute Engine API related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
kwargs (dict): Additional keyword arguments to pass to the
enricher.
Returns:
A :class:`GCEEnricher` instance.
"""
builder = enricher.GCEEnricherBuilder(
config, metrics, **kwargs)
return builder.build_enricher()
def get_gdns_publisher(config, metrics, **kwargs):
"""Get a GDNSPublisher client.
A factory function that validates configuration and returns a
publisher client (:interface:`gordon.interfaces.IMessageHandler`)
provider.
Args:
config (dict): Google Cloud DNS API related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
kwargs (dict): Additional keyword arguments to pass to the
publisher.
Returns:
A :class:`GDNSPublisher` instance.
"""
builder = gdns_publisher.GDNSPublisherBuilder(
config, metrics, **kwargs)
return builder.build_publisher()
|
spotify/gordon-gcp | src/gordon_gcp/plugins/service/__init__.py | get_enricher | python | def get_enricher(config, metrics, **kwargs):
builder = enricher.GCEEnricherBuilder(
config, metrics, **kwargs)
return builder.build_enricher() | Get a GCEEnricher client.
A factory function that validates configuration and returns an
enricher client (:interface:`gordon.interfaces.IMessageHandler`)
provider.
Args:
config (dict): Google Compute Engine API related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
kwargs (dict): Additional keyword arguments to pass to the
enricher.
Returns:
A :class:`GCEEnricher` instance. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/plugins/service/__init__.py#L63-L80 | [
"def build_enricher(self):\n return GCEEnricher(self.config, self.metrics, self.http_client,\n self.dns_client, **self.kwargs)\n"
] | # -*- coding: utf-8 -*-
#
# Copyright 2018 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gordon_gcp.plugins.service import enricher
from gordon_gcp.plugins.service import event_consumer
from gordon_gcp.plugins.service import gdns_publisher
# Mainly for easier documentation reading
from gordon_gcp.plugins.service.enricher import * # noqa: F401,F403
from gordon_gcp.plugins.service.event_consumer import * # noqa: F401,F403
from gordon_gcp.plugins.service.gdns_publisher import * # noqa: F401,F403
__all__ = (
enricher.__all__ + # noqa: F405
event_consumer.__all__ + # noqa: F405
gdns_publisher.__all__ + # noqa: F405
('get_event_consumer', 'get_enricher', 'get_gdns_publisher')
)
def get_event_consumer(config, success_channel, error_channel, metrics,
                       **kwargs):
    """Build and return a GPSEventConsumer provider.

    Validates configuration, sets up schema validation/parsing plus the
    auth and pubsub clients, and returns an event consumer implementing
    :interface:`gordon.interfaces.IRunnable` and
    :interface:`gordon.interfaces.IMessageHandler`.

    Args:
        config (dict): Google Cloud Pub/Sub-related configuration.
        success_channel (asyncio.Queue): queue for successfully consumed
            messages handed back to the ``gordon`` core system.
        error_channel (asyncio.Queue): queue for messages that hit
            errors, handed back to the ``gordon`` core system.
        metrics (obj): :interface:`IMetricRelay` implementation.
        kwargs (dict): additional keyword arguments for the consumer.

    Returns:
        A :class:`GPSEventConsumer` instance.
    """
    consumer_builder = event_consumer.GPSEventConsumerBuilder(
        config, success_channel, error_channel, metrics, **kwargs)
    return consumer_builder.build_event_consumer()
def get_gdns_publisher(config, metrics, **kwargs):
    """Build and return a GDNSPublisher provider.

    Validates configuration and returns a publisher client implementing
    :interface:`gordon.interfaces.IMessageHandler`.

    Args:
        config (dict): Google Cloud DNS API related configuration.
        metrics (obj): :interface:`IMetricRelay` implementation.
        kwargs (dict): additional keyword arguments for the publisher.

    Returns:
        A :class:`GDNSPublisher` instance.
    """
    publisher_builder = gdns_publisher.GDNSPublisherBuilder(
        config, metrics, **kwargs)
    return publisher_builder.build_publisher()
|
spotify/gordon-gcp | src/gordon_gcp/plugins/service/__init__.py | get_gdns_publisher | python | def get_gdns_publisher(config, metrics, **kwargs):
builder = gdns_publisher.GDNSPublisherBuilder(
config, metrics, **kwargs)
return builder.build_publisher() | Get a GDNSPublisher client.
A factory function that validates configuration and returns a
publisher client (:interface:`gordon.interfaces.IMessageHandler`)
provider.
Args:
config (dict): Google Cloud DNS API related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
kwargs (dict): Additional keyword arguments to pass to the
publisher.
Returns:
A :class:`GDNSPublisher` instance. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/plugins/service/__init__.py#L83-L100 | [
"def build_publisher(self):\n self._validate_config()\n dns_client = self._init_dns_client()\n return GDNSPublisher(\n self.config, self.metrics, dns_client, **self.kwargs)\n"
] | # -*- coding: utf-8 -*-
#
# Copyright 2018 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gordon_gcp.plugins.service import enricher
from gordon_gcp.plugins.service import event_consumer
from gordon_gcp.plugins.service import gdns_publisher
# Mainly for easier documentation reading
from gordon_gcp.plugins.service.enricher import * # noqa: F401,F403
from gordon_gcp.plugins.service.event_consumer import * # noqa: F401,F403
from gordon_gcp.plugins.service.gdns_publisher import * # noqa: F401,F403
__all__ = (
enricher.__all__ + # noqa: F405
event_consumer.__all__ + # noqa: F405
gdns_publisher.__all__ + # noqa: F405
('get_event_consumer', 'get_enricher', 'get_gdns_publisher')
)
def get_event_consumer(config, success_channel, error_channel, metrics,
**kwargs):
"""Get a GPSEventConsumer client.
A factory function that validates configuration, creates schema
validator and parser clients, creates an auth and a pubsub client,
and returns an event consumer (:interface:`gordon.interfaces.
IRunnable` and :interface:`gordon.interfaces.IMessageHandler`)
provider.
Args:
config (dict): Google Cloud Pub/Sub-related configuration.
success_channel (asyncio.Queue): Queue to place a successfully
consumed message to be further handled by the ``gordon``
core system.
error_channel (asyncio.Queue): Queue to place a message met
with errors to be further handled by the ``gordon`` core
system.
metrics (obj): :interface:`IMetricRelay` implementation.
kwargs (dict): Additional keyword arguments to pass to the
event consumer.
Returns:
A :class:`GPSEventConsumer` instance.
"""
builder = event_consumer.GPSEventConsumerBuilder(
config, success_channel, error_channel, metrics, **kwargs)
return builder.build_event_consumer()
def get_enricher(config, metrics, **kwargs):
    """Build and return a GCEEnricher provider.

    Validates configuration and returns an enricher client implementing
    :interface:`gordon.interfaces.IMessageHandler`.

    Args:
        config (dict): Google Compute Engine API related configuration.
        metrics (obj): :interface:`IMetricRelay` implementation.
        kwargs (dict): additional keyword arguments for the enricher.

    Returns:
        A :class:`GCEEnricher` instance.
    """
    enricher_builder = enricher.GCEEnricherBuilder(config, metrics, **kwargs)
    return enricher_builder.build_enricher()
|
async def list_all_active_projects(self, page_size=1000):
    """Return every project whose lifecycleState is ACTIVE.

    Endpoint documentation: https://cloud.google.com/resource-manager/
    reference/rest/v1/projects/list

    Args:
        page_size (int): hint for the client to retrieve at most this
            many results per API call.

    Returns:
        list(dict): all active projects.
    """
    url = f'{self.BASE_URL}/{self.api_version}/projects'
    responses = await self.list_all(url, {'pageSize': page_size})
    all_projects = self._parse_rsps_for_projects(responses)
    # Projects being deleted/pending still appear in the listing;
    # keep only the ones explicitly marked active.
    return [
        project for project in all_projects
        if project.get('lifecycleState', '').lower() == 'active'
    ]
You can find the endpoint documentation `here <https://cloud.
google.com/resource-manager/reference/rest/v1/projects/list>`__.
Args:
page_size (int): hint for the client to only retrieve up to
this number of results per API call.
Returns:
list(dicts): all active projects | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/gcrm.py#L85-L105 | [
"async def list_all(self, url, params):\n \"\"\"Aggregate data from all pages of an API query.\n\n Args:\n url (str): Google API endpoint URL.\n params (dict): URL query parameters.\n Returns:\n list: parsed query response results.\n \"\"\"\n items = []\n next_page_token = None\n\n while True:\n if next_page_token:\n params['pageToken'] = next_page_token\n response = await self.get_json(url, params=params)\n\n items.append(response)\n next_page_token = response.get('nextPageToken')\n if not next_page_token:\n break\n return items\n",
"def _parse_rsps_for_projects(self, responses):\n projects = []\n for response in responses:\n for project in response.get('projects', []):\n projects.append(project)\n return projects\n"
class GCRMClient(http.AIOConnection, _utils.GPaginatorMixin):
    """Async client for the Google Cloud Resource Manager API.

    Endpoint documentation: https://cloud.google.com/resource-manager/
    reference/rest/#rest-resource-v1projects

    Attributes:
        BASE_URL (str): base endpoint URL.

    Args:
        auth_client (.GAuthClient): client to manage authentication for
            HTTP API requests.
        session (aiohttp.ClientSession): (optional) HTTP session used
            for sending requests. Defaults to the session attached to
            :obj:`auth_client` if not provided.
        api_version (str): version of the API endpoint to target.
    """
    BASE_URL = 'https://cloudresourcemanager.googleapis.com'

    def __init__(self, auth_client=None, session=None, api_version='v1'):
        super().__init__(auth_client=auth_client, session=session)
        self.api_version = api_version

    def _parse_rsps_for_projects(self, responses):
        # Flatten the per-page 'projects' arrays into a single list.
        return [
            project
            for response in responses
            for project in response.get('projects', [])
        ]
|
def _call_validators(self):
    """Run every configuration validator and collect error messages.

    Returns:
        list(str): error messages reported by the validators.
    """
    validators = (
        self._validate_keyfile,
        self._validate_dns_zone,
        self._validate_retries,
        self._validate_project,
    )
    errors = []
    # Order matters only for message ordering; each validator returns
    # a (possibly empty) list of messages.
    for validate in validators:
        errors.extend(validate())
    return errors
Returns:
list(str): Error messages from the validators. | train | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/plugins/service/enricher.py#L93-L104 | null | class GCEEnricherBuilder:
"""Build and configure a :class:`GCEEnricher` object.
Args:
config (dict): Google Compute Engine API related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
kwargs (dict): Additional keyword arguments to pass to the
enricher.
"""
def __init__(self, config, metrics, **kwargs):
    """Validate *config* and prepare the HTTP and DNS clients.

    Args:
        config (dict): Google Compute Engine API related configuration.
        metrics (obj): :interface:`IMetricRelay` implementation.
        kwargs (dict): additional keyword arguments for the enricher.

    Raises:
        exceptions.GCPConfigError: if the configuration is invalid
            (raised by ``_validate_config``).
    """
    self.config = config
    self.metrics = metrics
    self.kwargs = kwargs
    # Fail fast on bad configuration before building any clients.
    self._validate_config()
    self.http_client = self._init_http_client()
    self.dns_client = self._init_dns_client()
def _validate_keyfile(self):
    """Check that a Service Account keyfile path is configured.

    Returns:
        list(str): one error message if the keyfile is missing,
            otherwise an empty list.
    """
    if self.config.get('keyfile'):
        return []
    return ['The path to a Service Account JSON keyfile is required '
            'to authenticate to the GCE API.']
def _validate_dns_zone(self):
    """Validate the configured DNS zone.

    A zone must be present and must be a fully qualified domain name
    ending with the root zone (".").

    Returns:
        list(str): error messages, empty when the zone is valid.
    """
    dns_zone = self.config.get('dns_zone')
    msg = []
    if not dns_zone:
        msg.append('A dns zone is required to build correct A records.')
    # BUGFIX: previously a missing zone also triggered the FQDN message
    # ('' never ends with '.'), producing two overlapping errors for a
    # single problem. Only check the FQDN rule when a zone is present.
    elif not dns_zone.endswith('.'):
        msg.append('A dns zone must be an FQDN and end with the root '
                   'zone (".").')
    return msg
def _validate_retries(self):
    """Ensure a retry count is configured, defaulting to 5.

    Never produces an error; a missing or falsy value is replaced.

    Returns:
        list(str): always empty.
    """
    self.config['retries'] = self.config.get('retries') or 5
    return []
def _validate_project(self):
    """Check that a GCP project is configured.

    Returns:
        list(str): one error message if the project is missing,
            otherwise an empty list.
    """
    if self.config.get('project'):
        return []
    return ['The GCP project that contains the Google Cloud DNS '
            'managed zone is required to correctly delete A records '
            'for deleted instances.']
def _validate_config(self):
    """Run all validators and raise on any accumulated errors.

    Raises:
        exceptions.GCPConfigError: if any validator reported a problem;
            the message lists every reported error.
    """
    # NOTE: removed a dead `errors = []` assignment that was
    # immediately overwritten by the validator results.
    errors = self._call_validators()
    if errors:
        error_msgs = '\n'.join(errors)
        exp_msg = f'Invalid configuration:\n{error_msgs}'
        logging.error(error_msgs)
        raise exceptions.GCPConfigError(exp_msg)
def _init_auth(self):
    """Create a GAuthClient from the configured keyfile and scopes."""
    return auth.GAuthClient(
        keyfile=self.config['keyfile'],
        scopes=self.config.get('scopes'))
def _init_http_client(self):
    """Create the authenticated HTTP client used for GCE lookups."""
    auth_client = self._init_auth()
    return http.AIOConnection(auth_client=auth_client)
def _init_dns_client(self):
    """Create the Google Cloud DNS client for the configured project."""
    zone_prefix = self.config.get('default_zone_prefix', '')
    return gdns.GDNSClient(
        self.config['project'],
        self._init_auth(),
        default_zone_prefix=zone_prefix)
def build_enricher(self):
    """Assemble and return the configured :class:`GCEEnricher`."""
    return GCEEnricher(
        self.config, self.metrics, self.http_client, self.dns_client,
        **self.kwargs)
|
def get_arguments():
    """Parse and return the command-line arguments.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    # https://docs.python.org/3/library/argparse.html
    parser = argparse.ArgumentParser(
        description='Handles bumping of the artifact version')
    parser.add_argument('--log-config',
                        '-l',
                        action='store',
                        dest='logger_config',
                        help='The location of the logging config json file',
                        default='')
    parser.add_argument('--log-level',
                        '-L',
                        help='Provide the log level. Defaults to INFO.',
                        dest='log_level',
                        action='store',
                        default='INFO',
                        choices=['DEBUG',
                                 'INFO',
                                 'WARNING',
                                 'ERROR',
                                 'CRITICAL'])
    # NOTE(review): --major/--minor/--patch/--version are treated as
    # mutually exclusive by the caller (first match wins); the parser
    # deliberately does not enforce this to keep the CLI contract intact.
    parser.add_argument('--major',
                        help='Bump the major version',
                        dest='bump_major',
                        action='store_true',
                        default=False)
    parser.add_argument('--minor',
                        help='Bump the minor version',
                        dest='bump_minor',
                        action='store_true',
                        default=False)
    parser.add_argument('--patch',
                        help='Bump the patch version',
                        dest='bump_patch',
                        action='store_true',
                        default=False)
    parser.add_argument('--version',
                        help='Set the version',
                        dest='version',
                        action='store',
                        # BUGFIX: was default=False; None is the idiomatic
                        # "absent" value for a string option. Both are
                        # falsy, so callers testing `if args.version:`
                        # behave the same.
                        default=None)
    return parser.parse_args()
Returns the args as parsed from the argsparser. | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/_CI/bin/bump.py#L21-L68 | null | #!/usr/bin/env python2.7
import sys
import os
import argparse
modules_path = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'..',
'modules'
)
)
sys.path.insert(0, modules_path)
import semver
import logging
LOGGER = logging.getLogger(__name__)
#sys.tracebacklimit = 0
def setup_logging(args):
    """
    Configure the module logger from the parsed CLI arguments.

    Needs the args to get the log level supplied.

    :param args: The command line arguments
    """
    handler = logging.StreamHandler()
    handler.setLevel(args.log_level)
    formatter = logging.Formatter(('%(asctime)s - '
                                   '%(name)s - '
                                   '%(levelname)s - '
                                   '%(message)s'))
    handler.setFormatter(formatter)
    # BUGFIX: the logger itself must also be lowered; its effective
    # level otherwise stays WARNING (inherited from root), filtering
    # out DEBUG/INFO records before they ever reach the handler.
    LOGGER.setLevel(args.log_level)
    LOGGER.addHandler(handler)
def main():
    """
    Main method.

    This method holds what you want to execute when
    the script is run on command line.
    """
    args = get_arguments()
    setup_logging(args)
    # .VERSION lives two directories above this script.
    version_path = os.path.abspath(os.path.join(
        os.path.dirname(__file__), '..', '..', '.VERSION'))
    try:
        current_version = open(version_path).read().strip()
    except Exception:
        print ('Could not open or read the .VERSION file')
        sys.exit(1)
    # Sanity-check the stored version before doing anything with it.
    try:
        semver.parse(current_version)
    except ValueError:
        print ('The .VERSION file contains an invalid version: "{0}"'.format(
            current_version
        ))
        sys.exit(1)
    new_version = current_version
    # An explicit --version wins over the bump flags; bump flags are
    # checked major > minor > patch, first match wins.
    if args.version:
        try:
            if semver.parse(args.version):
                new_version = args.version
        except Exception:
            print ('Could not parse "{0}" as a version'.format(
                args.version
            ))
            sys.exit(1)
    elif args.bump_major:
        new_version = semver.bump_major(current_version)
    elif args.bump_minor:
        new_version = semver.bump_minor(current_version)
    elif args.bump_patch:
        new_version = semver.bump_patch(current_version)
    try:
        with open(version_path, 'w') as version_file:
            version_file.write(new_version)
    except Exception:
        print ('Could not write the .VERSION file')
        sys.exit(1)
    # Works identically as statement or call under py2/py3.
    print(new_version)
if __name__ == '__main__':
main()
|
costastf/toonlib | _CI/bin/bump.py | setup_logging | python | def setup_logging(args):
handler = logging.StreamHandler()
handler.setLevel(args.log_level)
formatter = logging.Formatter(('%(asctime)s - '
'%(name)s - '
'%(levelname)s - '
'%(message)s'))
handler.setFormatter(formatter)
LOGGER.addHandler(handler) | This sets up the logging.
Needs the args to get the log level supplied
:param args: The command line arguments | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/_CI/bin/bump.py#L71-L85 | null | #!/usr/bin/env python2.7
import sys
import os
import argparse
modules_path = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'..',
'modules'
)
)
sys.path.insert(0, modules_path)
import semver
import logging
LOGGER = logging.getLogger(__name__)
#sys.tracebacklimit = 0
def get_arguments():
"""
This get us the cli arguments.
Returns the args as parsed from the argsparser.
"""
# https://docs.python.org/3/library/argparse.html
parser = argparse.ArgumentParser(
description='Handles bumping of the artifact version')
parser.add_argument('--log-config',
'-l',
action='store',
dest='logger_config',
help='The location of the logging config json file',
default='')
parser.add_argument('--log-level',
'-L',
help='Provide the log level. Defaults to INFO.',
dest='log_level',
action='store',
default='INFO',
choices=['DEBUG',
'INFO',
'WARNING',
'ERROR',
'CRITICAL'])
parser.add_argument('--major',
help='Bump the major version',
dest='bump_major',
action='store_true',
default=False)
parser.add_argument('--minor',
help='Bump the minor version',
dest='bump_minor',
action='store_true',
default=False)
parser.add_argument('--patch',
help='Bump the patch version',
dest='bump_patch',
action='store_true',
default=False)
parser.add_argument('--version',
help='Set the version',
dest='version',
action='store',
default=False)
args = parser.parse_args()
return args
def main():
"""
Main method.
This method holds what you want to execute when
the script is run on command line.
"""
args = get_arguments()
setup_logging(args)
version_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
'..',
'..',
'.VERSION'
))
try:
version_text = open(version_path).read().strip()
except Exception:
print ('Could not open or read the .VERSION file')
sys.exit(1)
try:
semver.parse(version_text)
except ValueError:
print ('The .VERSION file contains an invalid version: "{0}"'.format(
version_text
))
sys.exit(1)
new_version = version_text
if args.version:
try:
if semver.parse(args.version):
new_version = args.version
except Exception:
print ('Could not parse "{0}" as a version'.format(
args.version
))
sys.exit(1)
elif args.bump_major:
new_version = semver.bump_major(version_text)
elif args.bump_minor:
new_version = semver.bump_minor(version_text)
elif args.bump_patch:
new_version = semver.bump_patch(version_text)
try:
with open(version_path, 'w') as version_file:
version_file.write(new_version)
except Exception:
print ('Could not write the .VERSION file')
sys.exit(1)
print new_version
if __name__ == '__main__':
main()
|
def _authenticate(self):
    """Authenticate against the API and set up client information.

    Performs a GET to ``/client/login`` with the stored credentials and
    feeds the parsed response into :meth:`_populate_info`.

    Raises:
        InvalidCredentials: if the service does not report success.
    """
    data = {'username': self.username,
            'password': self.password}
    url = '{base}/client/login'.format(base=self.base_url)
    response = self._session.get(url, params=data)
    # BUGFIX: removed a leftover debug ``print(response.text)`` that
    # dumped the raw login response (account details) to stdout.
    data = response.json()
    if not data.get('success'):
        raise InvalidCredentials(data.get('reason', None))
    self._populate_info(data)
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon._logout | python | def _logout(self, reset=True):
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False | Log out of the API. | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L127-L136 | null | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon._state | python | def _state(self):
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_ | The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state. | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L162-L188 | null | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon.smokedetectors | python | def smokedetectors(self):
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])] | :return: A list of smokedetector objects modeled as named tuples | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L225-L234 | null | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon.get_smokedetector_by_name | python | def get_smokedetector_by_name(self, name):
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None) | Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L236-L243 | null | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon.lights | python | def lights(self):
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')] | :return: A list of light objects | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L246-L251 | null | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon.get_light_by_name | python | def get_light_by_name(self, name):
return next((light for light in self.lights
if light.name.lower() == name.lower()), None) | Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L253-L260 | null | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon.smartplugs | python | def smartplugs(self):
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')] | :return: A list of smartplug objects. | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L263-L268 | null | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon.get_smartplug_by_name | python | def get_smartplug_by_name(self, name):
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None) | Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L270-L277 | null | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon.gas | python | def gas(self):
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value')) | :return: A gas object modeled as a named tuple | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L280-L289 | null | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon.power | python | def power(self):
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage')) | :return: A power object modeled as a named tuple | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L292-L303 | null | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon.thermostat_info | python | def thermostat_info(self):
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint')) | :return: A thermostatinfo object modeled as a named tuple | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L317-L336 | null | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon.thermostat_states | python | def thermostat_states(self):
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']] | :return: A list of thermostatstate object modeled as named tuples | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L339-L345 | null | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon.get_thermostat_state_by_name | python | def get_thermostat_state_by_name(self, name):
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None) | Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L347-L355 | [
"def _validate_thermostat_state_name(name):\n if name.lower() not in [value.lower() for value in STATES.values()\n if not value.lower() == 'unknown']:\n raise InvalidThermostatState(name)\n"
] | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon.get_thermostat_state_by_id | python | def get_thermostat_state_by_id(self, id_):
return next((state for state in self.thermostat_states
if state.id == id_), None) | Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L357-L364 | null | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon.thermostat_state | python | def thermostat_state(self):
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state | The state of the thermostat programming
:return: A thermostat state object of the current setting | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L377-L387 | [
"def _clear_cache(self):\n self._logger.debug('Clearing state cache.')\n state_cache.clear()\n",
"def _get_data(self, endpoint, params=None):\n url = '{base}{endpoint}'.format(base=self.base_url,\n endpoint=endpoint)\n try:\n response = self._session.get(url,\n params=params or self._parameters,\n timeout=REQUEST_TIMEOUT)\n except Timeout:\n self._logger.warning('Detected a timeout. '\n 'Re-authenticating and retrying request.')\n self._logout(reset=False)\n self._login()\n return self._get_data(endpoint, params)\n if response.status_code == 500:\n error_message = response.json().get('reason', '')\n if any([message in error_message\n for message in AUTHENTICATION_ERROR_STRINGS]):\n self._logger.warning('Detected an issue with authentication. '\n 'Trying to reauthenticate.')\n self._login()\n return self._get_data(endpoint, params)\n elif not response.ok:\n self._logger.debug(('\\n\\tStatus Code :{}'\n '\\n\\tText :{}').format(response.status_code,\n response.text))\n else:\n try:\n return response.json()\n except (ValueError, TypeError):\n self._logger.debug(('\\n\\tStatus Code :{}'\n '\\n\\tText :{}').format(response.status_code,\n response.text))\n return {}\n",
"def get_thermostat_state_by_id(self, id_):\n \"\"\"Retrieves a thermostat state object by its id\n\n :param id_: The id of the thermostat state\n :return: The thermostat state object\n \"\"\"\n return next((state for state in self.thermostat_states\n if state.id == id_), None)\n",
"def _validate_thermostat_state_name(name):\n if name.lower() not in [value.lower() for value in STATES.values()\n if not value.lower() == 'unknown']:\n raise InvalidThermostatState(name)\n"
] | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon.thermostat_state | python | def thermostat_state(self, name):
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache() | Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to. | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L390-L403 | [
"def _clear_cache(self):\n self._logger.debug('Clearing state cache.')\n state_cache.clear()\n",
"def _get_data(self, endpoint, params=None):\n url = '{base}{endpoint}'.format(base=self.base_url,\n endpoint=endpoint)\n try:\n response = self._session.get(url,\n params=params or self._parameters,\n timeout=REQUEST_TIMEOUT)\n except Timeout:\n self._logger.warning('Detected a timeout. '\n 'Re-authenticating and retrying request.')\n self._logout(reset=False)\n self._login()\n return self._get_data(endpoint, params)\n if response.status_code == 500:\n error_message = response.json().get('reason', '')\n if any([message in error_message\n for message in AUTHENTICATION_ERROR_STRINGS]):\n self._logger.warning('Detected an issue with authentication. '\n 'Trying to reauthenticate.')\n self._login()\n return self._get_data(endpoint, params)\n elif not response.ok:\n self._logger.debug(('\\n\\tStatus Code :{}'\n '\\n\\tText :{}').format(response.status_code,\n response.text))\n else:\n try:\n return response.json()\n except (ValueError, TypeError):\n self._logger.debug(('\\n\\tStatus Code :{}'\n '\\n\\tText :{}').format(response.status_code,\n response.text))\n return {}\n",
"def get_thermostat_state_by_id(self, id_):\n \"\"\"Retrieves a thermostat state object by its id\n\n :param id_: The id of the thermostat state\n :return: The thermostat state object\n \"\"\"\n return next((state for state in self.thermostat_states\n if state.id == id_), None)\n",
"def _validate_thermostat_state_name(name):\n if name.lower() not in [value.lower() for value in STATES.values()\n if not value.lower() == 'unknown']:\n raise InvalidThermostatState(name)\n"
] | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
costastf/toonlib | toonlib/toonlib.py | Toon.thermostat | python | def thermostat(self, temperature):
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache() | A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to. | train | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L415-L425 | [
"def _clear_cache(self):\n self._logger.debug('Clearing state cache.')\n state_cache.clear()\n",
"def _get_data(self, endpoint, params=None):\n url = '{base}{endpoint}'.format(base=self.base_url,\n endpoint=endpoint)\n try:\n response = self._session.get(url,\n params=params or self._parameters,\n timeout=REQUEST_TIMEOUT)\n except Timeout:\n self._logger.warning('Detected a timeout. '\n 'Re-authenticating and retrying request.')\n self._logout(reset=False)\n self._login()\n return self._get_data(endpoint, params)\n if response.status_code == 500:\n error_message = response.json().get('reason', '')\n if any([message in error_message\n for message in AUTHENTICATION_ERROR_STRINGS]):\n self._logger.warning('Detected an issue with authentication. '\n 'Trying to reauthenticate.')\n self._login()\n return self._get_data(endpoint, params)\n elif not response.ok:\n self._logger.debug(('\\n\\tStatus Code :{}'\n '\\n\\tText :{}').format(response.status_code,\n response.text))\n else:\n try:\n return response.json()\n except (ValueError, TypeError):\n self._logger.debug(('\\n\\tStatus Code :{}'\n '\\n\\tText :{}').format(response.status_code,\n response.text))\n return {}\n"
] | class Toon(object):
"""Model of the toon smart meter from eneco."""
def __init__(self, username, password, state_retrieval_retry=1):
logger_name = u'{base}.{suffix}'.format(base=LOGGER_BASENAME,
suffix=self.__class__.__name__)
self._logger = logging.getLogger(logger_name)
self._session = Session()
self.username = username
self.password = password
self.base_url = 'https://toonopafstand.eneco.nl/toonMobileBackendWeb'
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._state_retries = state_retrieval_retry
self._uuid = None
self.data = Data(self)
self._login()
def _reset(self):
self.agreements = None
self.agreement = None
self.client = None
self._state_ = DEFAULT_STATE
self._uuid = None
def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
print(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data)
def _populate_info(self, data):
agreements = data.pop('agreements')
self.agreements = [Agreement(agreement.get('agreementId'),
agreement.get('agreementIdChecksum'),
agreement.get('city'),
agreement.get('displayCommonName'),
agreement.get('displayHardwareVersion'),
agreement.get('displaySoftwareVersion'),
agreement.get('heatingType'),
agreement.get('houseNumber'),
agreement.get('isBoilerManagement'),
agreement.get('isToonSolar'),
agreement.get('isToonly'),
agreement.get('postalCode'),
agreement.get('street'))
for agreement in agreements]
self.agreement = self.agreements[0]
details = PersonalDetails(data.get('enecoClientNumber'),
data.get('enecoClientEmailAddress'),
data.get('enecoClientFirstName'),
data.get('enecoClientLastName'),
data.get('enecoClientMiddleName'),
data.get('enecoClientMobileNumber'),
data.get('enecoClientPhoneNumber'))
self.client = Client(data.get('clientId'),
data.get('clientIdChecksum'),
data.get('passwordHash'),
data.get('sample'),
details)
@property
def _parameters(self):
return {'clientId': self.client.id,
'clientIdChecksum': self.client.checksum,
'random': self._uuid or uuid.uuid4()}
def _login(self):
self._authenticate()
self._get_session()
def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False
def _get_session(self):
data = copy.copy(self._parameters)
data.update({'agreementId': self.agreement.id,
'agreementIdChecksum': self.agreement.checksum})
url = '{base}/client/auth/start'.format(base=self.base_url)
response = self._session.get(url, params=data)
if not response.ok:
self._logout()
message = ('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text)
raise UnableToGetSession(message)
else:
uuid_kpi = response.json().get('displayUuidKpi', None)
if uuid_kpi:
self._uuid = uuid_kpi.get('uuid', None)
return True
def _clear_cache(self):
self._logger.debug('Clearing state cache.')
state_cache.clear()
@property
@cached(state_cache)
def _state(self):
"""The internal state of the object.
The api responses are not consistent so a retry is performed on every
call with information updating the internally saved state refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_
def _get_data(self, endpoint, params=None):
url = '{base}{endpoint}'.format(base=self.base_url,
endpoint=endpoint)
try:
response = self._session.get(url,
params=params or self._parameters,
timeout=REQUEST_TIMEOUT)
except Timeout:
self._logger.warning('Detected a timeout. '
'Re-authenticating and retrying request.')
self._logout(reset=False)
self._login()
return self._get_data(endpoint, params)
if response.status_code == 500:
error_message = response.json().get('reason', '')
if any([message in error_message
for message in AUTHENTICATION_ERROR_STRINGS]):
self._logger.warning('Detected an issue with authentication. '
'Trying to reauthenticate.')
self._login()
return self._get_data(endpoint, params)
elif not response.ok:
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
else:
try:
return response.json()
except (ValueError, TypeError):
self._logger.debug(('\n\tStatus Code :{}'
'\n\tText :{}').format(response.status_code,
response.text))
return {}
@property
def smokedetectors(self):
""":return: A list of smokedetector objects modeled as named tuples"""
return [SmokeDetector(smokedetector.get('devUuid'),
smokedetector.get('name'),
smokedetector.get('lastConnectedChange'),
smokedetector.get('connected'),
smokedetector.get('batteryLevel'),
smokedetector.get('type'))
for smokedetector in self._state.get('smokeDetectors',
{}).get('device', [])]
def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None)
@property
def lights(self):
""":return: A list of light objects"""
return [Light(self, light.get('name'))
for light in self._state.get('deviceStatusInfo',
{}).get('device', [])
if light.get('rgbColor')]
def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None)
@property
def smartplugs(self):
""":return: A list of smartplug objects."""
return [SmartPlug(self, plug.get('name'))
for plug in self._state.get('deviceStatusInfo',
{}).get('device', [])
if plug.get('networkHealthState')]
def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None)
@property
def gas(self):
""":return: A gas object modeled as a named tuple"""
usage = self._state['gasUsage']
return Usage(usage.get('avgDayValue'),
usage.get('avgValue'),
usage.get('dayCost'),
usage.get('dayUsage'),
usage.get('isSmart'),
usage.get('meterReading'),
usage.get('value'))
@property
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
power.get('dayLowUsage'))
@property
def solar(self):
power = self._state['powerUsage']
return Solar(power.get('maxSolar'),
power.get('valueProduced'),
power.get('valueSolar'),
power.get('avgProduValue'),
power.get('meterReadingLowProdu'),
power.get('meterReadingProdu'),
power.get('dayCostProduced'))
@property
def thermostat_info(self):
""":return: A thermostatinfo object modeled as a named tuple"""
info = self._state['thermostatInfo']
return ThermostatInfo(info.get('activeState'),
info.get('boilerModuleConnected'),
info.get('burnerInfo'),
info.get('currentDisplayTemp'),
info.get('currentModulationLevel'),
info.get('currentSetpoint'),
info.get('currentTemp'),
info.get('errorFound'),
info.get('haveOTBoiler'),
info.get('nextProgram'),
info.get('nextSetpoint'),
info.get('nextState'),
info.get('nextTime'),
info.get('otCommError'),
info.get('programState'),
info.get('randomConfigId'),
info.get('realSetpoint'))
@property
def thermostat_states(self):
""":return: A list of thermostatstate object modeled as named tuples"""
return [ThermostatState(STATES[state.get('id')],
state.get('id'),
state.get('tempValue'),
state.get('dhw'))
for state in self._state['thermostatStates']['state']]
def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None)
def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None)
@property
def burner_on(self):
return True if int(self.thermostat_info.burner_info) else False
@staticmethod
def _validate_thermostat_state_name(name):
if name.lower() not in [value.lower() for value in STATES.values()
if not value.lower() == 'unknown']:
raise InvalidThermostatState(name)
@property
def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state
@thermostat_state.setter
def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache()
@property
def thermostat(self):
"""The current setting of the thermostat as temperature
:return: A float of the current setting of the temperature of the
thermostat
"""
return float(self.thermostat_info.current_set_point / 100)
@thermostat.setter
@property
def temperature(self):
"""The current actual temperature as perceived by toon.
:return: A float of the current temperature
"""
return float(self.thermostat_info.current_temperature / 100)
|
ioos/pyoos | pyoos/parsers/hads.py | HadsParser._parse_data | python | def _parse_data(self, raw_data, var_filter, time_extents):
retval = defaultdict(list)
p = parser()
begin_time, end_time = time_extents
for line in raw_data.splitlines():
if len(line) == 0:
continue
fields = line.split("|")[0:-1]
if var_filter is None or fields[2] in var_filter:
dt = p.parse(fields[3]).replace(tzinfo=pytz.utc)
if (begin_time is None or dt >= begin_time) and (
end_time is None or dt <= end_time
):
try:
value = (
float(fields[4]) if fields[4] != "NaN" else npNan
)
except ValueError:
value = npNan
retval[fields[0]].append((fields[2], dt, value))
return dict(retval) | Transforms raw HADS observations into a dict:
station code -> [(variable, time, value), ...]
Takes into account the var filter (if set). | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/hads.py#L127-L158 | null | class HadsParser(object):
def __init__(self):
pass
def parse(self, metadata, raw_data, var_filter, time_extents):
self.parsed_metadata = self._parse_metadata(metadata)
self.parsed_data = self._parse_data(raw_data, var_filter, time_extents)
self.feature = self._build_station_collection(
self.parsed_metadata, self.parsed_data
)
return self.feature
def _build_station_collection(self, parsed_metadata, parsed_data):
stations = []
for station_code, station_metadata in parsed_metadata.items():
s = Station()
s.uid = station_code
s.name = station_metadata["nwsli"]
s.location = sPoint(
station_metadata["longitude"], station_metadata["latitude"], 0
) # hads always vertically zero
s.set_property(
"location_description", station_metadata["location_text"]
)
s.set_property("state", station_metadata["state"])
s.set_property("country", "USA") # @TODO
s.set_property("vertical_units", "ft")
s.set_property("horizontal_crs", "EPSG:4326")
s.set_property("vertical_crs", None)
s.set_property("hsa", station_metadata["hsa"])
s.set_property("init_transmit", station_metadata["init_transmit"])
s.set_property("manufacturer", station_metadata["manufacturer"])
s.set_property("owner", station_metadata["owner"])
s.set_property("channel", station_metadata["channel"])
stations.append(s)
# data
# possibility no data for this station, or vars filtered all out
if station_code not in parsed_data:
continue
# need to group into distinct time/z value pairs
# create a keyfunc (creates string of <z>-<timestamp>)
zandtime = (
lambda x: str(x[3]) + "-" + str(time.mktime(x[1].timetuple()))
)
# annotate data with z values, sort, group by keyfunc (z/time)
grouped_data = groupby(
sorted(
(
(
x[0],
x[1],
x[2],
parsed_metadata[station_code]["variables"][x[0]][
"base_elevation"
],
)
for x in parsed_data[station_code]
),
key=zandtime,
),
zandtime,
)
for _, group in grouped_data:
# group is an iterator, turn it into a list (it will have at least one item)
groupvals = list(group)
p = Point()
p.time = groupvals[0][1]
p.location = sPoint(
station_metadata["longitude"],
station_metadata["latitude"],
groupvals[0][3],
)
for val in groupvals:
std_var = self.get_variable_info(val[0])
if std_var is None:
print(
"Unknown PE Code, ignoring: {} (station: {}).".format(
val[0], station_code
)
)
continue
p.add_member(
Member(
value=val[2],
standard=std_var[0],
unit=std_var[1],
name=std_var[2],
description=std_var[3],
)
)
s.add_element(p)
return StationCollection(elements=stations)
def _parse_metadata(self, metadata):
"""
Transforms raw HADS metadata into a dictionary (station code -> props)
"""
retval = {}
# these are the first keys, afterwards follows a var-len list of variables/props
# first key always blank so skip it
field_keys = [
"nesdis_id",
"nwsli",
"location_text",
"latitude",
"longitude",
"hsa",
"state",
"owner",
"manufacturer",
"channel",
"init_transmit", # HHMM
"trans_interval",
] # min
# repeat in blocks of 7 after field_keys
var_keys = [
"pe_code",
"data_interval", # min
"coefficient",
"constant",
"time_offset", # min
"base_elevation", # ft
"gauge_correction",
] # ft
lines = metadata.splitlines()
for line in lines:
if len(line) == 0:
continue
raw_fields = line.split("|")
fields = dict(zip(field_keys, raw_fields[1 : len(field_keys)]))
# how many blocks of var_keys after initial fields
var_offset = len(field_keys) + 1
var_blocks = (len(raw_fields) - var_offset) // len(
var_keys
) # how many variables
vars_only = raw_fields[var_offset:]
variables = {}
for offset in range(var_blocks):
var_dict = dict(
zip(
var_keys,
vars_only[
offset
* len(var_keys) : (offset + 1)
* len(var_keys)
],
)
)
variables[var_dict["pe_code"]] = var_dict
var_dict["base_elevation"] = float(var_dict["base_elevation"])
var_dict["gauge_correction"] = float(
var_dict["gauge_correction"]
)
del var_dict["pe_code"] # no need to duplicate
line_val = {"variables": variables}
line_val.update(fields)
# conversions
def dms_to_dd(dms):
parts = dms.split(" ")
sec = int(parts[1]) * 60 + int(parts[2])
return float(parts[0]) + (
sec / 3600.0
) # negative already in first portion
line_val["latitude"] = dms_to_dd(line_val["latitude"])
line_val["longitude"] = dms_to_dd(line_val["longitude"])
retval[line_val["nesdis_id"]] = line_val
return retval
@classmethod
def get_variable_info(cls, hads_var_name):
"""
Returns a tuple of (mmi name, units, english name, english description) or None.
"""
if hads_var_name == "UR":
return (
"wind_gust_from_direction",
"degrees from N",
"Wind Gust from Direction",
"Direction from which wind gust is blowing when maximum wind speed is observed. Meteorological Convention. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name in ["VJA", "TX"]:
return (
"air_temperature_maximum",
"f",
"Air Temperature Maximum",
"",
)
elif hads_var_name in ["VJB", "TN"]:
return (
"air_temperature_minimum",
"f",
"Air Temperature Minumum",
"",
)
elif hads_var_name == "PC": # PC2?
return (
"precipitation_accumulated",
"in",
"Precipitation Accumulated",
"Amount of liquid equivalent precipitation accumulated or totaled for a defined period of time, usually hourly, daily, or annually.",
)
elif hads_var_name == "PP":
return (
"precipitation_rate",
"in",
"Precipitation Rate",
"Amount of wet equivalent precipitation per unit time.",
)
elif hads_var_name == "US":
return (
"wind_speed",
"mph",
"Wind Speed",
"Magnitude of wind velocity. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name == "UD":
return (
"wind_from_direction",
"degrees_true",
"Wind from Direction",
"Direction from which wind is blowing. Meteorological Convention. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name in ["UP", "UG", "VUP"]:
return (
"wind_gust",
"mph",
"Wind Gust Speed",
"Maximum instantaneous wind speed (usually no more than but not limited to 10 seconds) within a sample averaging interval. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name in ["TA", "TA2"]:
return (
"air_temperature",
"f",
"Air Temperature",
"Air temperature is the bulk temperature of the air, not the surface (skin) temperature.",
)
elif hads_var_name == "MT":
return ("fuel_temperature", "f", "Fuel Temperature", "")
elif hads_var_name == "XR":
return ("relative_humidity", "percent", "Relative Humidity", "")
elif hads_var_name == "VB":
return ("battery_voltage", "voltage", "Battery Voltage", "")
elif hads_var_name == "MM":
return ("fuel_moisture", "percent", "Fuel Moisture", "")
elif hads_var_name == "RW":
return ("solar_radiation", "watt/m^2", "Solar Radiation", "")
elif hads_var_name == "RS":
return (
"photosynthetically_active_radiation",
"watt/m^2",
"Photosynthetically Active Radiation",
"",
)
elif hads_var_name == "TW": # TW2?
return (
"sea_water_temperature",
"f",
"Sea Water Temperature",
"Sea water temperature is the in situ temperature of the sea water.",
)
elif hads_var_name == "WT":
return (
"turbidity",
"nephelometric turbidity units",
"Turbidity",
"",
)
elif hads_var_name == "WC":
return (
"sea_water_electrical_conductivity",
"micro mhos/cm",
"Sea Water Electrical Conductivity",
"",
)
elif hads_var_name == "WP":
return (
"sea_water_ph_reported_on_total_scale",
"std units",
"Sea Water PH reported on Total Scale",
"the measure of acidity of seawater",
)
elif hads_var_name == "WO":
return ("dissolved_oxygen", "ppm", "Dissolved Oxygen", "")
elif hads_var_name == "WX":
return (
"dissolved_oxygen_saturation",
"percent",
"Dissolved Oxygen Saturation",
"",
)
elif hads_var_name == "TD":
return (
"dew_point_temperature",
"f",
"Dew Point Temperature",
"the temperature at which a parcel of air reaches saturation upon being cooled at constant pressure and specific humidity.",
)
elif hads_var_name == "HG": # HG2?
return ("stream_gage_height", "ft", "Stream Gage Height", "")
elif hads_var_name == "HP":
return (
"water_surface_height_above_reference_datum",
"ft",
"Water Surface Height Above Reference Datum",
"means the height of the upper surface of a body of liquid water, such as sea, lake or river, above an arbitrary reference datum.",
)
elif hads_var_name == "WS":
return ("salinity", "ppt", "Salinity", "")
elif hads_var_name == "HM":
return ("water_level", "ft", "Water Level", "")
elif hads_var_name == "PA":
return ("air_pressure", "hp", "Air Pressure", "")
elif hads_var_name == "SD":
return ("snow_depth", "in", "Snow Depth", "")
elif hads_var_name == "SW":
return ("snow_water_equivalent", "m", "Snow Water Equivalent", "")
elif hads_var_name == "TS":
return (
"soil_temperature",
"f",
"Soil Temperature",
"Soil temperature is the bulk temperature of the soil, not the surface (skin) temperature.",
)
return None
|
ioos/pyoos | pyoos/parsers/hads.py | HadsParser._parse_metadata | python | def _parse_metadata(self, metadata):
retval = {}
# these are the first keys, afterwards follows a var-len list of variables/props
# first key always blank so skip it
field_keys = [
"nesdis_id",
"nwsli",
"location_text",
"latitude",
"longitude",
"hsa",
"state",
"owner",
"manufacturer",
"channel",
"init_transmit", # HHMM
"trans_interval",
] # min
# repeat in blocks of 7 after field_keys
var_keys = [
"pe_code",
"data_interval", # min
"coefficient",
"constant",
"time_offset", # min
"base_elevation", # ft
"gauge_correction",
] # ft
lines = metadata.splitlines()
for line in lines:
if len(line) == 0:
continue
raw_fields = line.split("|")
fields = dict(zip(field_keys, raw_fields[1 : len(field_keys)]))
# how many blocks of var_keys after initial fields
var_offset = len(field_keys) + 1
var_blocks = (len(raw_fields) - var_offset) // len(
var_keys
) # how many variables
vars_only = raw_fields[var_offset:]
variables = {}
for offset in range(var_blocks):
var_dict = dict(
zip(
var_keys,
vars_only[
offset
* len(var_keys) : (offset + 1)
* len(var_keys)
],
)
)
variables[var_dict["pe_code"]] = var_dict
var_dict["base_elevation"] = float(var_dict["base_elevation"])
var_dict["gauge_correction"] = float(
var_dict["gauge_correction"]
)
del var_dict["pe_code"] # no need to duplicate
line_val = {"variables": variables}
line_val.update(fields)
# conversions
def dms_to_dd(dms):
parts = dms.split(" ")
sec = int(parts[1]) * 60 + int(parts[2])
return float(parts[0]) + (
sec / 3600.0
) # negative already in first portion
line_val["latitude"] = dms_to_dd(line_val["latitude"])
line_val["longitude"] = dms_to_dd(line_val["longitude"])
retval[line_val["nesdis_id"]] = line_val
return retval | Transforms raw HADS metadata into a dictionary (station code -> props) | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/hads.py#L160-L246 | [
"def dms_to_dd(dms):\n parts = dms.split(\" \")\n sec = int(parts[1]) * 60 + int(parts[2])\n return float(parts[0]) + (\n sec / 3600.0\n ) # negative already in first portion\n"
] | class HadsParser(object):
def __init__(self):
pass
def parse(self, metadata, raw_data, var_filter, time_extents):
self.parsed_metadata = self._parse_metadata(metadata)
self.parsed_data = self._parse_data(raw_data, var_filter, time_extents)
self.feature = self._build_station_collection(
self.parsed_metadata, self.parsed_data
)
return self.feature
def _build_station_collection(self, parsed_metadata, parsed_data):
stations = []
for station_code, station_metadata in parsed_metadata.items():
s = Station()
s.uid = station_code
s.name = station_metadata["nwsli"]
s.location = sPoint(
station_metadata["longitude"], station_metadata["latitude"], 0
) # hads always vertically zero
s.set_property(
"location_description", station_metadata["location_text"]
)
s.set_property("state", station_metadata["state"])
s.set_property("country", "USA") # @TODO
s.set_property("vertical_units", "ft")
s.set_property("horizontal_crs", "EPSG:4326")
s.set_property("vertical_crs", None)
s.set_property("hsa", station_metadata["hsa"])
s.set_property("init_transmit", station_metadata["init_transmit"])
s.set_property("manufacturer", station_metadata["manufacturer"])
s.set_property("owner", station_metadata["owner"])
s.set_property("channel", station_metadata["channel"])
stations.append(s)
# data
# possibility no data for this station, or vars filtered all out
if station_code not in parsed_data:
continue
# need to group into distinct time/z value pairs
# create a keyfunc (creates string of <z>-<timestamp>)
zandtime = (
lambda x: str(x[3]) + "-" + str(time.mktime(x[1].timetuple()))
)
# annotate data with z values, sort, group by keyfunc (z/time)
grouped_data = groupby(
sorted(
(
(
x[0],
x[1],
x[2],
parsed_metadata[station_code]["variables"][x[0]][
"base_elevation"
],
)
for x in parsed_data[station_code]
),
key=zandtime,
),
zandtime,
)
for _, group in grouped_data:
# group is an iterator, turn it into a list (it will have at least one item)
groupvals = list(group)
p = Point()
p.time = groupvals[0][1]
p.location = sPoint(
station_metadata["longitude"],
station_metadata["latitude"],
groupvals[0][3],
)
for val in groupvals:
std_var = self.get_variable_info(val[0])
if std_var is None:
print(
"Unknown PE Code, ignoring: {} (station: {}).".format(
val[0], station_code
)
)
continue
p.add_member(
Member(
value=val[2],
standard=std_var[0],
unit=std_var[1],
name=std_var[2],
description=std_var[3],
)
)
s.add_element(p)
return StationCollection(elements=stations)
def _parse_data(self, raw_data, var_filter, time_extents):
"""
Transforms raw HADS observations into a dict:
station code -> [(variable, time, value), ...]
Takes into account the var filter (if set).
"""
retval = defaultdict(list)
p = parser()
begin_time, end_time = time_extents
for line in raw_data.splitlines():
if len(line) == 0:
continue
fields = line.split("|")[0:-1]
if var_filter is None or fields[2] in var_filter:
dt = p.parse(fields[3]).replace(tzinfo=pytz.utc)
if (begin_time is None or dt >= begin_time) and (
end_time is None or dt <= end_time
):
try:
value = (
float(fields[4]) if fields[4] != "NaN" else npNan
)
except ValueError:
value = npNan
retval[fields[0]].append((fields[2], dt, value))
return dict(retval)
@classmethod
def get_variable_info(cls, hads_var_name):
"""
Returns a tuple of (mmi name, units, english name, english description) or None.
"""
if hads_var_name == "UR":
return (
"wind_gust_from_direction",
"degrees from N",
"Wind Gust from Direction",
"Direction from which wind gust is blowing when maximum wind speed is observed. Meteorological Convention. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name in ["VJA", "TX"]:
return (
"air_temperature_maximum",
"f",
"Air Temperature Maximum",
"",
)
elif hads_var_name in ["VJB", "TN"]:
return (
"air_temperature_minimum",
"f",
"Air Temperature Minumum",
"",
)
elif hads_var_name == "PC": # PC2?
return (
"precipitation_accumulated",
"in",
"Precipitation Accumulated",
"Amount of liquid equivalent precipitation accumulated or totaled for a defined period of time, usually hourly, daily, or annually.",
)
elif hads_var_name == "PP":
return (
"precipitation_rate",
"in",
"Precipitation Rate",
"Amount of wet equivalent precipitation per unit time.",
)
elif hads_var_name == "US":
return (
"wind_speed",
"mph",
"Wind Speed",
"Magnitude of wind velocity. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name == "UD":
return (
"wind_from_direction",
"degrees_true",
"Wind from Direction",
"Direction from which wind is blowing. Meteorological Convention. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name in ["UP", "UG", "VUP"]:
return (
"wind_gust",
"mph",
"Wind Gust Speed",
"Maximum instantaneous wind speed (usually no more than but not limited to 10 seconds) within a sample averaging interval. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name in ["TA", "TA2"]:
return (
"air_temperature",
"f",
"Air Temperature",
"Air temperature is the bulk temperature of the air, not the surface (skin) temperature.",
)
elif hads_var_name == "MT":
return ("fuel_temperature", "f", "Fuel Temperature", "")
elif hads_var_name == "XR":
return ("relative_humidity", "percent", "Relative Humidity", "")
elif hads_var_name == "VB":
return ("battery_voltage", "voltage", "Battery Voltage", "")
elif hads_var_name == "MM":
return ("fuel_moisture", "percent", "Fuel Moisture", "")
elif hads_var_name == "RW":
return ("solar_radiation", "watt/m^2", "Solar Radiation", "")
elif hads_var_name == "RS":
return (
"photosynthetically_active_radiation",
"watt/m^2",
"Photosynthetically Active Radiation",
"",
)
elif hads_var_name == "TW": # TW2?
return (
"sea_water_temperature",
"f",
"Sea Water Temperature",
"Sea water temperature is the in situ temperature of the sea water.",
)
elif hads_var_name == "WT":
return (
"turbidity",
"nephelometric turbidity units",
"Turbidity",
"",
)
elif hads_var_name == "WC":
return (
"sea_water_electrical_conductivity",
"micro mhos/cm",
"Sea Water Electrical Conductivity",
"",
)
elif hads_var_name == "WP":
return (
"sea_water_ph_reported_on_total_scale",
"std units",
"Sea Water PH reported on Total Scale",
"the measure of acidity of seawater",
)
elif hads_var_name == "WO":
return ("dissolved_oxygen", "ppm", "Dissolved Oxygen", "")
elif hads_var_name == "WX":
return (
"dissolved_oxygen_saturation",
"percent",
"Dissolved Oxygen Saturation",
"",
)
elif hads_var_name == "TD":
return (
"dew_point_temperature",
"f",
"Dew Point Temperature",
"the temperature at which a parcel of air reaches saturation upon being cooled at constant pressure and specific humidity.",
)
elif hads_var_name == "HG": # HG2?
return ("stream_gage_height", "ft", "Stream Gage Height", "")
elif hads_var_name == "HP":
return (
"water_surface_height_above_reference_datum",
"ft",
"Water Surface Height Above Reference Datum",
"means the height of the upper surface of a body of liquid water, such as sea, lake or river, above an arbitrary reference datum.",
)
elif hads_var_name == "WS":
return ("salinity", "ppt", "Salinity", "")
elif hads_var_name == "HM":
return ("water_level", "ft", "Water Level", "")
elif hads_var_name == "PA":
return ("air_pressure", "hp", "Air Pressure", "")
elif hads_var_name == "SD":
return ("snow_depth", "in", "Snow Depth", "")
elif hads_var_name == "SW":
return ("snow_water_equivalent", "m", "Snow Water Equivalent", "")
elif hads_var_name == "TS":
return (
"soil_temperature",
"f",
"Soil Temperature",
"Soil temperature is the bulk temperature of the soil, not the surface (skin) temperature.",
)
return None
|
ioos/pyoos | pyoos/parsers/hads.py | HadsParser.get_variable_info | python | def get_variable_info(cls, hads_var_name):
if hads_var_name == "UR":
return (
"wind_gust_from_direction",
"degrees from N",
"Wind Gust from Direction",
"Direction from which wind gust is blowing when maximum wind speed is observed. Meteorological Convention. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name in ["VJA", "TX"]:
return (
"air_temperature_maximum",
"f",
"Air Temperature Maximum",
"",
)
elif hads_var_name in ["VJB", "TN"]:
return (
"air_temperature_minimum",
"f",
"Air Temperature Minumum",
"",
)
elif hads_var_name == "PC": # PC2?
return (
"precipitation_accumulated",
"in",
"Precipitation Accumulated",
"Amount of liquid equivalent precipitation accumulated or totaled for a defined period of time, usually hourly, daily, or annually.",
)
elif hads_var_name == "PP":
return (
"precipitation_rate",
"in",
"Precipitation Rate",
"Amount of wet equivalent precipitation per unit time.",
)
elif hads_var_name == "US":
return (
"wind_speed",
"mph",
"Wind Speed",
"Magnitude of wind velocity. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name == "UD":
return (
"wind_from_direction",
"degrees_true",
"Wind from Direction",
"Direction from which wind is blowing. Meteorological Convention. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name in ["UP", "UG", "VUP"]:
return (
"wind_gust",
"mph",
"Wind Gust Speed",
"Maximum instantaneous wind speed (usually no more than but not limited to 10 seconds) within a sample averaging interval. Wind is motion of air relative to the surface of the earth.",
)
elif hads_var_name in ["TA", "TA2"]:
return (
"air_temperature",
"f",
"Air Temperature",
"Air temperature is the bulk temperature of the air, not the surface (skin) temperature.",
)
elif hads_var_name == "MT":
return ("fuel_temperature", "f", "Fuel Temperature", "")
elif hads_var_name == "XR":
return ("relative_humidity", "percent", "Relative Humidity", "")
elif hads_var_name == "VB":
return ("battery_voltage", "voltage", "Battery Voltage", "")
elif hads_var_name == "MM":
return ("fuel_moisture", "percent", "Fuel Moisture", "")
elif hads_var_name == "RW":
return ("solar_radiation", "watt/m^2", "Solar Radiation", "")
elif hads_var_name == "RS":
return (
"photosynthetically_active_radiation",
"watt/m^2",
"Photosynthetically Active Radiation",
"",
)
elif hads_var_name == "TW": # TW2?
return (
"sea_water_temperature",
"f",
"Sea Water Temperature",
"Sea water temperature is the in situ temperature of the sea water.",
)
elif hads_var_name == "WT":
return (
"turbidity",
"nephelometric turbidity units",
"Turbidity",
"",
)
elif hads_var_name == "WC":
return (
"sea_water_electrical_conductivity",
"micro mhos/cm",
"Sea Water Electrical Conductivity",
"",
)
elif hads_var_name == "WP":
return (
"sea_water_ph_reported_on_total_scale",
"std units",
"Sea Water PH reported on Total Scale",
"the measure of acidity of seawater",
)
elif hads_var_name == "WO":
return ("dissolved_oxygen", "ppm", "Dissolved Oxygen", "")
elif hads_var_name == "WX":
return (
"dissolved_oxygen_saturation",
"percent",
"Dissolved Oxygen Saturation",
"",
)
elif hads_var_name == "TD":
return (
"dew_point_temperature",
"f",
"Dew Point Temperature",
"the temperature at which a parcel of air reaches saturation upon being cooled at constant pressure and specific humidity.",
)
elif hads_var_name == "HG": # HG2?
return ("stream_gage_height", "ft", "Stream Gage Height", "")
elif hads_var_name == "HP":
return (
"water_surface_height_above_reference_datum",
"ft",
"Water Surface Height Above Reference Datum",
"means the height of the upper surface of a body of liquid water, such as sea, lake or river, above an arbitrary reference datum.",
)
elif hads_var_name == "WS":
return ("salinity", "ppt", "Salinity", "")
elif hads_var_name == "HM":
return ("water_level", "ft", "Water Level", "")
elif hads_var_name == "PA":
return ("air_pressure", "hp", "Air Pressure", "")
elif hads_var_name == "SD":
return ("snow_depth", "in", "Snow Depth", "")
elif hads_var_name == "SW":
return ("snow_water_equivalent", "m", "Snow Water Equivalent", "")
elif hads_var_name == "TS":
return (
"soil_temperature",
"f",
"Soil Temperature",
"Soil temperature is the bulk temperature of the soil, not the surface (skin) temperature.",
)
return None | Returns a tuple of (mmi name, units, english name, english description) or None. | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/hads.py#L249-L404 | null | class HadsParser(object):
def __init__(self):
    # Stateless constructor: parse() populates parsed_metadata,
    # parsed_data and feature on demand.
    pass
def parse(self, metadata, raw_data, var_filter, time_extents):
    """Parse raw HADS metadata and observations into a station collection.

    Caches the intermediate results on the instance (``parsed_metadata``,
    ``parsed_data``, ``feature``) and returns the built collection.
    """
    station_props = self._parse_metadata(metadata)
    self.parsed_metadata = station_props
    observations = self._parse_data(raw_data, var_filter, time_extents)
    self.parsed_data = observations
    self.feature = self._build_station_collection(
        station_props, observations
    )
    return self.feature
def _build_station_collection(self, parsed_metadata, parsed_data):
    """
    Assemble a StationCollection from parsed HADS metadata and data.

    ``parsed_metadata`` maps station code -> metadata dict (see
    ``_parse_metadata``); ``parsed_data`` maps station code -> list of
    (variable, time, value) tuples (see ``_parse_data``).  Stations
    without any data still contribute an (empty) Station element.
    """
    stations = []
    for station_code, station_metadata in parsed_metadata.items():
        s = Station()
        s.uid = station_code
        s.name = station_metadata["nwsli"]
        s.location = sPoint(
            station_metadata["longitude"], station_metadata["latitude"], 0
        )  # hads always vertically zero
        s.set_property(
            "location_description", station_metadata["location_text"]
        )
        s.set_property("state", station_metadata["state"])
        s.set_property("country", "USA")  # @TODO
        s.set_property("vertical_units", "ft")
        s.set_property("horizontal_crs", "EPSG:4326")
        s.set_property("vertical_crs", None)
        s.set_property("hsa", station_metadata["hsa"])
        s.set_property("init_transmit", station_metadata["init_transmit"])
        s.set_property("manufacturer", station_metadata["manufacturer"])
        s.set_property("owner", station_metadata["owner"])
        s.set_property("channel", station_metadata["channel"])
        stations.append(s)
        # data
        # possibility no data for this station, or vars filtered all out
        if station_code not in parsed_data:
            continue
        # need to group into distinct time/z value pairs
        # create a keyfunc (creates string of <z>-<timestamp>)
        zandtime = (
            lambda x: str(x[3]) + "-" + str(time.mktime(x[1].timetuple()))
        )
        # annotate data with z values, sort, group by keyfunc (z/time);
        # pre-sorting by the same key is required for groupby to yield
        # each distinct z/time pair exactly once
        grouped_data = groupby(
            sorted(
                (
                    (
                        x[0],
                        x[1],
                        x[2],
                        parsed_metadata[station_code]["variables"][x[0]][
                            "base_elevation"
                        ],
                    )
                    for x in parsed_data[station_code]
                ),
                key=zandtime,
            ),
            zandtime,
        )
        # one Point per distinct (z, time); one Member per recognized
        # PE-code variable at that point
        for _, group in grouped_data:
            # group is an iterator, turn it into a list (it will have at least one item)
            groupvals = list(group)
            p = Point()
            p.time = groupvals[0][1]
            p.location = sPoint(
                station_metadata["longitude"],
                station_metadata["latitude"],
                groupvals[0][3],
            )
            for val in groupvals:
                std_var = self.get_variable_info(val[0])
                if std_var is None:
                    # Unknown PE codes are reported and skipped rather
                    # than aborting the whole parse.
                    print(
                        "Unknown PE Code, ignoring: {} (station: {}).".format(
                            val[0], station_code
                        )
                    )
                    continue
                p.add_member(
                    Member(
                        value=val[2],
                        standard=std_var[0],
                        unit=std_var[1],
                        name=std_var[2],
                        description=std_var[3],
                    )
                )
            s.add_element(p)
    return StationCollection(elements=stations)
def _parse_data(self, raw_data, var_filter, time_extents):
    """
    Transform raw HADS observations into a dict:
    station code -> [(variable, time, value), ...]

    Honors the variable filter (if set) and the (begin, end) extents in
    ``time_extents`` (either bound may be None for open-ended).
    """
    begin_time, end_time = time_extents
    date_parser = parser()
    by_station = defaultdict(list)
    for raw_line in raw_data.splitlines():
        if not raw_line:
            continue
        cols = raw_line.split("|")[0:-1]
        if var_filter is not None and cols[2] not in var_filter:
            continue
        # timestamps are naive in the feed; HADS times are UTC
        dt = date_parser.parse(cols[3]).replace(tzinfo=pytz.utc)
        if begin_time is not None and dt < begin_time:
            continue
        if end_time is not None and dt > end_time:
            continue
        if cols[4] == "NaN":
            value = npNan
        else:
            try:
                value = float(cols[4])
            except ValueError:
                value = npNan
        by_station[cols[0]].append((cols[2], dt, value))
    return dict(by_station)
def _parse_metadata(self, metadata):
"""
Transforms raw HADS metadata into a dictionary (station code -> props)
"""
retval = {}
# these are the first keys, afterwards follows a var-len list of variables/props
# first key always blank so skip it
field_keys = [
"nesdis_id",
"nwsli",
"location_text",
"latitude",
"longitude",
"hsa",
"state",
"owner",
"manufacturer",
"channel",
"init_transmit", # HHMM
"trans_interval",
] # min
# repeat in blocks of 7 after field_keys
var_keys = [
"pe_code",
"data_interval", # min
"coefficient",
"constant",
"time_offset", # min
"base_elevation", # ft
"gauge_correction",
] # ft
lines = metadata.splitlines()
for line in lines:
if len(line) == 0:
continue
raw_fields = line.split("|")
fields = dict(zip(field_keys, raw_fields[1 : len(field_keys)]))
# how many blocks of var_keys after initial fields
var_offset = len(field_keys) + 1
var_blocks = (len(raw_fields) - var_offset) // len(
var_keys
) # how many variables
vars_only = raw_fields[var_offset:]
variables = {}
for offset in range(var_blocks):
var_dict = dict(
zip(
var_keys,
vars_only[
offset
* len(var_keys) : (offset + 1)
* len(var_keys)
],
)
)
variables[var_dict["pe_code"]] = var_dict
var_dict["base_elevation"] = float(var_dict["base_elevation"])
var_dict["gauge_correction"] = float(
var_dict["gauge_correction"]
)
del var_dict["pe_code"] # no need to duplicate
line_val = {"variables": variables}
line_val.update(fields)
# conversions
def dms_to_dd(dms):
parts = dms.split(" ")
sec = int(parts[1]) * 60 + int(parts[2])
return float(parts[0]) + (
sec / 3600.0
) # negative already in first portion
line_val["latitude"] = dms_to_dd(line_val["latitude"])
line_val["longitude"] = dms_to_dd(line_val["longitude"])
retval[line_val["nesdis_id"]] = line_val
return retval
@classmethod
|
ioos/pyoos | pyoos/utils/asatime.py | AsaTime.parse | python | def parse(cls, date_string):
try:
date = dateparser.parse(date_string)
if date.tzinfo is None:
date = dateparser.parse(date_string, tzinfos=cls.tzd)
return date
except Exception:
raise ValueError("Could not parse date string!") | Parse any time string. Use a custom timezone matching if
the original matching does not pull one out. | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/utils/asatime.py#L54-L65 | null | class AsaTime(object):
# Timezone abbreviation table, one line per UTC offset:
# "<offset-hours> <abbrev> <abbrev> ...".  Parsed below into ``tzd``.
tz_str = """-12 Y
-11 X NUT SST
-10 W CKT HAST HST TAHT TKT
-9 V AKST GAMT GIT HADT HNY
-8 U AKDT CIST HAY HNP PST PT
-7 T HAP HNR MST PDT
-6 S CST EAST GALT HAR HNC MDT
-5 R CDT COT EASST ECT EST ET HAC HNE PET
-4 Q AST BOT CLT COST EDT FKT GYT HAE HNA PYT
-3 P ADT ART BRT CLST FKST GFT HAA PMST PYST SRT UYT WGT
-2 O BRST FNT PMDT UYST WGST
-1 N AZOT CVT EGT
0 Z EGST GMT UTC WET WT
1 A CET DFT WAT WEDT WEST
2 B CAT CEDT CEST EET SAST WAST
3 C EAT EEDT EEST IDT MSK
4 D AMT AZT GET GST KUYT MSD MUT RET SAMT SCT
5 E AMST AQTT AZST HMT MAWT MVT PKT TFT TJT TMT UZT YEKT
6 F ALMT BIOT BTT IOT KGT NOVT OMST YEKST
7 G CXT DAVT HOVT ICT KRAT NOVST OMSST THA WIB
8 H ACT AWST BDT BNT CAST HKT IRKT KRAST MYT PHT SGT ULAT WITA WST
9 I AWDT IRKST JST KST PWT TLT WDT WIT YAKT
10 K AEST ChST PGT VLAT YAKST YAPT
11 L AEDT LHDT MAGT NCT PONT SBT VLAST VUT
12 M ANAST ANAT FJT GILT MAGST MHT NZST PETST PETT TVT WFT
13 FJST NZDT
11.5 NFT
10.5 ACDT LHST
9.5 ACST
6.5 CCT MMT
5.75 NPT
5.5 SLT
4.5 AFT IRDT
3.5 IRST
-2.5 HAT NDT
-3.5 HNT NST NT
-4.5 HLV VET
-9.5 MART MIT"""

# Flatten the table into {abbreviation: offset-in-seconds}, the mapping
# shape dateutil accepts for its ``tzinfos`` argument.
tzd = {}
for tz_descr in map(str.split, tz_str.split("\n")):
    tz_offset = int(float(tz_descr[0]) * 3600)
    for tz_code in tz_descr[1:]:
        tzd[tz_code] = tz_offset
@classmethod
|
ioos/pyoos | pyoos/collectors/ioos/swe_sos.py | IoosSweSos.metadata | python | def metadata(
self, output_format=None, feature_name_callback=None, **kwargs
):
callback = feature_name_callback or str
if output_format is None:
output_format = (
'text/xml; subtype="sensorML/1.0.1/profiles/ioos_sos/1.0"'
)
responses = []
if self.features is not None:
for feature in self.features:
ds_kwargs = kwargs.copy()
ds_kwargs.update(
{
"outputFormat": output_format,
"procedure": callback(feature),
}
)
responses.append(
SensorML(self.server.describe_sensor(**ds_kwargs))
)
return responses | Gets SensorML objects for all procedures in your filtered features.
You should override the default output_format for servers that do not
respond properly. | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/ioos/swe_sos.py#L18-L48 | null | class IoosSweSos(Collector):
def __init__(self, url, xml=None):
    # ``xml``, when given, is handed to the Sos client -- presumably a
    # pre-fetched capabilities document; confirm against Sos.__init__.
    super(IoosSweSos, self).__init__()
    self.server = Sos(url, xml=xml)
def metadata_plus_exceptions(
    self, output_format=None, feature_name_callback=None, **kwargs
):
    """
    Fetch SensorML for every filtered feature, capturing service errors.

    Returns a tuple of two dicts, both keyed by feature:
    - successes: feature -> SensorML document
    - failures:  feature -> exception text (ServiceException /
      ExceptionReport) raised by the service

    You should override the default output_format for servers that do
    not respond properly.
    """
    to_name = feature_name_callback or str
    if output_format is None:
        output_format = (
            'text/xml; subtype="sensorML/1.0.1/profiles/ioos_sos/1.0"'
        )
    successes = {}
    failures = {}
    for feature in (self.features if self.features is not None else []):
        query = dict(
            kwargs, outputFormat=output_format, procedure=to_name(feature)
        )
        try:
            successes[feature] = SensorML(
                self.server.describe_sensor(**query)
            )
        except (ServiceException, ExceptionReport) as err:
            failures[feature] = str(err)
    return (successes, failures)
def setup_params(self, **kwargs):
    """
    Translate the collector's filters into SOS GetObservation kwargs.

    Adds ``featureOfInterest`` (BBOX), ``eventTime`` and
    ``observedProperties`` on top of any caller-supplied kwargs and
    returns the combined dict.

    Raises
    ------
    ValueError
        If no variables (observedProperty) filter has been set.
    """
    params = dict(kwargs)  # copy: never mutate the caller's dict
    if self.bbox is not None:
        params["featureOfInterest"] = "BBOX:%s,%s,%s,%s" % (
            self.bbox[0],
            self.bbox[1],
            self.bbox[2],
            self.bbox[3],
        )
    iso = "%Y-%m-%dT%H:%M:%SZ"
    if self.start_time is not None:
        params["eventTime"] = self.start_time.strftime(iso)
        if self.end_time is not None:
            params["eventTime"] += "/%s" % self.end_time.strftime(iso)
    elif self.end_time is not None:
        # BUGFIX: an end_time without a start_time previously raised
        # KeyError on the "+=" above; send the single instant instead.
        params["eventTime"] = self.end_time.strftime(iso)
    if self.variables is None or len(self.variables) < 1:
        raise ValueError(
            "You must set a filter for at least one variable (observedProperty)"
        )
    # Historical wire format preserved: a one-element list holding the
    # comma-joined variables.  (The old isinstance/string_types check
    # always took this path, since str.join returns a str.)
    params["observedProperties"] = [",".join(self.variables)]
    return params
def collect(self, **kwargs):
    """
    Run GetObservation with the current filters and return observations.

    When the caller did not pass ``responseFormat``, pick an
    "ioos_sos/1.0" format advertised by the requested offerings.
    """
    # there is an unfortunate difference in how 52N and ncSOS handle the response format.
    # 52N expects subtype, ncSOS expects schema.
    # consult the observed properties and getcaps to figure out which should be used if none passed
    if "responseFormat" not in kwargs:
        # iterate offerings and see if we need to change to subtype
        off_dict = {off.name: off for off in self.server.offerings}
        response_format = None
        for offering in kwargs.get("offerings", []):
            if offering not in off_dict:
                continue
            ioos_formats = [
                rf
                for rf in off_dict[offering].response_formats
                if "ioos_sos/1.0" in rf
            ]
            if not len(ioos_formats):
                raise Exception(
                    "No ioos_sos/1.0 response format found for offering {}".format(
                        offering
                    )
                )
            if response_format != ioos_formats[0]:
                response_format = ioos_formats[0]
            kwargs["responseFormat"] = response_format
    return IoosGetObservation(self.raw(**kwargs)).observations
def raw(self, **kwargs):
    """Return the raw GetObservation response for the current filters."""
    return self.server.get_observation(**self.setup_params(**kwargs))
|
ioos/pyoos | pyoos/collectors/ioos/swe_sos.py | IoosSweSos.metadata_plus_exceptions | python | def metadata_plus_exceptions(
self, output_format=None, feature_name_callback=None, **kwargs
):
callback = feature_name_callback or str
if output_format is None:
output_format = (
'text/xml; subtype="sensorML/1.0.1/profiles/ioos_sos/1.0"'
)
responses = {}
response_failures = {}
if self.features is not None:
for feature in self.features:
ds_kwargs = kwargs.copy()
ds_kwargs.update(
{
"outputFormat": output_format,
"procedure": callback(feature),
}
)
try:
responses[feature] = SensorML(
self.server.describe_sensor(**ds_kwargs)
)
except (ServiceException, ExceptionReport) as e:
response_failures[feature] = str(e)
return (responses, response_failures) | Gets SensorML objects for all procedures in your filtered features.
Return two dictionaries for service responses keyed by 'feature':
responses: values are SOS DescribeSensor response text
response_failures: values are exception text content furnished from ServiceException, ExceptionReport
You should override the default output_format for servers that do not
respond properly. | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/ioos/swe_sos.py#L50-L87 | null | class IoosSweSos(Collector):
def __init__(self, url, xml=None):
super(IoosSweSos, self).__init__()
self.server = Sos(url, xml=xml)
def metadata(
    self, output_format=None, feature_name_callback=None, **kwargs
):
    """
    Gets SensorML objects for all procedures in your filtered features.

    ``feature_name_callback`` maps each feature to the procedure id sent
    to DescribeSensor (defaults to ``str``).  You should override the
    default output_format for servers that do not respond properly.
    Service errors propagate (see ``metadata_plus_exceptions`` for a
    capturing variant).
    """
    callback = feature_name_callback or str
    if output_format is None:
        output_format = (
            'text/xml; subtype="sensorML/1.0.1/profiles/ioos_sos/1.0"'
        )
    responses = []
    if self.features is not None:
        for feature in self.features:
            ds_kwargs = kwargs.copy()
            ds_kwargs.update(
                {
                    "outputFormat": output_format,
                    "procedure": callback(feature),
                }
            )
            responses.append(
                SensorML(self.server.describe_sensor(**ds_kwargs))
            )
    return responses
def setup_params(self, **kwargs):
params = kwargs
if self.bbox is not None:
params["featureOfInterest"] = "BBOX:%s,%s,%s,%s" % (
self.bbox[0],
self.bbox[1],
self.bbox[2],
self.bbox[3],
)
if self.start_time is not None:
params["eventTime"] = self.start_time.strftime(
"%Y-%m-%dT%H:%M:%SZ"
)
if self.end_time is not None:
params["eventTime"] += "/%s" % self.end_time.strftime(
"%Y-%m-%dT%H:%M:%SZ"
)
if self.variables is None or len(self.variables) < 1:
raise ValueError(
"You must set a filter for at least one variable (observedProperty)"
)
else:
ops = ",".join(self.variables)
if isinstance(ops, string_types):
ops = [ops]
params["observedProperties"] = ops
return params
def collect(self, **kwargs):
# there is an unfortunate difference in how 52N and ncSOS handle the response format.
# 52N expects subtype, ncSOS expects schema.
# consult the observed properties and getcaps to figure out which should be used if none passed
if "responseFormat" not in kwargs:
# iterate offerings and see if we need to change to subtype
off_dict = {off.name: off for off in self.server.offerings}
response_format = None
for offering in kwargs.get("offerings", []):
if offering not in off_dict:
continue
ioos_formats = [
rf
for rf in off_dict[offering].response_formats
if "ioos_sos/1.0" in rf
]
if not len(ioos_formats):
raise Exception(
"No ioos_sos/1.0 response format found for offering {}".format(
offering
)
)
if response_format != ioos_formats[0]:
response_format = ioos_formats[0]
kwargs["responseFormat"] = response_format
return IoosGetObservation(self.raw(**kwargs)).observations
def raw(self, **kwargs):
params = self.setup_params(**kwargs)
return self.server.get_observation(**params)
|
ioos/pyoos | pyoos/collectors/hads/hads.py | Hads.list_variables | python | def list_variables(self):
station_codes = self._get_station_codes()
station_codes = self._apply_features_filter(station_codes)
variables = self._list_variables(station_codes)
if hasattr(self, "_variables") and self.variables is not None:
variables.intersection_update(set(self.variables))
return list(variables) | List available variables and applies any filters. | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/hads/hads.py#L50-L61 | [
"def _list_variables(self, station_codes):\n \"\"\"\n Internal helper to list the variables for the given station codes.\n \"\"\"\n # sample output from obs retrieval:\n #\n # DD9452D0\n # HP(SRBM5)\n # 2013-07-22 19:30 45.97\n # HT(SRBM5)\n # 2013-07-22 19:30 44.29\n # PC(SRBM5)\n # 2013-07-22 19:30 36.19\n #\n rvar = re.compile(r\"\\n\\s([A-Z]{2}[A-Z0-9]{0,1})\\(\\w+\\)\")\n\n variables = set()\n resp = requests.post(\n self.obs_retrieval_url,\n data={\n \"state\": \"nil\",\n \"hsa\": \"nil\",\n \"of\": \"3\",\n \"extraids\": \" \".join(station_codes),\n \"sinceday\": -1,\n },\n )\n resp.raise_for_status()\n\n list(map(variables.add, rvar.findall(resp.text)))\n return variables\n",
"def _apply_features_filter(self, station_codes):\n \"\"\"\n If the features filter is set, this will return the intersection of\n those filter items and the given station codes.\n \"\"\"\n # apply features filter\n if hasattr(self, \"features\") and self.features is not None:\n station_codes = set(station_codes)\n station_codes = list(\n station_codes.intersection(set(self.features))\n )\n\n return station_codes\n",
"def _get_station_codes(self, force=False):\n \"\"\"\n Gets and caches a list of station codes optionally within a bbox.\n\n Will return the cached version if it exists unless force is True.\n \"\"\"\n if not force and self.station_codes is not None:\n return self.station_codes\n\n state_urls = self._get_state_urls()\n\n # filter by bounding box against a shapefile\n state_matches = None\n\n if self.bbox:\n with collection(\n os.path.join(\n \"resources\",\n \"ne_50m_admin_1_states_provinces_lakes_shp.shp\",\n ),\n \"r\",\n ) as c:\n geom_matches = [\n x[\"properties\"] for x in c.filter(bbox=self.bbox)\n ]\n state_matches = [\n x[\"postal\"] if x[\"admin\"] != \"Canada\" else \"CN\"\n for x in geom_matches\n ]\n\n self.station_codes = []\n\n for state_url in state_urls:\n if state_matches is not None:\n state_abbr = state_url.split(\"/\")[-1].split(\".\")[0]\n if state_abbr not in state_matches:\n continue\n\n self.station_codes.extend(self._get_stations_for_state(state_url))\n\n if self.bbox:\n # retrieve metadata for all stations to properly filter them\n metadata = self._get_metadata(self.station_codes)\n parsed_metadata = self.parser._parse_metadata(metadata)\n\n def in_bbox(code):\n lat = parsed_metadata[code][\"latitude\"]\n lon = parsed_metadata[code][\"longitude\"]\n\n return (\n lon >= self.bbox[0]\n and lon <= self.bbox[2]\n and lat >= self.bbox[1]\n and lat <= self.bbox[3]\n )\n\n self.station_codes = list(filter(in_bbox, self.station_codes))\n\n return self.station_codes\n"
] | class Hads(Collector):
def __init__(self, **kwargs):
    """HADS collector; kwargs may override the three NWS service URLs."""
    super(Hads, self).__init__()
    # Page of per-state maps, scraped for links to station listings.
    self.states_url = kwargs.get(
        "states_url", "https://hads.ncep.noaa.gov/hads/goog_earth/"
    )
    # DCPInfo servlet: station metadata.
    self.metadata_url = kwargs.get(
        "metadata_url",
        "https://hads.ncep.noaa.gov/nexhads2/servlet/DCPInfo",
    )
    # DecodedData servlet: decoded observations.
    self.obs_retrieval_url = kwargs.get(
        "obs_retrieval_url",
        "https://hads.ncep.noaa.gov/nexhads2/servlet/DecodedData",
    )
    # Cached station code list; invalidated by clear() and the
    # bbox/features setters below.
    self.station_codes = None
    self.parser = HadsParser()
def clear(self):
    # Reset inherited filters and drop the cached station code list.
    super(Hads, self).clear()
    self.station_codes = None
@Collector.bbox.setter
def bbox(self, bbox):
    # Changing the bbox invalidates the cached station list.
    Collector.bbox.fset(self, bbox)
    self.station_codes = None
@Collector.features.setter
def features(self, features):
    # Changing the features filter invalidates the cached station list.
    Collector.features.fset(self, features)
    self.station_codes = None
def _list_variables(self, station_codes):
    """
    Query the decoded-data servlet and return the set of PE variable
    codes reported for the given station codes.

    The "of=3" response interleaves station ids with lines like
    " HP(SRBM5)" / " 2013-07-22 19:30 45.97"; the regex below captures
    the PE code (e.g. "HP") from each variable line.
    """
    pe_code_pattern = re.compile(r"\n\s([A-Z]{2}[A-Z0-9]{0,1})\(\w+\)")
    response = requests.post(
        self.obs_retrieval_url,
        data={
            "state": "nil",
            "hsa": "nil",
            "of": "3",
            "extraids": " ".join(station_codes),
            "sinceday": -1,
        },
    )
    response.raise_for_status()
    return set(pe_code_pattern.findall(response.text))
def list_features(self):
    """Return the station codes that pass the features filter."""
    return self._apply_features_filter(self._get_station_codes())
def collect(self, **kwargs):
    """
    Fetch metadata and observations, then parse them into features.

    The variables filter and the start/end time filters (when set on
    this collector) are forwarded to the parser.
    """
    var_filter = None
    if hasattr(self, "_variables"):
        var_filter = self._variables
    time_extents = (
        self.start_time if hasattr(self, "start_time") else None,
        self.end_time if hasattr(self, "end_time") else None,
    )
    metadata, raw_data = self.raw(**kwargs)
    return self.parser.parse(metadata, raw_data, var_filter, time_extents)
def raw(self, format=None, **kwargs):
    """
    Returns a tuple of (metadata, raw data)

    ``format`` is accepted for interface compatibility but unused here.
    Extra kwargs (e.g. ``verify``) are forwarded to the HTTP fetches.
    """
    station_codes = self._apply_features_filter(self._get_station_codes())
    metadata = self._get_metadata(station_codes, **kwargs)
    raw_data = self._get_raw_data(station_codes, **kwargs)
    return (metadata, raw_data)
def _apply_features_filter(self, station_codes):
    """
    Intersect *station_codes* with the features filter, if one is set.

    Returns the input unchanged when no filter is active; otherwise a
    list (arbitrary order) of the codes present in both.
    """
    wanted = getattr(self, "features", None)
    if wanted is None:
        return station_codes
    return list(set(station_codes) & set(wanted))
def _get_metadata(self, station_codes, **kwargs):
    """POST to the DCPInfo servlet and return the raw metadata text."""
    response = requests.post(
        self.metadata_url,
        data={
            "state": "nil",
            "hsa": "nil",
            "of": "1",
            "extraids": " ".join(station_codes),
            "data": "Get Meta Data",
        },
        # default True mirrors requests' own default
        verify=kwargs.get("verify", True),
    )
    response.raise_for_status()
    return response.text
def _get_station_codes(self, force=False):
    """
    Gets and caches a list of station codes optionally within a bbox.

    Will return the cached version if it exists unless force is True.
    When a bbox filter is set, states are pre-filtered against the
    Natural Earth 1:50m admin-1 shapefile (coarse pass), then each
    remaining station is checked against its parsed lat/lon (exact pass).
    """
    if not force and self.station_codes is not None:
        return self.station_codes
    state_urls = self._get_state_urls()
    # filter by bounding box against a shapefile
    state_matches = None
    if self.bbox:
        with collection(
            os.path.join(
                "resources",
                "ne_50m_admin_1_states_provinces_lakes_shp.shp",
            ),
            "r",
        ) as c:
            geom_matches = [
                x["properties"] for x in c.filter(bbox=self.bbox)
            ]
            # Canadian provinces are collapsed to the single "CN" code
            state_matches = [
                x["postal"] if x["admin"] != "Canada" else "CN"
                for x in geom_matches
            ]
    self.station_codes = []
    for state_url in state_urls:
        if state_matches is not None:
            # state abbreviation is the URL's file stem, e.g. ".../OR.html"
            state_abbr = state_url.split("/")[-1].split(".")[0]
            if state_abbr not in state_matches:
                continue
        self.station_codes.extend(self._get_stations_for_state(state_url))
    if self.bbox:
        # retrieve metadata for all stations to properly filter them
        metadata = self._get_metadata(self.station_codes)
        parsed_metadata = self.parser._parse_metadata(metadata)

        def in_bbox(code):
            lat = parsed_metadata[code]["latitude"]
            lon = parsed_metadata[code]["longitude"]
            return (
                lon >= self.bbox[0]
                and lon <= self.bbox[2]
                and lat >= self.bbox[1]
                and lat <= self.bbox[3]
            )

        self.station_codes = list(filter(in_bbox, self.station_codes))
    return self.station_codes
def _get_state_urls(self):
    """Scrape the HADS states page for per-state links (deduplicated)."""
    soup = BeautifulSoup(requests.get(self.states_url).text)
    hrefs = {area.attrs.get("href", None) for area in soup.find_all("area")}
    return list(hrefs)
def _get_stations_for_state(self, state_url):
    """Extract NESDIS station ids from a state listing page."""
    page = BeautifulSoup(requests.get(state_url).text)
    candidate_ids = (
        anchor.attrs["href"].split("nesdis_id=")[-1]
        for anchor in page.find_all("a")
    )
    # anchors without a nesdis_id query yield empty strings; drop them
    return [station_id for station_id in candidate_ids if len(station_id) > 0]
def _get_raw_data(self, station_codes, **kwargs):
    """
    Fetch decoded observation data for *station_codes* from the
    DecodedData servlet and return the response text.

    The servlet's ``sinceday`` parameter is a whole number: positive for
    days back (capped at 7, the default) or negative for hours back when
    start_time is within the last day.
    """
    verify_cert = kwargs.get("verify", True)  # requests' own default
    since = 7
    if getattr(self, "start_time", None) is not None:
        # calc delta between now and start_time
        timediff = (
            datetime.utcnow().replace(tzinfo=pytz.utc) - self.start_time
        )
        if timediff.days == 0:
            # BUGFIX: use floor division -- the servlet expects whole
            # hours.  Plain "/" (true division in Python 3) produced
            # fractional values like -1.5 and made the one-hour-minimum
            # branch below unreachable.
            hours_back = timediff.seconds // (60 * 60)
            if hours_back > 0:
                since = -hours_back
            elif timediff.seconds // 60 > 0:
                since = -1  # 1 hour minimum resolution
        else:
            since = min(7, timediff.days)  # max of 7 days
    resp = requests.post(
        self.obs_retrieval_url,
        data={
            "state": "nil",
            "hsa": "nil",
            "of": "1",
            "extraids": " ".join(station_codes),
            "sinceday": since,
        },
        verify=verify_cert,
    )
    resp.raise_for_status()
    return resp.text
|
ioos/pyoos | pyoos/collectors/hads/hads.py | Hads._list_variables | python | def _list_variables(self, station_codes):
# sample output from obs retrieval:
#
# DD9452D0
# HP(SRBM5)
# 2013-07-22 19:30 45.97
# HT(SRBM5)
# 2013-07-22 19:30 44.29
# PC(SRBM5)
# 2013-07-22 19:30 36.19
#
rvar = re.compile(r"\n\s([A-Z]{2}[A-Z0-9]{0,1})\(\w+\)")
variables = set()
resp = requests.post(
self.obs_retrieval_url,
data={
"state": "nil",
"hsa": "nil",
"of": "3",
"extraids": " ".join(station_codes),
"sinceday": -1,
},
)
resp.raise_for_status()
list(map(variables.add, rvar.findall(resp.text)))
return variables | Internal helper to list the variables for the given station codes. | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/hads/hads.py#L63-L93 | null | class Hads(Collector):
def __init__(self, **kwargs):
super(Hads, self).__init__()
self.states_url = kwargs.get(
"states_url", "https://hads.ncep.noaa.gov/hads/goog_earth/"
)
self.metadata_url = kwargs.get(
"metadata_url",
"https://hads.ncep.noaa.gov/nexhads2/servlet/DCPInfo",
)
self.obs_retrieval_url = kwargs.get(
"obs_retrieval_url",
"https://hads.ncep.noaa.gov/nexhads2/servlet/DecodedData",
)
self.station_codes = None
self.parser = HadsParser()
def clear(self):
super(Hads, self).clear()
self.station_codes = None
@Collector.bbox.setter
def bbox(self, bbox):
Collector.bbox.fset(self, bbox)
self.station_codes = None
@Collector.features.setter
def features(self, features):
Collector.features.fset(self, features)
self.station_codes = None
def list_variables(self):
    """
    List the PE variable codes available for the filtered stations.

    Applies the features filter before querying, then intersects the
    result with the variables filter, if one is set.
    """
    station_codes = self._get_station_codes()
    station_codes = self._apply_features_filter(station_codes)
    variables = self._list_variables(station_codes)
    if hasattr(self, "_variables") and self.variables is not None:
        variables.intersection_update(set(self.variables))
    return list(variables)
def list_features(self):
station_codes = self._get_station_codes()
station_codes = self._apply_features_filter(station_codes)
return station_codes
def collect(self, **kwargs):
var_filter = None
if hasattr(self, "_variables"):
var_filter = self._variables
time_extents = (
self.start_time if hasattr(self, "start_time") else None,
self.end_time if hasattr(self, "end_time") else None,
)
metadata, raw_data = self.raw(**kwargs)
return self.parser.parse(metadata, raw_data, var_filter, time_extents)
def raw(self, format=None, **kwargs):
"""
Returns a tuple of (metadata, raw data)
"""
station_codes = self._apply_features_filter(self._get_station_codes())
metadata = self._get_metadata(station_codes, **kwargs)
raw_data = self._get_raw_data(station_codes, **kwargs)
return (metadata, raw_data)
def _apply_features_filter(self, station_codes):
"""
If the features filter is set, this will return the intersection of
those filter items and the given station codes.
"""
# apply features filter
if hasattr(self, "features") and self.features is not None:
station_codes = set(station_codes)
station_codes = list(
station_codes.intersection(set(self.features))
)
return station_codes
def _get_metadata(self, station_codes, **kwargs):
if "verify" in kwargs:
verify_cert = kwargs["verify"]
else:
verify_cert = True # the default for requests
resp = requests.post(
self.metadata_url,
data={
"state": "nil",
"hsa": "nil",
"of": "1",
"extraids": " ".join(station_codes),
"data": "Get Meta Data",
},
verify=verify_cert,
)
resp.raise_for_status()
return resp.text
def _get_station_codes(self, force=False):
"""
Gets and caches a list of station codes optionally within a bbox.
Will return the cached version if it exists unless force is True.
"""
if not force and self.station_codes is not None:
return self.station_codes
state_urls = self._get_state_urls()
# filter by bounding box against a shapefile
state_matches = None
if self.bbox:
with collection(
os.path.join(
"resources",
"ne_50m_admin_1_states_provinces_lakes_shp.shp",
),
"r",
) as c:
geom_matches = [
x["properties"] for x in c.filter(bbox=self.bbox)
]
state_matches = [
x["postal"] if x["admin"] != "Canada" else "CN"
for x in geom_matches
]
self.station_codes = []
for state_url in state_urls:
if state_matches is not None:
state_abbr = state_url.split("/")[-1].split(".")[0]
if state_abbr not in state_matches:
continue
self.station_codes.extend(self._get_stations_for_state(state_url))
if self.bbox:
# retrieve metadata for all stations to properly filter them
metadata = self._get_metadata(self.station_codes)
parsed_metadata = self.parser._parse_metadata(metadata)
def in_bbox(code):
lat = parsed_metadata[code]["latitude"]
lon = parsed_metadata[code]["longitude"]
return (
lon >= self.bbox[0]
and lon <= self.bbox[2]
and lat >= self.bbox[1]
and lat <= self.bbox[3]
)
self.station_codes = list(filter(in_bbox, self.station_codes))
return self.station_codes
def _get_state_urls(self):
root = BeautifulSoup(requests.get(self.states_url).text)
areas = root.find_all("area")
return list({x.attrs.get("href", None) for x in areas})
def _get_stations_for_state(self, state_url):
state_root = BeautifulSoup(requests.get(state_url).text)
return [
x
for x in [
x.attrs["href"].split("nesdis_id=")[-1]
for x in state_root.find_all("a")
]
if len(x) > 0
]
def _get_raw_data(self, station_codes, **kwargs):
if "verify" in kwargs:
verify_cert = kwargs["verify"]
else:
verify_cert = True # the default for requests
since = 7
if hasattr(self, "start_time") and self.start_time is not None:
# calc delta between now and start_time
timediff = (
datetime.utcnow().replace(tzinfo=pytz.utc) - self.start_time
)
if timediff.days == 0:
if timediff.seconds / 60 / 60 > 0:
since = -(timediff.seconds / 60 / 60)
elif timediff.seconds / 60 > 0:
since = -1 # 1 hour minimum resolution
else:
since = min(7, timediff.days) # max of 7 days
resp = requests.post(
self.obs_retrieval_url,
data={
"state": "nil",
"hsa": "nil",
"of": "1",
"extraids": " ".join(station_codes),
"sinceday": since,
},
verify=verify_cert,
)
resp.raise_for_status()
return resp.text
|
ioos/pyoos | pyoos/collectors/hads/hads.py | Hads.raw | python | def raw(self, format=None, **kwargs):
station_codes = self._apply_features_filter(self._get_station_codes())
metadata = self._get_metadata(station_codes, **kwargs)
raw_data = self._get_raw_data(station_codes, **kwargs)
return (metadata, raw_data) | Returns a tuple of (metadata, raw data) | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/hads/hads.py#L114-L122 | [
"def _apply_features_filter(self, station_codes):\n \"\"\"\n If the features filter is set, this will return the intersection of\n those filter items and the given station codes.\n \"\"\"\n # apply features filter\n if hasattr(self, \"features\") and self.features is not None:\n station_codes = set(station_codes)\n station_codes = list(\n station_codes.intersection(set(self.features))\n )\n\n return station_codes\n",
"def _get_metadata(self, station_codes, **kwargs):\n if \"verify\" in kwargs:\n verify_cert = kwargs[\"verify\"]\n else:\n verify_cert = True # the default for requests\n\n resp = requests.post(\n self.metadata_url,\n data={\n \"state\": \"nil\",\n \"hsa\": \"nil\",\n \"of\": \"1\",\n \"extraids\": \" \".join(station_codes),\n \"data\": \"Get Meta Data\",\n },\n verify=verify_cert,\n )\n resp.raise_for_status()\n return resp.text\n",
"def _get_station_codes(self, force=False):\n \"\"\"\n Gets and caches a list of station codes optionally within a bbox.\n\n Will return the cached version if it exists unless force is True.\n \"\"\"\n if not force and self.station_codes is not None:\n return self.station_codes\n\n state_urls = self._get_state_urls()\n\n # filter by bounding box against a shapefile\n state_matches = None\n\n if self.bbox:\n with collection(\n os.path.join(\n \"resources\",\n \"ne_50m_admin_1_states_provinces_lakes_shp.shp\",\n ),\n \"r\",\n ) as c:\n geom_matches = [\n x[\"properties\"] for x in c.filter(bbox=self.bbox)\n ]\n state_matches = [\n x[\"postal\"] if x[\"admin\"] != \"Canada\" else \"CN\"\n for x in geom_matches\n ]\n\n self.station_codes = []\n\n for state_url in state_urls:\n if state_matches is not None:\n state_abbr = state_url.split(\"/\")[-1].split(\".\")[0]\n if state_abbr not in state_matches:\n continue\n\n self.station_codes.extend(self._get_stations_for_state(state_url))\n\n if self.bbox:\n # retrieve metadata for all stations to properly filter them\n metadata = self._get_metadata(self.station_codes)\n parsed_metadata = self.parser._parse_metadata(metadata)\n\n def in_bbox(code):\n lat = parsed_metadata[code][\"latitude\"]\n lon = parsed_metadata[code][\"longitude\"]\n\n return (\n lon >= self.bbox[0]\n and lon <= self.bbox[2]\n and lat >= self.bbox[1]\n and lat <= self.bbox[3]\n )\n\n self.station_codes = list(filter(in_bbox, self.station_codes))\n\n return self.station_codes\n",
"def _get_raw_data(self, station_codes, **kwargs):\n if \"verify\" in kwargs:\n verify_cert = kwargs[\"verify\"]\n else:\n verify_cert = True # the default for requests\n\n since = 7\n if hasattr(self, \"start_time\") and self.start_time is not None:\n # calc delta between now and start_time\n timediff = (\n datetime.utcnow().replace(tzinfo=pytz.utc) - self.start_time\n )\n\n if timediff.days == 0:\n if timediff.seconds / 60 / 60 > 0:\n since = -(timediff.seconds / 60 / 60)\n elif timediff.seconds / 60 > 0:\n since = -1 # 1 hour minimum resolution\n else:\n since = min(7, timediff.days) # max of 7 days\n\n resp = requests.post(\n self.obs_retrieval_url,\n data={\n \"state\": \"nil\",\n \"hsa\": \"nil\",\n \"of\": \"1\",\n \"extraids\": \" \".join(station_codes),\n \"sinceday\": since,\n },\n verify=verify_cert,\n )\n resp.raise_for_status()\n\n return resp.text\n"
] | class Hads(Collector):
def __init__(self, **kwargs):
super(Hads, self).__init__()
self.states_url = kwargs.get(
"states_url", "https://hads.ncep.noaa.gov/hads/goog_earth/"
)
self.metadata_url = kwargs.get(
"metadata_url",
"https://hads.ncep.noaa.gov/nexhads2/servlet/DCPInfo",
)
self.obs_retrieval_url = kwargs.get(
"obs_retrieval_url",
"https://hads.ncep.noaa.gov/nexhads2/servlet/DecodedData",
)
self.station_codes = None
self.parser = HadsParser()
def clear(self):
super(Hads, self).clear()
self.station_codes = None
@Collector.bbox.setter
def bbox(self, bbox):
Collector.bbox.fset(self, bbox)
self.station_codes = None
@Collector.features.setter
def features(self, features):
Collector.features.fset(self, features)
self.station_codes = None
def list_variables(self):
"""
List available variables and applies any filters.
"""
station_codes = self._get_station_codes()
station_codes = self._apply_features_filter(station_codes)
variables = self._list_variables(station_codes)
if hasattr(self, "_variables") and self.variables is not None:
variables.intersection_update(set(self.variables))
return list(variables)
def _list_variables(self, station_codes):
"""
Internal helper to list the variables for the given station codes.
"""
# sample output from obs retrieval:
#
# DD9452D0
# HP(SRBM5)
# 2013-07-22 19:30 45.97
# HT(SRBM5)
# 2013-07-22 19:30 44.29
# PC(SRBM5)
# 2013-07-22 19:30 36.19
#
rvar = re.compile(r"\n\s([A-Z]{2}[A-Z0-9]{0,1})\(\w+\)")
variables = set()
resp = requests.post(
self.obs_retrieval_url,
data={
"state": "nil",
"hsa": "nil",
"of": "3",
"extraids": " ".join(station_codes),
"sinceday": -1,
},
)
resp.raise_for_status()
list(map(variables.add, rvar.findall(resp.text)))
return variables
def list_features(self):
station_codes = self._get_station_codes()
station_codes = self._apply_features_filter(station_codes)
return station_codes
def collect(self, **kwargs):
var_filter = None
if hasattr(self, "_variables"):
var_filter = self._variables
time_extents = (
self.start_time if hasattr(self, "start_time") else None,
self.end_time if hasattr(self, "end_time") else None,
)
metadata, raw_data = self.raw(**kwargs)
return self.parser.parse(metadata, raw_data, var_filter, time_extents)
def _apply_features_filter(self, station_codes):
    """Intersect *station_codes* with the active features filter.

    When no features filter is set (attribute missing or None) the
    input is returned untouched; otherwise a list holding the set
    intersection of the codes and the filter is returned.
    """
    feats = getattr(self, "features", None)
    if feats is None:
        return station_codes
    return list(set(station_codes) & set(feats))
def _get_metadata(self, station_codes, **kwargs):
    """POST the station id list to the HADS DCPInfo servlet.

    ``verify`` may be supplied via kwargs to control TLS certificate
    verification (defaults to True, matching requests).

    Returns the raw response body as text.
    """
    verify_cert = kwargs.get("verify", True)  # requests' default

    payload = {
        "state": "nil",
        "hsa": "nil",
        "of": "1",
        "extraids": " ".join(station_codes),
        "data": "Get Meta Data",
    }
    resp = requests.post(self.metadata_url, data=payload, verify=verify_cert)
    resp.raise_for_status()
    return resp.text
def _get_station_codes(self, force=False):
    """
    Gets and caches a list of station codes optionally within a bbox.

    Will return the cached version if it exists unless force is True.
    """
    # Serve from cache unless the caller forces a refresh.
    if not force and self.station_codes is not None:
        return self.station_codes

    state_urls = self._get_state_urls()

    # filter by bounding box against a shapefile
    state_matches = None

    if self.bbox:
        # NOTE(review): `collection` appears to be fiona's shapefile
        # reader and the path is relative to the CWD — confirm the
        # "resources" directory is resolved correctly at runtime.
        with collection(
            os.path.join(
                "resources",
                "ne_50m_admin_1_states_provinces_lakes_shp.shp",
            ),
            "r",
        ) as c:
            geom_matches = [
                x["properties"] for x in c.filter(bbox=self.bbox)
            ]
            # Canadian provinces collapse to the single "CN" page code.
            state_matches = [
                x["postal"] if x["admin"] != "Canada" else "CN"
                for x in geom_matches
            ]

    self.station_codes = []

    for state_url in state_urls:
        if state_matches is not None:
            # State abbreviation is the URL's file stem, e.g. ".../AZ.html".
            state_abbr = state_url.split("/")[-1].split(".")[0]
            if state_abbr not in state_matches:
                continue

        self.station_codes.extend(self._get_stations_for_state(state_url))

    if self.bbox:
        # retrieve metadata for all stations to properly filter them
        metadata = self._get_metadata(self.station_codes)
        parsed_metadata = self.parser._parse_metadata(metadata)

        def in_bbox(code):
            # bbox ordering assumed (minx, miny, maxx, maxy) = (W, S, E, N)
            lat = parsed_metadata[code]["latitude"]
            lon = parsed_metadata[code]["longitude"]

            return (
                lon >= self.bbox[0]
                and lon <= self.bbox[2]
                and lat >= self.bbox[1]
                and lat <= self.bbox[3]
            )

        self.station_codes = list(filter(in_bbox, self.station_codes))

    return self.station_codes
def _get_state_urls(self):
    """Scrape the HADS "goog_earth" index map for per-state page URLs.

    Returns
    -------
    list of str
        Unique ``href`` values of the ``<area>`` elements on the page.

    Fix: ``<area>`` elements lacking an ``href`` attribute previously
    leaked a ``None`` entry into the result, which would later crash
    ``requests.get(None)`` inside ``_get_stations_for_state``; such
    elements are now skipped.
    """
    root = BeautifulSoup(requests.get(self.states_url).text)
    hrefs = {area.attrs.get("href") for area in root.find_all("area")}
    hrefs.discard(None)  # drop area tags that carry no link
    return list(hrefs)
def _get_stations_for_state(self, state_url):
state_root = BeautifulSoup(requests.get(state_url).text)
return [
x
for x in [
x.attrs["href"].split("nesdis_id=")[-1]
for x in state_root.find_all("a")
]
if len(x) > 0
]
def _get_raw_data(self, station_codes, **kwargs):
    """Fetch decoded observation data for *station_codes* from HADS.

    Parameters
    ----------
    station_codes : list of str
        NESDIS ids of the stations to query.
    kwargs :
        ``verify`` is forwarded to requests to control TLS verification.

    Returns
    -------
    str
        Raw body of the DecodedData servlet response.

    The servlet's ``sinceday`` parameter is either a positive number of
    days of history (capped at 7) or a negative number of hours.
    """
    verify_cert = kwargs.get("verify", True)  # the default for requests

    since = 7
    if getattr(self, "start_time", None) is not None:
        # calc delta between now and start_time
        timediff = (
            datetime.utcnow().replace(tzinfo=pytz.utc) - self.start_time
        )

        if timediff.days == 0:
            # BUG FIX: use floor division. Under Python 3 the original
            # true division made `seconds / 60 / 60 > 0` true for ANY
            # nonzero delta and sent fractional hour values like -0.5
            # to the servlet; integer hours were clearly intended.
            hours = timediff.seconds // 3600
            if hours > 0:
                since = -hours
            elif timediff.seconds // 60 > 0:
                since = -1  # 1 hour minimum resolution
        else:
            since = min(7, timediff.days)  # max of 7 days

    resp = requests.post(
        self.obs_retrieval_url,
        data={
            "state": "nil",
            "hsa": "nil",
            "of": "1",
            "extraids": " ".join(station_codes),
            "sinceday": since,
        },
        verify=verify_cert,
    )
    resp.raise_for_status()

    return resp.text
|
ioos/pyoos | pyoos/collectors/hads/hads.py | Hads._apply_features_filter | python | def _apply_features_filter(self, station_codes):
# apply features filter
if hasattr(self, "features") and self.features is not None:
station_codes = set(station_codes)
station_codes = list(
station_codes.intersection(set(self.features))
)
return station_codes | If the features filter is set, this will return the intersection of
those filter items and the given station codes. | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/hads/hads.py#L124-L136 | null | class Hads(Collector):
def __init__(self, **kwargs):
    """Initialize the HADS collector.

    Optional kwargs override the default NCEP endpoints:
    ``states_url``, ``metadata_url`` and ``obs_retrieval_url``.
    """
    super(Hads, self).__init__()

    endpoint_defaults = {
        "states_url": "https://hads.ncep.noaa.gov/hads/goog_earth/",
        "metadata_url": "https://hads.ncep.noaa.gov/nexhads2/servlet/DCPInfo",
        "obs_retrieval_url": "https://hads.ncep.noaa.gov/nexhads2/servlet/DecodedData",
    }
    for attr, default in endpoint_defaults.items():
        setattr(self, attr, kwargs.get(attr, default))

    self.station_codes = None  # lazily populated station-code cache
    self.parser = HadsParser()
def clear(self):
    """Reset all collector filters and drop the cached station codes."""
    super(Hads, self).clear()
    self.station_codes = None
@Collector.bbox.setter
def bbox(self, bbox):
    # Changing the bbox invalidates the cached station list.
    Collector.bbox.fset(self, bbox)
    self.station_codes = None
@Collector.features.setter
def features(self, features):
    # Changing the features filter invalidates the cached station list.
    Collector.features.fset(self, features)
    self.station_codes = None
def list_variables(self):
    """List available variables after applying the feature and
    variable filters."""
    codes = self._apply_features_filter(self._get_station_codes())
    variables = self._list_variables(codes)
    if hasattr(self, "_variables") and self.variables is not None:
        variables.intersection_update(set(self.variables))
    return list(variables)
def _list_variables(self, station_codes):
    """Query the observation servlet and return the set of variable
    codes reported for *station_codes*.

    Sample servlet output (variable codes are HP, HT, PC here)::

        DD9452D0
        HP(SRBM5)
        2013-07-22 19:30 45.97
        HT(SRBM5)
        2013-07-22 19:30 44.29
        PC(SRBM5)
        2013-07-22 19:30 36.19
    """
    resp = requests.post(
        self.obs_retrieval_url,
        data={
            "state": "nil",
            "hsa": "nil",
            "of": "3",
            "extraids": " ".join(station_codes),
            "sinceday": -1,
        },
    )
    resp.raise_for_status()

    # Variable code is the 2-3 char token before "(STATION)".
    pattern = re.compile(r"\n\s([A-Z]{2}[A-Z0-9]{0,1})\(\w+\)")
    return set(pattern.findall(resp.text))
def list_features(self):
    """Return the station codes that pass the features filter."""
    return self._apply_features_filter(self._get_station_codes())
def collect(self, **kwargs):
    """Fetch, parse and filter observations into paegan features."""
    var_filter = getattr(self, "_variables", None)
    time_extents = (
        getattr(self, "start_time", None),
        getattr(self, "end_time", None),
    )
    metadata, raw_data = self.raw(**kwargs)
    return self.parser.parse(metadata, raw_data, var_filter, time_extents)
def raw(self, format=None, **kwargs):
    """Return a ``(metadata, raw_data)`` tuple for the filtered stations."""
    codes = self._apply_features_filter(self._get_station_codes())
    return (
        self._get_metadata(codes, **kwargs),
        self._get_raw_data(codes, **kwargs),
    )
def _get_metadata(self, station_codes, **kwargs):
if "verify" in kwargs:
verify_cert = kwargs["verify"]
else:
verify_cert = True # the default for requests
resp = requests.post(
self.metadata_url,
data={
"state": "nil",
"hsa": "nil",
"of": "1",
"extraids": " ".join(station_codes),
"data": "Get Meta Data",
},
verify=verify_cert,
)
resp.raise_for_status()
return resp.text
def _get_station_codes(self, force=False):
"""
Gets and caches a list of station codes optionally within a bbox.
Will return the cached version if it exists unless force is True.
"""
if not force and self.station_codes is not None:
return self.station_codes
state_urls = self._get_state_urls()
# filter by bounding box against a shapefile
state_matches = None
if self.bbox:
with collection(
os.path.join(
"resources",
"ne_50m_admin_1_states_provinces_lakes_shp.shp",
),
"r",
) as c:
geom_matches = [
x["properties"] for x in c.filter(bbox=self.bbox)
]
state_matches = [
x["postal"] if x["admin"] != "Canada" else "CN"
for x in geom_matches
]
self.station_codes = []
for state_url in state_urls:
if state_matches is not None:
state_abbr = state_url.split("/")[-1].split(".")[0]
if state_abbr not in state_matches:
continue
self.station_codes.extend(self._get_stations_for_state(state_url))
if self.bbox:
# retrieve metadata for all stations to properly filter them
metadata = self._get_metadata(self.station_codes)
parsed_metadata = self.parser._parse_metadata(metadata)
def in_bbox(code):
lat = parsed_metadata[code]["latitude"]
lon = parsed_metadata[code]["longitude"]
return (
lon >= self.bbox[0]
and lon <= self.bbox[2]
and lat >= self.bbox[1]
and lat <= self.bbox[3]
)
self.station_codes = list(filter(in_bbox, self.station_codes))
return self.station_codes
def _get_state_urls(self):
root = BeautifulSoup(requests.get(self.states_url).text)
areas = root.find_all("area")
return list({x.attrs.get("href", None) for x in areas})
def _get_stations_for_state(self, state_url):
state_root = BeautifulSoup(requests.get(state_url).text)
return [
x
for x in [
x.attrs["href"].split("nesdis_id=")[-1]
for x in state_root.find_all("a")
]
if len(x) > 0
]
def _get_raw_data(self, station_codes, **kwargs):
if "verify" in kwargs:
verify_cert = kwargs["verify"]
else:
verify_cert = True # the default for requests
since = 7
if hasattr(self, "start_time") and self.start_time is not None:
# calc delta between now and start_time
timediff = (
datetime.utcnow().replace(tzinfo=pytz.utc) - self.start_time
)
if timediff.days == 0:
if timediff.seconds / 60 / 60 > 0:
since = -(timediff.seconds / 60 / 60)
elif timediff.seconds / 60 > 0:
since = -1 # 1 hour minimum resolution
else:
since = min(7, timediff.days) # max of 7 days
resp = requests.post(
self.obs_retrieval_url,
data={
"state": "nil",
"hsa": "nil",
"of": "1",
"extraids": " ".join(station_codes),
"sinceday": since,
},
verify=verify_cert,
)
resp.raise_for_status()
return resp.text
|
ioos/pyoos | pyoos/collectors/hads/hads.py | Hads._get_station_codes | python | def _get_station_codes(self, force=False):
if not force and self.station_codes is not None:
return self.station_codes
state_urls = self._get_state_urls()
# filter by bounding box against a shapefile
state_matches = None
if self.bbox:
with collection(
os.path.join(
"resources",
"ne_50m_admin_1_states_provinces_lakes_shp.shp",
),
"r",
) as c:
geom_matches = [
x["properties"] for x in c.filter(bbox=self.bbox)
]
state_matches = [
x["postal"] if x["admin"] != "Canada" else "CN"
for x in geom_matches
]
self.station_codes = []
for state_url in state_urls:
if state_matches is not None:
state_abbr = state_url.split("/")[-1].split(".")[0]
if state_abbr not in state_matches:
continue
self.station_codes.extend(self._get_stations_for_state(state_url))
if self.bbox:
# retrieve metadata for all stations to properly filter them
metadata = self._get_metadata(self.station_codes)
parsed_metadata = self.parser._parse_metadata(metadata)
def in_bbox(code):
lat = parsed_metadata[code]["latitude"]
lon = parsed_metadata[code]["longitude"]
return (
lon >= self.bbox[0]
and lon <= self.bbox[2]
and lat >= self.bbox[1]
and lat <= self.bbox[3]
)
self.station_codes = list(filter(in_bbox, self.station_codes))
return self.station_codes | Gets and caches a list of station codes optionally within a bbox.
Will return the cached version if it exists unless force is True. | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/collectors/hads/hads.py#L158-L216 | [
"def _get_metadata(self, station_codes, **kwargs):\n if \"verify\" in kwargs:\n verify_cert = kwargs[\"verify\"]\n else:\n verify_cert = True # the default for requests\n\n resp = requests.post(\n self.metadata_url,\n data={\n \"state\": \"nil\",\n \"hsa\": \"nil\",\n \"of\": \"1\",\n \"extraids\": \" \".join(station_codes),\n \"data\": \"Get Meta Data\",\n },\n verify=verify_cert,\n )\n resp.raise_for_status()\n return resp.text\n",
"def _get_state_urls(self):\n root = BeautifulSoup(requests.get(self.states_url).text)\n areas = root.find_all(\"area\")\n return list({x.attrs.get(\"href\", None) for x in areas})\n",
"def _get_stations_for_state(self, state_url):\n state_root = BeautifulSoup(requests.get(state_url).text)\n return [\n x\n for x in [\n x.attrs[\"href\"].split(\"nesdis_id=\")[-1]\n for x in state_root.find_all(\"a\")\n ]\n if len(x) > 0\n ]\n"
] | class Hads(Collector):
def __init__(self, **kwargs):
super(Hads, self).__init__()
self.states_url = kwargs.get(
"states_url", "https://hads.ncep.noaa.gov/hads/goog_earth/"
)
self.metadata_url = kwargs.get(
"metadata_url",
"https://hads.ncep.noaa.gov/nexhads2/servlet/DCPInfo",
)
self.obs_retrieval_url = kwargs.get(
"obs_retrieval_url",
"https://hads.ncep.noaa.gov/nexhads2/servlet/DecodedData",
)
self.station_codes = None
self.parser = HadsParser()
def clear(self):
super(Hads, self).clear()
self.station_codes = None
@Collector.bbox.setter
def bbox(self, bbox):
Collector.bbox.fset(self, bbox)
self.station_codes = None
@Collector.features.setter
def features(self, features):
Collector.features.fset(self, features)
self.station_codes = None
def list_variables(self):
"""
List available variables and applies any filters.
"""
station_codes = self._get_station_codes()
station_codes = self._apply_features_filter(station_codes)
variables = self._list_variables(station_codes)
if hasattr(self, "_variables") and self.variables is not None:
variables.intersection_update(set(self.variables))
return list(variables)
def _list_variables(self, station_codes):
"""
Internal helper to list the variables for the given station codes.
"""
# sample output from obs retrieval:
#
# DD9452D0
# HP(SRBM5)
# 2013-07-22 19:30 45.97
# HT(SRBM5)
# 2013-07-22 19:30 44.29
# PC(SRBM5)
# 2013-07-22 19:30 36.19
#
rvar = re.compile(r"\n\s([A-Z]{2}[A-Z0-9]{0,1})\(\w+\)")
variables = set()
resp = requests.post(
self.obs_retrieval_url,
data={
"state": "nil",
"hsa": "nil",
"of": "3",
"extraids": " ".join(station_codes),
"sinceday": -1,
},
)
resp.raise_for_status()
list(map(variables.add, rvar.findall(resp.text)))
return variables
def list_features(self):
station_codes = self._get_station_codes()
station_codes = self._apply_features_filter(station_codes)
return station_codes
def collect(self, **kwargs):
var_filter = None
if hasattr(self, "_variables"):
var_filter = self._variables
time_extents = (
self.start_time if hasattr(self, "start_time") else None,
self.end_time if hasattr(self, "end_time") else None,
)
metadata, raw_data = self.raw(**kwargs)
return self.parser.parse(metadata, raw_data, var_filter, time_extents)
def raw(self, format=None, **kwargs):
"""
Returns a tuple of (metadata, raw data)
"""
station_codes = self._apply_features_filter(self._get_station_codes())
metadata = self._get_metadata(station_codes, **kwargs)
raw_data = self._get_raw_data(station_codes, **kwargs)
return (metadata, raw_data)
def _apply_features_filter(self, station_codes):
"""
If the features filter is set, this will return the intersection of
those filter items and the given station codes.
"""
# apply features filter
if hasattr(self, "features") and self.features is not None:
station_codes = set(station_codes)
station_codes = list(
station_codes.intersection(set(self.features))
)
return station_codes
def _get_metadata(self, station_codes, **kwargs):
if "verify" in kwargs:
verify_cert = kwargs["verify"]
else:
verify_cert = True # the default for requests
resp = requests.post(
self.metadata_url,
data={
"state": "nil",
"hsa": "nil",
"of": "1",
"extraids": " ".join(station_codes),
"data": "Get Meta Data",
},
verify=verify_cert,
)
resp.raise_for_status()
return resp.text
def _get_state_urls(self):
root = BeautifulSoup(requests.get(self.states_url).text)
areas = root.find_all("area")
return list({x.attrs.get("href", None) for x in areas})
def _get_stations_for_state(self, state_url):
state_root = BeautifulSoup(requests.get(state_url).text)
return [
x
for x in [
x.attrs["href"].split("nesdis_id=")[-1]
for x in state_root.find_all("a")
]
if len(x) > 0
]
def _get_raw_data(self, station_codes, **kwargs):
if "verify" in kwargs:
verify_cert = kwargs["verify"]
else:
verify_cert = True # the default for requests
since = 7
if hasattr(self, "start_time") and self.start_time is not None:
# calc delta between now and start_time
timediff = (
datetime.utcnow().replace(tzinfo=pytz.utc) - self.start_time
)
if timediff.days == 0:
if timediff.seconds / 60 / 60 > 0:
since = -(timediff.seconds / 60 / 60)
elif timediff.seconds / 60 > 0:
since = -1 # 1 hour minimum resolution
else:
since = min(7, timediff.days) # max of 7 days
resp = requests.post(
self.obs_retrieval_url,
data={
"state": "nil",
"hsa": "nil",
"of": "1",
"extraids": " ".join(station_codes),
"sinceday": since,
},
verify=verify_cert,
)
resp.raise_for_status()
return resp.text
|
ioos/pyoos | pyoos/parsers/ioos/one/timeseries.py | TimeSeries._merge_points | python | def _merge_points(self, pc1, pc2):
res = pc1[:]
for p in pc2:
for sp in res:
if sp.time == p.time and (
sp.location is None or (sp.location.equals(p.location))
):
sp.members.extend(p.members)
break
else:
res.append(p)
return res | Merges points based on time/location.
@TODO: move to paegan, SO SLOW | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/timeseries.py#L232-L250 | null | class TimeSeries(object):
def __init__(self, element):
record = DataRecord(element)
# Top level org structure
stations_field = record.get_by_name("stations")
stations = {}
sensors = {}
for station in stations_field.content.field:
s = Station()
s.name = station.name
s.uid = station.content.get_by_name("stationID").content.value
# Location
vector = station.content.get_by_name("platformLocation").content
srss = vector.referenceFrame.split("&")
hsrs = None
try:
hsrs = Crs(srss[0])
except ValueError:
pass
vsrs = None
try:
vsrs = Crs(srss[-1].replace("2=http:", "http:"))
except ValueError:
pass
s.set_property("horizontal_srs", hsrs)
s.set_property("vertical_srs", vsrs)
s.set_property("localFrame", vector.localFrame)
lat = vector.get_by_name("latitude").content.value
lon = vector.get_by_name("longitude").content.value
z = vector.get_by_name("height").content.value
loc = [lon, lat]
if z:
loc.append(z)
s.location = sPoint(*loc)
# Sensors
for sensor in station.content.get_by_name("sensors").content.field:
name = sensor.name
uri = sensor.content.get_by_name("sensorID").content.value
height = None
location_quantity = sensor.content.get_by_name(
"height"
).content
if location_quantity.referenceFrame == "#%s_frame" % s.name:
# Uses the station as reference frame
if location_quantity.value and z:
height = z + location_quantity.value
horizontal_srs = s.get_property("horizontal_srs")
vertical_srs = s.get_property("vertical_srs")
else:
# Uses its own height
if location_quantity.value:
height = location_quantity.value
horizontal_srs = None
vertical_srs = None
if hasattr(sensor, "referenceFrame"):
srss = sensor.referenceFrame.split("&")
try:
horizontal_srs = Crs(srss[0])
except ValueError:
pass
try:
vertical_srs = Crs(
srss[-1].replace("2=http:", "http:")
)
except ValueError:
pass
loc = [s.location.x, s.location.y]
if height:
loc.append(height)
location = sPoint(*loc)
sensors[name] = {
"station": s.uid,
"name": name,
"uri": uri,
"horizontal_srs": horizontal_srs,
"vertical_srs": vertical_srs,
"location": location,
"columns": [], # Array of Members representing the columns
"values": [], # Array of Points (the actual data)
}
stations[s.uid] = s
# Start building the column structure
data_array = record.get_by_name("observationData").content
data_record = data_array.elementType.content
columns = []
# Data outside of the <field name="sensors"> DataChoice element
for f in data_record.field:
columns.append(f)
# Data inside of DataChoice
sensor_data = data_record.get_by_name("sensor")
for sendata in sensor_data.content.item:
if sendata.content is not None:
sensors[sendata.name]["columns"] = []
sensors[sendata.name]["values"] = []
for f in sendata.content.field:
# Create a model Member for each column that will be copied and filled with data from each row
sensors[sendata.name]["columns"].append(f)
# decimalSeparator = data_array.encoding.decimalSeparator
tokenSeparator = data_array.encoding.tokenSeparator
blockSeparator = data_array.encoding.blockSeparator
# collapseWhiteSpaces = data_array.encoding.collapseWhiteSpaces
data_values = data_array.values
self.raw_data = copy(data_values)
for row in [x for x in data_values.split(blockSeparator) if x != ""]:
pt = None
members = []
values = row.split(tokenSeparator)
sensor_key = None
i = 0
for x in columns:
if (
isinstance(x.content, Time)
and x.content.definition
== "http://www.opengis.net/def/property/OGC/0/SamplingTime"
):
pt = Point()
pt.time = parser.parse(values[i])
elif isinstance(x.content, DataChoice):
sensor_key = values[i]
dc_cols = sensors[sensor_key]["columns"]
for j, c in enumerate(dc_cols):
if isinstance(c.content, AbstractSimpleComponent):
m = Member(
units=c.content.uom,
name=c.name,
standard=c.content.definition,
value=float(values[i + 1]),
)
members.append(m)
elif (
isinstance(c.content, Time)
and c.content.definition
== "http://www.opengis.net/def/property/OGC/0/SamplingTime"
):
pt = Point()
pt.time = parser.parse(values[i])
# For each data column
i += 1
elif isinstance(x.content, AbstractSimpleComponent):
m = Member(
units=x.content.uom,
name=x.name,
standard=x.content.definition,
value=float(values[i]),
)
members.append(m)
else:
print("WHAT AM I?")
i += 1
pt.members = members
pt.location = stations[sensors[sensor_key]["station"]].location
sensors[sensor_key]["values"].append(pt)
for k, v in stations.items():
for sk, sv in sensors.items():
# Match on station uid
if sv["station"] == k:
v.elements = self._merge_points(
v.elements or [], sv["values"]
)
if len(stations) > 1:
self.feature = StationCollection(elements=stations)
elif len(stations) == 1:
self.feature = next(iter(stations.values()))
else:
print("No stations found!")
|
ioos/pyoos | pyoos/utils/dataorg.py | flatten_element | python | def flatten_element(p):
rd = {"time": p.time}
for member in p.members:
rd[member["standard"]] = member["value"]
return rd | Convenience function to return record-style time series representation
from elements ('p') members in station element.
member['standard'] is a standard_name parameter name, typically CF based.
Ideally, member['value'] should already be floating point value,
so it's ready to use.
Useful with most pyoos collectors. | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/utils/dataorg.py#L4-L16 | null | from __future__ import absolute_import, division, print_function
|
ioos/pyoos | pyoos/parsers/ioos/one/timeseries_profile.py | ProfileCache._get_point | python | def _get_point(self, profile, point):
cur_points_z = [p.location.z for p in profile.elements]
try:
cur_idx = cur_points_z.index(point.z)
return profile.elements[cur_idx]
except ValueError:
new_idx = bisect_left(cur_points_z, point.z)
new_point = Point()
new_point.location = sPoint(point)
new_point.time = profile.time
profile.elements.insert(new_idx, new_point)
return new_point | Finds the given point in the profile, or adds it in sorted z order. | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/timeseries_profile.py#L74-L88 | null | class ProfileCache(object):
"""
Helper class to accumulate observations and transform them into
ProfileCollections representing TimeseriesProfiles.
Used internally.
"""
def __init__(self):
self._cache = defaultdict(OrderedDict)
def add_obs(self, sensor, t, obs_point, obs_members):
    """Record an observation: append *obs_members* to the cached point
    for (sensor, time *t*, location *obs_point*)."""
    target = self._get_point(self._get_profile(sensor, t), obs_point)
    target.members.extend(obs_members)
def get_collections(self):
    """Return a mapping of station uid -> ProfileCollection built from
    the accumulated profiles (cache keys are (x, y, station_uid))."""
    collections = {}
    for key, profiles_by_time in self._cache.items():
        station_uid = key[2]
        collections[station_uid] = ProfileCollection(
            elements=list(profiles_by_time.values())
        )
    return collections
def _get_profile(self, sensor, t):
    """Return the cached Profile for (sensor location, station, time *t*),
    creating and caching a new one when absent."""
    cache_key = (
        sensor["location"]["point"].x,
        sensor["location"]["point"].y,
        sensor["station"],
    )
    profiles_by_time = self._cache[cache_key]

    if t in profiles_by_time:
        return profiles_by_time[t]

    profile = Profile()
    profile.location = sPoint(*cache_key[0:2])
    profile.time = t
    # @TODO this is a hack until we can figure out how to assoc stations properly
    profile.station = sensor["station"]
    profiles_by_time[t] = profile
    return profile
|
ioos/pyoos | pyoos/parsers/ioos/one/timeseries_profile.py | TimeSeriesProfile._parse_data_array | python | def _parse_data_array(self, data_array):
# decimalSeparator = data_array.encoding.decimalSeparator
tokenSeparator = data_array.encoding.tokenSeparator
blockSeparator = data_array.encoding.blockSeparator
# collapseWhiteSpaces = data_array.encoding.collapseWhiteSpaces
data_values = data_array.values
lines = [x for x in data_values.split(blockSeparator) if x != ""]
ret_val = []
for row in lines:
values = row.split(tokenSeparator)
ret_val.append(
[
float(v)
if " " not in v.strip()
else [float(vv) for vv in v.split()]
for v in values
]
)
# transpose into columns
return [list(x) for x in zip(*ret_val)] | Parses a general DataArray. | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/timeseries_profile.py#L280-L306 | null | class TimeSeriesProfile(object):
def __init__(self, element):
record = DataRecord(element)
stations_field = record.get_by_name("stations")
stations = {}
sensors = {}
for station in stations_field.content.field:
s = StationProfile()
s.name = station.name
s.uid = station.content.get_by_name("stationID").content.value
# Location
vector = station.content.get_by_name("platformLocation").content
srss = vector.referenceFrame.split("&")
hsrs = None
try:
hsrs = Crs(srss[0])
except ValueError:
pass
vsrs = None
try:
vsrs = Crs(srss[-1].replace("2=http:", "http:"))
except ValueError:
pass
s.set_property("horizontal_srs", hsrs)
s.set_property("vertical_srs", vsrs)
s.set_property("localFrame", vector.localFrame)
lat = vector.get_by_name("latitude").content.value
lon = vector.get_by_name("longitude").content.value
z = vector.get_by_name("height").content.value
loc = [lon, lat]
if z:
loc.append(z)
s.location = sPoint(*loc)
# sensors
for sensor in station.content.get_by_name("sensors").content.field:
name = sensor.name
uri = sensor.content.get_by_name("sensorID").content.value
sensors[name] = {"station": s.uid, "name": name, "uri": uri}
# orientation
ori_el = sensor.content.get_by_name("sensorOrientation")
if ori_el:
orientation = self._parse_sensor_orientation(ori_el)
sensors[name]["sensor_orientation"] = orientation
# location
loc_el = sensor.content.get_by_name("sensorLocation")
if loc_el:
location = self._parse_location(loc_el, s.location)
sensors[name]["location"] = location
# profile bins
profbins_el = sensor.content.get_by_name("profileBins")
if profbins_el:
profile_bins = self._parse_profile_bins(profbins_el)
sensors[name]["profile_bins"] = profile_bins
# OR profile heights
profheights_el = sensor.content.get_by_name("profileHeights")
if profheights_el:
profile_heights = self._parse_profile_heights(
profheights_el
)
sensors[name]["profile_heights"] = profile_heights
s.sensors = sensors
stations[s.uid] = s
sensor_data = self._parse_sensor_data(
record.get_by_name("observationData"), sensors
)
# sensor data is dict of station id -> profile collection
for station_id, sensor_profile_data in sensor_data.items():
stations[station_id].elements.extend(sensor_profile_data._elements)
if len(stations) > 1:
self.feature = StationCollection(elements=stations)
elif len(stations) == 1:
self.feature = next(iter(stations.values()))
else:
print("No stations found!")
self.feature = None
def _parse_sensor_orientation(self, ori_el):
    """Map each orientation coordinate to a dict keyed by its axis id.

    Each value carries the coordinate's name, value, axis and unit.
    """
    # 'srs':Crs(), # @TODO (OWSLib cannot parse this Crs yet)
    return {
        coord.axisID: {
            "name": coord.name,
            "value": coord.value,
            "axis": coord.axisID,
            "uom": coord.uom,
        }
        for coord in ori_el.content.coordinate
    }
def _parse_location(self, loc_el, station_point):
    """Parse a sensor location Vector into a dict with horizontal and
    vertical CRS, local frame name, and a shapely point.

    Falls back to the station's z coordinate when the sensor has no
    height of its own.
    """
    vector = loc_el.content
    # referenceFrame packs horizontal and vertical CRS URIs joined by '&'.
    srss = vector.referenceFrame.split("&")
    hsrs = None
    try:
        hsrs = Crs(srss[0])
    except ValueError:
        # unparsable horizontal CRS -> leave as None
        pass

    vsrs = None
    try:
        # strip the "2=" query-param prefix before parsing the vertical CRS
        vsrs = Crs(srss[-1].replace("2=http:", "http:"))
    except ValueError:
        pass

    local_frame = vector.localFrame
    lat = vector.get_by_name("latitude").content.value
    lon = vector.get_by_name("longitude").content.value
    z = vector.get_by_name("height").content.value

    loc = [lon, lat]
    if z:
        loc.append(z)
    else:
        # no sensor height -> inherit the station's z
        loc.append(station_point.z)

    location = {
        "horizontal_srs": hsrs,
        "vertical_srs": vsrs,
        "localFrame": local_frame,
        "point": sPoint(*loc),
    }
    return location
def _parse_profile_bins(self, profbins_el):
data_array = profbins_el.content
# count = int(data_array.elementCount[0].text)
data = self._parse_data_array(data_array)
bin_center_quantity = data_array.elementType.content.get_by_name(
"binCenter"
)
bin_center = {
"referenceFrame": bin_center_quantity.content.referenceFrame,
"axisID": bin_center_quantity.content.axisID,
"uom": bin_center_quantity.content.uom,
"values": data[0],
}
bin_edges_quantityrange = data_array.elementType.content.get_by_name(
"binEdges"
)
bin_edges = {
"referenceFrame": bin_edges_quantityrange.content.referenceFrame,
"axisID": bin_edges_quantityrange.content.axisID,
"uom": bin_edges_quantityrange.content.uom,
"values": data[1],
}
profile_bins = {"bin_center": bin_center, "bin_edges": bin_edges}
return profile_bins
def _parse_profile_heights(self, profheights_el):
data_array = profheights_el.content
# count = int(data_array.elementCount[0].text)
data = self._parse_data_array(data_array)
height_el = data_array.elementType.get_by_name("height")
profile_definition = {
"referenceFrame": height_el.content.referenceFrame,
"axisID": height_el.content.axisID,
"uom": height_el.content.uom,
"values": data[0],
}
return profile_definition
def _parse_sensor_data(self, obs_el, sensor_info):
"""
Returns ProfileCollection
"""
data_array = obs_el.content
# get column defs
data_record = data_array.elementType.content
columns = []
for f in data_record.field:
columns.append(f)
# get more information on sensor cols
sensor_cols = defaultdict(list)
# sensor_vals = defaultdict(list)
sensor_rec = data_record.get_by_name("sensor")
for sendata in sensor_rec.content.item:
if sendata.content is None:
continue
for f in sendata.content.field:
sensor_cols[sendata.name].append(f)
# @TODO deduplicate
# decimalSeparator = data_array.encoding.decimalSeparator
tokenSeparator = data_array.encoding.tokenSeparator
blockSeparator = data_array.encoding.blockSeparator
# collapseWhiteSpaces = data_array.encoding.collapseWhiteSpaces
data_values = data_array.values
lines = [x for x in data_values.split(blockSeparator) if x != ""]
# profile cacher!
profile_cache = ProfileCache()
for row in lines:
values = row.split(tokenSeparator)
# skey = None
i = 0
cur_time = None
# cur_qual = None
for c in columns:
if (
isinstance(c.content, Time)
and c.content.definition
== "http://www.opengis.net/def/property/OGC/0/SamplingTime"
):
cur_time = parser.parse(values[i])
i += 1
if len(c.quality):
# @TODO: do some quality constraint checks
i += len(c.quality)
# for qua in c.quality:
elif isinstance(c.content, DataChoice) and c.name == "sensor":
sensor_key = values[i]
i += 1
sensor_dr = c.content.get_by_name(sensor_key).content
sensor_info_ = sensor_info[sensor_key]
parsed, nc = self._parse_sensor_record(
sensor_dr, sensor_info_, values[i:]
)
# turn these into Points/Members
for rec in parsed:
# calc a Z value from rec/sensor and build point
point, members = self._build_obs_point(
sensor_info_, rec
)
# add to profile
profile_cache.add_obs(
sensor_info_, cur_time, point, members
)
i += nc
return profile_cache.get_collections()
def _parse_sensor_record(self, sensor_data_rec, sensor_info, rem_values):
"""
Parses values via sensor data record passed in.
Returns parsed values AND how many items it consumed out of rem_values.
"""
val_idx = 0
# @TODO seems there is only a single field in each of these
assert len(sensor_data_rec.field) == 1
sensor_data_array = sensor_data_rec.field[0].content
# there is probably not going to be a count in the def, it'll be in the data
count = None
count_text = sensor_data_array.elementCount.text
if count_text:
count = int(count_text.strip())
if not count:
count = int(rem_values[val_idx])
val_idx += 1
parsed = []
for recnum in range(count):
cur = []
for f in sensor_data_array.elementType.field:
cur_val = rem_values[val_idx]
val_idx += 1
m = Member(name=f.name, standard=f.content.definition)
if hasattr(f.content, "uom"):
m["units"] = f.content.uom
try:
m["value"] = float(cur_val)
except ValueError:
m["value"] = cur_val
if len(f.quality):
m["quality"] = []
for qual in f.quality:
cur_qual = rem_values[val_idx]
val_idx += 1
# @TODO check this against constraints
m["quality"].append(cur_qual)
cur.append(m)
parsed.append(cur)
return parsed, val_idx
def _build_obs_point(self, sensor_info, obs_recs):
"""
Pulls bin/profile height info out and calcs a z.
TODO: currently extremely naive
Returns a 2-tuple: point, remaining obs_recs
"""
cur_point = sensor_info["location"]["point"]
keys = [m["name"] for m in obs_recs]
if "binIndex" in keys:
zidx = keys.index("binIndex")
bin_index = int(obs_recs[zidx]["value"])
z = sensor_info["profile_heights"]["values"][bin_index]
point = sPoint(cur_point.x, cur_point.y, cur_point.z + z)
elif "profileIndex" in keys:
zidx = keys.index("profileIndex")
bin_index = int(obs_recs[zidx]["value"])
# @TODO take into account orientation, may change x/y/z
# @TODO bin edges?
z = sensor_info["profile_bins"]["bin_center"]["values"][bin_index]
point = sPoint(cur_point.x, cur_point.y, cur_point.z + z)
else:
raise ValueError("no binIndex or profileIndex in Member: %s", keys)
# remove z related Member
obs_recs = obs_recs[:]
obs_recs.pop(zidx)
return point, obs_recs
|
ioos/pyoos | pyoos/parsers/ioos/one/timeseries_profile.py | TimeSeriesProfile._parse_sensor_data | python | def _parse_sensor_data(self, obs_el, sensor_info):
data_array = obs_el.content
# get column defs
data_record = data_array.elementType.content
columns = []
for f in data_record.field:
columns.append(f)
# get more information on sensor cols
sensor_cols = defaultdict(list)
# sensor_vals = defaultdict(list)
sensor_rec = data_record.get_by_name("sensor")
for sendata in sensor_rec.content.item:
if sendata.content is None:
continue
for f in sendata.content.field:
sensor_cols[sendata.name].append(f)
# @TODO deduplicate
# decimalSeparator = data_array.encoding.decimalSeparator
tokenSeparator = data_array.encoding.tokenSeparator
blockSeparator = data_array.encoding.blockSeparator
# collapseWhiteSpaces = data_array.encoding.collapseWhiteSpaces
data_values = data_array.values
lines = [x for x in data_values.split(blockSeparator) if x != ""]
# profile cacher!
profile_cache = ProfileCache()
for row in lines:
values = row.split(tokenSeparator)
# skey = None
i = 0
cur_time = None
# cur_qual = None
for c in columns:
if (
isinstance(c.content, Time)
and c.content.definition
== "http://www.opengis.net/def/property/OGC/0/SamplingTime"
):
cur_time = parser.parse(values[i])
i += 1
if len(c.quality):
# @TODO: do some quality constraint checks
i += len(c.quality)
# for qua in c.quality:
elif isinstance(c.content, DataChoice) and c.name == "sensor":
sensor_key = values[i]
i += 1
sensor_dr = c.content.get_by_name(sensor_key).content
sensor_info_ = sensor_info[sensor_key]
parsed, nc = self._parse_sensor_record(
sensor_dr, sensor_info_, values[i:]
)
# turn these into Points/Members
for rec in parsed:
# calc a Z value from rec/sensor and build point
point, members = self._build_obs_point(
sensor_info_, rec
)
# add to profile
profile_cache.add_obs(
sensor_info_, cur_time, point, members
)
i += nc
return profile_cache.get_collections() | Returns ProfileCollection | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/timeseries_profile.py#L308-L391 | [
"def add_obs(self, sensor, t, obs_point, obs_members):\n \"\"\"\n \"\"\"\n profile = self._get_profile(sensor, t)\n point = self._get_point(profile, obs_point)\n point.members.extend(obs_members)\n",
"def get_collections(self):\n return {\n k[2]: ProfileCollection(elements=list(pd.values()))\n for k, pd in self._cache.items()\n }\n",
"def _parse_sensor_record(self, sensor_data_rec, sensor_info, rem_values):\n \"\"\"\n Parses values via sensor data record passed in.\n Returns parsed values AND how many items it consumed out of rem_values.\n \"\"\"\n val_idx = 0\n\n # @TODO seems there is only a single field in each of these\n assert len(sensor_data_rec.field) == 1\n sensor_data_array = sensor_data_rec.field[0].content\n\n # there is probably not going to be a count in the def, it'll be in the data\n count = None\n count_text = sensor_data_array.elementCount.text\n if count_text:\n count = int(count_text.strip())\n\n if not count:\n count = int(rem_values[val_idx])\n val_idx += 1\n\n parsed = []\n\n for recnum in range(count):\n cur = []\n\n for f in sensor_data_array.elementType.field:\n cur_val = rem_values[val_idx]\n val_idx += 1\n\n m = Member(name=f.name, standard=f.content.definition)\n\n if hasattr(f.content, \"uom\"):\n m[\"units\"] = f.content.uom\n\n try:\n m[\"value\"] = float(cur_val)\n except ValueError:\n m[\"value\"] = cur_val\n\n if len(f.quality):\n m[\"quality\"] = []\n for qual in f.quality:\n cur_qual = rem_values[val_idx]\n val_idx += 1\n\n # @TODO check this against constraints\n m[\"quality\"].append(cur_qual)\n\n cur.append(m)\n\n parsed.append(cur)\n\n return parsed, val_idx\n",
"def _build_obs_point(self, sensor_info, obs_recs):\n \"\"\"\n Pulls bin/profile height info out and calcs a z.\n\n TODO: currently extremely naive\n\n Returns a 2-tuple: point, remaining obs_recs\n \"\"\"\n cur_point = sensor_info[\"location\"][\"point\"]\n\n keys = [m[\"name\"] for m in obs_recs]\n if \"binIndex\" in keys:\n zidx = keys.index(\"binIndex\")\n bin_index = int(obs_recs[zidx][\"value\"])\n z = sensor_info[\"profile_heights\"][\"values\"][bin_index]\n\n point = sPoint(cur_point.x, cur_point.y, cur_point.z + z)\n\n elif \"profileIndex\" in keys:\n zidx = keys.index(\"profileIndex\")\n bin_index = int(obs_recs[zidx][\"value\"])\n\n # @TODO take into account orientation, may change x/y/z\n # @TODO bin edges?\n z = sensor_info[\"profile_bins\"][\"bin_center\"][\"values\"][bin_index]\n\n point = sPoint(cur_point.x, cur_point.y, cur_point.z + z)\n\n else:\n raise ValueError(\"no binIndex or profileIndex in Member: %s\", keys)\n\n # remove z related Member\n obs_recs = obs_recs[:]\n obs_recs.pop(zidx)\n\n return point, obs_recs\n"
] | class TimeSeriesProfile(object):
def __init__(self, element):
record = DataRecord(element)
stations_field = record.get_by_name("stations")
stations = {}
sensors = {}
for station in stations_field.content.field:
s = StationProfile()
s.name = station.name
s.uid = station.content.get_by_name("stationID").content.value
# Location
vector = station.content.get_by_name("platformLocation").content
srss = vector.referenceFrame.split("&")
hsrs = None
try:
hsrs = Crs(srss[0])
except ValueError:
pass
vsrs = None
try:
vsrs = Crs(srss[-1].replace("2=http:", "http:"))
except ValueError:
pass
s.set_property("horizontal_srs", hsrs)
s.set_property("vertical_srs", vsrs)
s.set_property("localFrame", vector.localFrame)
lat = vector.get_by_name("latitude").content.value
lon = vector.get_by_name("longitude").content.value
z = vector.get_by_name("height").content.value
loc = [lon, lat]
if z:
loc.append(z)
s.location = sPoint(*loc)
# sensors
for sensor in station.content.get_by_name("sensors").content.field:
name = sensor.name
uri = sensor.content.get_by_name("sensorID").content.value
sensors[name] = {"station": s.uid, "name": name, "uri": uri}
# orientation
ori_el = sensor.content.get_by_name("sensorOrientation")
if ori_el:
orientation = self._parse_sensor_orientation(ori_el)
sensors[name]["sensor_orientation"] = orientation
# location
loc_el = sensor.content.get_by_name("sensorLocation")
if loc_el:
location = self._parse_location(loc_el, s.location)
sensors[name]["location"] = location
# profile bins
profbins_el = sensor.content.get_by_name("profileBins")
if profbins_el:
profile_bins = self._parse_profile_bins(profbins_el)
sensors[name]["profile_bins"] = profile_bins
# OR profile heights
profheights_el = sensor.content.get_by_name("profileHeights")
if profheights_el:
profile_heights = self._parse_profile_heights(
profheights_el
)
sensors[name]["profile_heights"] = profile_heights
s.sensors = sensors
stations[s.uid] = s
sensor_data = self._parse_sensor_data(
record.get_by_name("observationData"), sensors
)
# sensor data is dict of station id -> profile collection
for station_id, sensor_profile_data in sensor_data.items():
stations[station_id].elements.extend(sensor_profile_data._elements)
if len(stations) > 1:
self.feature = StationCollection(elements=stations)
elif len(stations) == 1:
self.feature = next(iter(stations.values()))
else:
print("No stations found!")
self.feature = None
def _parse_sensor_orientation(self, ori_el):
# 'srs':Crs(), # @TODO (OWSLib cannot parse this Crs yet)
orientation = {}
for coord in ori_el.content.coordinate:
orientation[coord.axisID] = {
"name": coord.name,
"value": coord.value,
"axis": coord.axisID,
"uom": coord.uom,
}
return orientation
def _parse_location(self, loc_el, station_point):
vector = loc_el.content
srss = vector.referenceFrame.split("&")
hsrs = None
try:
hsrs = Crs(srss[0])
except ValueError:
pass
vsrs = None
try:
vsrs = Crs(srss[-1].replace("2=http:", "http:"))
except ValueError:
pass
local_frame = vector.localFrame
lat = vector.get_by_name("latitude").content.value
lon = vector.get_by_name("longitude").content.value
z = vector.get_by_name("height").content.value
loc = [lon, lat]
if z:
loc.append(z)
else:
loc.append(station_point.z)
location = {
"horizontal_srs": hsrs,
"vertical_srs": vsrs,
"localFrame": local_frame,
"point": sPoint(*loc),
}
return location
def _parse_profile_bins(self, profbins_el):
data_array = profbins_el.content
# count = int(data_array.elementCount[0].text)
data = self._parse_data_array(data_array)
bin_center_quantity = data_array.elementType.content.get_by_name(
"binCenter"
)
bin_center = {
"referenceFrame": bin_center_quantity.content.referenceFrame,
"axisID": bin_center_quantity.content.axisID,
"uom": bin_center_quantity.content.uom,
"values": data[0],
}
bin_edges_quantityrange = data_array.elementType.content.get_by_name(
"binEdges"
)
bin_edges = {
"referenceFrame": bin_edges_quantityrange.content.referenceFrame,
"axisID": bin_edges_quantityrange.content.axisID,
"uom": bin_edges_quantityrange.content.uom,
"values": data[1],
}
profile_bins = {"bin_center": bin_center, "bin_edges": bin_edges}
return profile_bins
def _parse_profile_heights(self, profheights_el):
data_array = profheights_el.content
# count = int(data_array.elementCount[0].text)
data = self._parse_data_array(data_array)
height_el = data_array.elementType.get_by_name("height")
profile_definition = {
"referenceFrame": height_el.content.referenceFrame,
"axisID": height_el.content.axisID,
"uom": height_el.content.uom,
"values": data[0],
}
return profile_definition
def _parse_data_array(self, data_array):
"""
Parses a general DataArray.
"""
# decimalSeparator = data_array.encoding.decimalSeparator
tokenSeparator = data_array.encoding.tokenSeparator
blockSeparator = data_array.encoding.blockSeparator
# collapseWhiteSpaces = data_array.encoding.collapseWhiteSpaces
data_values = data_array.values
lines = [x for x in data_values.split(blockSeparator) if x != ""]
ret_val = []
for row in lines:
values = row.split(tokenSeparator)
ret_val.append(
[
float(v)
if " " not in v.strip()
else [float(vv) for vv in v.split()]
for v in values
]
)
# transpose into columns
return [list(x) for x in zip(*ret_val)]
def _parse_sensor_record(self, sensor_data_rec, sensor_info, rem_values):
"""
Parses values via sensor data record passed in.
Returns parsed values AND how many items it consumed out of rem_values.
"""
val_idx = 0
# @TODO seems there is only a single field in each of these
assert len(sensor_data_rec.field) == 1
sensor_data_array = sensor_data_rec.field[0].content
# there is probably not going to be a count in the def, it'll be in the data
count = None
count_text = sensor_data_array.elementCount.text
if count_text:
count = int(count_text.strip())
if not count:
count = int(rem_values[val_idx])
val_idx += 1
parsed = []
for recnum in range(count):
cur = []
for f in sensor_data_array.elementType.field:
cur_val = rem_values[val_idx]
val_idx += 1
m = Member(name=f.name, standard=f.content.definition)
if hasattr(f.content, "uom"):
m["units"] = f.content.uom
try:
m["value"] = float(cur_val)
except ValueError:
m["value"] = cur_val
if len(f.quality):
m["quality"] = []
for qual in f.quality:
cur_qual = rem_values[val_idx]
val_idx += 1
# @TODO check this against constraints
m["quality"].append(cur_qual)
cur.append(m)
parsed.append(cur)
return parsed, val_idx
def _build_obs_point(self, sensor_info, obs_recs):
"""
Pulls bin/profile height info out and calcs a z.
TODO: currently extremely naive
Returns a 2-tuple: point, remaining obs_recs
"""
cur_point = sensor_info["location"]["point"]
keys = [m["name"] for m in obs_recs]
if "binIndex" in keys:
zidx = keys.index("binIndex")
bin_index = int(obs_recs[zidx]["value"])
z = sensor_info["profile_heights"]["values"][bin_index]
point = sPoint(cur_point.x, cur_point.y, cur_point.z + z)
elif "profileIndex" in keys:
zidx = keys.index("profileIndex")
bin_index = int(obs_recs[zidx]["value"])
# @TODO take into account orientation, may change x/y/z
# @TODO bin edges?
z = sensor_info["profile_bins"]["bin_center"]["values"][bin_index]
point = sPoint(cur_point.x, cur_point.y, cur_point.z + z)
else:
raise ValueError("no binIndex or profileIndex in Member: %s", keys)
# remove z related Member
obs_recs = obs_recs[:]
obs_recs.pop(zidx)
return point, obs_recs
|
ioos/pyoos | pyoos/parsers/ioos/one/timeseries_profile.py | TimeSeriesProfile._parse_sensor_record | python | def _parse_sensor_record(self, sensor_data_rec, sensor_info, rem_values):
val_idx = 0
# @TODO seems there is only a single field in each of these
assert len(sensor_data_rec.field) == 1
sensor_data_array = sensor_data_rec.field[0].content
# there is probably not going to be a count in the def, it'll be in the data
count = None
count_text = sensor_data_array.elementCount.text
if count_text:
count = int(count_text.strip())
if not count:
count = int(rem_values[val_idx])
val_idx += 1
parsed = []
for recnum in range(count):
cur = []
for f in sensor_data_array.elementType.field:
cur_val = rem_values[val_idx]
val_idx += 1
m = Member(name=f.name, standard=f.content.definition)
if hasattr(f.content, "uom"):
m["units"] = f.content.uom
try:
m["value"] = float(cur_val)
except ValueError:
m["value"] = cur_val
if len(f.quality):
m["quality"] = []
for qual in f.quality:
cur_qual = rem_values[val_idx]
val_idx += 1
# @TODO check this against constraints
m["quality"].append(cur_qual)
cur.append(m)
parsed.append(cur)
return parsed, val_idx | Parses values via sensor data record passed in.
Returns parsed values AND how many items it consumed out of rem_values. | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/timeseries_profile.py#L393-L446 | null | class TimeSeriesProfile(object):
def __init__(self, element):
record = DataRecord(element)
stations_field = record.get_by_name("stations")
stations = {}
sensors = {}
for station in stations_field.content.field:
s = StationProfile()
s.name = station.name
s.uid = station.content.get_by_name("stationID").content.value
# Location
vector = station.content.get_by_name("platformLocation").content
srss = vector.referenceFrame.split("&")
hsrs = None
try:
hsrs = Crs(srss[0])
except ValueError:
pass
vsrs = None
try:
vsrs = Crs(srss[-1].replace("2=http:", "http:"))
except ValueError:
pass
s.set_property("horizontal_srs", hsrs)
s.set_property("vertical_srs", vsrs)
s.set_property("localFrame", vector.localFrame)
lat = vector.get_by_name("latitude").content.value
lon = vector.get_by_name("longitude").content.value
z = vector.get_by_name("height").content.value
loc = [lon, lat]
if z:
loc.append(z)
s.location = sPoint(*loc)
# sensors
for sensor in station.content.get_by_name("sensors").content.field:
name = sensor.name
uri = sensor.content.get_by_name("sensorID").content.value
sensors[name] = {"station": s.uid, "name": name, "uri": uri}
# orientation
ori_el = sensor.content.get_by_name("sensorOrientation")
if ori_el:
orientation = self._parse_sensor_orientation(ori_el)
sensors[name]["sensor_orientation"] = orientation
# location
loc_el = sensor.content.get_by_name("sensorLocation")
if loc_el:
location = self._parse_location(loc_el, s.location)
sensors[name]["location"] = location
# profile bins
profbins_el = sensor.content.get_by_name("profileBins")
if profbins_el:
profile_bins = self._parse_profile_bins(profbins_el)
sensors[name]["profile_bins"] = profile_bins
# OR profile heights
profheights_el = sensor.content.get_by_name("profileHeights")
if profheights_el:
profile_heights = self._parse_profile_heights(
profheights_el
)
sensors[name]["profile_heights"] = profile_heights
s.sensors = sensors
stations[s.uid] = s
sensor_data = self._parse_sensor_data(
record.get_by_name("observationData"), sensors
)
# sensor data is dict of station id -> profile collection
for station_id, sensor_profile_data in sensor_data.items():
stations[station_id].elements.extend(sensor_profile_data._elements)
if len(stations) > 1:
self.feature = StationCollection(elements=stations)
elif len(stations) == 1:
self.feature = next(iter(stations.values()))
else:
print("No stations found!")
self.feature = None
def _parse_sensor_orientation(self, ori_el):
# 'srs':Crs(), # @TODO (OWSLib cannot parse this Crs yet)
orientation = {}
for coord in ori_el.content.coordinate:
orientation[coord.axisID] = {
"name": coord.name,
"value": coord.value,
"axis": coord.axisID,
"uom": coord.uom,
}
return orientation
def _parse_location(self, loc_el, station_point):
vector = loc_el.content
srss = vector.referenceFrame.split("&")
hsrs = None
try:
hsrs = Crs(srss[0])
except ValueError:
pass
vsrs = None
try:
vsrs = Crs(srss[-1].replace("2=http:", "http:"))
except ValueError:
pass
local_frame = vector.localFrame
lat = vector.get_by_name("latitude").content.value
lon = vector.get_by_name("longitude").content.value
z = vector.get_by_name("height").content.value
loc = [lon, lat]
if z:
loc.append(z)
else:
loc.append(station_point.z)
location = {
"horizontal_srs": hsrs,
"vertical_srs": vsrs,
"localFrame": local_frame,
"point": sPoint(*loc),
}
return location
def _parse_profile_bins(self, profbins_el):
data_array = profbins_el.content
# count = int(data_array.elementCount[0].text)
data = self._parse_data_array(data_array)
bin_center_quantity = data_array.elementType.content.get_by_name(
"binCenter"
)
bin_center = {
"referenceFrame": bin_center_quantity.content.referenceFrame,
"axisID": bin_center_quantity.content.axisID,
"uom": bin_center_quantity.content.uom,
"values": data[0],
}
bin_edges_quantityrange = data_array.elementType.content.get_by_name(
"binEdges"
)
bin_edges = {
"referenceFrame": bin_edges_quantityrange.content.referenceFrame,
"axisID": bin_edges_quantityrange.content.axisID,
"uom": bin_edges_quantityrange.content.uom,
"values": data[1],
}
profile_bins = {"bin_center": bin_center, "bin_edges": bin_edges}
return profile_bins
def _parse_profile_heights(self, profheights_el):
data_array = profheights_el.content
# count = int(data_array.elementCount[0].text)
data = self._parse_data_array(data_array)
height_el = data_array.elementType.get_by_name("height")
profile_definition = {
"referenceFrame": height_el.content.referenceFrame,
"axisID": height_el.content.axisID,
"uom": height_el.content.uom,
"values": data[0],
}
return profile_definition
def _parse_data_array(self, data_array):
"""
Parses a general DataArray.
"""
# decimalSeparator = data_array.encoding.decimalSeparator
tokenSeparator = data_array.encoding.tokenSeparator
blockSeparator = data_array.encoding.blockSeparator
# collapseWhiteSpaces = data_array.encoding.collapseWhiteSpaces
data_values = data_array.values
lines = [x for x in data_values.split(blockSeparator) if x != ""]
ret_val = []
for row in lines:
values = row.split(tokenSeparator)
ret_val.append(
[
float(v)
if " " not in v.strip()
else [float(vv) for vv in v.split()]
for v in values
]
)
# transpose into columns
return [list(x) for x in zip(*ret_val)]
def _parse_sensor_data(self, obs_el, sensor_info):
"""
Returns ProfileCollection
"""
data_array = obs_el.content
# get column defs
data_record = data_array.elementType.content
columns = []
for f in data_record.field:
columns.append(f)
# get more information on sensor cols
sensor_cols = defaultdict(list)
# sensor_vals = defaultdict(list)
sensor_rec = data_record.get_by_name("sensor")
for sendata in sensor_rec.content.item:
if sendata.content is None:
continue
for f in sendata.content.field:
sensor_cols[sendata.name].append(f)
# @TODO deduplicate
# decimalSeparator = data_array.encoding.decimalSeparator
tokenSeparator = data_array.encoding.tokenSeparator
blockSeparator = data_array.encoding.blockSeparator
# collapseWhiteSpaces = data_array.encoding.collapseWhiteSpaces
data_values = data_array.values
lines = [x for x in data_values.split(blockSeparator) if x != ""]
# profile cacher!
profile_cache = ProfileCache()
for row in lines:
values = row.split(tokenSeparator)
# skey = None
i = 0
cur_time = None
# cur_qual = None
for c in columns:
if (
isinstance(c.content, Time)
and c.content.definition
== "http://www.opengis.net/def/property/OGC/0/SamplingTime"
):
cur_time = parser.parse(values[i])
i += 1
if len(c.quality):
# @TODO: do some quality constraint checks
i += len(c.quality)
# for qua in c.quality:
elif isinstance(c.content, DataChoice) and c.name == "sensor":
sensor_key = values[i]
i += 1
sensor_dr = c.content.get_by_name(sensor_key).content
sensor_info_ = sensor_info[sensor_key]
parsed, nc = self._parse_sensor_record(
sensor_dr, sensor_info_, values[i:]
)
# turn these into Points/Members
for rec in parsed:
# calc a Z value from rec/sensor and build point
point, members = self._build_obs_point(
sensor_info_, rec
)
# add to profile
profile_cache.add_obs(
sensor_info_, cur_time, point, members
)
i += nc
return profile_cache.get_collections()
def _build_obs_point(self, sensor_info, obs_recs):
"""
Pulls bin/profile height info out and calcs a z.
TODO: currently extremely naive
Returns a 2-tuple: point, remaining obs_recs
"""
cur_point = sensor_info["location"]["point"]
keys = [m["name"] for m in obs_recs]
if "binIndex" in keys:
zidx = keys.index("binIndex")
bin_index = int(obs_recs[zidx]["value"])
z = sensor_info["profile_heights"]["values"][bin_index]
point = sPoint(cur_point.x, cur_point.y, cur_point.z + z)
elif "profileIndex" in keys:
zidx = keys.index("profileIndex")
bin_index = int(obs_recs[zidx]["value"])
# @TODO take into account orientation, may change x/y/z
# @TODO bin edges?
z = sensor_info["profile_bins"]["bin_center"]["values"][bin_index]
point = sPoint(cur_point.x, cur_point.y, cur_point.z + z)
else:
raise ValueError("no binIndex or profileIndex in Member: %s", keys)
# remove z related Member
obs_recs = obs_recs[:]
obs_recs.pop(zidx)
return point, obs_recs
|
ioos/pyoos | pyoos/parsers/ioos/one/timeseries_profile.py | TimeSeriesProfile._build_obs_point | python | def _build_obs_point(self, sensor_info, obs_recs):
cur_point = sensor_info["location"]["point"]
keys = [m["name"] for m in obs_recs]
if "binIndex" in keys:
zidx = keys.index("binIndex")
bin_index = int(obs_recs[zidx]["value"])
z = sensor_info["profile_heights"]["values"][bin_index]
point = sPoint(cur_point.x, cur_point.y, cur_point.z + z)
elif "profileIndex" in keys:
zidx = keys.index("profileIndex")
bin_index = int(obs_recs[zidx]["value"])
# @TODO take into account orientation, may change x/y/z
# @TODO bin edges?
z = sensor_info["profile_bins"]["bin_center"]["values"][bin_index]
point = sPoint(cur_point.x, cur_point.y, cur_point.z + z)
else:
raise ValueError("no binIndex or profileIndex in Member: %s", keys)
# remove z related Member
obs_recs = obs_recs[:]
obs_recs.pop(zidx)
return point, obs_recs | Pulls bin/profile height info out and calcs a z.
TODO: currently extremely naive
Returns a 2-tuple: point, remaining obs_recs | train | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/timeseries_profile.py#L448-L483 | null | class TimeSeriesProfile(object):
def __init__(self, element):
record = DataRecord(element)
stations_field = record.get_by_name("stations")
stations = {}
sensors = {}
for station in stations_field.content.field:
s = StationProfile()
s.name = station.name
s.uid = station.content.get_by_name("stationID").content.value
# Location
vector = station.content.get_by_name("platformLocation").content
srss = vector.referenceFrame.split("&")
hsrs = None
try:
hsrs = Crs(srss[0])
except ValueError:
pass
vsrs = None
try:
vsrs = Crs(srss[-1].replace("2=http:", "http:"))
except ValueError:
pass
s.set_property("horizontal_srs", hsrs)
s.set_property("vertical_srs", vsrs)
s.set_property("localFrame", vector.localFrame)
lat = vector.get_by_name("latitude").content.value
lon = vector.get_by_name("longitude").content.value
z = vector.get_by_name("height").content.value
loc = [lon, lat]
if z:
loc.append(z)
s.location = sPoint(*loc)
# sensors
for sensor in station.content.get_by_name("sensors").content.field:
name = sensor.name
uri = sensor.content.get_by_name("sensorID").content.value
sensors[name] = {"station": s.uid, "name": name, "uri": uri}
# orientation
ori_el = sensor.content.get_by_name("sensorOrientation")
if ori_el:
orientation = self._parse_sensor_orientation(ori_el)
sensors[name]["sensor_orientation"] = orientation
# location
loc_el = sensor.content.get_by_name("sensorLocation")
if loc_el:
location = self._parse_location(loc_el, s.location)
sensors[name]["location"] = location
# profile bins
profbins_el = sensor.content.get_by_name("profileBins")
if profbins_el:
profile_bins = self._parse_profile_bins(profbins_el)
sensors[name]["profile_bins"] = profile_bins
# OR profile heights
profheights_el = sensor.content.get_by_name("profileHeights")
if profheights_el:
profile_heights = self._parse_profile_heights(
profheights_el
)
sensors[name]["profile_heights"] = profile_heights
s.sensors = sensors
stations[s.uid] = s
sensor_data = self._parse_sensor_data(
record.get_by_name("observationData"), sensors
)
# sensor data is dict of station id -> profile collection
for station_id, sensor_profile_data in sensor_data.items():
stations[station_id].elements.extend(sensor_profile_data._elements)
if len(stations) > 1:
self.feature = StationCollection(elements=stations)
elif len(stations) == 1:
self.feature = next(iter(stations.values()))
else:
print("No stations found!")
self.feature = None
def _parse_sensor_orientation(self, ori_el):
# 'srs':Crs(), # @TODO (OWSLib cannot parse this Crs yet)
orientation = {}
for coord in ori_el.content.coordinate:
orientation[coord.axisID] = {
"name": coord.name,
"value": coord.value,
"axis": coord.axisID,
"uom": coord.uom,
}
return orientation
def _parse_location(self, loc_el, station_point):
vector = loc_el.content
srss = vector.referenceFrame.split("&")
hsrs = None
try:
hsrs = Crs(srss[0])
except ValueError:
pass
vsrs = None
try:
vsrs = Crs(srss[-1].replace("2=http:", "http:"))
except ValueError:
pass
local_frame = vector.localFrame
lat = vector.get_by_name("latitude").content.value
lon = vector.get_by_name("longitude").content.value
z = vector.get_by_name("height").content.value
loc = [lon, lat]
if z:
loc.append(z)
else:
loc.append(station_point.z)
location = {
"horizontal_srs": hsrs,
"vertical_srs": vsrs,
"localFrame": local_frame,
"point": sPoint(*loc),
}
return location
def _parse_profile_bins(self, profbins_el):
data_array = profbins_el.content
# count = int(data_array.elementCount[0].text)
data = self._parse_data_array(data_array)
bin_center_quantity = data_array.elementType.content.get_by_name(
"binCenter"
)
bin_center = {
"referenceFrame": bin_center_quantity.content.referenceFrame,
"axisID": bin_center_quantity.content.axisID,
"uom": bin_center_quantity.content.uom,
"values": data[0],
}
bin_edges_quantityrange = data_array.elementType.content.get_by_name(
"binEdges"
)
bin_edges = {
"referenceFrame": bin_edges_quantityrange.content.referenceFrame,
"axisID": bin_edges_quantityrange.content.axisID,
"uom": bin_edges_quantityrange.content.uom,
"values": data[1],
}
profile_bins = {"bin_center": bin_center, "bin_edges": bin_edges}
return profile_bins
def _parse_profile_heights(self, profheights_el):
data_array = profheights_el.content
# count = int(data_array.elementCount[0].text)
data = self._parse_data_array(data_array)
height_el = data_array.elementType.get_by_name("height")
profile_definition = {
"referenceFrame": height_el.content.referenceFrame,
"axisID": height_el.content.axisID,
"uom": height_el.content.uom,
"values": data[0],
}
return profile_definition
def _parse_data_array(self, data_array):
"""
Parses a general DataArray.
"""
# decimalSeparator = data_array.encoding.decimalSeparator
tokenSeparator = data_array.encoding.tokenSeparator
blockSeparator = data_array.encoding.blockSeparator
# collapseWhiteSpaces = data_array.encoding.collapseWhiteSpaces
data_values = data_array.values
lines = [x for x in data_values.split(blockSeparator) if x != ""]
ret_val = []
for row in lines:
values = row.split(tokenSeparator)
ret_val.append(
[
float(v)
if " " not in v.strip()
else [float(vv) for vv in v.split()]
for v in values
]
)
# transpose into columns
return [list(x) for x in zip(*ret_val)]
def _parse_sensor_data(self, obs_el, sensor_info):
    """
    Returns ProfileCollection

    Walks the observation's DataArray row by row: each row carries a
    sampling time, an optional quality column, and a DataChoice-selected
    sensor record whose values are parsed via ``_parse_sensor_record``
    and cached as profile observations.

    :param obs_el: element whose ``content`` is the observation DataArray.
    :param sensor_info: mapping of sensor key -> sensor metadata dict.
    """
    data_array = obs_el.content
    # get column defs
    data_record = data_array.elementType.content
    columns = [f for f in data_record.field]
    # get more information on sensor cols
    sensor_cols = defaultdict(list)
    sensor_rec = data_record.get_by_name("sensor")
    for sendata in sensor_rec.content.item:
        if sendata.content is None:
            continue
        for f in sendata.content.field:
            sensor_cols[sendata.name].append(f)
    tokenSeparator = data_array.encoding.tokenSeparator
    blockSeparator = data_array.encoding.blockSeparator
    lines = [x for x in data_array.values.split(blockSeparator) if x != ""]
    # profile cacher!
    profile_cache = ProfileCache()
    for row in lines:
        values = row.split(tokenSeparator)
        i = 0  # cursor into this row's tokens
        cur_time = None
        for c in columns:
            if (
                isinstance(c.content, Time)
                and c.content.definition
                == "http://www.opengis.net/def/property/OGC/0/SamplingTime"
            ):
                cur_time = parser.parse(values[i])
                i += 1
                if len(c.quality):
                    # @TODO: do some quality constraint checks
                    i += len(c.quality)
            elif isinstance(c.content, DataChoice) and c.name == "sensor":
                # first token names the chosen sensor record
                sensor_key = values[i]
                i += 1
                sensor_dr = c.content.get_by_name(sensor_key).content
                sensor_info_ = sensor_info[sensor_key]
                parsed, nc = self._parse_sensor_record(
                    sensor_dr, sensor_info_, values[i:]
                )
                # turn these into Points/Members
                for rec in parsed:
                    # calc a Z value from rec/sensor and build point
                    point, members = self._build_obs_point(
                        sensor_info_, rec
                    )
                    # add to profile
                    profile_cache.add_obs(
                        sensor_info_, cur_time, point, members
                    )
                i += nc
    return profile_cache.get_collections()
def _parse_sensor_record(self, sensor_data_rec, sensor_info, rem_values):
    """
    Parses values via sensor data record passed in.
    Returns parsed values AND how many items it consumed out of rem_values.
    """
    val_idx = 0
    # @TODO seems there is only a single field in each of these
    assert len(sensor_data_rec.field) == 1
    sensor_data_array = sensor_data_rec.field[0].content
    # Record count may be declared in the definition; otherwise it leads
    # the data stream itself.
    count = None
    count_text = sensor_data_array.elementCount.text
    if count_text:
        count = int(count_text.strip())
    if not count:
        count = int(rem_values[val_idx])
        val_idx += 1
    parsed = []
    for recnum in range(count):
        cur = []
        for f in sensor_data_array.elementType.field:
            cur_val = rem_values[val_idx]
            val_idx += 1
            m = Member(name=f.name, standard=f.content.definition)
            if hasattr(f.content, "uom"):
                m["units"] = f.content.uom
            # Keep the raw string when the value is not numeric.
            try:
                m["value"] = float(cur_val)
            except ValueError:
                m["value"] = cur_val
            if len(f.quality):
                m["quality"] = []
                for qual in f.quality:
                    cur_qual = rem_values[val_idx]
                    val_idx += 1
                    # @TODO check this against constraints
                    m["quality"].append(cur_qual)
            cur.append(m)
        parsed.append(cur)
    return parsed, val_idx
|
def get_named_by_definition(cls, element_list, string_def):
    """Attempts to get an IOOS definition from a list of xml elements

    :param element_list: iterable of elements with ``definition`` and
        ``value`` attributes (may be None).
    :param string_def: the definition URL to match.
    :return: the ``value`` of the first matching element, or None.
    """
    try:
        # next() with a default avoids raising StopIteration when nothing
        # matches; TypeError covers a None/non-iterable element_list,
        # which the previous blanket ``except Exception`` silently hid.
        return next(
            (
                st.value
                for st in element_list
                if st.definition == string_def
            ),
            None,
        )
    except TypeError:
        return None
@classmethod
def get_ioos_def(self, ident, elem_type, ont):
"""Gets a definition given an identifier and where to search for it"""
if elem_type == "identifier":
getter_fn = self.system.get_identifiers_by_name
elif elem_type == "classifier":
getter_fn = self.system.get_classifiers_by_name
else:
raise ValueError("Unknown element type '{}'".format(elem_type))
return DescribeSensor.get_named_by_definition(
getter_fn(ident), urljoin(ont, ident)
)
def __init__(self, element):
""" Common things between all describe sensor requests """
if isinstance(element, ElementType):
root = element
else:
root = etree.fromstring(element)
# sml_str = ".//{{{0}}}identifier/{{{0}}}Term[@definition='http://mmisw.org/ont/ioos/definition/%s']".format(SML_NS)
# TODO: make this cleaner
if hasattr(root, "getroot"):
root = root.getroot()
self.system = SensorML(element).members[0]
self.ioos_version = testXMLValue(
root.find(
".//{%s}field[@name='ioosTemplateVersion']/{%s}Text/{%s}value"
% (SWE_NS, SWE_NS, SWE_NS)
)
)
if self.ioos_version != "1.0":
warnings.warn(
"Warning: Unsupported IOOS version (%s). Supported: [1.0]"
% self.ioos_version
)
self.shortName = self.get_ioos_def("shortName", "identifier", ont)
self.longName = self.get_ioos_def("longName", "identifier", ont)
self.keywords = list(map(str, self.system.keywords))
# Location
try:
self.location = self.system.location[0]
except (TypeError, IndexError): # No location exists
self.location = None
# Timerange
try:
timerange = testXMLValue(
self.system.get_capabilities_by_name("observationTimeRange")[
0
].find(".//" + nsp("swe101:TimeRange/swe101:value"))
).split(" ")
self.starting = parser.parse(timerange[0])
self.ending = parser.parse(timerange[1])
except (AttributeError, TypeError, ValueError, IndexError):
self.starting = None
self.ending = None
|
def get_ioos_def(self, ident, elem_type, ont):
    """Gets a definition given an identifier and where to search for it

    :param ident: short identifier name, e.g. ``"shortName"``.
    :param elem_type: where to look: ``"identifier"`` or ``"classifier"``.
    :param ont: base ontology URL the identifier is resolved against.
    :raises ValueError: for an unknown ``elem_type``.
    """
    if elem_type == "identifier":
        getter_fn = self.system.get_identifiers_by_name
    elif elem_type == "classifier":
        getter_fn = self.system.get_classifiers_by_name
    else:
        raise ValueError("Unknown element type '{}'".format(elem_type))
    # Definitions are keyed by the fully-resolved ontology URL
    # (<ont>/<ident>); search the matching elements for it.
    return DescribeSensor.get_named_by_definition(
        getter_fn(ident), urljoin(ont, ident)
    )
"def get_named_by_definition(cls, element_list, string_def):\n \"\"\"Attempts to get an IOOS definition from a list of xml elements\"\"\"\n try:\n return next(\n (\n st.value\n for st in element_list\n if st.definition == string_def\n )\n )\n except Exception:\n return None\n"
] | class DescribeSensor(IoosDescribeSensor):
@classmethod
def get_named_by_definition(cls, element_list, string_def):
"""Attempts to get an IOOS definition from a list of xml elements"""
try:
return next(
(
st.value
for st in element_list
if st.definition == string_def
)
)
except Exception:
return None
def __init__(self, element):
""" Common things between all describe sensor requests """
if isinstance(element, ElementType):
root = element
else:
root = etree.fromstring(element)
# sml_str = ".//{{{0}}}identifier/{{{0}}}Term[@definition='http://mmisw.org/ont/ioos/definition/%s']".format(SML_NS)
# TODO: make this cleaner
if hasattr(root, "getroot"):
root = root.getroot()
self.system = SensorML(element).members[0]
self.ioos_version = testXMLValue(
root.find(
".//{%s}field[@name='ioosTemplateVersion']/{%s}Text/{%s}value"
% (SWE_NS, SWE_NS, SWE_NS)
)
)
if self.ioos_version != "1.0":
warnings.warn(
"Warning: Unsupported IOOS version (%s). Supported: [1.0]"
% self.ioos_version
)
self.shortName = self.get_ioos_def("shortName", "identifier", ont)
self.longName = self.get_ioos_def("longName", "identifier", ont)
self.keywords = list(map(str, self.system.keywords))
# Location
try:
self.location = self.system.location[0]
except (TypeError, IndexError): # No location exists
self.location = None
# Timerange
try:
timerange = testXMLValue(
self.system.get_capabilities_by_name("observationTimeRange")[
0
].find(".//" + nsp("swe101:TimeRange/swe101:value"))
).split(" ")
self.starting = parser.parse(timerange[0])
self.ending = parser.parse(timerange[1])
except (AttributeError, TypeError, ValueError, IndexError):
self.starting = None
self.ending = None
|
def tokenizer(self):
    """
    Datasets can provide support for segmentation (aka tokenization) in two ways:
    - by providing an orthography profile at etc/orthography.tsv or
    - by overwriting this method to return a custom tokenizer callable.

    :return: A callable to do segmentation, or ``None`` when no \
    orthography profile is available.

    The expected signature of the callable is

        def t(item, string, **kw)

    where
    - `item` is a `dict` representing the complete CLDF FormTable row
    - `string` is the string to be segmented
    - `kw` may be used to pass any context info to the tokenizer
    """
    profile = self.dir / 'etc' / 'orthography.tsv'
    if profile.exists():
        profile = Profile.from_file(str(profile), form='NFC')
        default_spec = list(next(iter(profile.graphemes.values())).keys())
        # Make sure the word-boundary markers are in the profile, so
        # tokenizing "^<form>$" cannot fail on the markers themselves.
        for grapheme in ['^', '$']:
            if grapheme not in profile.graphemes:
                profile.graphemes[grapheme] = {k: None for k in default_spec}
        profile.tree = Tree(list(profile.graphemes.keys()))
        tokenizer = Tokenizer(profile=profile, errors_replace=lambda c: '<{0}>'.format(c))

        def _tokenizer(item, string, **kw):
            kw.setdefault("column", "IPA")
            kw.setdefault("separator", " + ")
            return tokenizer(unicodedata.normalize('NFC', '^' + string + '$'), **kw).split()

        return _tokenizer
    # No profile and no override: the dataset has no segmentation support.
    return None
- by providing an orthography profile at etc/orthography.tsv or
- by overwriting this method to return a custom tokenizer callable.
:return: A callable to do segmentation.
The expected signature of the callable is
def t(item, string, **kw)
where
- `item` is a `dict` representing the complete CLDF FormTable row
- `string` is the string to be segmented
- `kw` may be used to pass any context info to the tokenizer, when called
explicitly. | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/dataset.py#L316-L348 | null | class Dataset(object):
"""
A lexibank dataset.
This object provides access to a dataset's
- language list as attribute `languages`
- concept list as attribute `concepts`
- concepticon concept-list ID as attribute `conceptlist`
"""
dir = None # Derived classes must provide an existing directory here!
id = None # Derived classes must provide a unique ID here!
lexeme_class = Lexeme
cognate_class = Cognate
language_class = Language
concept_class = Concept
log = logging.getLogger(pylexibank.__name__)
@lazyproperty
def metadata(self):
return Metadata(**jsonlib.load(self.dir / 'metadata.json'))
@property
def stats(self):
if self.dir.joinpath('README.json').exists():
return jsonlib.load(self.dir / 'README.json')
return {}
def __init__(self, concepticon=None, glottolog=None):
if self.__class__ != Dataset:
if not self.dir:
raise ValueError(
"Dataset.dir needs to be specified in subclass for %s!" % self.__class__)
elif not self.id:
raise ValueError(
"Dataset.id needs to be specified in subclass for %s!" % self.__class__)
self.unmapped = Unmapped()
self.dir = DataDir(self.dir)
self._json = self.dir.joinpath('lexibank.json')
self.raw = DataDir(self.dir / 'raw')
self.raw.mkdir(exist_ok=True)
self.cldf_dir = self.dir / 'cldf'
self.cldf_dir.mkdir(exist_ok=True)
self.conceptlist = {}
self.glottolog = glottolog
self.concepticon = concepticon
try:
self.git_repo = git.Repo(str(self.dir)) # pragma: no cover
except git.InvalidGitRepositoryError:
self.git_repo = None
self.tr_analyses = {}
self.tr_bad_words = []
self.tr_invalid_words = []
def _iter_etc(self, what):
delimiter = '\t'
path = self.dir / 'etc' / (what + '.tsv')
if not path.exists():
delimiter = ','
path = path.parent / (what + '.csv')
return reader(path, dicts=True, delimiter=delimiter) if path.exists() else []
def read_json(self): # pragma: no cover
return jsonlib.load(self._json) if self._json.exists() else {}
def write_json(self, obj): # pragma: no cover
jsondump(obj, self._json)
@lazyproperty
def github_repo(self): # pragma: no cover
try:
match = re.search(
'github\.com/(?P<org>[^/]+)/(?P<repo>[^.]+)\.git',
self.git_repo.remotes.origin.url)
if match:
return match.group('org') + '/' + match.group('repo')
except AttributeError:
pass
@lazyproperty
def sources(self):
return list(self._iter_etc('sources'))
@lazyproperty
def concepts(self):
return list(self._iter_etc('concepts'))
@lazyproperty
def languages(self):
res = []
for item in self._iter_etc('languages'):
if item.get('GLOTTOCODE', None) and not \
Glottocode.pattern.match(item['GLOTTOCODE']): # pragma: no cover
raise ValueError(
"Invalid glottocode {0}".format(item['GLOTTOCODE']))
res.append(item)
return res
@lazyproperty
def lexemes(self):
res = {}
for item in self._iter_etc('lexemes'):
res[item['LEXEME']] = item['REPLACEMENT']
return res
# ---------------------------------------------------------------
# workflow actions whih should be overwritten by derived classes:
# ---------------------------------------------------------------
def cmd_download(self, **kw):
self._not_implemented('download')
return NOOP
def cmd_install(self, **kw):
self._not_implemented('install')
return NOOP
# ---------------------------------------------------------------
# handling of lexemes/forms/words
# ---------------------------------------------------------------
def iter_raw_lexemes(self):
"""
Datasets should overwrite this method, yielding raw lexical items, if seeding
an orthography profile via `lexibank orthography`.
"""
yield "abcde"
def clean_form(self, item, form):
"""
Called when a row is added to a CLDF dataset.
:param form:
:return: None to skip the form, or the cleaned form as string.
"""
if form not in ['?']:
return form
def split_forms(self, item, value):
if value in self.lexemes: # pragma: no cover
self.log.debug('overriding via lexemes.csv: %r -> %r' % (value, self.lexemes[value]))
value = self.lexemes.get(value, value)
return [self.clean_form(item, form)
for form in split_text_with_context(value, separators='/,;')]
@lazyproperty
# ---------------------------------------------------------------
# CLDF dataset access
# ---------------------------------------------------------------
@lazyproperty
def cldf(self):
return cldf.Dataset(self)
# ---------------------------------------------------------------
def _download(self, **kw):
self.cmd_download(**kw)
write_text(
self.raw / 'README.md',
'Raw data downloaded {0}'.format(datetime.utcnow().isoformat()))
def _install(self, **kw):
self.log = kw.get('log', self.log)
self.unmapped.clear()
for p in self.cldf_dir.iterdir():
if p.name not in ['README.md', '.gitattributes']:
p.unlink()
self.tr_analyses = {}
self.tr_bad_words = []
self.tr_invalid_words = []
if len(self.metadata.conceptlist):
self.conceptlist = self.concepticon.conceptlists[self.metadata.conceptlist[0]]
if self.cmd_install(**kw) == NOOP:
return
if self.metadata.known_license:
legalcode = self.metadata.known_license.legalcode
if legalcode:
write_text(self.dir / 'LICENSE', legalcode)
gitattributes = self.cldf_dir / '.gitattributes'
if not gitattributes.exists():
with gitattributes.open('wt') as fp:
fp.write('*.csv text eol=crlf')
if kw.get('verbose'):
self.unmapped.pprint()
self.cldf.validate(kw['log'])
stats = transcription.Stats(
bad_words=sorted(self.tr_bad_words[:100], key=lambda x: x['ID']),
bad_words_count=len(self.tr_bad_words),
invalid_words=sorted(self.tr_invalid_words[:100], key=lambda x: x['ID']),
invalid_words_count=len(self.tr_invalid_words))
for lid, analysis in self.tr_analyses.items():
for attribute in ['segments', 'bipa_errors', 'sclass_errors', 'replacements']:
getattr(stats, attribute).update(getattr(analysis, attribute))
stats.general_errors += analysis.general_errors
stats.inventory_size += len(analysis.segments) / len(self.tr_analyses)
error_segments = stats.bipa_errors.union(stats.sclass_errors)
for i, row in enumerate(stats.bad_words):
analyzed_segments = []
for s in row['Segments']:
analyzed_segments.append('<s> %s </s>' % s if s in error_segments else s)
stats.bad_words[i] = [
row['ID'],
row['Language_ID'],
row['Parameter_ID'],
row['Form'],
' '.join(analyzed_segments)]
for i, row in enumerate(stats.invalid_words):
stats.invalid_words[i] = [
row['ID'],
row['Language_ID'],
row['Parameter_ID'],
row['Form']]
# Aggregate transcription analysis results ...
tr = dict(
by_language={k: attr.asdict(v) for k, v in self.tr_analyses.items()},
stats=attr.asdict(stats))
# ... and write a report:
for text, fname in [
(transcription.report(tr), 'TRANSCRIPTION.md'),
(self.report(tr, log=kw.get('log')), 'README.md'),
]:
textdump(text, self.dir / fname, log=kw.get('log'))
def _clean(self, **kw):
self.log.debug('removing CLDF directory %s' % self.cldf_dir)
if self.cldf_dir.exists():
for f in self.cldf_dir.iterdir():
if f.is_file():
remove(f)
else:
rmtree(f)
def _not_implemented(self, method):
self.log.warning('cmd_{0} not implemented for dataset {1}'.format(method, self.id))
def coverage(self, vars, glangs, c): # pragma: no cover
for row in self.cldf['FormTable']:
try:
cid = int(row['Parameter_ID'])
except (ValueError, TypeError):
continue
vid = self.id + '-' + row['Language_ID']
c[cid].add(vid)
vars[vid].add(cid)
glangs[row['Language_ID']].add(cid)
def build_status_badge(self):
if not self.dir.joinpath('.travis.yml').exists():
return ''
try:
return "[]" \
"(https://travis-ci.org/{0})".format(self.github_repo)
except: # noqa
return ''
def report(self, tr_analysis, log=None):
#
# FIXME: write only summary into README.md
# in case of multiple cldf datasets:
# - separate lexemes.md and transcriptions.md
#
if not list(self.cldf_dir.glob('*.csv')):
return
lines = [
'# %s\n' % self.metadata.title,
'Cite the source dataset as\n',
'> %s\n' % self.metadata.citation,
]
if self.metadata.license:
lines.extend([
'This dataset is licensed under a %s license' % self.metadata.license, ''])
if self.metadata.url:
lines.extend(['Available online at %s' % self.metadata.url, ''])
if self.metadata.related:
lines.extend(['See also %s' % self.metadata.related, ''])
if self.metadata.conceptlist:
lines.append('Conceptlists in Concepticon:')
lines.extend([
'- [{0}](http://concepticon.clld.org/contributions/{0})'.format(cl)
for cl in self.metadata.conceptlist])
lines.append('')
# add NOTES.md
if self.dir.joinpath('NOTES.md').exists():
lines.extend(['## Notes', ''])
lines.extend(self.dir.joinpath('NOTES.md').read_text().split("\n"))
lines.extend(['', '']) # some blank lines
synonyms = defaultdict(Counter)
totals = {
'languages': Counter(),
'concepts': Counter(),
'sources': Counter(),
'cognate_sets': Counter(),
'lexemes': 0,
'lids': Counter(),
'cids': Counter(),
}
missing_source = []
missing_lang = []
param2concepticon = {r['ID']: r['Concepticon_ID'] for r in self.cldf['ParameterTable']}
lang2glottolog = {r['ID']: r['Glottocode'] for r in self.cldf['LanguageTable']}
for row in self.cldf['FormTable']:
if row['Source']:
totals['sources'].update(['y'])
else:
missing_source.append(row)
totals['concepts'].update([param2concepticon[row['Parameter_ID']]])
totals['languages'].update([lang2glottolog[row['Language_ID']]])
totals['lexemes'] += 1
totals['lids'].update([row['Language_ID']])
totals['cids'].update([row['Parameter_ID']])
synonyms[row['Language_ID']].update([row['Parameter_ID']])
for row in self.cldf['CognateTable']:
totals['cognate_sets'].update([row['Cognateset_ID']])
sindex = sum(
[sum(list(counts.values())) / float(len(counts)) for counts in synonyms.values()])
langs = set(synonyms.keys())
if langs:
sindex /= float(len(langs))
else:
sindex = 0
totals['SI'] = sindex
stats = tr_analysis['stats']
lsegments = len(stats['segments'])
lbipapyerr = len(stats['bipa_errors'])
lsclasserr = len(stats['sclass_errors'])
def ratio(prop):
if float(totals['lexemes']) == 0:
return 0
return sum(v for k, v in totals[prop].items() if k) / float(totals['lexemes'])
num_cognates = sum(1 for k, v in totals['cognate_sets'].items())
# see List et al. 2017
# diff between cognate sets and meanings / diff between words and meanings
cog_diversity = (num_cognates - len(totals['cids'])) \
/ (totals['lexemes'] - len(totals['cids']))
badges = [
self.build_status_badge(),
get_badge(ratio('languages'), 'Glottolog'),
get_badge(ratio('concepts'), 'Concepticon'),
get_badge(ratio('sources'), 'Source'),
]
if lsegments:
badges.extend([
get_badge((lsegments - lbipapyerr) / lsegments, 'BIPA'),
get_badge((lsegments - lsclasserr) / lsegments, 'CLTS SoundClass'),
])
lines.extend(['## Statistics', '\n', '\n'.join(badges), ''])
stats_lines = [
'- **Varieties:** {0:,}'.format(len(totals['lids'])),
'- **Concepts:** {0:,}'.format(len(totals['cids'])),
'- **Lexemes:** {0:,}'.format(totals['lexemes']),
'- **Synonymy:** {:0.2f}'.format(totals['SI']),
]
if num_cognates:
stats_lines.extend([
'- **Cognacy:** {0:,} cognates in {1:,} cognate sets ({2:,} singletons)'.format(
sum(v for k, v in totals['cognate_sets'].items()),
num_cognates, len([k for k, v in totals['cognate_sets'].items() if v == 1])),
'- **Cognate Diversity:** {:0.2f}'.format(cog_diversity)
])
if stats['segments']:
stats_lines.extend([
'- **Invalid lexemes:** {0:,}'.format(stats['invalid_words_count']),
'- **Tokens:** {0:,}'.format(sum(stats['segments'].values())),
'- **Segments:** {0:,} ({1} BIPA errors, {2} CTLS sound class errors, '
'{3} CLTS modified)'
.format(lsegments, lbipapyerr, lsclasserr, len(stats['replacements'])),
'- **Inventory size (avg):** {:0.2f}'.format(stats['inventory_size']),
])
if log:
log.info('\n'.join(['Summary for dataset {}'.format(self.id)] + stats_lines))
lines.extend(stats_lines)
totals['languages'] = len(totals['lids'])
totals['concepts'] = len(totals['cids'])
totals['cognate_sets'] = bool(1 for k, v in totals['cognate_sets'].items() if v > 1)
totals['sources'] = totals['sources'].get('y', 0)
bookkeeping_languoids = []
for lang in self.cldf['LanguageTable']:
gl_lang = self.glottolog.cached_languoids.get(lang.get('Glottocode'))
if gl_lang and gl_lang.category == 'Bookkeeping':
bookkeeping_languoids.append(lang)
# improvements section
if missing_lang or missing_source or bookkeeping_languoids:
lines.extend(['\n## Possible Improvements:\n', ])
if missing_lang:
lines.append("- Languages missing glottocodes: %d/%d (%.2f%%)" % (
len(missing_lang),
totals['languages'],
(len(missing_lang) / totals['languages']) * 100
))
if bookkeeping_languoids:
lines.append(
"- Languages linked to [bookkeeping languoids in Glottolog]"
"(http://glottolog.org/glottolog/glottologinformation"
"#bookkeepinglanguoids):")
for lang in bookkeeping_languoids:
lines.append(
' - {0} [{1}](http://glottolog.org/resource/languoid/id/{1})'.format(
lang.get('Name', lang.get('ID')), lang['Glottocode']))
lines.append('\n')
if missing_source:
lines.append("- Entries missing sources: %d/%d (%.2f%%)" % (
len(missing_source),
totals['lexemes'],
(len(missing_source) / totals['lexemes']) * 100
))
return lines
|
def add_lexemes(self, **kw):
    """
    Add lexemes for one raw value, splitting multi-form values first.

    :return: list of dicts corresponding to newly created Lexemes
    """
    lexemes = []
    # Do we have morpheme segmentation on top of phonemes?
    with_morphemes = '+' in self['FormTable', 'Segments'].separator
    for i, form in enumerate(self.dataset.split_forms(kw, kw['Value'])):
        # Guard clause replaces the original's duplicated ``if form:``
        # checks; split_forms may yield empty/cleaned-away forms.
        if not form:
            continue
        kw_ = kw.copy()
        if form != kw_['Value']:
            self.dataset.log.debug(
                'iter_forms split: "{0}" -> "{1}"'.format(kw_['Value'], form))
        kw_.setdefault('Segments', self.tokenize(kw_, form) or [])
        kw_.update(ID=self.lexeme_id(kw), Form=form)
        lexemes.append(self._add_object(self.dataset.lexeme_class, **kw_))
        if kw_['Segments']:
            # Accumulate a per-language transcription analysis.
            analysis = self.dataset.tr_analyses.setdefault(
                kw_['Language_ID'], Analysis())
            try:
                segments = kw_['Segments']
                if with_morphemes:
                    # flatten morpheme-level segmentation to phonemes
                    segments = list(chain(*[s.split() for s in segments]))
                _, _bipa, _sc, _analysis = analyze(segments, analysis)
                # update the list of `bad_words` if necessary; we precompute a
                # list of data types in `_bipa` just to make the conditional
                # checking easier
                _bipa_types = [type(s) for s in _bipa]
                if pyclts.models.UnknownSound in _bipa_types or '?' in _sc:
                    self.dataset.tr_bad_words.append(kw_)
            except ValueError:  # pragma: no cover
                self.dataset.tr_invalid_words.append(kw_)
            except (KeyError, AttributeError):  # pragma: no cover
                print(kw_['Form'], kw_)
                raise
    return lexemes
"def analyze(segments, analysis, lookup=dict(bipa={}, dolgo={})):\n \"\"\"\n Test a sequence for compatibility with CLPA and LingPy.\n\n :param analysis: Pass a `TranscriptionAnalysis` instance for cumulative reporting.\n \"\"\"\n # raise a ValueError in case of empty segments/strings\n if not segments:\n raise ValueError('Empty sequence.')\n\n # test if at least one element in `segments` has information\n # (helps to catch really badly formed input, such as ['\\n']\n if not [segment for segment in segments if segment.strip()]:\n raise ValueError('No information in the sequence.')\n\n # build the phonologic and sound class analyses\n try:\n bipa_analysis, sc_analysis = [], []\n for s in segments:\n a = lookup['bipa'].get(s)\n if a is None:\n a = lookup['bipa'].setdefault(s, BIPA[s])\n bipa_analysis.append(a)\n\n sc = lookup['dolgo'].get(s)\n if sc is None:\n sc = lookup['dolgo'].setdefault(s, BIPA.translate(s, DOLGO))\n sc_analysis.append(sc)\n except: # noqa\n print(segments)\n raise\n\n # compute general errors; this loop must take place outside the\n # following one because the code for computing single errors (either\n # in `bipa_analysis` or in `soundclass_analysis`) is unnecessary\n # complicated\n for sound_bipa, sound_class in zip(bipa_analysis, sc_analysis):\n if isinstance(sound_bipa, pyclts.models.UnknownSound) or sound_class == '?':\n analysis.general_errors += 1\n\n # iterate over the segments and analyses, updating counts of occurrences\n # and specific errors\n for segment, sound_bipa, sound_class in zip(segments, bipa_analysis, sc_analysis):\n # update the segment count\n analysis.segments.update([segment])\n\n # add an error if we got an unknown sound, otherwise just append\n # the `replacements` dictionary\n if isinstance(sound_bipa, pyclts.models.UnknownSound):\n analysis.bipa_errors.add(segment)\n else:\n analysis.replacements[sound_bipa.source].add(sound_bipa.__unicode__())\n\n # update sound class errors, if any\n if sound_class == '?':\n 
analysis.sclass_errors.add(segment)\n\n return segments, bipa_analysis, sc_analysis, analysis\n",
"def lexeme_id(self, kw):\n self._count[(kw['Language_ID'], kw['Parameter_ID'])] += 1\n return '{0}-{1}-{2}'.format(\n kw['Language_ID'],\n kw['Parameter_ID'],\n self._count[(kw['Language_ID'], kw['Parameter_ID'])])\n",
"def tokenize(self, item, string):\n if self.dataset.tokenizer:\n return self.dataset.tokenizer(item, string)\n",
"def _add_object(self, cls, **kw):\n # Instantiating an object will trigger potential validators:\n d = attr.asdict(cls(**kw))\n t = cls.__cldf_table__()\n for key in ['ID', 'Language_ID', 'Parameter_ID', 'Cognateset_ID']:\n # stringify/sluggify identifiers:\n if d.get(key) is not None:\n d[key] = '{0}'.format(d[key])\n if not ID_PATTERN.match(d[key]):\n raise ValueError(\n 'invalid CLDF identifier {0}-{1}: {2}'.format(t, key, d[key]))\n if 'ID' not in d or d['ID'] not in self._obj_index[t]:\n if 'ID' in d:\n self._obj_index[t].add(d['ID'])\n self.objects[t].append(d)\n return d\n"
] | class Dataset(object):
def __init__(self, dataset):
self._count = defaultdict(int)
self._cognate_count = defaultdict(int)
self.dataset = dataset
md = self.dataset.cldf_dir / MD_NAME
if not md.exists():
md = self.dataset.cldf_dir / ALT_MD_NAME
if not md.exists():
md = self.dataset.cldf_dir / MD_NAME
copy(Path(__file__).parent / MD_NAME, md)
self.wl = Wordlist.from_metadata(md)
default_cldf = Wordlist.from_metadata(Path(__file__).parent / 'cldf-metadata.json')
self.objects = {}
self._obj_index = {}
for cls in [
self.dataset.lexeme_class,
self.dataset.language_class,
self.dataset.concept_class,
self.dataset.cognate_class,
]:
self.objects[cls.__cldf_table__()] = []
self._obj_index[cls.__cldf_table__()] = set()
cols = set(
col.header for col in self.wl[cls.__cldf_table__()].tableSchema.columns)
properties = set(
col.propertyUrl.uri for col in self.wl[cls.__cldf_table__()].tableSchema.columns
if col.propertyUrl)
for field in cls.fieldnames():
try:
col = default_cldf[cls.__cldf_table__(), field]
#
# We added Latitude and Longitude to the default metadata later, and want to
# make sure, existing datasets are upgraded silently.
#
if field in ['Latitude', 'Longitude'] \
and cls.__cldf_table__() == 'LanguageTable':
properties.add(col.propertyUrl.uri)
self.wl[cls.__cldf_table__(), field].propertyUrl = col.propertyUrl
self.wl[cls.__cldf_table__(), field].datatype = col.datatype
except KeyError:
col = Column(name=field, datatype="string")
if (col.propertyUrl and col.propertyUrl.uri not in properties) or \
((not col.propertyUrl) and (field not in cols)):
self.wl[cls.__cldf_table__()].tableSchema.columns.append(col)
def validate(self, log=None):
return self.wl.validate(log)
def __getitem__(self, type_):
return self.wl[type_]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for table in ['FormTable', 'CognateTable', 'LanguageTable', 'ParameterTable']:
self.objects.setdefault(table, [])
# We only add concepts and languages that are referenced by forms!
for fk, table in [('Parameter_ID', 'ParameterTable'), ('Language_ID', 'LanguageTable')]:
refs = set(obj[fk] for obj in self.objects['FormTable'])
self.objects[table] = [obj for obj in self.objects[table] if obj['ID'] in refs]
self.write(**self.objects)
def add_sources(self, *args):
if not args and self.dataset.raw.joinpath('sources.bib').exists():
args = self.dataset.raw.read_bib()
self.wl.sources.add(*args)
def lexeme_id(self, kw):
self._count[(kw['Language_ID'], kw['Parameter_ID'])] += 1
return '{0}-{1}-{2}'.format(
kw['Language_ID'],
kw['Parameter_ID'],
self._count[(kw['Language_ID'], kw['Parameter_ID'])])
def cognate_id(self, kw):
self._cognate_count[kw['Form_ID']] += 1
return '{0}-{1}'.format(kw['Form_ID'], self._cognate_count[kw['Form_ID']])
def tokenize(self, item, string):
if self.dataset.tokenizer:
return self.dataset.tokenizer(item, string)
def _add_object(self, cls, **kw):
# Instantiating an object will trigger potential validators:
d = attr.asdict(cls(**kw))
t = cls.__cldf_table__()
for key in ['ID', 'Language_ID', 'Parameter_ID', 'Cognateset_ID']:
# stringify/sluggify identifiers:
if d.get(key) is not None:
d[key] = '{0}'.format(d[key])
if not ID_PATTERN.match(d[key]):
raise ValueError(
'invalid CLDF identifier {0}-{1}: {2}'.format(t, key, d[key]))
if 'ID' not in d or d['ID'] not in self._obj_index[t]:
if 'ID' in d:
self._obj_index[t].add(d['ID'])
self.objects[t].append(d)
return d
def add_cognate(self, lexeme=None, **kw):
if lexeme:
kw.setdefault('Form_ID', lexeme['ID'])
kw.setdefault('Form', lexeme['Form'])
kw.setdefault('ID', self.cognate_id(kw))
return self._add_object(self.dataset.cognate_class, **kw)
def add_language(self, **kw):
return self._add_object(self.dataset.language_class, **kw)
def add_languages(self, id_factory=lambda d: d['ID']):
"""
Add languages as specified in a dataset's etc/languages.csv
:param id_factory: A callable taking a dict describing a language as argument and returning\
a value to be used as ID for the language.
:return: The set of language IDs which have been added.
"""
ids = set()
for kw in self.dataset.languages:
if (not kw.get('Glottocode')) and kw.get('ISO639P3code'):
kw['Glottocode'] = self.dataset.glottolog.glottocode_by_iso.get(kw['ISO639P3code'])
kw['ID'] = id_factory(kw)
ids.add(kw['ID'])
self.add_language(**kw)
return ids
def add_concept(self, **kw):
if kw.get('Concepticon_ID'):
kw.setdefault(
'Concepticon_Gloss',
self.dataset.concepticon.cached_glosses[int(kw['Concepticon_ID'])])
return self._add_object(self.dataset.concept_class, **kw)
def add_concepts(self, id_factory=lambda d: d.number):
"""
Add concepts as specified in a dataset's associated Concepticon concept list or in
etc/concepts.csv
:param id_factory: A callable taking a pyconcepticon.api.Concept object as argument and \
returning a value to be used as ID for the concept.
:return: The set of concept IDs which have been added.
"""
ids, concepts = set(), []
if self.dataset.conceptlist:
concepts = self.dataset.conceptlist.concepts.values()
else:
fields = Concept.public_fields()
for i, concept in enumerate(self.dataset.concepts, start=1):
kw, attrs = {}, {}
for k, v in concept.items():
if k.lower() in fields:
kw[k.lower()] = v
else:
attrs[k.lower()] = v
if not kw.get('id'):
kw['id'] = str(i)
if not kw.get('number'):
kw['number'] = str(i)
concepts.append(Concept(attributes=attrs, **kw))
fieldnames = {f.lower(): f for f in self.dataset.concept_class.fieldnames()}
for c in concepts:
attrs = dict(
ID=id_factory(c),
Name=c.label,
Concepticon_ID=c.concepticon_id,
Concepticon_Gloss=c.concepticon_gloss)
for fl, f in fieldnames.items():
if fl in c.attributes:
attrs[f] = c.attributes[fl]
ids.add(attrs['ID'])
self.add_concept(**attrs)
return ids
def align_cognates(self,
alm=None,
cognates=None,
column='Segments',
method='library'):
from pylexibank.lingpy_util import iter_alignments
iter_alignments(
alm or self,
cognates or self.objects['CognateTable'],
column=column,
method=method)
def write(self, **kw):
self.wl.properties.update(self.dataset.metadata.common_props)
self.wl.properties['rdf:ID'] = self.dataset.id
self.wl.properties['rdf:type'] = 'http://www.w3.org/ns/dcat#Distribution'
if self.dataset.github_repo:
self.wl.properties['dcat:accessURL'] = 'https://github.com/{0}'.format(
self.dataset.github_repo)
self.wl.tablegroup.notes.append(OrderedDict([
('dc:title', 'environment'),
('properties', OrderedDict([
('glottolog_version', self.dataset.glottolog.version),
('concepticon_version', self.dataset.concepticon.version),
]))
]))
self.wl.write(**kw)
|
lexibank/pylexibank | src/pylexibank/cldf.py | Dataset.add_languages | python | def add_languages(self, id_factory=lambda d: d['ID']):
ids = set()
for kw in self.dataset.languages:
if (not kw.get('Glottocode')) and kw.get('ISO639P3code'):
kw['Glottocode'] = self.dataset.glottolog.glottocode_by_iso.get(kw['ISO639P3code'])
kw['ID'] = id_factory(kw)
ids.add(kw['ID'])
self.add_language(**kw)
return ids | Add languages as specified in a dataset's etc/languages.csv
:param id_factory: A callable taking a dict describing a language as argument and returning\
a value to be used as ID for the language.
:return: The set of language IDs which have been added. | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/cldf.py#L176-L191 | [
"def add_language(self, **kw):\n return self._add_object(self.dataset.language_class, **kw)\n",
"def add_languages(self, id_factory=lambda d: d['ID']):\n"
] | class Dataset(object):
def __init__(self, dataset):
self._count = defaultdict(int)
self._cognate_count = defaultdict(int)
self.dataset = dataset
md = self.dataset.cldf_dir / MD_NAME
if not md.exists():
md = self.dataset.cldf_dir / ALT_MD_NAME
if not md.exists():
md = self.dataset.cldf_dir / MD_NAME
copy(Path(__file__).parent / MD_NAME, md)
self.wl = Wordlist.from_metadata(md)
default_cldf = Wordlist.from_metadata(Path(__file__).parent / 'cldf-metadata.json')
self.objects = {}
self._obj_index = {}
for cls in [
self.dataset.lexeme_class,
self.dataset.language_class,
self.dataset.concept_class,
self.dataset.cognate_class,
]:
self.objects[cls.__cldf_table__()] = []
self._obj_index[cls.__cldf_table__()] = set()
cols = set(
col.header for col in self.wl[cls.__cldf_table__()].tableSchema.columns)
properties = set(
col.propertyUrl.uri for col in self.wl[cls.__cldf_table__()].tableSchema.columns
if col.propertyUrl)
for field in cls.fieldnames():
try:
col = default_cldf[cls.__cldf_table__(), field]
#
# We added Latitude and Longitude to the default metadata later, and want to
# make sure, existing datasets are upgraded silently.
#
if field in ['Latitude', 'Longitude'] \
and cls.__cldf_table__() == 'LanguageTable':
properties.add(col.propertyUrl.uri)
self.wl[cls.__cldf_table__(), field].propertyUrl = col.propertyUrl
self.wl[cls.__cldf_table__(), field].datatype = col.datatype
except KeyError:
col = Column(name=field, datatype="string")
if (col.propertyUrl and col.propertyUrl.uri not in properties) or \
((not col.propertyUrl) and (field not in cols)):
self.wl[cls.__cldf_table__()].tableSchema.columns.append(col)
def validate(self, log=None):
return self.wl.validate(log)
def __getitem__(self, type_):
return self.wl[type_]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for table in ['FormTable', 'CognateTable', 'LanguageTable', 'ParameterTable']:
self.objects.setdefault(table, [])
# We only add concepts and languages that are referenced by forms!
for fk, table in [('Parameter_ID', 'ParameterTable'), ('Language_ID', 'LanguageTable')]:
refs = set(obj[fk] for obj in self.objects['FormTable'])
self.objects[table] = [obj for obj in self.objects[table] if obj['ID'] in refs]
self.write(**self.objects)
def add_sources(self, *args):
if not args and self.dataset.raw.joinpath('sources.bib').exists():
args = self.dataset.raw.read_bib()
self.wl.sources.add(*args)
def lexeme_id(self, kw):
self._count[(kw['Language_ID'], kw['Parameter_ID'])] += 1
return '{0}-{1}-{2}'.format(
kw['Language_ID'],
kw['Parameter_ID'],
self._count[(kw['Language_ID'], kw['Parameter_ID'])])
def cognate_id(self, kw):
self._cognate_count[kw['Form_ID']] += 1
return '{0}-{1}'.format(kw['Form_ID'], self._cognate_count[kw['Form_ID']])
def tokenize(self, item, string):
if self.dataset.tokenizer:
return self.dataset.tokenizer(item, string)
def add_lexemes(self, **kw):
"""
:return: list of dicts corresponding to newly created Lexemes
"""
lexemes = []
# Do we have morpheme segmentation on top of phonemes?
with_morphemes = '+' in self['FormTable', 'Segments'].separator
for i, form in enumerate(self.dataset.split_forms(kw, kw['Value'])):
kw_ = kw.copy()
if form:
if form != kw_['Value']:
self.dataset.log.debug(
'iter_forms split: "{0}" -> "{1}"'.format(kw_['Value'], form))
if form:
kw_.setdefault('Segments', self.tokenize(kw_, form) or [])
kw_.update(ID=self.lexeme_id(kw), Form=form)
lexemes.append(self._add_object(self.dataset.lexeme_class, **kw_))
if kw_['Segments']:
analysis = self.dataset.tr_analyses.setdefault(
kw_['Language_ID'], Analysis())
try:
segments = kw_['Segments']
if with_morphemes:
segments = list(chain(*[s.split() for s in segments]))
_, _bipa, _sc, _analysis = analyze(segments, analysis)
# update the list of `bad_words` if necessary; we precompute a
# list of data types in `_bipa` just to make the conditional
# checking easier
_bipa_types = [type(s) for s in _bipa]
if pyclts.models.UnknownSound in _bipa_types or '?' in _sc:
self.dataset.tr_bad_words.append(kw_)
except ValueError: # pragma: no cover
self.dataset.tr_invalid_words.append(kw_)
except (KeyError, AttributeError): # pragma: no cover
print(kw_['Form'], kw_)
raise
return lexemes
def _add_object(self, cls, **kw):
# Instantiating an object will trigger potential validators:
d = attr.asdict(cls(**kw))
t = cls.__cldf_table__()
for key in ['ID', 'Language_ID', 'Parameter_ID', 'Cognateset_ID']:
# stringify/sluggify identifiers:
if d.get(key) is not None:
d[key] = '{0}'.format(d[key])
if not ID_PATTERN.match(d[key]):
raise ValueError(
'invalid CLDF identifier {0}-{1}: {2}'.format(t, key, d[key]))
if 'ID' not in d or d['ID'] not in self._obj_index[t]:
if 'ID' in d:
self._obj_index[t].add(d['ID'])
self.objects[t].append(d)
return d
def add_cognate(self, lexeme=None, **kw):
if lexeme:
kw.setdefault('Form_ID', lexeme['ID'])
kw.setdefault('Form', lexeme['Form'])
kw.setdefault('ID', self.cognate_id(kw))
return self._add_object(self.dataset.cognate_class, **kw)
def add_language(self, **kw):
return self._add_object(self.dataset.language_class, **kw)
def add_concept(self, **kw):
if kw.get('Concepticon_ID'):
kw.setdefault(
'Concepticon_Gloss',
self.dataset.concepticon.cached_glosses[int(kw['Concepticon_ID'])])
return self._add_object(self.dataset.concept_class, **kw)
def add_concepts(self, id_factory=lambda d: d.number):
"""
Add concepts as specified in a dataset's associated Concepticon concept list or in
etc/concepts.csv
:param id_factory: A callable taking a pyconcepticon.api.Concept object as argument and \
returning a value to be used as ID for the concept.
:return: The set of concept IDs which have been added.
"""
ids, concepts = set(), []
if self.dataset.conceptlist:
concepts = self.dataset.conceptlist.concepts.values()
else:
fields = Concept.public_fields()
for i, concept in enumerate(self.dataset.concepts, start=1):
kw, attrs = {}, {}
for k, v in concept.items():
if k.lower() in fields:
kw[k.lower()] = v
else:
attrs[k.lower()] = v
if not kw.get('id'):
kw['id'] = str(i)
if not kw.get('number'):
kw['number'] = str(i)
concepts.append(Concept(attributes=attrs, **kw))
fieldnames = {f.lower(): f for f in self.dataset.concept_class.fieldnames()}
for c in concepts:
attrs = dict(
ID=id_factory(c),
Name=c.label,
Concepticon_ID=c.concepticon_id,
Concepticon_Gloss=c.concepticon_gloss)
for fl, f in fieldnames.items():
if fl in c.attributes:
attrs[f] = c.attributes[fl]
ids.add(attrs['ID'])
self.add_concept(**attrs)
return ids
def align_cognates(self,
alm=None,
cognates=None,
column='Segments',
method='library'):
from pylexibank.lingpy_util import iter_alignments
iter_alignments(
alm or self,
cognates or self.objects['CognateTable'],
column=column,
method=method)
def write(self, **kw):
self.wl.properties.update(self.dataset.metadata.common_props)
self.wl.properties['rdf:ID'] = self.dataset.id
self.wl.properties['rdf:type'] = 'http://www.w3.org/ns/dcat#Distribution'
if self.dataset.github_repo:
self.wl.properties['dcat:accessURL'] = 'https://github.com/{0}'.format(
self.dataset.github_repo)
self.wl.tablegroup.notes.append(OrderedDict([
('dc:title', 'environment'),
('properties', OrderedDict([
('glottolog_version', self.dataset.glottolog.version),
('concepticon_version', self.dataset.concepticon.version),
]))
]))
self.wl.write(**kw)
|
lexibank/pylexibank | src/pylexibank/cldf.py | Dataset.add_concepts | python | def add_concepts(self, id_factory=lambda d: d.number):
ids, concepts = set(), []
if self.dataset.conceptlist:
concepts = self.dataset.conceptlist.concepts.values()
else:
fields = Concept.public_fields()
for i, concept in enumerate(self.dataset.concepts, start=1):
kw, attrs = {}, {}
for k, v in concept.items():
if k.lower() in fields:
kw[k.lower()] = v
else:
attrs[k.lower()] = v
if not kw.get('id'):
kw['id'] = str(i)
if not kw.get('number'):
kw['number'] = str(i)
concepts.append(Concept(attributes=attrs, **kw))
fieldnames = {f.lower(): f for f in self.dataset.concept_class.fieldnames()}
for c in concepts:
attrs = dict(
ID=id_factory(c),
Name=c.label,
Concepticon_ID=c.concepticon_id,
Concepticon_Gloss=c.concepticon_gloss)
for fl, f in fieldnames.items():
if fl in c.attributes:
attrs[f] = c.attributes[fl]
ids.add(attrs['ID'])
self.add_concept(**attrs)
return ids | Add concepts as specified in a dataset's associated Concepticon concept list or in
etc/concepts.csv
:param id_factory: A callable taking a pyconcepticon.api.Concept object as argument and \
returning a value to be used as ID for the concept.
:return: The set of concept IDs which have been added. | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/cldf.py#L200-L240 | [
"def add_concepts(self, id_factory=lambda d: d.number):\n",
"def add_concept(self, **kw):\n if kw.get('Concepticon_ID'):\n kw.setdefault(\n 'Concepticon_Gloss',\n self.dataset.concepticon.cached_glosses[int(kw['Concepticon_ID'])])\n return self._add_object(self.dataset.concept_class, **kw)\n"
] | class Dataset(object):
def __init__(self, dataset):
self._count = defaultdict(int)
self._cognate_count = defaultdict(int)
self.dataset = dataset
md = self.dataset.cldf_dir / MD_NAME
if not md.exists():
md = self.dataset.cldf_dir / ALT_MD_NAME
if not md.exists():
md = self.dataset.cldf_dir / MD_NAME
copy(Path(__file__).parent / MD_NAME, md)
self.wl = Wordlist.from_metadata(md)
default_cldf = Wordlist.from_metadata(Path(__file__).parent / 'cldf-metadata.json')
self.objects = {}
self._obj_index = {}
for cls in [
self.dataset.lexeme_class,
self.dataset.language_class,
self.dataset.concept_class,
self.dataset.cognate_class,
]:
self.objects[cls.__cldf_table__()] = []
self._obj_index[cls.__cldf_table__()] = set()
cols = set(
col.header for col in self.wl[cls.__cldf_table__()].tableSchema.columns)
properties = set(
col.propertyUrl.uri for col in self.wl[cls.__cldf_table__()].tableSchema.columns
if col.propertyUrl)
for field in cls.fieldnames():
try:
col = default_cldf[cls.__cldf_table__(), field]
#
# We added Latitude and Longitude to the default metadata later, and want to
# make sure, existing datasets are upgraded silently.
#
if field in ['Latitude', 'Longitude'] \
and cls.__cldf_table__() == 'LanguageTable':
properties.add(col.propertyUrl.uri)
self.wl[cls.__cldf_table__(), field].propertyUrl = col.propertyUrl
self.wl[cls.__cldf_table__(), field].datatype = col.datatype
except KeyError:
col = Column(name=field, datatype="string")
if (col.propertyUrl and col.propertyUrl.uri not in properties) or \
((not col.propertyUrl) and (field not in cols)):
self.wl[cls.__cldf_table__()].tableSchema.columns.append(col)
def validate(self, log=None):
return self.wl.validate(log)
def __getitem__(self, type_):
return self.wl[type_]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for table in ['FormTable', 'CognateTable', 'LanguageTable', 'ParameterTable']:
self.objects.setdefault(table, [])
# We only add concepts and languages that are referenced by forms!
for fk, table in [('Parameter_ID', 'ParameterTable'), ('Language_ID', 'LanguageTable')]:
refs = set(obj[fk] for obj in self.objects['FormTable'])
self.objects[table] = [obj for obj in self.objects[table] if obj['ID'] in refs]
self.write(**self.objects)
def add_sources(self, *args):
if not args and self.dataset.raw.joinpath('sources.bib').exists():
args = self.dataset.raw.read_bib()
self.wl.sources.add(*args)
def lexeme_id(self, kw):
self._count[(kw['Language_ID'], kw['Parameter_ID'])] += 1
return '{0}-{1}-{2}'.format(
kw['Language_ID'],
kw['Parameter_ID'],
self._count[(kw['Language_ID'], kw['Parameter_ID'])])
def cognate_id(self, kw):
self._cognate_count[kw['Form_ID']] += 1
return '{0}-{1}'.format(kw['Form_ID'], self._cognate_count[kw['Form_ID']])
def tokenize(self, item, string):
if self.dataset.tokenizer:
return self.dataset.tokenizer(item, string)
def add_lexemes(self, **kw):
"""
:return: list of dicts corresponding to newly created Lexemes
"""
lexemes = []
# Do we have morpheme segmentation on top of phonemes?
with_morphemes = '+' in self['FormTable', 'Segments'].separator
for i, form in enumerate(self.dataset.split_forms(kw, kw['Value'])):
kw_ = kw.copy()
if form:
if form != kw_['Value']:
self.dataset.log.debug(
'iter_forms split: "{0}" -> "{1}"'.format(kw_['Value'], form))
if form:
kw_.setdefault('Segments', self.tokenize(kw_, form) or [])
kw_.update(ID=self.lexeme_id(kw), Form=form)
lexemes.append(self._add_object(self.dataset.lexeme_class, **kw_))
if kw_['Segments']:
analysis = self.dataset.tr_analyses.setdefault(
kw_['Language_ID'], Analysis())
try:
segments = kw_['Segments']
if with_morphemes:
segments = list(chain(*[s.split() for s in segments]))
_, _bipa, _sc, _analysis = analyze(segments, analysis)
# update the list of `bad_words` if necessary; we precompute a
# list of data types in `_bipa` just to make the conditional
# checking easier
_bipa_types = [type(s) for s in _bipa]
if pyclts.models.UnknownSound in _bipa_types or '?' in _sc:
self.dataset.tr_bad_words.append(kw_)
except ValueError: # pragma: no cover
self.dataset.tr_invalid_words.append(kw_)
except (KeyError, AttributeError): # pragma: no cover
print(kw_['Form'], kw_)
raise
return lexemes
def _add_object(self, cls, **kw):
# Instantiating an object will trigger potential validators:
d = attr.asdict(cls(**kw))
t = cls.__cldf_table__()
for key in ['ID', 'Language_ID', 'Parameter_ID', 'Cognateset_ID']:
# stringify/sluggify identifiers:
if d.get(key) is not None:
d[key] = '{0}'.format(d[key])
if not ID_PATTERN.match(d[key]):
raise ValueError(
'invalid CLDF identifier {0}-{1}: {2}'.format(t, key, d[key]))
if 'ID' not in d or d['ID'] not in self._obj_index[t]:
if 'ID' in d:
self._obj_index[t].add(d['ID'])
self.objects[t].append(d)
return d
def add_cognate(self, lexeme=None, **kw):
if lexeme:
kw.setdefault('Form_ID', lexeme['ID'])
kw.setdefault('Form', lexeme['Form'])
kw.setdefault('ID', self.cognate_id(kw))
return self._add_object(self.dataset.cognate_class, **kw)
def add_language(self, **kw):
return self._add_object(self.dataset.language_class, **kw)
def add_languages(self, id_factory=lambda d: d['ID']):
"""
Add languages as specified in a dataset's etc/languages.csv
:param id_factory: A callable taking a dict describing a language as argument and returning\
a value to be used as ID for the language.
:return: The set of language IDs which have been added.
"""
ids = set()
for kw in self.dataset.languages:
if (not kw.get('Glottocode')) and kw.get('ISO639P3code'):
kw['Glottocode'] = self.dataset.glottolog.glottocode_by_iso.get(kw['ISO639P3code'])
kw['ID'] = id_factory(kw)
ids.add(kw['ID'])
self.add_language(**kw)
return ids
def add_concept(self, **kw):
if kw.get('Concepticon_ID'):
kw.setdefault(
'Concepticon_Gloss',
self.dataset.concepticon.cached_glosses[int(kw['Concepticon_ID'])])
return self._add_object(self.dataset.concept_class, **kw)
def align_cognates(self,
alm=None,
cognates=None,
column='Segments',
method='library'):
from pylexibank.lingpy_util import iter_alignments
iter_alignments(
alm or self,
cognates or self.objects['CognateTable'],
column=column,
method=method)
def write(self, **kw):
self.wl.properties.update(self.dataset.metadata.common_props)
self.wl.properties['rdf:ID'] = self.dataset.id
self.wl.properties['rdf:type'] = 'http://www.w3.org/ns/dcat#Distribution'
if self.dataset.github_repo:
self.wl.properties['dcat:accessURL'] = 'https://github.com/{0}'.format(
self.dataset.github_repo)
self.wl.tablegroup.notes.append(OrderedDict([
('dc:title', 'environment'),
('properties', OrderedDict([
('glottolog_version', self.dataset.glottolog.version),
('concepticon_version', self.dataset.concepticon.version),
]))
]))
self.wl.write(**kw)
|
lexibank/pylexibank | src/pylexibank/db.py | schema | python | def schema(ds):
tables, ref_tables = {}, {}
table_lookup = {t.url.string: t for t in ds.tables if ds.get_tabletype(t)}
for table in table_lookup.values():
spec = TableSpec(ds.get_tabletype(table))
spec.primary_key = [
c for c in table.tableSchema.columns if
c.propertyUrl and c.propertyUrl.uri == term_uri('id')][0].name
# Map the column name to the default:
if spec.name in PROPERTY_URL_TO_COL:
spec.primary_key = PROPERTY_URL_TO_COL[spec.name][term_uri('id')]
for c in table.tableSchema.columns:
if c.propertyUrl and c.propertyUrl.uri == term_uri('source'):
# A column referencing sources is replaced by an association table.
otype = ds.get_tabletype(table).replace('Table', '')
ref_tables[ds.get_tabletype(table)] = TableSpec(
'{0}Source'.format(otype), # The name of the association table.
[ColSpec(otype + '_ID'), ColSpec('Source_ID'), ColSpec('Context')],
[
( # The foreign key to the referencing object:
['dataset_ID', otype + '_ID'],
ds.get_tabletype(table),
['dataset_ID', spec.primary_key]),
( # The foreign key to the referenced source:
['dataset_ID', 'Source_ID'],
'SourceTable',
['dataset_ID', 'ID']),
],
c.name)
else:
cname = c.header
if c.propertyUrl and spec.name in PROPERTY_URL_TO_COL:
if c.propertyUrl.uri in PROPERTY_URL_TO_COL[spec.name]:
cname = PROPERTY_URL_TO_COL[spec.name][c.propertyUrl.uri]
spec.columns.append(ColSpec(
cname,
c.datatype.base if c.datatype else c.datatype,
c.separator,
cname == spec.primary_key,
cldf_name=c.header))
for fk in table.tableSchema.foreignKeys:
if fk.reference.schemaReference:
# We only support Foreign Key references between tables!
continue # pragma: no cover
ref = table_lookup[fk.reference.resource.string]
ref_type = ds.get_tabletype(ref)
if ref_type:
colRefs = sorted(fk.columnReference)
if spec.name in PROPERTY_URL_TO_COL:
# Must map foreign keys
colRefs = []
for c in sorted(fk.columnReference):
c = ds[spec.name, c]
if c.propertyUrl and c.propertyUrl.uri in PROPERTY_URL_TO_COL[spec.name]:
colRefs.append(PROPERTY_URL_TO_COL[spec.name][c.propertyUrl.uri])
else:
colRefs.append(c.header)
rcolRefs = sorted(fk.reference.columnReference)
if ref_type in PROPERTY_URL_TO_COL:
# Must map foreign key targets!
rcolRefs = []
for c in sorted(fk.reference.columnReference):
c = ds[ref_type, c]
if c.propertyUrl and c.propertyUrl.uri in PROPERTY_URL_TO_COL[ref_type]:
rcolRefs.append(PROPERTY_URL_TO_COL[ref_type][c.propertyUrl.uri])
else:
rcolRefs.append(c.header)
spec.foreign_keys.append((
tuple(['dataset_ID'] + colRefs),
ds.get_tabletype(table_lookup[fk.reference.resource.string]),
tuple(['dataset_ID'] + rcolRefs)))
tables[spec.name] = spec
# must determine the order in which tables must be created!
ordered = OrderedDict()
i = 0
#
# We loop through the tables repeatedly, and whenever we find one, which has all
# referenced tables already in ordered, we move it from tables to ordered.
#
while tables and i < 100:
i += 1
for table in list(tables.keys()):
if all(ref[1] in ordered for ref in tables[table].foreign_keys):
# All referenced tables are already created.
ordered[table] = tables.pop(table)
break
if tables: # pragma: no cover
raise ValueError('there seem to be cyclic dependencies between the tables')
return list(ordered.values()), ref_tables | Convert the table and column descriptions of a `Dataset` into specifications for the
DB schema.
:param ds:
:return: A pair (tables, reference_tables). | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/db.py#L141-L238 | null | """Functionality to load a set of CLDF datasets into a sqlite db.
Notes:
- Only CLDF components will be loaded into the db.
- The names of the columns in the database are the names from the CSV files, not the
preferred labels for the corresponding CLDF properties.
"""
from collections import OrderedDict, defaultdict
import sqlite3
from contextlib import closing
from json import dumps
import attr
from csvw.datatypes import DATATYPES
from clldutils.path import Path, remove
from clldutils.misc import nfilter
from clldutils.jsonlib import load
from pycldf.terms import term_uri
from pycldf.sources import Sources
from pylexibank.util import git_hash
def identity(s):
return s
TYPE_MAP = {
'string': ('TEXT', identity),
'integer': ('INTEGER', identity),
'boolean': ('INTEGER', lambda s: s if s is None else int(s)),
'decimal': ('REAL', lambda s: s if s is None else float(s)),
}
BIBTEX_FIELDS = [
'address', # Publisher's address
'annote', # An annotation for annotated bibliography styles (not typical)
'author', # The name(s) of the author(s) (separated by and)
'booktitle', # The title of the book, if only part of it is being cited
'chapter', # The chapter number
'crossref', # The key of the cross-referenced entry
'edition', # The edition of a book, long form (such as "First" or "Second")
'editor', # The name(s) of the editor(s)
'eprint', # A specification of electronic publication, preprint or technical report
'howpublished', # How it was published, if the publishing method is nonstandard
'institution', # institution involved in the publishing,not necessarily the publisher
'journal', # The journal or magazine the work was published in
'key', # A hidden field used for specifying or overriding the orderalphabetical order
'month', # The month of publication (or, if unpublished, the month of creation)
'note', # Miscellaneous extra information
'number', # The "(issue) number" of a journal, magazine, or tech-report
'organization', # The conference sponsor
'pages', # Page numbers, separated either by commas or double-hyphens.
'publisher', # The publisher's name
'school', # The school where the thesis was written
'series', # The series of books the book was published in
'title', # The title of the work
'type', # The field overriding the default type of publication
'url', # The WWW address
'volume', # The volume of a journal or multi-volume book
'year',
]
PROPERTY_URL_TO_COL = defaultdict(dict)
for table in load(Path(__file__).parent / 'cldf-metadata.json')['tables']:
for col in table['tableSchema']['columns']:
if col.get('propertyUrl'):
PROPERTY_URL_TO_COL[table['dc:conformsTo'].split('#')[1]][col['propertyUrl']] = \
col['name']
def insert(db, table, keys, *rows, **kw):
if rows:
if isinstance(keys, str):
keys = [k.strip() for k in keys.split(',')]
sql = "INSERT INTO {0} ({1}) VALUES ({2})".format(
table, ','.join(keys), ','.join(['?' for _ in keys]))
if kw.get('verbose'): # pragma: no cover
print(sql)
print(rows)
db.executemany(sql, rows)
def quoted(*names):
return ','.join('`{0}`'.format(name) for name in names)
@attr.s
class ColSpec(object):
"""
A `ColSpec` captures sufficient information about a `Column` for the DB schema.
"""
name = attr.ib()
csvw_type = attr.ib(default='string', converter=lambda s: s if s else 'string')
separator = attr.ib(default=None)
primary_key = attr.ib(default=None)
db_type = attr.ib(default=None)
convert = attr.ib(default=None)
cldf_name = attr.ib(default=None)
def __attrs_post_init__(self):
if self.csvw_type in TYPE_MAP:
self.db_type, self.convert = TYPE_MAP[self.csvw_type]
else:
self.db_type = 'TEXT'
self.convert = DATATYPES[self.csvw_type].to_string
if not self.cldf_name:
self.cldf_name = self.name
@property
def sql(self):
return '`{0.name}` {0.db_type}'.format(self)
@attr.s
class TableSpec(object):
"""
A `TableSpec` captures sufficient information about a `Table` for the DB schema.
"""
name = attr.ib()
columns = attr.ib(default=attr.Factory(list))
foreign_keys = attr.ib(default=attr.Factory(list))
consumes = attr.ib(default=None)
primary_key = attr.ib(default=None)
@property
def sql(self):
clauses = [col.sql for col in self.columns]
clauses.append('`dataset_ID` TEXT NOT NULL')
if self.primary_key:
clauses.append('PRIMARY KEY(`dataset_ID`, `{0}`)'.format(self.primary_key))
clauses.append('FOREIGN KEY(`dataset_ID`) REFERENCES dataset(`ID`) ON DELETE CASCADE')
for fk, ref, refcols in self.foreign_keys:
clauses.append('FOREIGN KEY({0}) REFERENCES {1}({2}) ON DELETE CASCADE'.format(
quoted(*fk), ref, quoted(*refcols)))
return "CREATE TABLE {0} (\n {1}\n)".format(self.name, ',\n '.join(clauses))
class Database(object):
def __init__(self, fname):
"""
A `Database` instance is initialized with a file path.
:param fname: Path to a file in the file system where the db is to be stored.
"""
self.fname = Path(fname)
def drop(self):
if self.fname.exists():
remove(self.fname)
def connection(self):
return closing(sqlite3.connect(self.fname.as_posix()))
def create(self, force=False, exists_ok=False):
"""
Creates a db file with the core schema.
:param force: If `True` an existing db file will be overwritten.
"""
if self.fname and self.fname.exists():
if force:
self.drop()
elif exists_ok:
return
else:
raise ValueError('db file already exists, use force=True to overwrite')
with self.connection() as db:
db.execute(
"""\
CREATE TABLE dataset (
ID TEXT PRIMARY KEY NOT NULL,
name TEXT,
version TEXT,
metadata_json TEXT
)""")
db.execute("""\
CREATE TABLE datasetmeta (
dataset_ID TEXT ,
key TEXT,
value TEXT,
PRIMARY KEY (dataset_ID, key),
FOREIGN KEY(dataset_ID) REFERENCES dataset(ID)
)""")
db.execute("""\
CREATE TABLE SourceTable (
dataset_ID TEXT ,
ID TEXT ,
bibtex_type TEXT,
{0}
extra TEXT,
PRIMARY KEY (dataset_ID, ID),
FOREIGN KEY(dataset_ID) REFERENCES dataset(ID)
)""".format('\n '.join('`{0}` TEXT,'.format(f) for f in BIBTEX_FIELDS)))
def fetchone(self, sql, params=None, conn=None, verbose=False):
return self._fetch(sql, 'fetchone', params, conn, verbose=verbose)
def fetchall(self, sql, params=None, conn=None, verbose=False):
return self._fetch(sql, 'fetchall', params, conn, verbose=verbose)
def _fetch(self, sql, method, params, conn, verbose=False):
sql = self.sql.get(sql, sql)
def _do(conn, sql, method):
cu = conn.cursor()
if verbose:
print(sql)
cu.execute(sql, params or ())
return getattr(cu, method)()
if not conn:
with self.connection() as conn:
return _do(conn, sql, method)
else:
return _do(conn, sql, method)
@property
def tables(self):
res = {r[0]: {} for r in self.fetchall(
"SELECT name FROM sqlite_master WHERE type='table'")}
for t in res:
res[t] = {r[1]: r[2] for r in self.fetchall(
"PRAGMA table_info({0})".format(t))}
return res
def unload(self, dataset_id):
dataset_id = getattr(dataset_id, 'id', dataset_id)
with self.connection() as db:
for table in self.tables:
if table != 'dataset':
db.execute(
"DELETE FROM {0} WHERE dataset_ID = ?".format(table),
(dataset_id,))
db.execute("DELETE FROM dataset WHERE ID = ?", (dataset_id,))
db.commit()
def _create_table_if_not_exists(self, table):
if table.name in self.tables:
return False
with self.connection() as conn:
conn.execute(table.sql)
return True
def load(self, ds, verbose=False):
"""
Load a CLDF dataset into the database.
:param dataset:
:return:
"""
try:
self.fetchone('select ID from dataset')
except sqlite3.OperationalError:
self.create(force=True)
self.unload(ds)
dataset = ds.cldf.wl
tables, ref_tables = schema(dataset)
# update the DB schema:
for t in tables:
if self._create_table_if_not_exists(t):
continue
db_cols = self.tables[t.name]
for col in t.columns:
if col.name not in db_cols:
with self.connection() as conn:
conn.execute(
"ALTER TABLE {0} ADD COLUMN `{1.name}` {1.db_type}".format(
t.name, col))
else:
if db_cols[col.name] != col.db_type:
raise ValueError(
'column {0}:{1} {2} redefined with new type {3}'.format(
t.name, col.name, db_cols[col.name], col.db_type))
for t in ref_tables.values():
self._create_table_if_not_exists(t)
self.update_schema()
# then load the data:
with self.connection() as db:
db.execute('PRAGMA foreign_keys = ON;')
insert(
db,
'dataset',
'ID,name,version,metadata_json',
(
ds.id,
'{0}'.format(dataset),
git_hash(ds.dir),
dumps(dataset.metadata_dict)))
insert(
db,
'datasetmeta',
'dataset_ID,key,value',
*[(ds.id, k, '{0}'.format(v)) for k, v in dataset.properties.items()])
# load sources:
rows = []
for src in dataset.sources.items():
values = [ds.id, src.id, src.genre] + [src.get(k) for k in BIBTEX_FIELDS]
values.append(
dumps({k: v for k, v in src.items() if k not in BIBTEX_FIELDS}))
rows.append(tuple(values))
insert(
db,
'SourceTable',
['dataset_ID', 'ID', 'bibtex_type'] + BIBTEX_FIELDS + ['extra'],
*rows)
# For regular tables, we extract and keep references to sources.
refs = defaultdict(list)
for t in tables:
# We want to lookup columns by the name used in the CLDF dataset.
cols = {col.cldf_name: col for col in t.columns}
# But we also want to look up primary keys by the database column name.
cols_by_name = {col.name: col for col in t.columns}
ref_table = ref_tables.get(t.name)
rows, keys = [], []
try:
for row in dataset[t.name]:
keys, values = ['dataset_ID'], [ds.id]
for k, v in row.items():
if ref_table and k == ref_table.consumes:
col = cols_by_name[t.primary_key]
refs[ref_table.name].append((row[col.cldf_name], v))
else:
col = cols[k]
if isinstance(v, list):
v = (col.separator or ';').join(
nfilter(col.convert(vv) for vv in v))
else:
v = col.convert(v)
keys.append("`{0}`".format(col.name))
values.append(v)
keys, values = self.update_row(t.name, keys, values)
rows.append(tuple(values))
insert(db, t.name, keys, *rows, **{'verbose': verbose})
except FileNotFoundError:
if t.name != 'CognateTable': # An empty CognateTable is allowed.
raise # pragma: no cover
# Now insert the references, i.e. the associations with sources:
for tname, items in refs.items():
rows = []
for oid, sources in items:
for source in sources:
sid, context = Sources.parse(source)
rows.append([ds.id, oid, sid, context])
oid_col = '{0}_ID'.format(tname.replace('Source', ''))
insert(db, tname, ['dataset_ID', oid_col, 'Source_ID', 'Context'], *rows)
db.commit()
def update_schema(self):
for tname, cname, type_ in [
('ParameterTable', 'Ontological_Category', 'TEXT'),
('ParameterTable', 'Semantic_Field', 'TEXT'),
('LanguageTable', 'Latitude', 'REAL'),
('LanguageTable', 'Longitude', 'REAL'),
]:
if cname not in self.tables[tname]:
with self.connection() as conn:
conn.execute("ALTER TABLE {0} ADD COLUMN `{1}` {2}".format(
tname, cname, type_))
def update_row(self, table, keys, values):
return keys, values
def load_concepticon_data(self, concepticon):
conceptsets = []
for csid in self.fetchall("SELECT distinct concepticon_id FROM parametertable"):
cs = concepticon.conceptsets.get(csid[0])
if cs:
conceptsets.append((
cs.gloss, cs.ontological_category, cs.semanticfield, cs.id))
with self.connection() as db:
db.executemany(
"""\
UPDATE parametertable
SET concepticon_gloss = ?, ontological_category = ?, semantic_field = ?
WHERE concepticon_id = ?""",
conceptsets)
db.commit()
def load_glottolog_data(self, glottolog):
langs = []
languoids = {l.id: l for l in glottolog.languoids()}
for gc in self.fetchall("SELECT distinct glottocode FROM languagetable"):
lang = languoids.get(gc[0])
if lang:
langs.append((
lang.lineage[0][0] if lang.lineage else lang.name,
lang.macroareas[0].value if lang.macroareas else None,
lang.latitude,
lang.longitude,
lang.id))
with self.connection() as db:
db.executemany(
"""\
UPDATE languagetable
SET family = ?, macroarea = ?, latitude = ?, longitude = ?
WHERE glottocode = ?""",
langs)
db.commit()
sql = {
"conceptsets_by_dataset":
"SELECT ds.id, count(distinct p.concepticon_id) "
"FROM dataset as ds, parametertable as p "
"WHERE ds.id = p.dataset_id GROUP BY ds.id",
"families_by_dataset":
"SELECT ds.id, count(distinct l.family) "
"FROM dataset as ds, languagetable as l "
"WHERE ds.id = l.dataset_id GROUP BY ds.id",
"macroareas_by_dataset":
"SELECT ds.id, group_concat(distinct l.macroarea) "
"FROM dataset as ds, languagetable as l "
"WHERE ds.id = l.dataset_id GROUP BY ds.id",
"glottocodes_by_dataset":
"SELECT ds.id, count(distinct l.glottocode) "
"FROM dataset as ds, languagetable as l "
"WHERE ds.id = l.dataset_id GROUP BY ds.id",
"mapped_lexemes_by_dataset":
"SELECT ds.id, count(distinct f.ID) "
"FROM dataset as ds, formtable as f, languagetable as l, parametertable as p "
"WHERE ds.id = f.dataset_id and f.Language_ID = l.ID and "
"f.Parameter_ID = p.ID and l.glottocode is not null and "
"p.concepticon_id is not null "
"GROUP BY ds.id",
"lexemes_by_dataset":
"SELECT ds.id, count(f.ID) FROM dataset as ds, formtable as f "
"WHERE ds.id = f.dataset_id GROUP BY ds.id",
}
|
lexibank/pylexibank | src/pylexibank/db.py | Database.create | python | def create(self, force=False, exists_ok=False):
if self.fname and self.fname.exists():
if force:
self.drop()
elif exists_ok:
return
else:
raise ValueError('db file already exists, use force=True to overwrite')
with self.connection() as db:
db.execute(
"""\
CREATE TABLE dataset (
ID TEXT PRIMARY KEY NOT NULL,
name TEXT,
version TEXT,
metadata_json TEXT
)""")
db.execute("""\
CREATE TABLE datasetmeta (
dataset_ID TEXT ,
key TEXT,
value TEXT,
PRIMARY KEY (dataset_ID, key),
FOREIGN KEY(dataset_ID) REFERENCES dataset(ID)
)""")
db.execute("""\
CREATE TABLE SourceTable (
dataset_ID TEXT ,
ID TEXT ,
bibtex_type TEXT,
{0}
extra TEXT,
PRIMARY KEY (dataset_ID, ID),
FOREIGN KEY(dataset_ID) REFERENCES dataset(ID)
)""".format('\n '.join('`{0}` TEXT,'.format(f) for f in BIBTEX_FIELDS))) | Creates a db file with the core schema.
:param force: If `True` an existing db file will be overwritten. | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/db.py#L257-L296 | [
"def connection(self):\n return closing(sqlite3.connect(self.fname.as_posix()))\n"
] | class Database(object):
def __init__(self, fname):
"""
A `Database` instance is initialized with a file path.
:param fname: Path to a file in the file system where the db is to be stored.
"""
self.fname = Path(fname)
def drop(self):
if self.fname.exists():
remove(self.fname)
def connection(self):
return closing(sqlite3.connect(self.fname.as_posix()))
def fetchone(self, sql, params=None, conn=None, verbose=False):
return self._fetch(sql, 'fetchone', params, conn, verbose=verbose)
def fetchall(self, sql, params=None, conn=None, verbose=False):
return self._fetch(sql, 'fetchall', params, conn, verbose=verbose)
def _fetch(self, sql, method, params, conn, verbose=False):
sql = self.sql.get(sql, sql)
def _do(conn, sql, method):
cu = conn.cursor()
if verbose:
print(sql)
cu.execute(sql, params or ())
return getattr(cu, method)()
if not conn:
with self.connection() as conn:
return _do(conn, sql, method)
else:
return _do(conn, sql, method)
@property
def tables(self):
res = {r[0]: {} for r in self.fetchall(
"SELECT name FROM sqlite_master WHERE type='table'")}
for t in res:
res[t] = {r[1]: r[2] for r in self.fetchall(
"PRAGMA table_info({0})".format(t))}
return res
def unload(self, dataset_id):
dataset_id = getattr(dataset_id, 'id', dataset_id)
with self.connection() as db:
for table in self.tables:
if table != 'dataset':
db.execute(
"DELETE FROM {0} WHERE dataset_ID = ?".format(table),
(dataset_id,))
db.execute("DELETE FROM dataset WHERE ID = ?", (dataset_id,))
db.commit()
def _create_table_if_not_exists(self, table):
if table.name in self.tables:
return False
with self.connection() as conn:
conn.execute(table.sql)
return True
def load(self, ds, verbose=False):
"""
Load a CLDF dataset into the database.
:param dataset:
:return:
"""
try:
self.fetchone('select ID from dataset')
except sqlite3.OperationalError:
self.create(force=True)
self.unload(ds)
dataset = ds.cldf.wl
tables, ref_tables = schema(dataset)
# update the DB schema:
for t in tables:
if self._create_table_if_not_exists(t):
continue
db_cols = self.tables[t.name]
for col in t.columns:
if col.name not in db_cols:
with self.connection() as conn:
conn.execute(
"ALTER TABLE {0} ADD COLUMN `{1.name}` {1.db_type}".format(
t.name, col))
else:
if db_cols[col.name] != col.db_type:
raise ValueError(
'column {0}:{1} {2} redefined with new type {3}'.format(
t.name, col.name, db_cols[col.name], col.db_type))
for t in ref_tables.values():
self._create_table_if_not_exists(t)
self.update_schema()
# then load the data:
with self.connection() as db:
db.execute('PRAGMA foreign_keys = ON;')
insert(
db,
'dataset',
'ID,name,version,metadata_json',
(
ds.id,
'{0}'.format(dataset),
git_hash(ds.dir),
dumps(dataset.metadata_dict)))
insert(
db,
'datasetmeta',
'dataset_ID,key,value',
*[(ds.id, k, '{0}'.format(v)) for k, v in dataset.properties.items()])
# load sources:
rows = []
for src in dataset.sources.items():
values = [ds.id, src.id, src.genre] + [src.get(k) for k in BIBTEX_FIELDS]
values.append(
dumps({k: v for k, v in src.items() if k not in BIBTEX_FIELDS}))
rows.append(tuple(values))
insert(
db,
'SourceTable',
['dataset_ID', 'ID', 'bibtex_type'] + BIBTEX_FIELDS + ['extra'],
*rows)
# For regular tables, we extract and keep references to sources.
refs = defaultdict(list)
for t in tables:
# We want to lookup columns by the name used in the CLDF dataset.
cols = {col.cldf_name: col for col in t.columns}
# But we also want to look up primary keys by the database column name.
cols_by_name = {col.name: col for col in t.columns}
ref_table = ref_tables.get(t.name)
rows, keys = [], []
try:
for row in dataset[t.name]:
keys, values = ['dataset_ID'], [ds.id]
for k, v in row.items():
if ref_table and k == ref_table.consumes:
col = cols_by_name[t.primary_key]
refs[ref_table.name].append((row[col.cldf_name], v))
else:
col = cols[k]
if isinstance(v, list):
v = (col.separator or ';').join(
nfilter(col.convert(vv) for vv in v))
else:
v = col.convert(v)
keys.append("`{0}`".format(col.name))
values.append(v)
keys, values = self.update_row(t.name, keys, values)
rows.append(tuple(values))
insert(db, t.name, keys, *rows, **{'verbose': verbose})
except FileNotFoundError:
if t.name != 'CognateTable': # An empty CognateTable is allowed.
raise # pragma: no cover
# Now insert the references, i.e. the associations with sources:
for tname, items in refs.items():
rows = []
for oid, sources in items:
for source in sources:
sid, context = Sources.parse(source)
rows.append([ds.id, oid, sid, context])
oid_col = '{0}_ID'.format(tname.replace('Source', ''))
insert(db, tname, ['dataset_ID', oid_col, 'Source_ID', 'Context'], *rows)
db.commit()
def update_schema(self):
for tname, cname, type_ in [
('ParameterTable', 'Ontological_Category', 'TEXT'),
('ParameterTable', 'Semantic_Field', 'TEXT'),
('LanguageTable', 'Latitude', 'REAL'),
('LanguageTable', 'Longitude', 'REAL'),
]:
if cname not in self.tables[tname]:
with self.connection() as conn:
conn.execute("ALTER TABLE {0} ADD COLUMN `{1}` {2}".format(
tname, cname, type_))
def update_row(self, table, keys, values):
return keys, values
def load_concepticon_data(self, concepticon):
conceptsets = []
for csid in self.fetchall("SELECT distinct concepticon_id FROM parametertable"):
cs = concepticon.conceptsets.get(csid[0])
if cs:
conceptsets.append((
cs.gloss, cs.ontological_category, cs.semanticfield, cs.id))
with self.connection() as db:
db.executemany(
"""\
UPDATE parametertable
SET concepticon_gloss = ?, ontological_category = ?, semantic_field = ?
WHERE concepticon_id = ?""",
conceptsets)
db.commit()
def load_glottolog_data(self, glottolog):
langs = []
languoids = {l.id: l for l in glottolog.languoids()}
for gc in self.fetchall("SELECT distinct glottocode FROM languagetable"):
lang = languoids.get(gc[0])
if lang:
langs.append((
lang.lineage[0][0] if lang.lineage else lang.name,
lang.macroareas[0].value if lang.macroareas else None,
lang.latitude,
lang.longitude,
lang.id))
with self.connection() as db:
db.executemany(
"""\
UPDATE languagetable
SET family = ?, macroarea = ?, latitude = ?, longitude = ?
WHERE glottocode = ?""",
langs)
db.commit()
sql = {
"conceptsets_by_dataset":
"SELECT ds.id, count(distinct p.concepticon_id) "
"FROM dataset as ds, parametertable as p "
"WHERE ds.id = p.dataset_id GROUP BY ds.id",
"families_by_dataset":
"SELECT ds.id, count(distinct l.family) "
"FROM dataset as ds, languagetable as l "
"WHERE ds.id = l.dataset_id GROUP BY ds.id",
"macroareas_by_dataset":
"SELECT ds.id, group_concat(distinct l.macroarea) "
"FROM dataset as ds, languagetable as l "
"WHERE ds.id = l.dataset_id GROUP BY ds.id",
"glottocodes_by_dataset":
"SELECT ds.id, count(distinct l.glottocode) "
"FROM dataset as ds, languagetable as l "
"WHERE ds.id = l.dataset_id GROUP BY ds.id",
"mapped_lexemes_by_dataset":
"SELECT ds.id, count(distinct f.ID) "
"FROM dataset as ds, formtable as f, languagetable as l, parametertable as p "
"WHERE ds.id = f.dataset_id and f.Language_ID = l.ID and "
"f.Parameter_ID = p.ID and l.glottocode is not null and "
"p.concepticon_id is not null "
"GROUP BY ds.id",
"lexemes_by_dataset":
"SELECT ds.id, count(f.ID) FROM dataset as ds, formtable as f "
"WHERE ds.id = f.dataset_id GROUP BY ds.id",
}
|
lexibank/pylexibank | src/pylexibank/db.py | Database.load | python | def load(self, ds, verbose=False):
try:
self.fetchone('select ID from dataset')
except sqlite3.OperationalError:
self.create(force=True)
self.unload(ds)
dataset = ds.cldf.wl
tables, ref_tables = schema(dataset)
# update the DB schema:
for t in tables:
if self._create_table_if_not_exists(t):
continue
db_cols = self.tables[t.name]
for col in t.columns:
if col.name not in db_cols:
with self.connection() as conn:
conn.execute(
"ALTER TABLE {0} ADD COLUMN `{1.name}` {1.db_type}".format(
t.name, col))
else:
if db_cols[col.name] != col.db_type:
raise ValueError(
'column {0}:{1} {2} redefined with new type {3}'.format(
t.name, col.name, db_cols[col.name], col.db_type))
for t in ref_tables.values():
self._create_table_if_not_exists(t)
self.update_schema()
# then load the data:
with self.connection() as db:
db.execute('PRAGMA foreign_keys = ON;')
insert(
db,
'dataset',
'ID,name,version,metadata_json',
(
ds.id,
'{0}'.format(dataset),
git_hash(ds.dir),
dumps(dataset.metadata_dict)))
insert(
db,
'datasetmeta',
'dataset_ID,key,value',
*[(ds.id, k, '{0}'.format(v)) for k, v in dataset.properties.items()])
# load sources:
rows = []
for src in dataset.sources.items():
values = [ds.id, src.id, src.genre] + [src.get(k) for k in BIBTEX_FIELDS]
values.append(
dumps({k: v for k, v in src.items() if k not in BIBTEX_FIELDS}))
rows.append(tuple(values))
insert(
db,
'SourceTable',
['dataset_ID', 'ID', 'bibtex_type'] + BIBTEX_FIELDS + ['extra'],
*rows)
# For regular tables, we extract and keep references to sources.
refs = defaultdict(list)
for t in tables:
# We want to lookup columns by the name used in the CLDF dataset.
cols = {col.cldf_name: col for col in t.columns}
# But we also want to look up primary keys by the database column name.
cols_by_name = {col.name: col for col in t.columns}
ref_table = ref_tables.get(t.name)
rows, keys = [], []
try:
for row in dataset[t.name]:
keys, values = ['dataset_ID'], [ds.id]
for k, v in row.items():
if ref_table and k == ref_table.consumes:
col = cols_by_name[t.primary_key]
refs[ref_table.name].append((row[col.cldf_name], v))
else:
col = cols[k]
if isinstance(v, list):
v = (col.separator or ';').join(
nfilter(col.convert(vv) for vv in v))
else:
v = col.convert(v)
keys.append("`{0}`".format(col.name))
values.append(v)
keys, values = self.update_row(t.name, keys, values)
rows.append(tuple(values))
insert(db, t.name, keys, *rows, **{'verbose': verbose})
except FileNotFoundError:
if t.name != 'CognateTable': # An empty CognateTable is allowed.
raise # pragma: no cover
# Now insert the references, i.e. the associations with sources:
for tname, items in refs.items():
rows = []
for oid, sources in items:
for source in sources:
sid, context = Sources.parse(source)
rows.append([ds.id, oid, sid, context])
oid_col = '{0}_ID'.format(tname.replace('Source', ''))
insert(db, tname, ['dataset_ID', oid_col, 'Source_ID', 'Context'], *rows)
db.commit() | Load a CLDF dataset into the database.
:param dataset:
:return: | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/db.py#L348-L459 | [
"def insert(db, table, keys, *rows, **kw):\n if rows:\n if isinstance(keys, str):\n keys = [k.strip() for k in keys.split(',')]\n sql = \"INSERT INTO {0} ({1}) VALUES ({2})\".format(\n table, ','.join(keys), ','.join(['?' for _ in keys]))\n if kw.get('verbose'): # pragma: no cover\n print(sql)\n print(rows)\n db.executemany(sql, rows)\n",
"def git_hash(d):\n return git_describe(d).split('-g')[-1]\n",
"def schema(ds):\n \"\"\"\n Convert the table and column descriptions of a `Dataset` into specifications for the\n DB schema.\n\n :param ds:\n :return: A pair (tables, reference_tables).\n \"\"\"\n tables, ref_tables = {}, {}\n table_lookup = {t.url.string: t for t in ds.tables if ds.get_tabletype(t)}\n for table in table_lookup.values():\n spec = TableSpec(ds.get_tabletype(table))\n spec.primary_key = [\n c for c in table.tableSchema.columns if\n c.propertyUrl and c.propertyUrl.uri == term_uri('id')][0].name\n # Map the column name to the default:\n if spec.name in PROPERTY_URL_TO_COL:\n spec.primary_key = PROPERTY_URL_TO_COL[spec.name][term_uri('id')]\n for c in table.tableSchema.columns:\n if c.propertyUrl and c.propertyUrl.uri == term_uri('source'):\n # A column referencing sources is replaced by an association table.\n otype = ds.get_tabletype(table).replace('Table', '')\n ref_tables[ds.get_tabletype(table)] = TableSpec(\n '{0}Source'.format(otype), # The name of the association table.\n [ColSpec(otype + '_ID'), ColSpec('Source_ID'), ColSpec('Context')],\n [\n ( # The foreign key to the referencing object:\n ['dataset_ID', otype + '_ID'],\n ds.get_tabletype(table),\n ['dataset_ID', spec.primary_key]),\n ( # The foreign key to the referenced source:\n ['dataset_ID', 'Source_ID'],\n 'SourceTable',\n ['dataset_ID', 'ID']),\n ],\n c.name)\n else:\n cname = c.header\n if c.propertyUrl and spec.name in PROPERTY_URL_TO_COL:\n if c.propertyUrl.uri in PROPERTY_URL_TO_COL[spec.name]:\n cname = PROPERTY_URL_TO_COL[spec.name][c.propertyUrl.uri]\n spec.columns.append(ColSpec(\n cname,\n c.datatype.base if c.datatype else c.datatype,\n c.separator,\n cname == spec.primary_key,\n cldf_name=c.header))\n for fk in table.tableSchema.foreignKeys:\n if fk.reference.schemaReference:\n # We only support Foreign Key references between tables!\n continue # pragma: no cover\n ref = table_lookup[fk.reference.resource.string]\n ref_type = ds.get_tabletype(ref)\n if ref_type:\n colRefs = 
sorted(fk.columnReference)\n if spec.name in PROPERTY_URL_TO_COL:\n # Must map foreign keys\n colRefs = []\n for c in sorted(fk.columnReference):\n c = ds[spec.name, c]\n if c.propertyUrl and c.propertyUrl.uri in PROPERTY_URL_TO_COL[spec.name]:\n colRefs.append(PROPERTY_URL_TO_COL[spec.name][c.propertyUrl.uri])\n else:\n colRefs.append(c.header)\n rcolRefs = sorted(fk.reference.columnReference)\n if ref_type in PROPERTY_URL_TO_COL:\n # Must map foreign key targets!\n rcolRefs = []\n for c in sorted(fk.reference.columnReference):\n c = ds[ref_type, c]\n if c.propertyUrl and c.propertyUrl.uri in PROPERTY_URL_TO_COL[ref_type]:\n rcolRefs.append(PROPERTY_URL_TO_COL[ref_type][c.propertyUrl.uri])\n else:\n rcolRefs.append(c.header)\n spec.foreign_keys.append((\n tuple(['dataset_ID'] + colRefs),\n ds.get_tabletype(table_lookup[fk.reference.resource.string]),\n tuple(['dataset_ID'] + rcolRefs)))\n tables[spec.name] = spec\n\n # must determine the order in which tables must be created!\n ordered = OrderedDict()\n i = 0\n #\n # We loop through the tables repeatedly, and whenever we find one, which has all\n # referenced tables already in ordered, we move it from tables to ordered.\n #\n while tables and i < 100:\n i += 1\n for table in list(tables.keys()):\n if all(ref[1] in ordered for ref in tables[table].foreign_keys):\n # All referenced tables are already created.\n ordered[table] = tables.pop(table)\n break\n if tables: # pragma: no cover\n raise ValueError('there seem to be cyclic dependencies between the tables')\n\n return list(ordered.values()), ref_tables\n",
"def connection(self):\n return closing(sqlite3.connect(self.fname.as_posix()))\n",
" def create(self, force=False, exists_ok=False):\n \"\"\"\n Creates a db file with the core schema.\n\n :param force: If `True` an existing db file will be overwritten.\n \"\"\"\n if self.fname and self.fname.exists():\n if force:\n self.drop()\n elif exists_ok:\n return\n else:\n raise ValueError('db file already exists, use force=True to overwrite')\n with self.connection() as db:\n db.execute(\n \"\"\"\\\nCREATE TABLE dataset (\n ID TEXT PRIMARY KEY NOT NULL,\n name TEXT,\n version TEXT,\n metadata_json TEXT\n)\"\"\")\n db.execute(\"\"\"\\\nCREATE TABLE datasetmeta (\n dataset_ID TEXT ,\n key TEXT,\n value TEXT,\n PRIMARY KEY (dataset_ID, key),\n FOREIGN KEY(dataset_ID) REFERENCES dataset(ID)\n)\"\"\")\n db.execute(\"\"\"\\\nCREATE TABLE SourceTable (\n dataset_ID TEXT ,\n ID TEXT ,\n bibtex_type TEXT,\n {0}\n extra TEXT,\n PRIMARY KEY (dataset_ID, ID),\n FOREIGN KEY(dataset_ID) REFERENCES dataset(ID)\n)\"\"\".format('\\n '.join('`{0}` TEXT,'.format(f) for f in BIBTEX_FIELDS)))\n",
"def fetchone(self, sql, params=None, conn=None, verbose=False):\n return self._fetch(sql, 'fetchone', params, conn, verbose=verbose)\n",
"def unload(self, dataset_id):\n dataset_id = getattr(dataset_id, 'id', dataset_id)\n with self.connection() as db:\n for table in self.tables:\n if table != 'dataset':\n db.execute(\n \"DELETE FROM {0} WHERE dataset_ID = ?\".format(table),\n (dataset_id,))\n db.execute(\"DELETE FROM dataset WHERE ID = ?\", (dataset_id,))\n db.commit()\n",
"def _create_table_if_not_exists(self, table):\n if table.name in self.tables:\n return False\n\n with self.connection() as conn:\n conn.execute(table.sql)\n return True\n",
"def update_schema(self):\n for tname, cname, type_ in [\n ('ParameterTable', 'Ontological_Category', 'TEXT'),\n ('ParameterTable', 'Semantic_Field', 'TEXT'),\n ('LanguageTable', 'Latitude', 'REAL'),\n ('LanguageTable', 'Longitude', 'REAL'),\n ]:\n if cname not in self.tables[tname]:\n with self.connection() as conn:\n conn.execute(\"ALTER TABLE {0} ADD COLUMN `{1}` {2}\".format(\n tname, cname, type_))\n",
"def update_row(self, table, keys, values):\n return keys, values\n"
] | class Database(object):
def __init__(self, fname):
"""
A `Database` instance is initialized with a file path.
:param fname: Path to a file in the file system where the db is to be stored.
"""
self.fname = Path(fname)
def drop(self):
if self.fname.exists():
remove(self.fname)
def connection(self):
return closing(sqlite3.connect(self.fname.as_posix()))
def create(self, force=False, exists_ok=False):
"""
Creates a db file with the core schema.
:param force: If `True` an existing db file will be overwritten.
"""
if self.fname and self.fname.exists():
if force:
self.drop()
elif exists_ok:
return
else:
raise ValueError('db file already exists, use force=True to overwrite')
with self.connection() as db:
db.execute(
"""\
CREATE TABLE dataset (
ID TEXT PRIMARY KEY NOT NULL,
name TEXT,
version TEXT,
metadata_json TEXT
)""")
db.execute("""\
CREATE TABLE datasetmeta (
dataset_ID TEXT ,
key TEXT,
value TEXT,
PRIMARY KEY (dataset_ID, key),
FOREIGN KEY(dataset_ID) REFERENCES dataset(ID)
)""")
db.execute("""\
CREATE TABLE SourceTable (
dataset_ID TEXT ,
ID TEXT ,
bibtex_type TEXT,
{0}
extra TEXT,
PRIMARY KEY (dataset_ID, ID),
FOREIGN KEY(dataset_ID) REFERENCES dataset(ID)
)""".format('\n '.join('`{0}` TEXT,'.format(f) for f in BIBTEX_FIELDS)))
def fetchone(self, sql, params=None, conn=None, verbose=False):
return self._fetch(sql, 'fetchone', params, conn, verbose=verbose)
def fetchall(self, sql, params=None, conn=None, verbose=False):
return self._fetch(sql, 'fetchall', params, conn, verbose=verbose)
def _fetch(self, sql, method, params, conn, verbose=False):
sql = self.sql.get(sql, sql)
def _do(conn, sql, method):
cu = conn.cursor()
if verbose:
print(sql)
cu.execute(sql, params or ())
return getattr(cu, method)()
if not conn:
with self.connection() as conn:
return _do(conn, sql, method)
else:
return _do(conn, sql, method)
@property
def tables(self):
res = {r[0]: {} for r in self.fetchall(
"SELECT name FROM sqlite_master WHERE type='table'")}
for t in res:
res[t] = {r[1]: r[2] for r in self.fetchall(
"PRAGMA table_info({0})".format(t))}
return res
def unload(self, dataset_id):
dataset_id = getattr(dataset_id, 'id', dataset_id)
with self.connection() as db:
for table in self.tables:
if table != 'dataset':
db.execute(
"DELETE FROM {0} WHERE dataset_ID = ?".format(table),
(dataset_id,))
db.execute("DELETE FROM dataset WHERE ID = ?", (dataset_id,))
db.commit()
def _create_table_if_not_exists(self, table):
if table.name in self.tables:
return False
with self.connection() as conn:
conn.execute(table.sql)
return True
def update_schema(self):
for tname, cname, type_ in [
('ParameterTable', 'Ontological_Category', 'TEXT'),
('ParameterTable', 'Semantic_Field', 'TEXT'),
('LanguageTable', 'Latitude', 'REAL'),
('LanguageTable', 'Longitude', 'REAL'),
]:
if cname not in self.tables[tname]:
with self.connection() as conn:
conn.execute("ALTER TABLE {0} ADD COLUMN `{1}` {2}".format(
tname, cname, type_))
def update_row(self, table, keys, values):
return keys, values
def load_concepticon_data(self, concepticon):
conceptsets = []
for csid in self.fetchall("SELECT distinct concepticon_id FROM parametertable"):
cs = concepticon.conceptsets.get(csid[0])
if cs:
conceptsets.append((
cs.gloss, cs.ontological_category, cs.semanticfield, cs.id))
with self.connection() as db:
db.executemany(
"""\
UPDATE parametertable
SET concepticon_gloss = ?, ontological_category = ?, semantic_field = ?
WHERE concepticon_id = ?""",
conceptsets)
db.commit()
def load_glottolog_data(self, glottolog):
langs = []
languoids = {l.id: l for l in glottolog.languoids()}
for gc in self.fetchall("SELECT distinct glottocode FROM languagetable"):
lang = languoids.get(gc[0])
if lang:
langs.append((
lang.lineage[0][0] if lang.lineage else lang.name,
lang.macroareas[0].value if lang.macroareas else None,
lang.latitude,
lang.longitude,
lang.id))
with self.connection() as db:
db.executemany(
"""\
UPDATE languagetable
SET family = ?, macroarea = ?, latitude = ?, longitude = ?
WHERE glottocode = ?""",
langs)
db.commit()
sql = {
"conceptsets_by_dataset":
"SELECT ds.id, count(distinct p.concepticon_id) "
"FROM dataset as ds, parametertable as p "
"WHERE ds.id = p.dataset_id GROUP BY ds.id",
"families_by_dataset":
"SELECT ds.id, count(distinct l.family) "
"FROM dataset as ds, languagetable as l "
"WHERE ds.id = l.dataset_id GROUP BY ds.id",
"macroareas_by_dataset":
"SELECT ds.id, group_concat(distinct l.macroarea) "
"FROM dataset as ds, languagetable as l "
"WHERE ds.id = l.dataset_id GROUP BY ds.id",
"glottocodes_by_dataset":
"SELECT ds.id, count(distinct l.glottocode) "
"FROM dataset as ds, languagetable as l "
"WHERE ds.id = l.dataset_id GROUP BY ds.id",
"mapped_lexemes_by_dataset":
"SELECT ds.id, count(distinct f.ID) "
"FROM dataset as ds, formtable as f, languagetable as l, parametertable as p "
"WHERE ds.id = f.dataset_id and f.Language_ID = l.ID and "
"f.Parameter_ID = p.ID and l.glottocode is not null and "
"p.concepticon_id is not null "
"GROUP BY ds.id",
"lexemes_by_dataset":
"SELECT ds.id, count(f.ID) FROM dataset as ds, formtable as f "
"WHERE ds.id = f.dataset_id GROUP BY ds.id",
}
|
lexibank/pylexibank | src/pylexibank/util.py | getEvoBibAsBibtex | python | def getEvoBibAsBibtex(*keys, **kw):
res = []
for key in keys:
bib = get_url(
"http://bibliography.lingpy.org/raw.php?key=" + key,
log=kw.get('log')).text
try:
res.append('@' + bib.split('@')[1].split('</pre>')[0])
except IndexError: # pragma: no cover
res.append('@misc{' + key + ',\nNote={missing source}\n\n}')
return '\n\n'.join(res) | Download bibtex format and parse it from EvoBib | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/util.py#L233-L244 | [
"def get_url(url, log=None, **kw):\n res = requests.get(url, **kw)\n if log:\n level = log.info if res.status_code == 200 else log.warn\n level('HTTP {0} for {1}'.format(\n colored(res.status_code, 'blue'), colored(url, 'blue')))\n return res\n"
] | import logging
import re
import zipfile
from collections import OrderedDict
from xml.etree import cElementTree as et
from contextlib import contextmanager
import requests
import requests.packages.urllib3
from termcolor import colored
import xlrd
from tqdm import tqdm
from clldutils.dsv import UnicodeWriter, reader
from clldutils.path import (
Path, as_posix, copy, TemporaryDirectory, git_describe, remove,
read_text, write_text,
)
from clldutils.misc import slug, xmlchars
from clldutils.badge import Colors, badge
from clldutils import jsonlib
from pycldf.sources import Source, Reference
from pybtex import database
import pylexibank
requests.packages.urllib3.disable_warnings()
logging.basicConfig(level=logging.INFO)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARN)
REPOS_PATH = Path(pylexibank.__file__).parent.parent
YEAR_PATTERN = re.compile('\s+\(?(?P<year>[1-9][0-9]{3}(-[0-9]+)?)(\)|\.)')
def aligned(pairs):
maxlabel = max(len(p[0]) for p in pairs)
return '\n'.join(' {0} {1}'.format(p[0].ljust(maxlabel), p[1] or '') for p in pairs)
def git_hash(d):
return git_describe(d).split('-g')[-1]
def pb(iterable=None, **kw):
kw.setdefault('leave', False)
return tqdm(iterable=iterable, **kw)
class Repos(object):
@property
def version(self):
return git_describe(self.repos)
def split_by_year(s):
match = YEAR_PATTERN.search(s)
if match:
return s[:match.start()].strip(), match.group('year'), s[match.end():].strip()
return None, None, s
def get_reference(author, year, title, pages, sources, id_=None, genre='misc'):
kw = {'title': title}
id_ = id_ or None
if author and year:
id_ = id_ or slug(author + year)
kw.update(author=author, year=year)
elif title:
id_ = id_ or slug(title)
if not id_:
return
source = sources.get(id_)
if source is None:
sources[id_] = source = Source(genre, id_, **kw)
return Reference(source, pages)
def data_path(*comps, **kw):
return kw.get('repos', REPOS_PATH).joinpath('datasets', *comps)
def get_badge(ratio, name):
if ratio >= 0.99:
color = Colors.brightgreen
elif ratio >= 0.9:
color = 'green'
elif ratio >= 0.8:
color = Colors.yellowgreen
elif ratio >= 0.7:
color = Colors.yellow
elif ratio >= 0.6:
color = Colors.orange
else:
color = Colors.red
ratio = int(round(ratio * 100))
return badge(name, '%s%%' % ratio, color, label="{0}: {1}%".format(name, ratio))
def sorted_obj(obj):
res = obj
if isinstance(obj, dict):
res = OrderedDict()
obj.pop(None, None)
for k, v in sorted(obj.items()):
res[k] = sorted_obj(v)
elif isinstance(obj, (list, set)):
res = [sorted_obj(v) for v in obj]
return res
def log_dump(fname, log=None):
if log:
log.info('file written: {0}'.format(colored(fname.as_posix(), 'green')))
def jsondump(obj, fname, log=None):
fname = Path(fname)
if fname.exists():
d = jsonlib.load(fname)
d.update(obj)
obj = d
jsonlib.dump(sorted_obj(obj), fname, indent=4)
log_dump(fname, log=log)
return obj
def textdump(text, fname, log=None):
if isinstance(text, list):
text = '\n'.join(text)
with fname.open('w', encoding='utf8') as fp:
fp.write(text)
log_dump(fname, log=log)
def get_url(url, log=None, **kw):
res = requests.get(url, **kw)
if log:
level = log.info if res.status_code == 200 else log.warn
level('HTTP {0} for {1}'.format(
colored(res.status_code, 'blue'), colored(url, 'blue')))
return res
class DataDir(type(Path())):
def posix(self, *comps):
return self.joinpath(*comps).as_posix()
def read(self, fname, encoding='utf8'):
return read_text(self.joinpath(fname), encoding=encoding)
def write(self, fname, text, encoding='utf8'):
write_text(self.joinpath(fname), text, encoding=encoding)
return fname
def remove(self, fname):
remove(self.joinpath(fname))
def read_csv(self, fname, **kw):
return list(reader(self.joinpath(fname), **kw))
def read_tsv(self, fname, **kw):
return self.read_csv(fname, delimiter='\t', **kw)
def read_xml(self, fname, wrap=True):
xml = xmlchars(self.read(fname))
if wrap:
xml = '<r>{0}</r>'.format(xml)
return et.fromstring(xml.encode('utf8'))
def read_json(self, fname, **kw):
return jsonlib.load(fname)
def read_bib(self, fname='sources.bib'):
bib = database.parse_string(self.read(fname), bib_format='bibtex')
return [Source.from_entry(k, e) for k, e in bib.entries.items()]
def xls2csv(self, fname, outdir=None):
if isinstance(fname, str):
fname = self.joinpath(fname)
res = {}
outdir = outdir or self
wb = xlrd.open_workbook(fname.as_posix())
for sname in wb.sheet_names():
sheet = wb.sheet_by_name(sname)
if sheet.nrows:
path = outdir.joinpath(
fname.stem + '.' + slug(sname, lowercase=False) + '.csv')
with UnicodeWriter(path) as writer:
for i in range(sheet.nrows):
writer.writerow([col.value for col in sheet.row(i)])
res[sname] = path
return res
@contextmanager
def temp_download(self, url, fname, log=None):
p = None
try:
p = self.download(url, fname, log=log)
yield p
finally:
if p and p.exists():
remove(p)
def download(self, url, fname, log=None, skip_if_exists=False):
p = self.joinpath(fname)
if p.exists() and skip_if_exists:
return p
res = get_url(url, log=log, stream=True)
with open(self.posix(fname), 'wb') as fp:
for chunk in res.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
fp.write(chunk)
return p
def download_and_unpack(self, url, *paths, **kw):
"""
Download a zipfile and immediately unpack selected content.
:param url:
:param paths:
:param kw:
:return:
"""
with self.temp_download(url, 'ds.zip', log=kw.pop('log', None)) as zipp:
with TemporaryDirectory() as tmpdir:
with zipfile.ZipFile(zipp.as_posix()) as zipf:
for path in paths:
zipf.extract(as_posix(path), path=tmpdir.as_posix())
copy(tmpdir.joinpath(path), self)
|
lexibank/pylexibank | src/pylexibank/util.py | DataDir.download_and_unpack | python | def download_and_unpack(self, url, *paths, **kw):
with self.temp_download(url, 'ds.zip', log=kw.pop('log', None)) as zipp:
with TemporaryDirectory() as tmpdir:
with zipfile.ZipFile(zipp.as_posix()) as zipf:
for path in paths:
zipf.extract(as_posix(path), path=tmpdir.as_posix())
copy(tmpdir.joinpath(path), self) | Download a zipfile and immediately unpack selected content.
:param url:
:param paths:
:param kw:
:return: | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/util.py#L216-L230 | null | class DataDir(type(Path())):
def posix(self, *comps):
return self.joinpath(*comps).as_posix()
def read(self, fname, encoding='utf8'):
return read_text(self.joinpath(fname), encoding=encoding)
def write(self, fname, text, encoding='utf8'):
write_text(self.joinpath(fname), text, encoding=encoding)
return fname
def remove(self, fname):
remove(self.joinpath(fname))
def read_csv(self, fname, **kw):
return list(reader(self.joinpath(fname), **kw))
def read_tsv(self, fname, **kw):
return self.read_csv(fname, delimiter='\t', **kw)
def read_xml(self, fname, wrap=True):
xml = xmlchars(self.read(fname))
if wrap:
xml = '<r>{0}</r>'.format(xml)
return et.fromstring(xml.encode('utf8'))
def read_json(self, fname, **kw):
return jsonlib.load(fname)
def read_bib(self, fname='sources.bib'):
bib = database.parse_string(self.read(fname), bib_format='bibtex')
return [Source.from_entry(k, e) for k, e in bib.entries.items()]
def xls2csv(self, fname, outdir=None):
if isinstance(fname, str):
fname = self.joinpath(fname)
res = {}
outdir = outdir or self
wb = xlrd.open_workbook(fname.as_posix())
for sname in wb.sheet_names():
sheet = wb.sheet_by_name(sname)
if sheet.nrows:
path = outdir.joinpath(
fname.stem + '.' + slug(sname, lowercase=False) + '.csv')
with UnicodeWriter(path) as writer:
for i in range(sheet.nrows):
writer.writerow([col.value for col in sheet.row(i)])
res[sname] = path
return res
@contextmanager
def temp_download(self, url, fname, log=None):
p = None
try:
p = self.download(url, fname, log=log)
yield p
finally:
if p and p.exists():
remove(p)
def download(self, url, fname, log=None, skip_if_exists=False):
p = self.joinpath(fname)
if p.exists() and skip_if_exists:
return p
res = get_url(url, log=log, stream=True)
with open(self.posix(fname), 'wb') as fp:
for chunk in res.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
fp.write(chunk)
return p
|
lexibank/pylexibank | src/pylexibank/lingpy_util.py | wordlist2cognates | python | def wordlist2cognates(wordlist, source, expert='expert', ref='cogid'):
for k in wordlist:
yield dict(
Form_ID=wordlist[k, 'lid'],
ID=k,
Form=wordlist[k, 'ipa'],
Cognateset_ID='{0}-{1}'.format(
slug(wordlist[k, 'concept']), wordlist[k, ref]),
Cognate_Detection_Method=expert,
Source=source) | Turn a wordlist into a cognate set list, using the cldf parameters. | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L9-L19 | null | from copy import deepcopy
import lingpy
from clldutils.misc import slug
from pylexibank.dataset import Cognate
def _cldf2wld(dataset):
"""Make lingpy-compatible dictinary out of cldf main data."""
header = [f for f in dataset.dataset.lexeme_class.fieldnames() if f != 'ID']
D = {0: ['lid'] + [h.lower() for h in header]}
for idx, row in enumerate(dataset.objects['FormTable']):
row = deepcopy(row)
row['Segments'] = ' '.join(row['Segments'])
D[idx + 1] = [row['ID']] + [row[h] for h in header]
return D
def _cldf2lexstat(
dataset,
segments='segments',
transcription='value',
row='parameter_id',
col='language_id'):
"""Read LexStat object from cldf dataset."""
D = _cldf2wld(dataset)
return lingpy.LexStat(D, segments=segments, transcription=transcription, row=row, col=col)
def _cldf2wordlist(dataset, row='parameter_id', col='language_id'):
"""Read worldist object from cldf dataset."""
return lingpy.Wordlist(_cldf2wld(dataset), row=row, col=col)
def iter_cognates(dataset, column='Segments', method='turchin', threshold=0.5, **kw):
"""
Compute cognates automatically for a given dataset.
"""
if method == 'turchin':
for row in dataset.objects['FormTable']:
sounds = ''.join(lingpy.tokens2class(row[column], 'dolgo'))
if sounds.startswith('V'):
sounds = 'H' + sounds
sounds = '-'.join([s for s in sounds if s != 'V'][:2])
cogid = slug(row['Parameter_ID']) + '-' + sounds
if '0' not in sounds:
yield dict(
Form_ID=row['ID'],
Form=row['Value'],
Cognateset_ID=cogid,
Cognate_Detection_Method='CMM')
if method in ['sca', 'lexstat']:
lex = _cldf2lexstat(dataset)
if method == 'lexstat':
lex.get_scorer(**kw)
lex.cluster(method=method, threshold=threshold, ref='cogid')
for k in lex:
yield Cognate(
Form_ID=lex[k, 'lid'],
Form=lex[k, 'value'],
Cognateset_ID=lex[k, 'cogid'],
Cognate_Detection_Method=method + '-t{0:.2f}'.format(threshold))
def iter_alignments(dataset, cognate_sets, column='Segments', method='library'):
"""
Function computes automatic alignments and writes them to file.
"""
if not isinstance(dataset, lingpy.basic.parser.QLCParser):
wordlist = _cldf2wordlist(dataset)
cognates = {r['Form_ID']: r for r in cognate_sets}
wordlist.add_entries(
'cogid',
'lid',
lambda x: cognates[x]['Cognateset_ID'] if x in cognates else 0)
alm = lingpy.Alignments(
wordlist,
ref='cogid',
row='parameter_id',
col='language_id',
segments=column.lower())
alm.align(method=method)
for k in alm:
if alm[k, 'lid'] in cognates:
cognate = cognates[alm[k, 'lid']]
cognate['Alignment'] = alm[k, 'alignment']
cognate['Alignment_Method'] = method
else:
alm = lingpy.Alignments(dataset, ref='cogid')
alm.align(method=method)
for cognate in cognate_sets:
idx = cognate['ID'] or cognate['Form_ID']
cognate['Alignment'] = alm[int(idx), 'alignment']
cognate['Alignment_Method'] = 'SCA-' + method
|
lexibank/pylexibank | src/pylexibank/lingpy_util.py | _cldf2wld | python | def _cldf2wld(dataset):
header = [f for f in dataset.dataset.lexeme_class.fieldnames() if f != 'ID']
D = {0: ['lid'] + [h.lower() for h in header]}
for idx, row in enumerate(dataset.objects['FormTable']):
row = deepcopy(row)
row['Segments'] = ' '.join(row['Segments'])
D[idx + 1] = [row['ID']] + [row[h] for h in header]
return D | Make lingpy-compatible dictinary out of cldf main data. | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L22-L30 | null | from copy import deepcopy
import lingpy
from clldutils.misc import slug
from pylexibank.dataset import Cognate
def wordlist2cognates(wordlist, source, expert='expert', ref='cogid'):
"""Turn a wordlist into a cognate set list, using the cldf parameters."""
for k in wordlist:
yield dict(
Form_ID=wordlist[k, 'lid'],
ID=k,
Form=wordlist[k, 'ipa'],
Cognateset_ID='{0}-{1}'.format(
slug(wordlist[k, 'concept']), wordlist[k, ref]),
Cognate_Detection_Method=expert,
Source=source)
def _cldf2lexstat(
dataset,
segments='segments',
transcription='value',
row='parameter_id',
col='language_id'):
"""Read LexStat object from cldf dataset."""
D = _cldf2wld(dataset)
return lingpy.LexStat(D, segments=segments, transcription=transcription, row=row, col=col)
def _cldf2wordlist(dataset, row='parameter_id', col='language_id'):
"""Read worldist object from cldf dataset."""
return lingpy.Wordlist(_cldf2wld(dataset), row=row, col=col)
def iter_cognates(dataset, column='Segments', method='turchin', threshold=0.5, **kw):
"""
Compute cognates automatically for a given dataset.
"""
if method == 'turchin':
for row in dataset.objects['FormTable']:
sounds = ''.join(lingpy.tokens2class(row[column], 'dolgo'))
if sounds.startswith('V'):
sounds = 'H' + sounds
sounds = '-'.join([s for s in sounds if s != 'V'][:2])
cogid = slug(row['Parameter_ID']) + '-' + sounds
if '0' not in sounds:
yield dict(
Form_ID=row['ID'],
Form=row['Value'],
Cognateset_ID=cogid,
Cognate_Detection_Method='CMM')
if method in ['sca', 'lexstat']:
lex = _cldf2lexstat(dataset)
if method == 'lexstat':
lex.get_scorer(**kw)
lex.cluster(method=method, threshold=threshold, ref='cogid')
for k in lex:
yield Cognate(
Form_ID=lex[k, 'lid'],
Form=lex[k, 'value'],
Cognateset_ID=lex[k, 'cogid'],
Cognate_Detection_Method=method + '-t{0:.2f}'.format(threshold))
def iter_alignments(dataset, cognate_sets, column='Segments', method='library'):
"""
Function computes automatic alignments and writes them to file.
"""
if not isinstance(dataset, lingpy.basic.parser.QLCParser):
wordlist = _cldf2wordlist(dataset)
cognates = {r['Form_ID']: r for r in cognate_sets}
wordlist.add_entries(
'cogid',
'lid',
lambda x: cognates[x]['Cognateset_ID'] if x in cognates else 0)
alm = lingpy.Alignments(
wordlist,
ref='cogid',
row='parameter_id',
col='language_id',
segments=column.lower())
alm.align(method=method)
for k in alm:
if alm[k, 'lid'] in cognates:
cognate = cognates[alm[k, 'lid']]
cognate['Alignment'] = alm[k, 'alignment']
cognate['Alignment_Method'] = method
else:
alm = lingpy.Alignments(dataset, ref='cogid')
alm.align(method=method)
for cognate in cognate_sets:
idx = cognate['ID'] or cognate['Form_ID']
cognate['Alignment'] = alm[int(idx), 'alignment']
cognate['Alignment_Method'] = 'SCA-' + method
|
lexibank/pylexibank | src/pylexibank/lingpy_util.py | _cldf2lexstat | python | def _cldf2lexstat(
dataset,
segments='segments',
transcription='value',
row='parameter_id',
col='language_id'):
D = _cldf2wld(dataset)
return lingpy.LexStat(D, segments=segments, transcription=transcription, row=row, col=col) | Read LexStat object from cldf dataset. | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L33-L41 | [
"def _cldf2wld(dataset):\n \"\"\"Make lingpy-compatible dictinary out of cldf main data.\"\"\"\n header = [f for f in dataset.dataset.lexeme_class.fieldnames() if f != 'ID']\n D = {0: ['lid'] + [h.lower() for h in header]}\n for idx, row in enumerate(dataset.objects['FormTable']):\n row = deepcopy(row)\n row['Segments'] = ' '.join(row['Segments'])\n D[idx + 1] = [row['ID']] + [row[h] for h in header]\n return D\n"
] | from copy import deepcopy
import lingpy
from clldutils.misc import slug
from pylexibank.dataset import Cognate
def wordlist2cognates(wordlist, source, expert='expert', ref='cogid'):
"""Turn a wordlist into a cognate set list, using the cldf parameters."""
for k in wordlist:
yield dict(
Form_ID=wordlist[k, 'lid'],
ID=k,
Form=wordlist[k, 'ipa'],
Cognateset_ID='{0}-{1}'.format(
slug(wordlist[k, 'concept']), wordlist[k, ref]),
Cognate_Detection_Method=expert,
Source=source)
def _cldf2wld(dataset):
"""Make lingpy-compatible dictinary out of cldf main data."""
header = [f for f in dataset.dataset.lexeme_class.fieldnames() if f != 'ID']
D = {0: ['lid'] + [h.lower() for h in header]}
for idx, row in enumerate(dataset.objects['FormTable']):
row = deepcopy(row)
row['Segments'] = ' '.join(row['Segments'])
D[idx + 1] = [row['ID']] + [row[h] for h in header]
return D
def _cldf2wordlist(dataset, row='parameter_id', col='language_id'):
"""Read worldist object from cldf dataset."""
return lingpy.Wordlist(_cldf2wld(dataset), row=row, col=col)
def iter_cognates(dataset, column='Segments', method='turchin', threshold=0.5, **kw):
"""
Compute cognates automatically for a given dataset.
"""
if method == 'turchin':
for row in dataset.objects['FormTable']:
sounds = ''.join(lingpy.tokens2class(row[column], 'dolgo'))
if sounds.startswith('V'):
sounds = 'H' + sounds
sounds = '-'.join([s for s in sounds if s != 'V'][:2])
cogid = slug(row['Parameter_ID']) + '-' + sounds
if '0' not in sounds:
yield dict(
Form_ID=row['ID'],
Form=row['Value'],
Cognateset_ID=cogid,
Cognate_Detection_Method='CMM')
if method in ['sca', 'lexstat']:
lex = _cldf2lexstat(dataset)
if method == 'lexstat':
lex.get_scorer(**kw)
lex.cluster(method=method, threshold=threshold, ref='cogid')
for k in lex:
yield Cognate(
Form_ID=lex[k, 'lid'],
Form=lex[k, 'value'],
Cognateset_ID=lex[k, 'cogid'],
Cognate_Detection_Method=method + '-t{0:.2f}'.format(threshold))
def iter_alignments(dataset, cognate_sets, column='Segments', method='library'):
"""
Function computes automatic alignments and writes them to file.
"""
if not isinstance(dataset, lingpy.basic.parser.QLCParser):
wordlist = _cldf2wordlist(dataset)
cognates = {r['Form_ID']: r for r in cognate_sets}
wordlist.add_entries(
'cogid',
'lid',
lambda x: cognates[x]['Cognateset_ID'] if x in cognates else 0)
alm = lingpy.Alignments(
wordlist,
ref='cogid',
row='parameter_id',
col='language_id',
segments=column.lower())
alm.align(method=method)
for k in alm:
if alm[k, 'lid'] in cognates:
cognate = cognates[alm[k, 'lid']]
cognate['Alignment'] = alm[k, 'alignment']
cognate['Alignment_Method'] = method
else:
alm = lingpy.Alignments(dataset, ref='cogid')
alm.align(method=method)
for cognate in cognate_sets:
idx = cognate['ID'] or cognate['Form_ID']
cognate['Alignment'] = alm[int(idx), 'alignment']
cognate['Alignment_Method'] = 'SCA-' + method
|
lexibank/pylexibank | src/pylexibank/lingpy_util.py | _cldf2wordlist | python | def _cldf2wordlist(dataset, row='parameter_id', col='language_id'):
return lingpy.Wordlist(_cldf2wld(dataset), row=row, col=col) | Read worldist object from cldf dataset. | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L44-L46 | [
"def _cldf2wld(dataset):\n \"\"\"Make lingpy-compatible dictinary out of cldf main data.\"\"\"\n header = [f for f in dataset.dataset.lexeme_class.fieldnames() if f != 'ID']\n D = {0: ['lid'] + [h.lower() for h in header]}\n for idx, row in enumerate(dataset.objects['FormTable']):\n row = deepcopy(row)\n row['Segments'] = ' '.join(row['Segments'])\n D[idx + 1] = [row['ID']] + [row[h] for h in header]\n return D\n"
] | from copy import deepcopy
import lingpy
from clldutils.misc import slug
from pylexibank.dataset import Cognate
def wordlist2cognates(wordlist, source, expert='expert', ref='cogid'):
"""Turn a wordlist into a cognate set list, using the cldf parameters."""
for k in wordlist:
yield dict(
Form_ID=wordlist[k, 'lid'],
ID=k,
Form=wordlist[k, 'ipa'],
Cognateset_ID='{0}-{1}'.format(
slug(wordlist[k, 'concept']), wordlist[k, ref]),
Cognate_Detection_Method=expert,
Source=source)
def _cldf2wld(dataset):
"""Make lingpy-compatible dictinary out of cldf main data."""
header = [f for f in dataset.dataset.lexeme_class.fieldnames() if f != 'ID']
D = {0: ['lid'] + [h.lower() for h in header]}
for idx, row in enumerate(dataset.objects['FormTable']):
row = deepcopy(row)
row['Segments'] = ' '.join(row['Segments'])
D[idx + 1] = [row['ID']] + [row[h] for h in header]
return D
def _cldf2lexstat(
dataset,
segments='segments',
transcription='value',
row='parameter_id',
col='language_id'):
"""Read LexStat object from cldf dataset."""
D = _cldf2wld(dataset)
return lingpy.LexStat(D, segments=segments, transcription=transcription, row=row, col=col)
def iter_cognates(dataset, column='Segments', method='turchin', threshold=0.5, **kw):
"""
Compute cognates automatically for a given dataset.
"""
if method == 'turchin':
for row in dataset.objects['FormTable']:
sounds = ''.join(lingpy.tokens2class(row[column], 'dolgo'))
if sounds.startswith('V'):
sounds = 'H' + sounds
sounds = '-'.join([s for s in sounds if s != 'V'][:2])
cogid = slug(row['Parameter_ID']) + '-' + sounds
if '0' not in sounds:
yield dict(
Form_ID=row['ID'],
Form=row['Value'],
Cognateset_ID=cogid,
Cognate_Detection_Method='CMM')
if method in ['sca', 'lexstat']:
lex = _cldf2lexstat(dataset)
if method == 'lexstat':
lex.get_scorer(**kw)
lex.cluster(method=method, threshold=threshold, ref='cogid')
for k in lex:
yield Cognate(
Form_ID=lex[k, 'lid'],
Form=lex[k, 'value'],
Cognateset_ID=lex[k, 'cogid'],
Cognate_Detection_Method=method + '-t{0:.2f}'.format(threshold))
def iter_alignments(dataset, cognate_sets, column='Segments', method='library'):
"""
Function computes automatic alignments and writes them to file.
"""
if not isinstance(dataset, lingpy.basic.parser.QLCParser):
wordlist = _cldf2wordlist(dataset)
cognates = {r['Form_ID']: r for r in cognate_sets}
wordlist.add_entries(
'cogid',
'lid',
lambda x: cognates[x]['Cognateset_ID'] if x in cognates else 0)
alm = lingpy.Alignments(
wordlist,
ref='cogid',
row='parameter_id',
col='language_id',
segments=column.lower())
alm.align(method=method)
for k in alm:
if alm[k, 'lid'] in cognates:
cognate = cognates[alm[k, 'lid']]
cognate['Alignment'] = alm[k, 'alignment']
cognate['Alignment_Method'] = method
else:
alm = lingpy.Alignments(dataset, ref='cogid')
alm.align(method=method)
for cognate in cognate_sets:
idx = cognate['ID'] or cognate['Form_ID']
cognate['Alignment'] = alm[int(idx), 'alignment']
cognate['Alignment_Method'] = 'SCA-' + method
|
lexibank/pylexibank | src/pylexibank/lingpy_util.py | iter_cognates | python | def iter_cognates(dataset, column='Segments', method='turchin', threshold=0.5, **kw):
if method == 'turchin':
for row in dataset.objects['FormTable']:
sounds = ''.join(lingpy.tokens2class(row[column], 'dolgo'))
if sounds.startswith('V'):
sounds = 'H' + sounds
sounds = '-'.join([s for s in sounds if s != 'V'][:2])
cogid = slug(row['Parameter_ID']) + '-' + sounds
if '0' not in sounds:
yield dict(
Form_ID=row['ID'],
Form=row['Value'],
Cognateset_ID=cogid,
Cognate_Detection_Method='CMM')
if method in ['sca', 'lexstat']:
lex = _cldf2lexstat(dataset)
if method == 'lexstat':
lex.get_scorer(**kw)
lex.cluster(method=method, threshold=threshold, ref='cogid')
for k in lex:
yield Cognate(
Form_ID=lex[k, 'lid'],
Form=lex[k, 'value'],
Cognateset_ID=lex[k, 'cogid'],
Cognate_Detection_Method=method + '-t{0:.2f}'.format(threshold)) | Compute cognates automatically for a given dataset. | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L49-L77 | [
"def _cldf2lexstat(\n dataset,\n segments='segments',\n transcription='value',\n row='parameter_id',\n col='language_id'):\n \"\"\"Read LexStat object from cldf dataset.\"\"\"\n D = _cldf2wld(dataset)\n return lingpy.LexStat(D, segments=segments, transcription=transcription, row=row, col=col)\n"
] | from copy import deepcopy
import lingpy
from clldutils.misc import slug
from pylexibank.dataset import Cognate
def wordlist2cognates(wordlist, source, expert='expert', ref='cogid'):
"""Turn a wordlist into a cognate set list, using the cldf parameters."""
for k in wordlist:
yield dict(
Form_ID=wordlist[k, 'lid'],
ID=k,
Form=wordlist[k, 'ipa'],
Cognateset_ID='{0}-{1}'.format(
slug(wordlist[k, 'concept']), wordlist[k, ref]),
Cognate_Detection_Method=expert,
Source=source)
def _cldf2wld(dataset):
"""Make lingpy-compatible dictinary out of cldf main data."""
header = [f for f in dataset.dataset.lexeme_class.fieldnames() if f != 'ID']
D = {0: ['lid'] + [h.lower() for h in header]}
for idx, row in enumerate(dataset.objects['FormTable']):
row = deepcopy(row)
row['Segments'] = ' '.join(row['Segments'])
D[idx + 1] = [row['ID']] + [row[h] for h in header]
return D
def _cldf2lexstat(
dataset,
segments='segments',
transcription='value',
row='parameter_id',
col='language_id'):
"""Read LexStat object from cldf dataset."""
D = _cldf2wld(dataset)
return lingpy.LexStat(D, segments=segments, transcription=transcription, row=row, col=col)
def _cldf2wordlist(dataset, row='parameter_id', col='language_id'):
"""Read worldist object from cldf dataset."""
return lingpy.Wordlist(_cldf2wld(dataset), row=row, col=col)
def iter_alignments(dataset, cognate_sets, column='Segments', method='library'):
"""
Function computes automatic alignments and writes them to file.
"""
if not isinstance(dataset, lingpy.basic.parser.QLCParser):
wordlist = _cldf2wordlist(dataset)
cognates = {r['Form_ID']: r for r in cognate_sets}
wordlist.add_entries(
'cogid',
'lid',
lambda x: cognates[x]['Cognateset_ID'] if x in cognates else 0)
alm = lingpy.Alignments(
wordlist,
ref='cogid',
row='parameter_id',
col='language_id',
segments=column.lower())
alm.align(method=method)
for k in alm:
if alm[k, 'lid'] in cognates:
cognate = cognates[alm[k, 'lid']]
cognate['Alignment'] = alm[k, 'alignment']
cognate['Alignment_Method'] = method
else:
alm = lingpy.Alignments(dataset, ref='cogid')
alm.align(method=method)
for cognate in cognate_sets:
idx = cognate['ID'] or cognate['Form_ID']
cognate['Alignment'] = alm[int(idx), 'alignment']
cognate['Alignment_Method'] = 'SCA-' + method
|
lexibank/pylexibank | src/pylexibank/lingpy_util.py | iter_alignments | python | def iter_alignments(dataset, cognate_sets, column='Segments', method='library'):
if not isinstance(dataset, lingpy.basic.parser.QLCParser):
wordlist = _cldf2wordlist(dataset)
cognates = {r['Form_ID']: r for r in cognate_sets}
wordlist.add_entries(
'cogid',
'lid',
lambda x: cognates[x]['Cognateset_ID'] if x in cognates else 0)
alm = lingpy.Alignments(
wordlist,
ref='cogid',
row='parameter_id',
col='language_id',
segments=column.lower())
alm.align(method=method)
for k in alm:
if alm[k, 'lid'] in cognates:
cognate = cognates[alm[k, 'lid']]
cognate['Alignment'] = alm[k, 'alignment']
cognate['Alignment_Method'] = method
else:
alm = lingpy.Alignments(dataset, ref='cogid')
alm.align(method=method)
for cognate in cognate_sets:
idx = cognate['ID'] or cognate['Form_ID']
cognate['Alignment'] = alm[int(idx), 'alignment']
cognate['Alignment_Method'] = 'SCA-' + method | Function computes automatic alignments and writes them to file. | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L80-L110 | [
"def _cldf2wordlist(dataset, row='parameter_id', col='language_id'):\n \"\"\"Read worldist object from cldf dataset.\"\"\"\n return lingpy.Wordlist(_cldf2wld(dataset), row=row, col=col)\n"
] | from copy import deepcopy
import lingpy
from clldutils.misc import slug
from pylexibank.dataset import Cognate
def wordlist2cognates(wordlist, source, expert='expert', ref='cogid'):
"""Turn a wordlist into a cognate set list, using the cldf parameters."""
for k in wordlist:
yield dict(
Form_ID=wordlist[k, 'lid'],
ID=k,
Form=wordlist[k, 'ipa'],
Cognateset_ID='{0}-{1}'.format(
slug(wordlist[k, 'concept']), wordlist[k, ref]),
Cognate_Detection_Method=expert,
Source=source)
def _cldf2wld(dataset):
"""Make lingpy-compatible dictinary out of cldf main data."""
header = [f for f in dataset.dataset.lexeme_class.fieldnames() if f != 'ID']
D = {0: ['lid'] + [h.lower() for h in header]}
for idx, row in enumerate(dataset.objects['FormTable']):
row = deepcopy(row)
row['Segments'] = ' '.join(row['Segments'])
D[idx + 1] = [row['ID']] + [row[h] for h in header]
return D
def _cldf2lexstat(
dataset,
segments='segments',
transcription='value',
row='parameter_id',
col='language_id'):
"""Read LexStat object from cldf dataset."""
D = _cldf2wld(dataset)
return lingpy.LexStat(D, segments=segments, transcription=transcription, row=row, col=col)
def _cldf2wordlist(dataset, row='parameter_id', col='language_id'):
"""Read worldist object from cldf dataset."""
return lingpy.Wordlist(_cldf2wld(dataset), row=row, col=col)
def iter_cognates(dataset, column='Segments', method='turchin', threshold=0.5, **kw):
"""
Compute cognates automatically for a given dataset.
"""
if method == 'turchin':
for row in dataset.objects['FormTable']:
sounds = ''.join(lingpy.tokens2class(row[column], 'dolgo'))
if sounds.startswith('V'):
sounds = 'H' + sounds
sounds = '-'.join([s for s in sounds if s != 'V'][:2])
cogid = slug(row['Parameter_ID']) + '-' + sounds
if '0' not in sounds:
yield dict(
Form_ID=row['ID'],
Form=row['Value'],
Cognateset_ID=cogid,
Cognate_Detection_Method='CMM')
if method in ['sca', 'lexstat']:
lex = _cldf2lexstat(dataset)
if method == 'lexstat':
lex.get_scorer(**kw)
lex.cluster(method=method, threshold=threshold, ref='cogid')
for k in lex:
yield Cognate(
Form_ID=lex[k, 'lid'],
Form=lex[k, 'value'],
Cognateset_ID=lex[k, 'cogid'],
Cognate_Detection_Method=method + '-t{0:.2f}'.format(threshold))
|
lexibank/pylexibank | src/pylexibank/__main__.py | get_path | python | def get_path(src): # pragma: no cover
res = None
while not res:
if res is False:
print(colored('You must provide a path to an existing directory!', 'red'))
print('You need a local clone or release of (a fork of) '
'https://github.com/{0}'.format(src))
res = input(colored('Local path to {0}: '.format(src), 'green', attrs=['blink']))
if res and Path(res).exists():
return Path(res).resolve()
res = False | Prompts the user to input a local path.
:param src: github repository name
:return: Absolute local path | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/__main__.py#L53-L69 | null | """
Main command line interface of the pylexibank package.
Like programs such as git, this cli splits its functionality into sub-commands
(see e.g. https://docs.python.org/2/library/argparse.html#sub-commands).
The rationale behind this is that while a lot of different tasks may be
triggered using this cli, most of them require common configuration.
The basic invocation looks like
lexibank [OPTIONS] <command> [args]
"""
import sys
import os
import argparse
import readline
import glob
from termcolor import colored
from appdirs import user_config_dir
from clldutils.inifile import INI
from clldutils.clilib import ArgumentParserWithLogging, ParserError
from clldutils.path import Path
from clldutils.misc import lazyproperty
import pylexibank
from pylexibank.dataset import iter_datasets
from pylexibank.glottolog import Glottolog
from pylexibank.concepticon import Concepticon
import pylexibank.commands
assert pylexibank.commands
REPOS = [
('glottolog', 'clld/glottolog'),
('concepticon', 'clld/concepticon-data'),
]
# We want to provide tab-completion when the user is asked to provide local paths to
# repository clones.
def complete_dir(text, state): # pragma: no cover
if os.path.isdir(text) and not text.endswith(os.sep):
text += os.sep
return ([p for p in glob.glob(text + '*') if os.path.isdir(p)] + [None])[state]
readline.parse_and_bind("tab: complete")
readline.set_completer_delims('\t')
readline.set_completer(complete_dir)
class Config(INI):
@lazyproperty
def concepticon(self):
return Concepticon(self['paths']['concepticon'])
@lazyproperty
def glottolog(self):
return Glottolog(self['paths']['glottolog'])
@lazyproperty
def datasets(self):
return sorted(
iter_datasets(glottolog=self.glottolog, concepticon=self.concepticon, verbose=True),
key=lambda d: d.id)
def configure(cfgpath=None):
"""
Configure lexibank.
    :return: the lexibank configuration object
"""
cfgpath = Path(cfgpath) \
if cfgpath else Path(user_config_dir(pylexibank.__name__)) / 'config.ini'
if not cfgpath.exists():
print("""
{0}
You seem to be running lexibank for the first time.
Your system configuration will now be written to a config file to be used
whenever lexibank is run later on.
""".format(
colored('Welcome to lexibank!', 'blue', attrs=['bold', 'reverse'])))
if not cfgpath.parent.exists():
cfgpath.parent.mkdir(parents=True)
cfg = Config()
cfg['paths'] = {k: get_path(src) for k, src in REPOS}
cfg.write(cfgpath)
print("""
Configuration has been written to:
{0}
You may edit this file to adapt to changes in your system or to reconfigure settings
such as the logging level.""".format(cfgpath.resolve()))
else:
cfg = Config.from_file(cfgpath)
try:
cfg.glottolog
except (FileNotFoundError, ValueError):
raise ParserError('Misconfigured Glottolog path in {0}'.format(cfgpath))
if not Path(cfg['paths']['concepticon']).exists():
raise ParserError('Misconfigured Concepticon path in {0}'.format(cfgpath))
# Print the configuration directory for reference:
print("Using configuration file at:")
print(str(cfgpath) + '\n')
return cfg
def main(): # pragma: no cover
cfg = configure()
parser = ArgumentParserWithLogging(pylexibank.__name__)
parser.add_argument('--cfg', help=argparse.SUPPRESS, default=cfg)
parser.add_argument(
'--db',
help='path to SQLite db file',
default=os.path.join(os.getcwd(), 'lexibank.sqlite'))
sys.exit(parser.main())
|
lexibank/pylexibank | src/pylexibank/__main__.py | configure | python | def configure(cfgpath=None):
cfgpath = Path(cfgpath) \
if cfgpath else Path(user_config_dir(pylexibank.__name__)) / 'config.ini'
if not cfgpath.exists():
print("""
{0}
You seem to be running lexibank for the first time.
Your system configuration will now be written to a config file to be used
whenever lexibank is run later on.
""".format(
colored('Welcome to lexibank!', 'blue', attrs=['bold', 'reverse'])))
if not cfgpath.parent.exists():
cfgpath.parent.mkdir(parents=True)
cfg = Config()
cfg['paths'] = {k: get_path(src) for k, src in REPOS}
cfg.write(cfgpath)
print("""
Configuration has been written to:
{0}
You may edit this file to adapt to changes in your system or to reconfigure settings
such as the logging level.""".format(cfgpath.resolve()))
else:
cfg = Config.from_file(cfgpath)
try:
cfg.glottolog
except (FileNotFoundError, ValueError):
raise ParserError('Misconfigured Glottolog path in {0}'.format(cfgpath))
if not Path(cfg['paths']['concepticon']).exists():
raise ParserError('Misconfigured Concepticon path in {0}'.format(cfgpath))
# Print the configuration directory for reference:
print("Using configuration file at:")
print(str(cfgpath) + '\n')
return cfg | Configure lexibank.
:return: a pair (config, logger) | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/__main__.py#L88-L128 | null | """
Main command line interface of the pylexibank package.
Like programs such as git, this cli splits its functionality into sub-commands
(see e.g. https://docs.python.org/2/library/argparse.html#sub-commands).
The rationale behind this is that while a lot of different tasks may be
triggered using this cli, most of them require common configuration.
The basic invocation looks like
lexibank [OPTIONS] <command> [args]
"""
import sys
import os
import argparse
import readline
import glob
from termcolor import colored
from appdirs import user_config_dir
from clldutils.inifile import INI
from clldutils.clilib import ArgumentParserWithLogging, ParserError
from clldutils.path import Path
from clldutils.misc import lazyproperty
import pylexibank
from pylexibank.dataset import iter_datasets
from pylexibank.glottolog import Glottolog
from pylexibank.concepticon import Concepticon
import pylexibank.commands
assert pylexibank.commands
REPOS = [
('glottolog', 'clld/glottolog'),
('concepticon', 'clld/concepticon-data'),
]
# We want to provide tab-completion when the user is asked to provide local paths to
# repository clones.
def complete_dir(text, state): # pragma: no cover
if os.path.isdir(text) and not text.endswith(os.sep):
text += os.sep
return ([p for p in glob.glob(text + '*') if os.path.isdir(p)] + [None])[state]
readline.parse_and_bind("tab: complete")
readline.set_completer_delims('\t')
readline.set_completer(complete_dir)
def get_path(src): # pragma: no cover
"""
Prompts the user to input a local path.
:param src: github repository name
:return: Absolute local path
"""
res = None
while not res:
if res is False:
print(colored('You must provide a path to an existing directory!', 'red'))
print('You need a local clone or release of (a fork of) '
'https://github.com/{0}'.format(src))
res = input(colored('Local path to {0}: '.format(src), 'green', attrs=['blink']))
if res and Path(res).exists():
return Path(res).resolve()
res = False
class Config(INI):
@lazyproperty
def concepticon(self):
return Concepticon(self['paths']['concepticon'])
@lazyproperty
def glottolog(self):
return Glottolog(self['paths']['glottolog'])
@lazyproperty
def datasets(self):
return sorted(
iter_datasets(glottolog=self.glottolog, concepticon=self.concepticon, verbose=True),
key=lambda d: d.id)
def main(): # pragma: no cover
cfg = configure()
parser = ArgumentParserWithLogging(pylexibank.__name__)
parser.add_argument('--cfg', help=argparse.SUPPRESS, default=cfg)
parser.add_argument(
'--db',
help='path to SQLite db file',
default=os.path.join(os.getcwd(), 'lexibank.sqlite'))
sys.exit(parser.main())
|
lexibank/pylexibank | src/pylexibank/commands/misc.py | new_dataset | python | def new_dataset(args):
if not args.args:
raise ParserError('you must specify an existing directory')
outdir = Path(args.args.pop(0))
if not outdir.exists():
raise ParserError('you must specify an existing directory')
id_pattern = re.compile('[a-z_0-9]+$')
md = {}
if args.args:
md['id'] = args.args.pop(0)
else:
md['id'] = input('Dataset ID: ')
while not id_pattern.match(md['id']):
print('dataset id must only consist of lowercase ascii letters, digits and _ (underscore)!')
md['id'] = input('Dataset ID: ')
outdir = outdir / md['id']
if not outdir.exists():
outdir.mkdir()
for key in ['title', 'url', 'license', 'conceptlist', 'citation']:
md[key] = input('Dataset {0}: '.format(key))
# check license!
# check conceptlist!
for path in Path(pylexibank.__file__).parent.joinpath('dataset_template').iterdir():
if path.is_file():
if path.suffix in ['.pyc']:
continue # pragma: no cover
target = path.name
content = read_text(path)
if '+' in path.name:
target = re.sub(
'\+([a-z]+)\+',
lambda m: '{' + m.groups()[0] + '}',
path.name
).format(**md)
if target.endswith('_tmpl'):
target = target[:-5]
content = content.format(**md)
write_text(outdir / target, content)
else:
target = outdir / path.name
if target.exists():
shutil.rmtree(str(target))
shutil.copytree(str(path), str(target))
del md['id']
jsonlib.dump(md, outdir / 'metadata.json', indent=4) | lexibank new-dataset OUTDIR [ID] | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/commands/misc.py#L25-L78 | null | from collections import defaultdict, Counter, OrderedDict
from subprocess import check_call
import re
import shutil
from termcolor import colored
from segments.util import grapheme_pattern
from clldutils import licenses
from clldutils.path import Path, read_text, write_text
from clldutils.dsv import UnicodeWriter
from clldutils.markup import Table
from clldutils.clilib import command, confirm, ParserError
from clldutils.text import truncate_with_ellipsis
from clldutils import jsonlib
from pybtex.database import parse_file, BibliographyData
import pylexibank
from pylexibank.commands.util import with_dataset, get_dataset, _load, _unload
from pylexibank.util import log_dump, git_hash
from pylexibank.dataset import Dataset
from pylexibank.db import Database
@command('new-dataset')
@command()
def requirements(args):
    """Print pip ``-e git+...`` requirement lines pinning the data dependencies.

    Emits editable-install lines for glottolog/concepticon (pinned to the
    commits of the configured repos), for pylexibank itself when running a
    dev version, and one line per dataset recorded in the SQLite db.
    """
    if args.cfg.datasets:
        first = args.cfg.datasets[0]
        print('-e git+https://github.com/clld/glottolog.git@{0}#egg=pyglottolog'.format(
            git_hash(first.glottolog.repos)))
        print('-e git+https://github.com/clld/concepticon-data.git@{0}#egg=pyconcepticon'.format(
            git_hash(first.concepticon.repos)))
    if pylexibank.__version__.endswith('dev0'):
        # Development version: pin pylexibank to the local checkout's commit.
        repo_root = Path(pylexibank.__file__).parent.parent.parent
        print('-e git+https://github.com/lexibank/pylexibank.git@{0}#egg=pylexibank'.format(
            git_hash(repo_root)))
    db = Database(args.db)
    db.create(exists_ok=True)
    # One requirement line per dataset already loaded into the db.
    for row in db.fetchall('select id, version from dataset'):
        print('-e git+https://github.com/lexibank/{0}.git@{1}#egg=lexibank_{0}'.format(*row))
@command()
def orthography(args):  # pragma: no cover
    """Write an initial orthography profile (``orthography.tsv``) for a dataset.

    Counts grapheme occurrences over the raw lexemes and writes them, most
    frequent first, with the IPA column pre-filled with the grapheme itself.
    """
    ds = get_dataset(args)
    out = ds.dir.joinpath('orthography.tsv')
    # Never silently clobber an existing profile.
    if out.exists() and not confirm(
            'There already is an orthography profile for this dataset. Overwrite?',
            default=False):
        return
    counts = Counter()
    for lexeme in ds.iter_raw_lexemes():
        counts.update(grapheme_pattern.findall(lexeme))
    with UnicodeWriter(out, delimiter='\t') as writer:
        writer.writerow(['graphemes', 'frequency', 'IPA'])
        for g, freq in counts.most_common():
            writer.writerow([g, '{0}'.format(freq), g])
    log_dump(out, log=args.log)
@command()
def load(args):
"""Run the ``_load`` helper for the selected datasets (all by default)."""
with_dataset(args, _load, default_to_all=True)
@command()
def unload(args):
"""Run the ``_unload`` helper for the selected datasets (all by default)."""
with_dataset(args, _unload, default_to_all=True)
@command()
def download(args):
"""Run a dataset's download command
lexibank download DATASET_ID
"""
# Delegates to Dataset._download for the dataset named on the command line.
with_dataset(args, Dataset._download)
@command()
def makecldf(args):
"""Convert a dataset into CLDF
lexibank makecldf DATASET_ID
"""
# Delegates to Dataset._install (the CLDF conversion step).
with_dataset(args, Dataset._install)
@command()
def db(args):
"""Open an interactive ``sqlite3`` shell connected to the lexibank database."""
db = str(Database(args.db).fname)
args.log.info('connecting to {0}'.format(colored(db, 'green')))
# Hands control to the sqlite3 CLI; returns when that shell exits.
check_call(['sqlite3', db])
@command()
def diff(args):
    """Show uncommitted changes (modified and untracked files) per dataset repo."""
    def _report(ds, **kw):
        repo = ds.git_repo
        if not (repo and repo.is_dirty()):
            return
        print('{0} at {1}'.format(
            colored(ds.id, 'blue', attrs=['bold']),
            colored(str(ds.dir), 'blue')))
        # Each section header is printed once, just before its first entry.
        for idx, item in enumerate(repo.index.diff(None)):
            if idx == 0:
                print(colored('modified:', attrs=['bold']))
            print(colored(item.a_path, 'green'))
        for idx, path in enumerate(repo.untracked_files):
            if idx == 0:
                print(colored('untracked:', attrs=['bold']))
            print(colored(path, 'green'))
        print()
    if not args.args:
        # No explicit datasets given: default to all configured ones.
        args.args = [ds.id for ds in args.cfg.datasets]
    with_dataset(args, _report)
@command()
def ls(args):
"""
lexibank ls [COLS]+
column specification:
- license
- lexemes
- macroareas
"""
# NOTE(review): indentation of this block was lost in extraction; code lines
# are kept byte-identical, with review comments added in between.
db = Database(args.db)
db.create(exists_ok=True)
# Map dataset id -> version for datasets already loaded into the SQLite db.
in_db = {r[0]: r[1] for r in db.fetchall('select id, version from dataset')}
# FIXME: how to smartly choose columns?
table = Table('ID', 'Title')
# Keep only recognized column names, preserving command-line order; each
# maps to a per-dataset lookup dict that is filled below.
cols = OrderedDict([
(col, {}) for col in args.args if col in [
'version',
'location',
'changes',
'license',
'all_lexemes',
'lexemes',
'concepts',
'languages',
'families',
'varieties',
'macroareas',
]])
# Title truncation width: narrower when extra columns compete for space.
tl = 40
if cols:
tl = 25
table.columns.extend(col.capitalize() for col in cols)
# Pre-fetch aggregate results (dataset id -> value) for the columns that
# are backed by queries in the database layer.
for col, sql in [
('languages', 'glottocodes_by_dataset'),
('concepts', 'conceptsets_by_dataset'),
('lexemes', 'mapped_lexemes_by_dataset'),
('all_lexemes', 'lexemes_by_dataset'),
('macroareas', 'macroareas_by_dataset'),
('families', 'families_by_dataset'),
]:
if col in cols:
cols[col] = {r[0]: r[1] for r in db.fetchall(sql)}
# One table row per configured dataset; id is green when loaded, red otherwise.
for ds in args.cfg.datasets:
row = [
colored(ds.id, 'green' if ds.id in in_db else 'red'),
truncate_with_ellipsis(ds.metadata.title or '', width=tl),
]
for col in cols:
if col == 'version':
row.append(git_hash(ds.dir))
elif col == 'location':
row.append(colored(str(ds.dir), 'green'))
elif col == 'changes':
row.append(ds.git_repo.is_dirty())
elif col == 'license':
lic = licenses.find(ds.metadata.license or '')
row.append(lic.id if lic else ds.metadata.license)
elif col in ['languages', 'concepts', 'lexemes', 'all_lexemes', 'families']:
row.append(float(cols[col].get(ds.id, 0)))
elif col == 'macroareas':
row.append(', '.join(sorted((cols[col].get(ds.id) or '').split(','))))
else:
# Recognized but uncomputed columns (e.g. 'varieties') stay blank.
row.append('')
table.append(row)
# Totals row; 'zztotal' sorts last under the r[0] sort key used in render().
totals = ['zztotal', len(args.cfg.datasets)]
for i, col in enumerate(cols):
if col in ['lexemes', 'all_lexemes']:
# Sum the already-computed per-dataset cells; offset 2 skips ID/Title.
totals.append(sum([r[i + 2] for r in table]))
elif col == 'languages':
totals.append(float(db.fetchone(
"SELECT count(distinct glottocode) FROM languagetable")[0]))
elif col == 'concepts':
totals.append(float(db.fetchone(
"SELECT count(distinct concepticon_id) FROM parametertable")[0]))
elif col == 'families':
totals.append(float(db.fetchone(
"SELECT count(distinct family) FROM languagetable")[0]))
else:
totals.append('')
table.append(totals)
print(table.render(
tablefmt='simple', sortkey=lambda r: r[0], condensed=False, floatfmt=',.0f'))
@command()
def bib(args):
    """Merge every dataset's ``.bib`` files into one global ``lexibank.bib``."""
    merged = BibliographyData()
    def _collect(ds, **kw):
        for bibfile in ds.cldf_dir.glob('*.bib'):
            parsed = parse_file(str(bibfile))
            for key, entry in parsed.entries.items():
                # Namespace entry keys by dataset id; first occurrence wins.
                qualified = '{0}:{1}'.format(ds.id, key)
                if qualified not in merged.entries:
                    merged.add_entry(qualified, entry)
    with_dataset(args, _collect, default_to_all=True)
    target = Path(args.cfg['paths']['lexibank']).joinpath('lexibank.bib')
    merged.to_file(str(target))
@command()
def clean(args):
"""
Remove CLDF formatted data for given dataset.
lexibank clean [DATASET_ID]
"""
# Delegates to Dataset._clean for each selected dataset.
with_dataset(args, Dataset._clean)
# - need set of all concepts per variety.
# - loop over concept lists
# - if concept ids is subset of variety, count that language.
@command()
def coverage(args): # pragma: no cover
"""Report how well concept lists cover the aggregated lexibank data."""
# NOTE(review): indentation of this block was lost in extraction; code lines
# are kept byte-identical, with review comments added in between.
# Imported lazily: only this command needs pyconcepticon.
from pyconcepticon.api import Concepticon
# Accumulators filled by each dataset's coverage() hook:
# varieties / glangs: id -> set of concepticon ids attested for it;
# concept_count: concept id -> set of varieties attesting it (see the
# top-200 report below) — TODO confirm against Dataset.coverage.
varieties = defaultdict(set)
glangs = defaultdict(set)
concept_count = defaultdict(set)
# Counters of concept lists reaching 80/85/90% coverage, per glottolog
# language (res80..) and per variety (res80v..).
res80 = Counter()
res85 = Counter()
res90 = Counter()
res80v = Counter()
res85v = Counter()
res90v = Counter()
def _coverage(ds, **kw):
ds.coverage(varieties, glangs, concept_count)
with_dataset(args, _coverage)
print('varieties', len(varieties))
concepticon = Concepticon(args.cfg['paths']['concepticon'])
for cl in concepticon.conceptlists.values():
try:
concepts = set(
int(cc.concepticon_id) for cc in cl.concepts.values() if cc.concepticon_id
)
except: # noqa: E722
# Skip concept lists whose concepticon ids do not parse as ints.
continue
for varid, meanings in varieties.items():
# compute relative size of intersection instead!
c = len(concepts.intersection(meanings)) / len(concepts)
if c >= 0.8:
res80v.update([cl.id])
if c >= 0.85:
res85v.update([cl.id])
if c >= 0.9:
res90v.update([cl.id])
for varid, meanings in glangs.items():
# compute relative size of intersection instead!
c = len(concepts.intersection(meanings)) / len(concepts)
if c >= 0.8:
res80.update([cl.id])
if c >= 0.85:
res85.update([cl.id])
if c >= 0.9:
res90.update([cl.id])
# Render the ten best-covering concept lists of a counter as a table.
def print_count(count):
t = Table('concept list', 'glang count')
for p in count.most_common(n=10):
t.append(list(p))
print(t.render(tablefmt='simple', condensed=False))
print('\nGlottolog languages with coverage > 80%:')
print_count(res80)
print('\nGlottolog languages with coverage > 85%:')
print_count(res85)
print('\nGlottolog languages with coverage > 90%:')
print_count(res90)
print('\nVarieties with coverage > 80%:')
print_count(res80v)
print('\nVarieties with coverage > 85%:')
print_count(res85v)
print('\nVarieties with coverage > 90%:')
print_count(res90v)
print('\ntop-200 concepts:')
t = Table('cid', 'gloss', 'varieties')
for n, m in sorted(
[(cid, len(vars)) for cid, vars in concept_count.items()],
key=lambda i: -i[1])[:200]:
t.append([n, concepticon.conceptsets['%s' % n].gloss, m])
print(t.render(tablefmt='simple', condensed=False))
|
lexibank/pylexibank | src/pylexibank/commands/misc.py | ls | python | def ls(args):
db = Database(args.db)
db.create(exists_ok=True)
in_db = {r[0]: r[1] for r in db.fetchall('select id, version from dataset')}
# FIXME: how to smartly choose columns?
table = Table('ID', 'Title')
cols = OrderedDict([
(col, {}) for col in args.args if col in [
'version',
'location',
'changes',
'license',
'all_lexemes',
'lexemes',
'concepts',
'languages',
'families',
'varieties',
'macroareas',
]])
tl = 40
if cols:
tl = 25
table.columns.extend(col.capitalize() for col in cols)
for col, sql in [
('languages', 'glottocodes_by_dataset'),
('concepts', 'conceptsets_by_dataset'),
('lexemes', 'mapped_lexemes_by_dataset'),
('all_lexemes', 'lexemes_by_dataset'),
('macroareas', 'macroareas_by_dataset'),
('families', 'families_by_dataset'),
]:
if col in cols:
cols[col] = {r[0]: r[1] for r in db.fetchall(sql)}
for ds in args.cfg.datasets:
row = [
colored(ds.id, 'green' if ds.id in in_db else 'red'),
truncate_with_ellipsis(ds.metadata.title or '', width=tl),
]
for col in cols:
if col == 'version':
row.append(git_hash(ds.dir))
elif col == 'location':
row.append(colored(str(ds.dir), 'green'))
elif col == 'changes':
row.append(ds.git_repo.is_dirty())
elif col == 'license':
lic = licenses.find(ds.metadata.license or '')
row.append(lic.id if lic else ds.metadata.license)
elif col in ['languages', 'concepts', 'lexemes', 'all_lexemes', 'families']:
row.append(float(cols[col].get(ds.id, 0)))
elif col == 'macroareas':
row.append(', '.join(sorted((cols[col].get(ds.id) or '').split(','))))
else:
row.append('')
table.append(row)
totals = ['zztotal', len(args.cfg.datasets)]
for i, col in enumerate(cols):
if col in ['lexemes', 'all_lexemes']:
totals.append(sum([r[i + 2] for r in table]))
elif col == 'languages':
totals.append(float(db.fetchone(
"SELECT count(distinct glottocode) FROM languagetable")[0]))
elif col == 'concepts':
totals.append(float(db.fetchone(
"SELECT count(distinct concepticon_id) FROM parametertable")[0]))
elif col == 'families':
totals.append(float(db.fetchone(
"SELECT count(distinct family) FROM languagetable")[0]))
else:
totals.append('')
table.append(totals)
print(table.render(
tablefmt='simple', sortkey=lambda r: r[0], condensed=False, floatfmt=',.0f')) | lexibank ls [COLS]+
column specification:
- license
- lexemes
- macroareas | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/commands/misc.py#L181-L264 | [
"def git_hash(d):\n return git_describe(d).split('-g')[-1]\n",
" def create(self, force=False, exists_ok=False):\n \"\"\"\n Creates a db file with the core schema.\n\n :param force: If `True` an existing db file will be overwritten.\n \"\"\"\n if self.fname and self.fname.exists():\n if force:\n self.drop()\n elif exists_ok:\n return\n else:\n raise ValueError('db file already exists, use force=True to overwrite')\n with self.connection() as db:\n db.execute(\n \"\"\"\\\nCREATE TABLE dataset (\n ID TEXT PRIMARY KEY NOT NULL,\n name TEXT,\n version TEXT,\n metadata_json TEXT\n)\"\"\")\n db.execute(\"\"\"\\\nCREATE TABLE datasetmeta (\n dataset_ID TEXT ,\n key TEXT,\n value TEXT,\n PRIMARY KEY (dataset_ID, key),\n FOREIGN KEY(dataset_ID) REFERENCES dataset(ID)\n)\"\"\")\n db.execute(\"\"\"\\\nCREATE TABLE SourceTable (\n dataset_ID TEXT ,\n ID TEXT ,\n bibtex_type TEXT,\n {0}\n extra TEXT,\n PRIMARY KEY (dataset_ID, ID),\n FOREIGN KEY(dataset_ID) REFERENCES dataset(ID)\n)\"\"\".format('\\n '.join('`{0}` TEXT,'.format(f) for f in BIBTEX_FIELDS)))\n",
"def fetchone(self, sql, params=None, conn=None, verbose=False):\n return self._fetch(sql, 'fetchone', params, conn, verbose=verbose)\n",
"def fetchall(self, sql, params=None, conn=None, verbose=False):\n return self._fetch(sql, 'fetchall', params, conn, verbose=verbose)\n"
] | from collections import defaultdict, Counter, OrderedDict
from subprocess import check_call
import re
import shutil
from termcolor import colored
from segments.util import grapheme_pattern
from clldutils import licenses
from clldutils.path import Path, read_text, write_text
from clldutils.dsv import UnicodeWriter
from clldutils.markup import Table
from clldutils.clilib import command, confirm, ParserError
from clldutils.text import truncate_with_ellipsis
from clldutils import jsonlib
from pybtex.database import parse_file, BibliographyData
import pylexibank
from pylexibank.commands.util import with_dataset, get_dataset, _load, _unload
from pylexibank.util import log_dump, git_hash
from pylexibank.dataset import Dataset
from pylexibank.db import Database
@command('new-dataset')
def new_dataset(args):
    """
    lexibank new-dataset OUTDIR [ID]

    Interactively scaffold a new dataset directory under OUTDIR from the
    bundled ``dataset_template``, then write the collected metadata (minus
    the id, which becomes the directory name) to ``metadata.json``.

    Raises:
        ParserError: if no existing output directory is given.
    """
    if not args.args:
        raise ParserError('you must specify an existing directory')
    outdir = Path(args.args.pop(0))
    if not outdir.exists():
        raise ParserError('you must specify an existing directory')
    # Dataset ids must be usable as directory and python module names.
    id_pattern = re.compile(r'[a-z_0-9]+$')
    md = {}
    if args.args:
        md['id'] = args.args.pop(0)
    else:
        md['id'] = input('Dataset ID: ')
    while not id_pattern.match(md['id']):
        print('dataset id must only consist of lowercase ascii letters, digits and _ (underscore)!')
        md['id'] = input('Dataset ID: ')
    outdir = outdir / md['id']
    if not outdir.exists():
        outdir.mkdir()
    for key in ['title', 'url', 'license', 'conceptlist', 'citation']:
        md[key] = input('Dataset {0}: '.format(key))
    # check license!
    # check conceptlist!
    for path in Path(pylexibank.__file__).parent.joinpath('dataset_template').iterdir():
        if path.is_file():
            if path.suffix in ['.pyc']:
                continue  # pragma: no cover
            target = path.name
            content = read_text(path)
            if '+' in path.name:
                # '+key+' markers in template file names are placeholders for
                # metadata values, rewritten to '{key}' and then formatted.
                # BUG FIX: the pattern is now a raw string; the previous
                # '\+...' literal relied on a deprecated invalid escape
                # (SyntaxWarning in Python 3.12+).
                target = re.sub(
                    r'\+([a-z]+)\+',
                    lambda m: '{' + m.groups()[0] + '}',
                    path.name
                ).format(**md)
            if target.endswith('_tmpl'):
                # '*_tmpl' files are templates: drop the suffix and expand
                # '{key}' placeholders inside the file content.
                target = target[:-5]
                content = content.format(**md)
            write_text(outdir / target, content)
        else:
            # Directories are copied wholesale, replacing any stale copy.
            target = outdir / path.name
            if target.exists():
                shutil.rmtree(str(target))
            shutil.copytree(str(path), str(target))
    del md['id']
    jsonlib.dump(md, outdir / 'metadata.json', indent=4)
@command()
def requirements(args):
if args.cfg.datasets:
print(
'-e git+https://github.com/clld/glottolog.git@{0}#egg=pyglottolog'.format(
git_hash(args.cfg.datasets[0].glottolog.repos)))
print(
'-e git+https://github.com/clld/concepticon-data.git@{0}#egg=pyconcepticon'.format(
git_hash(args.cfg.datasets[0].concepticon.repos)))
if pylexibank.__version__.endswith('dev0'):
print(
'-e git+https://github.com/lexibank/pylexibank.git@{0}#egg=pylexibank'.format(
git_hash(Path(pylexibank.__file__).parent.parent.parent)))
db = Database(args.db)
db.create(exists_ok=True)
for r in db.fetchall('select id, version from dataset'):
print(
'-e git+https://github.com/lexibank/{0}.git@{1}#egg=lexibank_{0}'.format(*r))
@command()
def orthography(args): # pragma: no cover
ds = get_dataset(args)
out = ds.dir.joinpath('orthography.tsv')
if out.exists():
if not confirm(
'There already is an orthography profile for this dataset. Overwrite?',
default=False):
return
graphemes = Counter()
for line in ds.iter_raw_lexemes():
graphemes.update(grapheme_pattern.findall(line))
with UnicodeWriter(out, delimiter='\t') as writer:
writer.writerow(['graphemes', 'frequency', 'IPA'])
for grapheme, frequency in graphemes.most_common():
writer.writerow([grapheme, '{0}'.format(frequency), grapheme])
log_dump(out, log=args.log)
@command()
def load(args):
with_dataset(args, _load, default_to_all=True)
@command()
def unload(args):
with_dataset(args, _unload, default_to_all=True)
@command()
def download(args):
"""Run a dataset's download command
lexibank download DATASET_ID
"""
with_dataset(args, Dataset._download)
@command()
def makecldf(args):
"""Convert a dataset into CLDF
lexibank makecldf DATASET_ID
"""
with_dataset(args, Dataset._install)
@command()
def db(args):
db = str(Database(args.db).fname)
args.log.info('connecting to {0}'.format(colored(db, 'green')))
check_call(['sqlite3', db])
@command()
def diff(args):
def _diff(ds, **kw):
repo = ds.git_repo
if repo and repo.is_dirty():
print('{0} at {1}'.format(
colored(ds.id, 'blue', attrs=['bold']),
colored(str(ds.dir), 'blue')))
for i, item in enumerate(repo.index.diff(None)):
if i == 0:
print(colored('modified:', attrs=['bold']))
print(colored(item.a_path, 'green'))
for i, path in enumerate(repo.untracked_files):
if i == 0:
print(colored('untracked:', attrs=['bold']))
print(colored(path, 'green'))
print()
if not args.args:
args.args = [ds.id for ds in args.cfg.datasets]
with_dataset(args, _diff)
@command()
@command()
def bib(args):
gbib = BibliographyData()
def _harvest(ds, **kw):
for bib in ds.cldf_dir.glob('*.bib'):
bib = parse_file(str(bib))
for id_, entry in bib.entries.items():
id_ = '{0}:{1}'.format(ds.id, id_)
if id_ not in gbib.entries:
gbib.add_entry(id_, entry)
with_dataset(args, _harvest, default_to_all=True)
gbib.to_file(str(Path(args.cfg['paths']['lexibank']).joinpath('lexibank.bib')))
@command()
def clean(args):
"""
Remove CLDF formatted data for given dataset.
lexibank clean [DATASET_ID]
"""
with_dataset(args, Dataset._clean)
# - need set of all concepts per variety.
# - loop over concept lists
# - if concept ids is subset of variety, count that language.
@command()
def coverage(args): # pragma: no cover
from pyconcepticon.api import Concepticon
varieties = defaultdict(set)
glangs = defaultdict(set)
concept_count = defaultdict(set)
res80 = Counter()
res85 = Counter()
res90 = Counter()
res80v = Counter()
res85v = Counter()
res90v = Counter()
def _coverage(ds, **kw):
ds.coverage(varieties, glangs, concept_count)
with_dataset(args, _coverage)
print('varieties', len(varieties))
concepticon = Concepticon(args.cfg['paths']['concepticon'])
for cl in concepticon.conceptlists.values():
try:
concepts = set(
int(cc.concepticon_id) for cc in cl.concepts.values() if cc.concepticon_id
)
except: # noqa: E722
continue
for varid, meanings in varieties.items():
# compute relative size of intersection instead!
c = len(concepts.intersection(meanings)) / len(concepts)
if c >= 0.8:
res80v.update([cl.id])
if c >= 0.85:
res85v.update([cl.id])
if c >= 0.9:
res90v.update([cl.id])
for varid, meanings in glangs.items():
# compute relative size of intersection instead!
c = len(concepts.intersection(meanings)) / len(concepts)
if c >= 0.8:
res80.update([cl.id])
if c >= 0.85:
res85.update([cl.id])
if c >= 0.9:
res90.update([cl.id])
def print_count(count):
t = Table('concept list', 'glang count')
for p in count.most_common(n=10):
t.append(list(p))
print(t.render(tablefmt='simple', condensed=False))
print('\nGlottolog languages with coverage > 80%:')
print_count(res80)
print('\nGlottolog languages with coverage > 85%:')
print_count(res85)
print('\nGlottolog languages with coverage > 90%:')
print_count(res90)
print('\nVarieties with coverage > 80%:')
print_count(res80v)
print('\nVarieties with coverage > 85%:')
print_count(res85v)
print('\nVarieties with coverage > 90%:')
print_count(res90v)
print('\ntop-200 concepts:')
t = Table('cid', 'gloss', 'varieties')
for n, m in sorted(
[(cid, len(vars)) for cid, vars in concept_count.items()],
key=lambda i: -i[1])[:200]:
t.append([n, concepticon.conceptsets['%s' % n].gloss, m])
print(t.render(tablefmt='simple', condensed=False))
|
lexibank/pylexibank | src/pylexibank/transcription.py | analyze | python | def analyze(segments, analysis, lookup=dict(bipa={}, dolgo={})):
# raise a ValueError in case of empty segments/strings
if not segments:
raise ValueError('Empty sequence.')
# test if at least one element in `segments` has information
# (helps to catch really badly formed input, such as ['\n']
if not [segment for segment in segments if segment.strip()]:
raise ValueError('No information in the sequence.')
# build the phonologic and sound class analyses
try:
bipa_analysis, sc_analysis = [], []
for s in segments:
a = lookup['bipa'].get(s)
if a is None:
a = lookup['bipa'].setdefault(s, BIPA[s])
bipa_analysis.append(a)
sc = lookup['dolgo'].get(s)
if sc is None:
sc = lookup['dolgo'].setdefault(s, BIPA.translate(s, DOLGO))
sc_analysis.append(sc)
except: # noqa
print(segments)
raise
# compute general errors; this loop must take place outside the
# following one because the code for computing single errors (either
# in `bipa_analysis` or in `soundclass_analysis`) is unnecessary
# complicated
for sound_bipa, sound_class in zip(bipa_analysis, sc_analysis):
if isinstance(sound_bipa, pyclts.models.UnknownSound) or sound_class == '?':
analysis.general_errors += 1
# iterate over the segments and analyses, updating counts of occurrences
# and specific errors
for segment, sound_bipa, sound_class in zip(segments, bipa_analysis, sc_analysis):
# update the segment count
analysis.segments.update([segment])
# add an error if we got an unknown sound, otherwise just append
# the `replacements` dictionary
if isinstance(sound_bipa, pyclts.models.UnknownSound):
analysis.bipa_errors.add(segment)
else:
analysis.replacements[sound_bipa.source].add(sound_bipa.__unicode__())
# update sound class errors, if any
if sound_class == '?':
analysis.sclass_errors.add(segment)
return segments, bipa_analysis, sc_analysis, analysis | Test a sequence for compatibility with CLPA and LingPy.
:param analysis: Pass a `TranscriptionAnalysis` instance for cumulative reporting. | train | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/transcription.py#L37-L94 | null | from collections import Counter, defaultdict
import attr
import pyclts
import pyclts.models
from pyclts import TranscriptionSystem, SoundClasses
from clldutils.markup import Table
BIPA = TranscriptionSystem('bipa')
DOLGO = SoundClasses('dolgo')
@attr.s
class Analysis(object):
# map segments to frequency
segments = attr.ib(default=attr.Factory(Counter))
# aggregate segments which are invalid for lingpy
bipa_errors = attr.ib(default=attr.Factory(set))
# aggregate segments which are invalid for clpa
sclass_errors = attr.ib(default=attr.Factory(set))
# map clpa-replaceable segments to their replacements
replacements = attr.ib(default=defaultdict(set))
# count number of errors
general_errors = attr.ib(default=0)
@attr.s
class Stats(Analysis):
inventory_size = attr.ib(default=0)
invalid_words = attr.ib(default=attr.Factory(list))
invalid_words_count = attr.ib(default=0)
bad_words = attr.ib(default=attr.Factory(list))
bad_words_count = attr.ib(default=0)
# Note: We use a mutable default argument intentionally to serve as a cache.
TEMPLATE = """
# Detailed transcription record
## Segments
{0}
## Unsegmentable lexemes (up to 100 only)
{1}
## Words with invalid segments (up to 100 only)
{2}
"""
def report(analysis):
segments = Table('Segment', 'Occurrence', 'BIPA', 'CLTS SoundClass')
for a, b in sorted(
analysis['stats']['segments'].items(), key=lambda x: (-x[1], x[0])):
c, d = '✓', '✓'
if a in analysis['stats']['sclass_errors']:
c = '✓' if a not in analysis['stats']['bipa_errors'] else '?'
d = ', '.join(analysis['stats']['sclass_errors'][a]) \
if a not in analysis['stats']['sclass_errors'] else '?'
# escape pipe for markdown table if necessary
a = a.replace('|', '|')
segments.append([a, b, c, d])
invalid = Table('ID', 'LANGUAGE', 'CONCEPT', 'FORM')
for row in analysis['stats']['invalid_words']:
invalid.append(row)
words = Table('ID', 'LANGUAGE', 'CONCEPT', 'FORM', 'SEGMENTS')
for row in analysis['stats']['bad_words']:
words.append(row)
return TEMPLATE.format(
segments.render(verbose=True),
invalid.render(verbose=True),
words.render(verbose=True))
|
transifex/transifex-python-library | txlib/api/base.py | BaseModel.get | python | def get(cls, **kwargs):
fields = {}
for field in cls.url_fields:
value = kwargs.pop(field, None)
if value is None:
cls._handle_wrong_field(field, ATTR_TYPE_URL)
fields[field] = value
# Create an instance of the model class and make the GET request
model = cls(**fields)
model._populate(**kwargs)
return model | Retrieve an object by making a GET request to Transifex.
Each value in `kwargs` that corresponds to a field
defined in `self.url_fields` will be used in the URL path
of the request, so that a particular entry of this model
is identified and retrieved.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed as kwargs
txlib.http.exceptions.NotFoundError: if the object with these
attributes is not found on the remote server
txlib.http.exceptions.ServerError subclass: depending on
the particular server response
Example:
# Note: also catch exceptions
>>> obj = MyModel.get(attr1=value1, attr2=value2) | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/api/base.py#L109-L139 | [
"def _populate(self, **kwargs):\n \"\"\"Populate the instance with the values from the server.\"\"\"\n self._populated_fields = self._get(**kwargs)\n"
] | class BaseModel(object):
"""Base class for Transifex models.
Each model has a list of fields. The model works as a proxy between the
local application and the remote Transifex server.
The user of the class can:
a) retrieve an existing remote instance of the model
by using the static method `MyModel.get(...)`
b) create a new local instance with a set of populated fields
and call `save()` in order to save it to the remote server
c) delete an existing remote instance, by first creating a local instance
and all necessary attributes that identify the object, and then
calling `delete()` on it.
"""
# The URI prefix of all API endpoints for this model
_prefix = ''
# The URI for retrieving a collection of multiple items
# (shouldn't start with a slash)
_path_to_collection = ''
# The URl for retrieving a single item
# (shouldn't start with a slash)
_path_to_item = ''
# All fields defined here will be used for constructing
# the URL of the request
url_fields = set()
# These fields can be modified in POST/PUT requests
writable_fields = set()
# Initially False, set to True when an instance of the class is created
_is_initialized = False
@classmethod
def __init__(self, prefix='/api/2/', **url_values):
"""Constructor.
Initializes various variables, setup the HTTP handler and
stores all values
Args:
prefix: The prefix of the urls.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed
"""
self._http = registry.http_handler
self._prefix = prefix
self._modified_fields = {}
self._populated_fields = {}
for field in url_values:
if field in self.url_fields:
setattr(self, field, url_values[field])
else:
self._handle_wrong_field(field, ATTR_TYPE_URL)
# From now on only, only specific attributes can be set
# on this object:
# a) one of the instance variables set above
# b) one of the attributes found in `self.writable_fields`
self._is_initialized = True
def __getattr__(self, name, default=None):
    """Return the value of the field with the given name.

    Locally modified values (``self._modified_fields``) shadow the
    server-populated ones (``self._populated_fields``).

    Raises:
        AttributeError: if the requested attribute does not exist
    """
    # Check the stores in priority order: modifications first.
    for store in (self._modified_fields, self._populated_fields):
        if name in store:
            return store[name]
    self._handle_wrong_field(name, ATTR_TYPE_READ)
def __setattr__(self, name, value):
"""Set the value of a field.
This method only allows certain attributes to be set:
a) Any attribute that is defined in `__init__()`
b) Any attribute found in `self.writable_fields`
For the rest it will raise an AttributeError.
For case (a), the attribute is saved directly on this object
For case (b), the attribute is saved in `self.writable_fields`
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`,
"""
# If __init__() hasn't finished yet, accept anything
if ('_is_initialized' not in self.__dict__) or (name in self.__dict__):
return super(BaseModel, self).__setattr__(name, value)
elif name in self.writable_fields:
self._modified_fields[name] = value
else:
self._handle_wrong_field(name, ATTR_TYPE_WRITE)
def save(self, **fields):
    """Persist this instance to the remote Transifex server.

    Updates the remote object when the instance was previously populated
    from the server, otherwise creates a new one.  Every key in ``fields``
    must appear in ``self.writable_fields``.

    Raises:
        AttributeError: if a given field is not a writable attribute
    """
    for name, value in fields.items():
        if name not in self.writable_fields:
            self._handle_wrong_field(name, ATTR_TYPE_WRITE)
        setattr(self, name, value)
    # A populated instance mirrors an existing remote object -> update it;
    # otherwise create a fresh one from the locally modified fields.
    operation = self._update if self._populated_fields else self._create
    operation(**self._modified_fields)
def delete(self):
"""Delete the instance from the remote Transifex server."""
self._delete()
def _populate(self, **kwargs):
"""Populate the instance with the values from the server."""
self._populated_fields = self._get(**kwargs)
def _get(self, **kwargs):
"""Get the resource from a remote Transifex server."""
path = self._construct_path_to_item()
return self._http.get(path)
def _create(self, **kwargs):
"""Create a resource in the remote Transifex server."""
path = self._construct_path_to_collection()
# Use the fields for which we have values
for field in self.writable_fields:
try:
value = getattr(self, field)
kwargs[field] = value
except AttributeError:
pass
return self._http.post(path, json.dumps(kwargs))
def _update(self, **kwargs):
"""Update a resource in a remote Transifex server."""
path = self._construct_path_to_item()
if not kwargs:
return
return self._http.put(path, json.dumps(kwargs))
def _delete(self, **kwargs):
"""Delete a resource from a remote Transifex server."""
path = self._construct_path_to_item()
return self._http.delete(path)
def _construct_path_to_collection(self):
"""Construct the path to an actual collection."""
template = self.get_path_to_collection_template() # flake8 fix
return template % self.get_url_parameters()
def _construct_path_to_item(self):
"""Construct the path to an actual item."""
return self.get_path_to_item_template() % self.get_url_parameters()
def get_url_parameters(self):
"""Create a dictionary of parameters used in URLs for this model."""
url_fields = {}
for field in self.url_fields:
url_fields[field] = getattr(self, field)
return url_fields
def get_path_to_collection_template(self):
"""The URL to access the collection of the model."""
return self._join_subpaths(self._prefix, self._path_to_collection)
def get_path_to_item_template(self):
"""The URL to access a specific item of the model."""
return self._join_subpaths(self._prefix, self._path_to_item)
def _join_subpaths(self, *args):
"""Join subpaths (given as arguments) to form a
well-defined URL path.
"""
return '/'.join(args).replace('///', '/').replace('//', '/')
@classmethod
def _handle_wrong_field(cls, field_name, field_type):
    """Log and raise for an invalid attribute access on this model class.

    Assumes the given field is invalid, without making any checks.

    Raises:
        AttributeError: always — either for the invalid field itself, or
            for an unrecognized ``field_type`` value.
    """
    # Human-readable label for each supported access kind.
    labels = {
        ATTR_TYPE_READ: 'readable',
        ATTR_TYPE_WRITE: 'writable',
        ATTR_TYPE_URL: 'URL',
    }
    label = labels.get(field_type)
    if label is None:
        raise AttributeError('Invalid attribute type: {}'.format(
            field_type
        ))
    msg = '{} has no {} attribute "{}"'.format(
        cls.__name__,
        label,
        field_name
    )
    _logger.error(msg)
    raise AttributeError(msg)
|
def save(self, **fields):
    """Persist this instance to the remote Transifex server.

    Pre-populated instances are updated remotely; fresh instances are
    created.  Values passed in ``fields`` are set on the object first,
    alongside anything previously assigned via attribute access.

    Raises:
        AttributeError: if a given field is not listed in
            ``self.writable_fields``.
    """
    for name, value in fields.items():
        if name not in self.writable_fields:
            # Raises AttributeError (and logs) for unknown fields.
            self._handle_wrong_field(name, ATTR_TYPE_WRITE)
        setattr(self, name, value)
    if self._populated_fields:
        self._update(**self._modified_fields)
    else:
        self._create(**self._modified_fields)
If it was pre-populated, it updates the instance on the server,
otherwise it creates a new object.
Any values given in `fields` will be attempted to be saved
on the object. The same goes for any other values already set
to the object by `model_instance.attr = value`.
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`, | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/api/base.py#L213-L236 | [
"def _create(self, **kwargs):\n \"\"\"Create a resource in the remote Transifex server.\"\"\"\n path = self._construct_path_to_collection()\n\n # Use the fields for which we have values\n for field in self.writable_fields:\n try:\n value = getattr(self, field)\n kwargs[field] = value\n except AttributeError:\n pass\n return self._http.post(path, json.dumps(kwargs))\n",
"def _update(self, **kwargs):\n \"\"\"Update a resource in a remote Transifex server.\"\"\"\n path = self._construct_path_to_item()\n if not kwargs:\n return\n return self._http.put(path, json.dumps(kwargs))\n",
"def _handle_wrong_field(cls, field_name, field_type):\n \"\"\"Raise an exception whenever an invalid attribute with\n the given name was attempted to be set to or retrieved from\n this model class.\n\n Assumes that the given field is invalid, without making any checks.\n\n Also adds an entry to the logs.\n \"\"\"\n if field_type == ATTR_TYPE_READ:\n field_type = 'readable'\n elif field_type == ATTR_TYPE_WRITE:\n field_type = 'writable'\n elif field_type == ATTR_TYPE_URL:\n field_type = 'URL'\n else:\n raise AttributeError('Invalid attribute type: {}'.format(\n field_type\n ))\n\n msg = '{} has no {} attribute \"{}\"'.format(\n cls.__name__,\n field_type,\n field_name\n )\n _logger.error(msg)\n raise AttributeError(msg)\n",
"def _update(self, **kwargs):\n \"\"\"Use separate URL for updating the source file.\"\"\"\n if 'content' in kwargs:\n content = kwargs.pop('content')\n path = self._construct_path_to_source_content()\n self._http.put(path, json.dumps({'content': content}))\n super(Resource, self)._update(**kwargs)\n",
"def _create(self, **kwargs):\n \"\"\"Create the translation of a resource.\n\n The _create function differentiates from the one in the BaseModel\n in the HTTP request that takes place in the end. In the Translation\n object's case, it needs to be `PUT`, while in the BaseModel is `POST`\n \"\"\"\n path = self._construct_path_to_collection()\n\n # Use the fields for which we have values\n for field in self.writable_fields:\n try:\n value = getattr(self, field)\n kwargs[field] = value\n except AttributeError:\n pass\n return self._http.put(path, json.dumps(kwargs))\n"
] | class BaseModel(object):
"""Base class for Transifex models.
Each model has a list of fields. The model works as a proxy between the
local application and the remote Transifex server.
The user of the class can:
a) retrieve an existing remote instance of the model
by using the static method `MyModel.get(...)`
b) create a new local instance with a set of populated fields
and call `save()` in order to save it to the remote server
c) delete an existing remote instance, by first creating a local instance
and all necessary attributes that identify the object, and then
calling `delete()` on it.
"""
# The URI prefix of all API endpoints for this model
_prefix = ''
# The URI for retrieving a collection of multiple items
# (shouldn't start with a slash)
_path_to_collection = ''
# The URl for retrieving a single item
# (shouldn't start with a slash)
_path_to_item = ''
# All fields defined here will be used for constructing
# the URL of the request
url_fields = set()
# These fields can be modified in POST/PUT requests
writable_fields = set()
# Initially False, set to True when an instance of the class is created
_is_initialized = False
@classmethod
def get(cls, **kwargs):
"""Retrieve an object by making a GET request to Transifex.
Each value in `kwargs` that corresponds to a field
defined in `self.url_fields` will be used in the URL path
of the request, so that a particular entry of this model
is identified and retrieved.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed as kwargs
txlib.http.exceptions.NotFoundError: if the object with these
attributes is not found on the remote server
txlib.http.exceptions.ServerError subclass: depending on
the particular server response
Example:
# Note: also catch exceptions
>>> obj = MyModel.get(attr1=value1, attr2=value2)
"""
fields = {}
for field in cls.url_fields:
value = kwargs.pop(field, None)
if value is None:
cls._handle_wrong_field(field, ATTR_TYPE_URL)
fields[field] = value
# Create an instance of the model class and make the GET request
model = cls(**fields)
model._populate(**kwargs)
return model
def __init__(self, prefix='/api/2/', **url_values):
"""Constructor.
Initializes various variables, setup the HTTP handler and
stores all values
Args:
prefix: The prefix of the urls.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed
"""
self._http = registry.http_handler
self._prefix = prefix
self._modified_fields = {}
self._populated_fields = {}
for field in url_values:
if field in self.url_fields:
setattr(self, field, url_values[field])
else:
self._handle_wrong_field(field, ATTR_TYPE_URL)
# From now on only, only specific attributes can be set
# on this object:
# a) one of the instance variables set above
# b) one of the attributes found in `self.writable_fields`
self._is_initialized = True
def __getattr__(self, name, default=None):
"""Return the value of the field with the given name.
Looks in `self._modified_fields` and `self._populated_fields`.
Raises:
AttributeError: if the requested attribute does not exist
"""
if name in self._modified_fields:
return self._modified_fields[name]
elif name in self._populated_fields:
return self._populated_fields[name]
else:
self._handle_wrong_field(name, ATTR_TYPE_READ)
def __setattr__(self, name, value):
"""Set the value of a field.
This method only allows certain attributes to be set:
a) Any attribute that is defined in `__init__()`
b) Any attribute found in `self.writable_fields`
For the rest it will raise an AttributeError.
For case (a), the attribute is saved directly on this object
For case (b), the attribute is saved in `self.writable_fields`
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`,
"""
# If __init__() hasn't finished yet, accept anything
if ('_is_initialized' not in self.__dict__) or (name in self.__dict__):
return super(BaseModel, self).__setattr__(name, value)
elif name in self.writable_fields:
self._modified_fields[name] = value
else:
self._handle_wrong_field(name, ATTR_TYPE_WRITE)
def delete(self):
"""Delete the instance from the remote Transifex server."""
self._delete()
def _populate(self, **kwargs):
"""Populate the instance with the values from the server."""
self._populated_fields = self._get(**kwargs)
def _get(self, **kwargs):
"""Get the resource from a remote Transifex server."""
path = self._construct_path_to_item()
return self._http.get(path)
def _create(self, **kwargs):
"""Create a resource in the remote Transifex server."""
path = self._construct_path_to_collection()
# Use the fields for which we have values
for field in self.writable_fields:
try:
value = getattr(self, field)
kwargs[field] = value
except AttributeError:
pass
return self._http.post(path, json.dumps(kwargs))
def _update(self, **kwargs):
"""Update a resource in a remote Transifex server."""
path = self._construct_path_to_item()
if not kwargs:
return
return self._http.put(path, json.dumps(kwargs))
def _delete(self, **kwargs):
"""Delete a resource from a remote Transifex server."""
path = self._construct_path_to_item()
return self._http.delete(path)
def _construct_path_to_collection(self):
"""Construct the path to an actual collection."""
template = self.get_path_to_collection_template() # flake8 fix
return template % self.get_url_parameters()
def _construct_path_to_item(self):
"""Construct the path to an actual item."""
return self.get_path_to_item_template() % self.get_url_parameters()
def get_url_parameters(self):
"""Create a dictionary of parameters used in URLs for this model."""
url_fields = {}
for field in self.url_fields:
url_fields[field] = getattr(self, field)
return url_fields
def get_path_to_collection_template(self):
"""The URL to access the collection of the model."""
return self._join_subpaths(self._prefix, self._path_to_collection)
def get_path_to_item_template(self):
"""The URL to access a specific item of the model."""
return self._join_subpaths(self._prefix, self._path_to_item)
def _join_subpaths(self, *args):
"""Join subpaths (given as arguments) to form a
well-defined URL path.
"""
return '/'.join(args).replace('///', '/').replace('//', '/')
@classmethod
def _handle_wrong_field(cls, field_name, field_type):
"""Raise an exception whenever an invalid attribute with
the given name was attempted to be set to or retrieved from
this model class.
Assumes that the given field is invalid, without making any checks.
Also adds an entry to the logs.
"""
if field_type == ATTR_TYPE_READ:
field_type = 'readable'
elif field_type == ATTR_TYPE_WRITE:
field_type = 'writable'
elif field_type == ATTR_TYPE_URL:
field_type = 'URL'
else:
raise AttributeError('Invalid attribute type: {}'.format(
field_type
))
msg = '{} has no {} attribute "{}"'.format(
cls.__name__,
field_type,
field_name
)
_logger.error(msg)
raise AttributeError(msg)
|
transifex/transifex-python-library | txlib/api/base.py | BaseModel._get | python | def _get(self, **kwargs):
path = self._construct_path_to_item()
return self._http.get(path) | Get the resource from a remote Transifex server. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/api/base.py#L246-L249 | null | class BaseModel(object):
"""Base class for Transifex models.
Each model has a list of fields. The model works as a proxy between the
local application and the remote Transifex server.
The user of the class can:
a) retrieve an existing remote instance of the model
by using the static method `MyModel.get(...)`
b) create a new local instance with a set of populated fields
and call `save()` in order to save it to the remote server
c) delete an existing remote instance, by first creating a local instance
and all necessary attributes that identify the object, and then
calling `delete()` on it.
"""
# The URI prefix of all API endpoints for this model
_prefix = ''
# The URI for retrieving a collection of multiple items
# (shouldn't start with a slash)
_path_to_collection = ''
# The URl for retrieving a single item
# (shouldn't start with a slash)
_path_to_item = ''
# All fields defined here will be used for constructing
# the URL of the request
url_fields = set()
# These fields can be modified in POST/PUT requests
writable_fields = set()
# Initially False, set to True when an instance of the class is created
_is_initialized = False
@classmethod
def get(cls, **kwargs):
"""Retrieve an object by making a GET request to Transifex.
Each value in `kwargs` that corresponds to a field
defined in `self.url_fields` will be used in the URL path
of the request, so that a particular entry of this model
is identified and retrieved.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed as kwargs
txlib.http.exceptions.NotFoundError: if the object with these
attributes is not found on the remote server
txlib.http.exceptions.ServerError subclass: depending on
the particular server response
Example:
# Note: also catch exceptions
>>> obj = MyModel.get(attr1=value1, attr2=value2)
"""
fields = {}
for field in cls.url_fields:
value = kwargs.pop(field, None)
if value is None:
cls._handle_wrong_field(field, ATTR_TYPE_URL)
fields[field] = value
# Create an instance of the model class and make the GET request
model = cls(**fields)
model._populate(**kwargs)
return model
def __init__(self, prefix='/api/2/', **url_values):
"""Constructor.
Initializes various variables, setup the HTTP handler and
stores all values
Args:
prefix: The prefix of the urls.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed
"""
self._http = registry.http_handler
self._prefix = prefix
self._modified_fields = {}
self._populated_fields = {}
for field in url_values:
if field in self.url_fields:
setattr(self, field, url_values[field])
else:
self._handle_wrong_field(field, ATTR_TYPE_URL)
# From now on only, only specific attributes can be set
# on this object:
# a) one of the instance variables set above
# b) one of the attributes found in `self.writable_fields`
self._is_initialized = True
def __getattr__(self, name, default=None):
"""Return the value of the field with the given name.
Looks in `self._modified_fields` and `self._populated_fields`.
Raises:
AttributeError: if the requested attribute does not exist
"""
if name in self._modified_fields:
return self._modified_fields[name]
elif name in self._populated_fields:
return self._populated_fields[name]
else:
self._handle_wrong_field(name, ATTR_TYPE_READ)
def __setattr__(self, name, value):
"""Set the value of a field.
This method only allows certain attributes to be set:
a) Any attribute that is defined in `__init__()`
b) Any attribute found in `self.writable_fields`
For the rest it will raise an AttributeError.
For case (a), the attribute is saved directly on this object
For case (b), the attribute is saved in `self.writable_fields`
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`,
"""
# If __init__() hasn't finished yet, accept anything
if ('_is_initialized' not in self.__dict__) or (name in self.__dict__):
return super(BaseModel, self).__setattr__(name, value)
elif name in self.writable_fields:
self._modified_fields[name] = value
else:
self._handle_wrong_field(name, ATTR_TYPE_WRITE)
def save(self, **fields):
"""Save the instance to the remote Transifex server.
If it was pre-populated, it updates the instance on the server,
otherwise it creates a new object.
Any values given in `fields` will be attempted to be saved
on the object. The same goes for any other values already set
to the object by `model_instance.attr = value`.
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`,
"""
for field in fields:
if field in self.writable_fields:
setattr(self, field, fields[field])
else:
self._handle_wrong_field(field, ATTR_TYPE_WRITE)
if self._populated_fields:
self._update(**self._modified_fields)
else:
self._create(**self._modified_fields)
def delete(self):
"""Delete the instance from the remote Transifex server."""
self._delete()
def _populate(self, **kwargs):
"""Populate the instance with the values from the server."""
self._populated_fields = self._get(**kwargs)
def _create(self, **kwargs):
"""Create a resource in the remote Transifex server."""
path = self._construct_path_to_collection()
# Use the fields for which we have values
for field in self.writable_fields:
try:
value = getattr(self, field)
kwargs[field] = value
except AttributeError:
pass
return self._http.post(path, json.dumps(kwargs))
def _update(self, **kwargs):
"""Update a resource in a remote Transifex server."""
path = self._construct_path_to_item()
if not kwargs:
return
return self._http.put(path, json.dumps(kwargs))
def _delete(self, **kwargs):
"""Delete a resource from a remote Transifex server."""
path = self._construct_path_to_item()
return self._http.delete(path)
def _construct_path_to_collection(self):
"""Construct the path to an actual collection."""
template = self.get_path_to_collection_template() # flake8 fix
return template % self.get_url_parameters()
def _construct_path_to_item(self):
"""Construct the path to an actual item."""
return self.get_path_to_item_template() % self.get_url_parameters()
def get_url_parameters(self):
"""Create a dictionary of parameters used in URLs for this model."""
url_fields = {}
for field in self.url_fields:
url_fields[field] = getattr(self, field)
return url_fields
def get_path_to_collection_template(self):
"""The URL to access the collection of the model."""
return self._join_subpaths(self._prefix, self._path_to_collection)
def get_path_to_item_template(self):
"""The URL to access a specific item of the model."""
return self._join_subpaths(self._prefix, self._path_to_item)
def _join_subpaths(self, *args):
"""Join subpaths (given as arguments) to form a
well-defined URL path.
"""
return '/'.join(args).replace('///', '/').replace('//', '/')
@classmethod
def _handle_wrong_field(cls, field_name, field_type):
"""Raise an exception whenever an invalid attribute with
the given name was attempted to be set to or retrieved from
this model class.
Assumes that the given field is invalid, without making any checks.
Also adds an entry to the logs.
"""
if field_type == ATTR_TYPE_READ:
field_type = 'readable'
elif field_type == ATTR_TYPE_WRITE:
field_type = 'writable'
elif field_type == ATTR_TYPE_URL:
field_type = 'URL'
else:
raise AttributeError('Invalid attribute type: {}'.format(
field_type
))
msg = '{} has no {} attribute "{}"'.format(
cls.__name__,
field_type,
field_name
)
_logger.error(msg)
raise AttributeError(msg)
|
transifex/transifex-python-library | txlib/api/base.py | BaseModel._create | python | def _create(self, **kwargs):
path = self._construct_path_to_collection()
# Use the fields for which we have values
for field in self.writable_fields:
try:
value = getattr(self, field)
kwargs[field] = value
except AttributeError:
pass
return self._http.post(path, json.dumps(kwargs)) | Create a resource in the remote Transifex server. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/api/base.py#L251-L262 | [
"def _construct_path_to_collection(self):\n \"\"\"Construct the path to an actual collection.\"\"\"\n template = self.get_path_to_collection_template() # flake8 fix\n return template % self.get_url_parameters()\n"
] | class BaseModel(object):
"""Base class for Transifex models.
Each model has a list of fields. The model works as a proxy between the
local application and the remote Transifex server.
The user of the class can:
a) retrieve an existing remote instance of the model
by using the static method `MyModel.get(...)`
b) create a new local instance with a set of populated fields
and call `save()` in order to save it to the remote server
c) delete an existing remote instance, by first creating a local instance
and all necessary attributes that identify the object, and then
calling `delete()` on it.
"""
# The URI prefix of all API endpoints for this model
_prefix = ''
# The URI for retrieving a collection of multiple items
# (shouldn't start with a slash)
_path_to_collection = ''
# The URl for retrieving a single item
# (shouldn't start with a slash)
_path_to_item = ''
# All fields defined here will be used for constructing
# the URL of the request
url_fields = set()
# These fields can be modified in POST/PUT requests
writable_fields = set()
# Initially False, set to True when an instance of the class is created
_is_initialized = False
@classmethod
def get(cls, **kwargs):
"""Retrieve an object by making a GET request to Transifex.
Each value in `kwargs` that corresponds to a field
defined in `self.url_fields` will be used in the URL path
of the request, so that a particular entry of this model
is identified and retrieved.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed as kwargs
txlib.http.exceptions.NotFoundError: if the object with these
attributes is not found on the remote server
txlib.http.exceptions.ServerError subclass: depending on
the particular server response
Example:
# Note: also catch exceptions
>>> obj = MyModel.get(attr1=value1, attr2=value2)
"""
fields = {}
for field in cls.url_fields:
value = kwargs.pop(field, None)
if value is None:
cls._handle_wrong_field(field, ATTR_TYPE_URL)
fields[field] = value
# Create an instance of the model class and make the GET request
model = cls(**fields)
model._populate(**kwargs)
return model
def __init__(self, prefix='/api/2/', **url_values):
"""Constructor.
Initializes various variables, setup the HTTP handler and
stores all values
Args:
prefix: The prefix of the urls.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed
"""
self._http = registry.http_handler
self._prefix = prefix
self._modified_fields = {}
self._populated_fields = {}
for field in url_values:
if field in self.url_fields:
setattr(self, field, url_values[field])
else:
self._handle_wrong_field(field, ATTR_TYPE_URL)
# From now on only, only specific attributes can be set
# on this object:
# a) one of the instance variables set above
# b) one of the attributes found in `self.writable_fields`
self._is_initialized = True
def __getattr__(self, name, default=None):
"""Return the value of the field with the given name.
Looks in `self._modified_fields` and `self._populated_fields`.
Raises:
AttributeError: if the requested attribute does not exist
"""
if name in self._modified_fields:
return self._modified_fields[name]
elif name in self._populated_fields:
return self._populated_fields[name]
else:
self._handle_wrong_field(name, ATTR_TYPE_READ)
def __setattr__(self, name, value):
"""Set the value of a field.
This method only allows certain attributes to be set:
a) Any attribute that is defined in `__init__()`
b) Any attribute found in `self.writable_fields`
For the rest it will raise an AttributeError.
For case (a), the attribute is saved directly on this object
For case (b), the attribute is saved in `self.writable_fields`
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`,
"""
# If __init__() hasn't finished yet, accept anything
if ('_is_initialized' not in self.__dict__) or (name in self.__dict__):
return super(BaseModel, self).__setattr__(name, value)
elif name in self.writable_fields:
self._modified_fields[name] = value
else:
self._handle_wrong_field(name, ATTR_TYPE_WRITE)
def save(self, **fields):
"""Save the instance to the remote Transifex server.
If it was pre-populated, it updates the instance on the server,
otherwise it creates a new object.
Any values given in `fields` will be attempted to be saved
on the object. The same goes for any other values already set
to the object by `model_instance.attr = value`.
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`,
"""
for field in fields:
if field in self.writable_fields:
setattr(self, field, fields[field])
else:
self._handle_wrong_field(field, ATTR_TYPE_WRITE)
if self._populated_fields:
self._update(**self._modified_fields)
else:
self._create(**self._modified_fields)
def delete(self):
"""Delete the instance from the remote Transifex server."""
self._delete()
def _populate(self, **kwargs):
"""Populate the instance with the values from the server."""
self._populated_fields = self._get(**kwargs)
def _get(self, **kwargs):
"""Get the resource from a remote Transifex server."""
path = self._construct_path_to_item()
return self._http.get(path)
def _update(self, **kwargs):
"""Update a resource in a remote Transifex server."""
path = self._construct_path_to_item()
if not kwargs:
return
return self._http.put(path, json.dumps(kwargs))
def _delete(self, **kwargs):
"""Delete a resource from a remote Transifex server."""
path = self._construct_path_to_item()
return self._http.delete(path)
def _construct_path_to_collection(self):
"""Construct the path to an actual collection."""
template = self.get_path_to_collection_template() # flake8 fix
return template % self.get_url_parameters()
def _construct_path_to_item(self):
"""Construct the path to an actual item."""
return self.get_path_to_item_template() % self.get_url_parameters()
def get_url_parameters(self):
"""Create a dictionary of parameters used in URLs for this model."""
url_fields = {}
for field in self.url_fields:
url_fields[field] = getattr(self, field)
return url_fields
def get_path_to_collection_template(self):
"""The URL to access the collection of the model."""
return self._join_subpaths(self._prefix, self._path_to_collection)
def get_path_to_item_template(self):
"""The URL to access a specific item of the model."""
return self._join_subpaths(self._prefix, self._path_to_item)
def _join_subpaths(self, *args):
"""Join subpaths (given as arguments) to form a
well-defined URL path.
"""
return '/'.join(args).replace('///', '/').replace('//', '/')
@classmethod
def _handle_wrong_field(cls, field_name, field_type):
"""Raise an exception whenever an invalid attribute with
the given name was attempted to be set to or retrieved from
this model class.
Assumes that the given field is invalid, without making any checks.
Also adds an entry to the logs.
"""
if field_type == ATTR_TYPE_READ:
field_type = 'readable'
elif field_type == ATTR_TYPE_WRITE:
field_type = 'writable'
elif field_type == ATTR_TYPE_URL:
field_type = 'URL'
else:
raise AttributeError('Invalid attribute type: {}'.format(
field_type
))
msg = '{} has no {} attribute "{}"'.format(
cls.__name__,
field_type,
field_name
)
_logger.error(msg)
raise AttributeError(msg)
|
transifex/transifex-python-library | txlib/api/base.py | BaseModel._update | python | def _update(self, **kwargs):
path = self._construct_path_to_item()
if not kwargs:
return
return self._http.put(path, json.dumps(kwargs)) | Update a resource in a remote Transifex server. | train | https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/api/base.py#L264-L269 | [
"def _construct_path_to_item(self):\n \"\"\"Construct the path to an actual item.\"\"\"\n return self.get_path_to_item_template() % self.get_url_parameters()\n"
] | class BaseModel(object):
"""Base class for Transifex models.
Each model has a list of fields. The model works as a proxy between the
local application and the remote Transifex server.
The user of the class can:
a) retrieve an existing remote instance of the model
by using the static method `MyModel.get(...)`
b) create a new local instance with a set of populated fields
and call `save()` in order to save it to the remote server
c) delete an existing remote instance, by first creating a local instance
and all necessary attributes that identify the object, and then
calling `delete()` on it.
"""
# The URI prefix of all API endpoints for this model
_prefix = ''
# The URI for retrieving a collection of multiple items
# (shouldn't start with a slash)
_path_to_collection = ''
# The URl for retrieving a single item
# (shouldn't start with a slash)
_path_to_item = ''
# All fields defined here will be used for constructing
# the URL of the request
url_fields = set()
# These fields can be modified in POST/PUT requests
writable_fields = set()
# Initially False, set to True when an instance of the class is created
_is_initialized = False
@classmethod
def get(cls, **kwargs):
"""Retrieve an object by making a GET request to Transifex.
Each value in `kwargs` that corresponds to a field
defined in `self.url_fields` will be used in the URL path
of the request, so that a particular entry of this model
is identified and retrieved.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed as kwargs
txlib.http.exceptions.NotFoundError: if the object with these
attributes is not found on the remote server
txlib.http.exceptions.ServerError subclass: depending on
the particular server response
Example:
# Note: also catch exceptions
>>> obj = MyModel.get(attr1=value1, attr2=value2)
"""
fields = {}
for field in cls.url_fields:
value = kwargs.pop(field, None)
if value is None:
cls._handle_wrong_field(field, ATTR_TYPE_URL)
fields[field] = value
# Create an instance of the model class and make the GET request
model = cls(**fields)
model._populate(**kwargs)
return model
def __init__(self, prefix='/api/2/', **url_values):
"""Constructor.
Initializes various variables, setup the HTTP handler and
stores all values
Args:
prefix: The prefix of the urls.
Raises:
AttributeError: if not all values for parameters in `url_fields`
are passed
"""
self._http = registry.http_handler
self._prefix = prefix
self._modified_fields = {}
self._populated_fields = {}
for field in url_values:
if field in self.url_fields:
setattr(self, field, url_values[field])
else:
self._handle_wrong_field(field, ATTR_TYPE_URL)
# From now on only, only specific attributes can be set
# on this object:
# a) one of the instance variables set above
# b) one of the attributes found in `self.writable_fields`
self._is_initialized = True
def __getattr__(self, name, default=None):
"""Return the value of the field with the given name.
Looks in `self._modified_fields` and `self._populated_fields`.
Raises:
AttributeError: if the requested attribute does not exist
"""
if name in self._modified_fields:
return self._modified_fields[name]
elif name in self._populated_fields:
return self._populated_fields[name]
else:
self._handle_wrong_field(name, ATTR_TYPE_READ)
def __setattr__(self, name, value):
"""Set the value of a field.
This method only allows certain attributes to be set:
a) Any attribute that is defined in `__init__()`
b) Any attribute found in `self.writable_fields`
For the rest it will raise an AttributeError.
For case (a), the attribute is saved directly on this object
For case (b), the attribute is saved in `self.writable_fields`
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`,
"""
# If __init__() hasn't finished yet, accept anything
if ('_is_initialized' not in self.__dict__) or (name in self.__dict__):
return super(BaseModel, self).__setattr__(name, value)
elif name in self.writable_fields:
self._modified_fields[name] = value
else:
self._handle_wrong_field(name, ATTR_TYPE_WRITE)
def save(self, **fields):
"""Save the instance to the remote Transifex server.
If it was pre-populated, it updates the instance on the server,
otherwise it creates a new object.
Any values given in `fields` will be attempted to be saved
on the object. The same goes for any other values already set
to the object by `model_instance.attr = value`.
Raises:
AttributeError: if a given field is not included in
`self.writable_fields`,
"""
for field in fields:
if field in self.writable_fields:
setattr(self, field, fields[field])
else:
self._handle_wrong_field(field, ATTR_TYPE_WRITE)
if self._populated_fields:
self._update(**self._modified_fields)
else:
self._create(**self._modified_fields)
def delete(self):
"""Delete the instance from the remote Transifex server."""
self._delete()
def _populate(self, **kwargs):
"""Populate the instance with the values from the server."""
self._populated_fields = self._get(**kwargs)
def _get(self, **kwargs):
"""Get the resource from a remote Transifex server."""
path = self._construct_path_to_item()
return self._http.get(path)
def _create(self, **kwargs):
    """Create a resource in the remote Transifex server.

    Every writable field that currently has a value on the instance is
    merged into ``kwargs``, and the combined payload is POSTed as JSON
    to the collection path.
    """
    path = self._construct_path_to_collection()
    # Use the fields for which we have values
    for name in self.writable_fields:
        try:
            kwargs[name] = getattr(self, name)
        except AttributeError:
            # No value set for this field; leave it out of the payload.
            continue
    return self._http.post(path, json.dumps(kwargs))
def _delete(self, **kwargs):
    """Delete a resource from a remote Transifex server.

    ``kwargs`` is accepted for interface symmetry but is not used.
    """
    return self._http.delete(self._construct_path_to_item())
def _construct_path_to_collection(self):
    """Build the concrete URL path of this model's collection."""
    params = self.get_url_parameters()
    return self.get_path_to_collection_template() % params
def _construct_path_to_item(self):
    """Build the concrete URL path of this specific item."""
    params = self.get_url_parameters()
    return self.get_path_to_item_template() % params
def get_url_parameters(self):
    """Return a dict of the field values used to fill URL templates."""
    return {field: getattr(self, field) for field in self.url_fields}
def get_path_to_collection_template(self):
    """The URL template addressing the model's collection."""
    parts = (self._prefix, self._path_to_collection)
    return self._join_subpaths(*parts)
def get_path_to_item_template(self):
    """The URL template addressing one specific item of the model."""
    template_parts = (self._prefix, self._path_to_item)
    return self._join_subpaths(*template_parts)
def _join_subpaths(self, *args):
    """Join subpaths (given as arguments) to form a
    well-defined URL path.

    Consecutive slashes produced by the join (e.g. when one part ends
    with '/' and the next begins with '/') are collapsed to a single
    '/'.

    Returns:
        str: the normalized joined path.
    """
    # Bug fix: the previous chained
    # .replace('///', '/').replace('//', '/') pair failed to fully
    # collapse runs of five or more slashes (e.g. '/////' -> '//').
    # Looping guarantees complete normalization.
    path = '/'.join(args)
    while '//' in path:
        path = path.replace('//', '/')
    return path
@classmethod
def _handle_wrong_field(cls, field_name, field_type):
    """Raise an exception whenever an invalid attribute with
    the given name was attempted to be set to or retrieved from
    this model class.

    Assumes that the given field is invalid, without making any checks.
    Also adds an entry to the logs.

    Raises:
        AttributeError: always; either for the invalid field itself
            or, when ``field_type`` is unknown, for the bad type value.
    """
    # Map each attribute-type constant to its human-readable label.
    labels = (
        (ATTR_TYPE_READ, 'readable'),
        (ATTR_TYPE_WRITE, 'writable'),
        (ATTR_TYPE_URL, 'URL'),
    )
    for attr_type, label in labels:
        if field_type == attr_type:
            field_type = label
            break
    else:
        raise AttributeError('Invalid attribute type: {}'.format(
            field_type
        ))
    msg = '{} has no {} attribute "{}"'.format(
        cls.__name__,
        field_type,
        field_name
    )
    _logger.error(msg)
    raise AttributeError(msg)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.