repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
klmitch/requiem
|
requiem/decorators.py
|
restmethod
|
python
|
def restmethod(method, reluri, *qargs, **headers):
    """Decorate a method to inject an HTTPRequest.

    Generates an HTTPRequest using the given HTTP *method* and the
    relative URI *reluri* (a str.format() template filled in from the
    decorated method's arguments).  Each extra positional argument
    names a function argument whose value, when not None, is added to
    the URL query string.  Each extra keyword argument maps a function
    argument name to an HTTP header name to set from that argument's
    value.  The request is injected as the first function argument
    after 'self'.

    Two attributes must exist on the object the method is called on:
    '_baseurl' specifies the URL that *reluri* is relative to, and
    '_make_req' specifies a method that instantiates an HTTPRequest
    from a method and full URL (which will include query arguments).
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Process the arguments against the original function;
            # this also reserves the slot for the injected request and
            # reports its argument name.
            argmap, theSelf, req_name = _getcallargs(func, args, kwargs)
            # Build the URL
            url = _urljoin(theSelf._baseurl, reluri.format(**argmap))
            # Build the query string, as needed; None-valued query
            # arguments are simply omitted.
            if qargs:
                query = dict([(k, argmap[k]) for k in qargs
                              if argmap[k] is not None])
                if query:
                    url += '?%s' % urllib.urlencode(query)
            # Build the headers, if needed; only truthy argument
            # values become headers.
            hlist = None
            if headers:
                hlist = hdrs.HeaderDict()
                for aname, hname in headers.items():
                    if argmap[aname]:
                        hlist[hname] = argmap[aname]
                if not hlist:
                    # If there are no headers, don't send any
                    hlist = None
            # Now, build the request and pass it to the method
            argmap[req_name] = theSelf._make_req(method, url,
                                                 func.__name__, hlist)
            # Call the method
            return func(**argmap)
        # Return the function wrapper
        return wrapper
    # Return the actual decorator
    return decorator
|
Decorate a method to inject an HTTPRequest.
Generates an HTTPRequest using the given HTTP method and relative
URI. If additional positional arguments are present, they are
expected to be strings that name function arguments that should be
included as the query parameters of the URL. If additional
keyword arguments are present, the keywords are expected to name
function arguments and the values are expected to name headers to
set from those values. The request is injected as the first
function argument after the 'self' argument.
Note that two attributes must exist on the object the method is
called on: the '_baseurl' attribute specifies the URL that reluri
is relative to; and the '_make_req' attribute specifies a method
that instantiates an HTTPRequest from a method and full url (which
will include query arguments).
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/decorators.py#L179-L236
| null |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
import sys
import urllib
import urlparse
from requiem import headers as hdrs
__all__ = ['restmethod']
# Custom version of inspect.getcallargs(). We need this because:
#
# 1. inspect.getcallargs() does not exist prior to Python 2.7.
#
# 2. We need to inject an argument for request objects, and make it
# accessible to our caller.
#
# 3. We need to additionally return the object the method is acting
# on. (This function only works on methods.)
#
# Note that this implementation is largely copied straight from Python
# 2.7 inspect.py, with the addition of a few comments and the changes
# in behavior noted above.
def _getcallargs(func, positional, named):
    """Get the mapping of arguments to values.

    Generates a dict, with keys being the function argument names
    (including the names of the * and ** arguments, if any), and
    values the respective bound values from 'positional' and 'named'.
    A parameter for the request is injected.  Returns a tuple of the
    dict, the object the method is being called on, and the name of
    the injected request argument.

    Largely copied from Python 2.7's inspect.getcallargs(), with the
    request-slot injection added; this only works on methods.
    """
    args, varargs, varkw, defaults = inspect.getargspec(func)
    f_name = func.__name__
    arg2value = {}

    # The following closures are basically because of tuple parameter
    # unpacking (the Python 2 "def f(a, (b, c))" feature).
    assigned_tuple_params = []

    def assign(arg, value):
        # Bind a single name, or recursively unpack a tuple parameter.
        if isinstance(arg, str):
            arg2value[arg] = value
        else:
            assigned_tuple_params.append(arg)
            value = iter(value)
            for i, subarg in enumerate(arg):
                try:
                    subvalue = next(value)
                except StopIteration:
                    raise ValueError('need more than %d %s to unpack' %
                                     (i, 'values' if i > 1 else 'value'))
                assign(subarg, subvalue)
            try:
                next(value)
            except StopIteration:
                pass
            else:
                raise ValueError('too many values to unpack')

    def is_assigned(arg):
        # Has this (possibly tuple) parameter been bound yet?
        if isinstance(arg, str):
            return arg in arg2value
        return arg in assigned_tuple_params

    # Inject a place-holder for the request and get the self and the
    # req_name; args[0] is 'self', so args[1] is the request slot.
    positional = positional[:1] + (None,) + positional[1:]
    theSelf = positional[0]
    req_name = args[1]
    num_pos = len(positional)
    num_total = num_pos + len(named)
    num_args = len(args)
    num_defaults = len(defaults) if defaults else 0
    # Start with our positional parameters...
    for arg, value in zip(args, positional):
        assign(arg, value)
    # Deal with the variable argument list...
    if varargs:
        if num_pos > num_args:
            assign(varargs, positional[-(num_pos-num_args):])
        else:
            assign(varargs, ())
    elif 0 < num_args < num_pos:
        # More positionals than parameters, and no *args to soak them up.
        raise TypeError('%s() takes %s %d %s (%d given)' % (
            f_name, 'at most' if defaults else 'exactly', num_args,
            'arguments' if num_args > 1 else 'argument', num_total))
    elif num_args == 0 and num_total:
        raise TypeError('%s() takes no arguments (%d given)' %
                        (f_name, num_total))
    # Exclusion rules on keyword arguments
    for arg in args:
        if isinstance(arg, str) and arg in named:
            if is_assigned(arg):
                raise TypeError("%s() got multiple values for keyword "
                                "argument '%s'" % (f_name, arg))
            else:
                assign(arg, named.pop(arg))
    # Fill in any missing values with the defaults
    if defaults:
        for arg, value in zip(args[-num_defaults:], defaults):
            if not is_assigned(arg):
                assign(arg, value)
    # Handle the **names
    if varkw:
        assign(varkw, named)
    elif named:
        unexpected = next(iter(named))
        # Python 2 'unicode' keyword names are re-encoded so the error
        # message below is a byte string.
        if isinstance(unexpected, unicode):
            unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
        raise TypeError("%s() got an unexpected keyword argument '%s'" %
                        (f_name, unexpected))
    # Anything left over?
    unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
    if unassigned:
        num_required = num_args - num_defaults
        raise TypeError('%s() takes %s %d %s (%d given)' % (
            f_name, 'at least' if defaults else 'exactly', num_required,
            'arguments' if num_required > 1 else 'argument', num_total))
    # Return the mapping and the name of the request argument
    return arg2value, theSelf, req_name
def _urljoin(left, right):
"""Join two URLs.
Takes URLs specified by left and right and joins them into a
single URL. If right is an absolute URL, it is returned directly.
This differs from urlparse.urljoin() in that the latter always
chops off the left-most component of left unless it is trailed by
'/', which is not the behavior we want.
"""
# Handle the tricky case of right being a full URL
tmp = urlparse.urlparse(right)
if tmp.scheme or tmp.netloc:
# Go ahead and use urlparse.urljoin()
return urlparse.urljoin(left, right)
# Check for slashes
joincond = (left[-1:], right[:1])
if joincond == ('/', '/'):
# Too many, preserve only one
return left + right[1:]
elif '/' in joincond:
# Just one; great!
return left + right
else:
# Not enough; add one
return left + '/' + right
|
klmitch/requiem
|
requiem/client.py
|
RESTClient._debug
|
python
|
def _debug(self, msg, *args, **kwargs):
    """Emit a debugging message to the configured stream, if any."""
    stream = self._debug_stream
    # Debugging is disabled when no stream was configured
    if stream is None or stream is False:
        return
    # Keyword arguments take precedence as the format mapping;
    # otherwise fall back to the positional tuple
    fmtargs = kwargs if kwargs else args
    # Python 2 print statement: writes msg % fmtargs plus a newline
    print >>stream, msg % fmtargs
|
Emit debugging messages.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/client.py#L68-L82
| null |
class RESTClient(object):
"""Represent a REST client API.
Methods are expected to perform REST calls to a server specified
by a base URL. The @restmethod() decorator helps this process by
passing an additional HTTPRequest object into the method. The
request class to use can be overridden by changing the
'_req_class' class attribute or by overriding the _make_req()
method.
The HTTPRequest object additionally needs an
httplib2.Http-compatible client object, which may be provided by
passing the 'client' keyword argument to the RESTClient
constructor. If no client is provided, a basic one will be
allocated.
"""
_req_class = request.HTTPRequest
def __init__(self, baseurl, headers=None, debug=None, client=None):
"""Initialize a REST client API.
The baseurl specifies the base URL for the REST service. If
provided, headers specifies a dictionary of additional HTTP
headers to set on every request. Beware of name clashes in
the keys of headers; header names are considered in a
case-insensitive manner.
Debugging output can be enabled by passing a stream as the
debug parameter. If True is passed instead, sys.stderr will
be used.
"""
# Initialize an API client
self._baseurl = baseurl
self._headers = hdrs.HeaderDict(headers)
self._debug_stream = sys.stderr if debug is True else debug
self._client = client or httplib2.Http()
self._procstack = processor.ProcessorStack()
def _push_processor(self, proc, index=None):
"""
Pushes a processor onto the processor stack. Processors are
objects with proc_request(), proc_response(), and/or
proc_exception() methods, which can intercept requests,
responses, and exceptions. When a method invokes the send()
method on a request, the proc_request() method on each
processor is called in turn. Likewise, responses are
processed by the proc_response() method of each processor, in
the reverse order of the calls to proc_request(). The
proc_exception() methods are called if an exception is raised
instead of a response being returned.
Note that this method can append a processor to the stack, if
the index parameter is None (the default), or a processor may
be inserted into the stack by specifying an integer index.
For more information about processors, see the
requiem.Processor class.
"""
if index is None:
self._procstack.append(proc)
else:
self._procstack.insert(index, proc)
def _make_req(self, method, url, methname, headers=None):
"""Create a request object for the specified method and url."""
# Build up headers
hset = hdrs.HeaderDict()
# Walk through our global headers
for hdr, value in self._headers.items():
# If it's a callable, call it
if callable(value):
value = value(methname)
else:
# OK, just stringify it
value = str(value)
# If it's meaningful, attach it
if value:
hset[hdr] = value
# Were headers passed in?
if headers is not None:
# Update from specified headers
hset.update(headers)
# Hook method to instantiate requests
self._debug("Creating request %s.%s(%r, %r, headers=%r)",
self._req_class.__module__, self._req_class.__name__,
method, url, hset)
return self._req_class(method, url, self._client, self._procstack,
headers=hset, debug=self._debug)
|
klmitch/requiem
|
requiem/client.py
|
RESTClient._push_processor
|
python
|
def _push_processor(self, proc, index=None):
if index is None:
self._procstack.append(proc)
else:
self._procstack.insert(index, proc)
|
Pushes a processor onto the processor stack. Processors are
objects with proc_request(), proc_response(), and/or
proc_exception() methods, which can intercept requests,
responses, and exceptions. When a method invokes the send()
method on a request, the proc_request() method on each
processor is called in turn. Likewise, responses are
processed by the proc_response() method of each processor, in
the reverse order of the calls to proc_request(). The
proc_exception() methods are called if an exception is raised
instead of a response being returned.
Note that this method can append a processor to the stack, if
the index parameter is None (the default), or a processor may
be inserted into the stack by specifying an integer index.
For more information about processors, see the
requiem.Processor class.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/client.py#L84-L108
| null |
class RESTClient(object):
"""Represent a REST client API.
Methods are expected to perform REST calls to a server specified
by a base URL. The @restmethod() decorator helps this process by
passing an additional HTTPRequest object into the method. The
request class to use can be overridden by changing the
'_req_class' class attribute or by overriding the _make_req()
method.
The HTTPRequest object additionally needs an
httplib2.Http-compatible client object, which may be provided by
passing the 'client' keyword argument to the RESTClient
constructor. If no client is provided, a basic one will be
allocated.
"""
_req_class = request.HTTPRequest
def __init__(self, baseurl, headers=None, debug=None, client=None):
"""Initialize a REST client API.
The baseurl specifies the base URL for the REST service. If
provided, headers specifies a dictionary of additional HTTP
headers to set on every request. Beware of name clashes in
the keys of headers; header names are considered in a
case-insensitive manner.
Debugging output can be enabled by passing a stream as the
debug parameter. If True is passed instead, sys.stderr will
be used.
"""
# Initialize an API client
self._baseurl = baseurl
self._headers = hdrs.HeaderDict(headers)
self._debug_stream = sys.stderr if debug is True else debug
self._client = client or httplib2.Http()
self._procstack = processor.ProcessorStack()
def _debug(self, msg, *args, **kwargs):
"""Emit debugging messages."""
# Do nothing if debugging is disabled
if self._debug_stream is None or self._debug_stream is False:
return
# What are we passing to the format?
if kwargs:
fmtargs = kwargs
else:
fmtargs = args
# Emit the message
print >>self._debug_stream, msg % fmtargs
def _make_req(self, method, url, methname, headers=None):
"""Create a request object for the specified method and url."""
# Build up headers
hset = hdrs.HeaderDict()
# Walk through our global headers
for hdr, value in self._headers.items():
# If it's a callable, call it
if callable(value):
value = value(methname)
else:
# OK, just stringify it
value = str(value)
# If it's meaningful, attach it
if value:
hset[hdr] = value
# Were headers passed in?
if headers is not None:
# Update from specified headers
hset.update(headers)
# Hook method to instantiate requests
self._debug("Creating request %s.%s(%r, %r, headers=%r)",
self._req_class.__module__, self._req_class.__name__,
method, url, hset)
return self._req_class(method, url, self._client, self._procstack,
headers=hset, debug=self._debug)
|
klmitch/requiem
|
requiem/client.py
|
RESTClient._make_req
|
python
|
def _make_req(self, method, url, methname, headers=None):
    """Create a request object for the specified method and url."""
    # Assemble the effective header set, starting from the
    # client-wide headers
    hset = hdrs.HeaderDict()
    for hdr, value in self._headers.items():
        # Callable values are invoked with the method name; anything
        # else is stringified
        value = value(methname) if callable(value) else str(value)
        # Only meaningful (truthy) values are attached
        if value:
            hset[hdr] = value
    # Per-request headers override the client-wide ones
    if headers is not None:
        hset.update(headers)
    # Hook method to instantiate requests
    self._debug("Creating request %s.%s(%r, %r, headers=%r)",
                self._req_class.__module__, self._req_class.__name__,
                method, url, hset)
    return self._req_class(method, url, self._client, self._procstack,
                           headers=hset, debug=self._debug)
|
Create a request object for the specified method and url.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/client.py#L110-L139
| null |
class RESTClient(object):
"""Represent a REST client API.
Methods are expected to perform REST calls to a server specified
by a base URL. The @restmethod() decorator helps this process by
passing an additional HTTPRequest object into the method. The
request class to use can be overridden by changing the
'_req_class' class attribute or by overriding the _make_req()
method.
The HTTPRequest object additionally needs an
httplib2.Http-compatible client object, which may be provided by
passing the 'client' keyword argument to the RESTClient
constructor. If no client is provided, a basic one will be
allocated.
"""
_req_class = request.HTTPRequest
def __init__(self, baseurl, headers=None, debug=None, client=None):
"""Initialize a REST client API.
The baseurl specifies the base URL for the REST service. If
provided, headers specifies a dictionary of additional HTTP
headers to set on every request. Beware of name clashes in
the keys of headers; header names are considered in a
case-insensitive manner.
Debugging output can be enabled by passing a stream as the
debug parameter. If True is passed instead, sys.stderr will
be used.
"""
# Initialize an API client
self._baseurl = baseurl
self._headers = hdrs.HeaderDict(headers)
self._debug_stream = sys.stderr if debug is True else debug
self._client = client or httplib2.Http()
self._procstack = processor.ProcessorStack()
def _debug(self, msg, *args, **kwargs):
"""Emit debugging messages."""
# Do nothing if debugging is disabled
if self._debug_stream is None or self._debug_stream is False:
return
# What are we passing to the format?
if kwargs:
fmtargs = kwargs
else:
fmtargs = args
# Emit the message
print >>self._debug_stream, msg % fmtargs
def _push_processor(self, proc, index=None):
"""
Pushes a processor onto the processor stack. Processors are
objects with proc_request(), proc_response(), and/or
proc_exception() methods, which can intercept requests,
responses, and exceptions. When a method invokes the send()
method on a request, the proc_request() method on each
processor is called in turn. Likewise, responses are
processed by the proc_response() method of each processor, in
the reverse order of the calls to proc_request(). The
proc_exception() methods are called if an exception is raised
instead of a response being returned.
Note that this method can append a processor to the stack, if
the index parameter is None (the default), or a processor may
be inserted into the stack by specifying an integer index.
For more information about processors, see the
requiem.Processor class.
"""
if index is None:
self._procstack.append(proc)
else:
self._procstack.insert(index, proc)
|
rocky/python-filecache
|
pyficache/main.py
|
pyc2py
|
python
|
def pyc2py(filename):
    """Find corresponding .py name given a .pyc or .pyo."""
    if not re.match(".*py[co]$", filename):
        # Not a compiled-module name; hand it back untouched
        return filename
    if PYTHON3:
        # Strip the __pycache__/name.cpython-XY.pyc mangling
        return re.sub(r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER,
                      '\\1\\2.py',
                      filename)
    # Python 2: just drop the trailing 'c'/'o'
    return filename[:-1]
|
Find corresponding .py name given a .pyc or .pyo
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L94-L105
| null |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
PYTHON3 = (sys.version_info >= (3, 0))
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
large_int = sys.maxsize
else:
large_int = sys.maxint
default_opts = {
'reload_on_change' : False, # Check if file has changed since last
# time
'use_linecache_lines' : True,
'strip_nl' : True, # Strip trailing \n on line returned
'output' : 'plain' # To we want plain output?
# Set to 'terminal'
# for terminal syntax-colored output
}
def get_option(key, options):
    """Return options[key] if present, else the module default for *key*."""
    if options and key in options:
        return options[key]
    # Missing (or no options at all): fall back to default_opts
    return default_opts.get(key)
def has_trailing_nl(string):
    """Return True if *string* ends with a newline character."""
    return string.endswith('\n')
class LineCacheInfo:
def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
self.stat, self.lines, self.path, self.sha1 = (stat, lines, path, sha1)
self.line_numbers = line_numbers
self.eols = eols
return
pass
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the a full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is # the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number in as Python sees it. The second item is the
# line number in corresponding mapped_path. The the first entry of the
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But we not the other way
# around. That is we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work this.
def clear_file_cache(filename=None):
    """Clear the file cache.  If no filename is given clear it entirely;
    if a filename is given, clear just that filename."""
    global file_cache, file2file_remap, file2file_remap_lines
    if filename is None:
        # Full reset, including both remap tables
        file_cache = {}
        file2file_remap = {}
        file2file_remap_lines = {}
    elif filename in file_cache:
        # Drop only the one entry; remap tables are left alone
        del file_cache[filename]
    return
def clear_file_format_cache():
"""Remove syntax-formatted lines in the cache. Use this
when you change the Pygments syntax or Token formatting
and want to redo how files may have previously been
syntax marked."""
for fname, cache_info in file_cache.items():
for format, lines in cache_info.lines.items():
if 'plain' == format: continue
file_cache[fname].lines[format] = None
pass
pass
pass
def cached_files():
"""Return an array of cached file names"""
return list(file_cache.keys())
def checkcache(filename=None, opts=False):
"""Discard cache entries that are out of date. If *filename* is *None*
all entries in the file cache *file_cache* are checked. If we do not
have stat information about a file it will be kept. Return a list of
invalidated filenames. None is returned if a filename was given but
not found cached."""
if isinstance(opts, dict):
use_linecache_lines = opts['use_linecache_lines']
else:
use_linecache_lines = opts
pass
if not filename:
filenames = list(file_cache.keys())
elif filename in file_cache:
filenames = [filename]
else:
return None
result = []
for filename in filenames:
if filename not in file_cache: continue
path = file_cache[filename].path
if os.path.exists(path):
cache_info = file_cache[filename].stat
stat = os.stat(path)
if stat and \
(cache_info.st_size != stat.st_size or
cache_info.st_mtime != stat.st_mtime):
result.append(filename)
update_cache(filename, use_linecache_lines)
else:
result.append(filename)
update_cache(filename)
pass
pass
return result
def cache_script(script, text, opts={}):
"""Cache script if it is not already cached."""
global script_cache
if script not in script_cache:
update_script_cache(script, text, opts)
pass
return script
def uncache_script(script, opts={}):
"""remove script from cache."""
global script_cache
if script in script_cache:
del script_cache[script]
return script
return None
def update_script_cache(script, text, opts={}):
"""Cache script if it is not already cached."""
global script_cache
if script not in script_cache:
script_cache[script] = text
return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
"""Cache filename if it is not already cached.
Return the expanded filename for it in the cache
or nil if we can not find the file."""
filename = pyc2py(filename)
if filename in file_cache:
if reload_on_change: checkcache(filename)
pass
else:
opts['use_linecache_lines'] = True
update_cache(filename, opts)
pass
if filename in file_cache:
return file_cache[filename].path
else: return None
return # Not reached
def is_cached(file_or_script):
"""Return True if file_or_script is cached"""
if isinstance(file_or_script, str):
return unmap_file(file_or_script) in file_cache
else:
return is_cached_script(file_or_script)
return
def is_cached_script(filename):
return unmap_file(filename) in list(script_cache.keys())
def is_empty(filename):
filename=unmap_file(filename)
return 0 == len(file_cache[filename].lines['plain'])
def getline(file_or_script, line_number, opts=default_opts):
"""Get line *line_number* from file named *file_or_script*. Return None if
there was a problem or it is not found.
Example:
lines = pyficache.getline("/tmp/myfile.py")
"""
filename = unmap_file(file_or_script)
filename, line_number = unmap_file_line(filename, line_number)
lines = getlines(filename, opts)
if lines and line_number >=1 and line_number <= maxline(filename):
line = lines[line_number-1]
if get_option('strip_nl', opts):
return line.rstrip('\n')
else:
return line
pass
else:
return None
return # Not reached
def getlines(filename, opts=default_opts):
    """Read lines of *filename* and cache the results.  However, if
    *filename* was previously cached use the results from the
    cache.  Return *None* if we can not get lines
    """
    # Optionally discard a stale cache entry before reading
    if get_option('reload_on_change', opts): checkcache(filename)
    fmt = get_option('output', opts)
    highlight_opts = {'bg': fmt}
    cs = opts.get('style')
    # Colorstyle of Terminal255Formatter takes precedence over
    # light/dark colorthemes of TerminalFormatter; when a style is
    # given it also becomes the cache key for the formatted lines
    if cs:
        highlight_opts['style'] = cs
        fmt = cs
    if filename not in file_cache:
        update_cache(filename, opts)
        # The cache may be keyed under the .py name of a .pyc/.pyo
        filename = pyc2py(filename)
        if filename not in file_cache: return None
        pass
    lines = file_cache[filename].lines
    # Lines are cached per output format; render and memoize on demand
    if fmt not in lines.keys():
        lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
        pass
    return lines[fmt]
def highlight_array(array, trailing_nl=True,
bg='light', **options):
fmt_array = highlight_string(''.join(array),
bg, **options).split('\n')
lines = [ line + "\n" for line in fmt_array ]
if not trailing_nl: lines[-1] = lines[-1].rstrip('\n')
return lines
python_lexer = PythonLexer()
# TerminalFormatter uses a colorTHEME with light and dark pairs.
# But Terminal256Formatter uses a colorSTYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
global terminal_256_formatter
if options.get('style'):
if terminal_256_formatter.style != options['style']:
terminal_256_formatter = \
Terminal256Formatter(style=options['style'])
del options['style']
return highlight(string, python_lexer, terminal_256_formatter,
**options)
elif 'light' == bg:
return highlight(string, python_lexer, light_terminal_formatter,
**options)
else:
return highlight(string, python_lexer, dark_terminal_formatter,
**options)
pass
def path(filename):
"""Return full filename path for filename"""
filename = unmap_file(filename)
if filename not in file_cache:
return None
return file_cache[filename].path
def remap_file(from_file, to_file):
"""Make *to_file* be a synonym for *from_file*"""
file2file_remap[to_file] = from_file
return
def remap_file_lines(from_path, to_path, line_map_list):
"""Adds line_map list to the list of association of from_file to
to to_file"""
from_path = pyc2py(from_path)
cache_file(to_path)
remap_entry = file2file_remap_lines.get(to_path)
if remap_entry:
new_list = list(remap_entry.from_to_pairs) + list(line_map_list)
else:
new_list = line_map_list
# FIXME: look for duplicates ?
file2file_remap_lines[to_path] = RemapLineEntry(
from_path,
tuple(sorted(new_list, key=lambda t: t[0]))
)
return
def remove_remap_file(filename):
    """Remove any mapping for *filename* and return that if it exists"""
    # dict.pop with a default does the lookup-and-delete in one step,
    # returning None when no mapping was present
    return file2file_remap.pop(filename, None)
def sha1(filename):
    """Return the SHA1 hex digest of the cached plain lines of
    *filename*, caching the file (and memoizing the hash object on the
    cache entry) as needed.  Return None when the file can not be
    cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
    entry = file_cache[filename]
    if entry.sha1:
        return entry.sha1.hexdigest()
    # Fix: the local was previously also named `sha1`, shadowing this
    # function; renamed to `digest`.
    digest = hashlib.sha1()
    for line in entry.lines['plain']:
        digest.update(line.encode('utf-8'))
    entry.sha1 = digest
    return digest.hexdigest()
def size(filename, use_cache_only=False):
    """Return the number of plain lines cached for *filename*.  Unless
    *use_cache_only* is True, try to cache the file first.  Return
    None when the file can not be found."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return len(file_cache[filename].lines['plain'])
def maxline(filename, use_cache_only=False):
    """Return the maximum line number of *filename* after taking line
    remapping into account.  With no remapping this is the same as
    size()."""
    # One .get() replaces the previous `in` test followed by a
    # redundant second lookup of the same key.
    remap_line_entry = file2file_remap_lines.get(filename)
    if not remap_line_entry:
        return size(filename, use_cache_only)
    max_lineno = -1
    for pair in remap_line_entry.from_to_pairs:
        if pair[1] > max_lineno:
            max_lineno = pair[1]
    if max_lineno == -1:
        # No pairs recorded: fall back to the plain line count.
        return size(filename, use_cache_only)
    return max_lineno
def stat(filename, use_cache_only=False):
    """Return the cached os.stat() result for *filename*.  Unless
    *use_cache_only* is True, try to cache the file first.  Return
    None when the file can not be found."""
    filename = pyc2py(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return file_cache[filename].stat
def trace_line_numbers(filename, reload_on_change=False):
    """Return an Array of breakpoints in filename.
    The list will contain an entry for each distinct line event call
    so it is possible (and possibly useful) for a line number appear more
    than once."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname: return None
    e = file_cache[filename]
    if not e.line_numbers:
        # Computed lazily and memoized on the cache entry.  Old
        # coverage releases exposed analyze_morf(); newer ones use
        # coverage().analysis().
        if hasattr(coverage.coverage, 'analyze_morf'):
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
            pass
        pass
    return e.line_numbers
def is_mapped_file(filename):
    """Classify *filename*: 'file' when it has a whole-file alias,
    'file_line' when it has a line remapping, otherwise None."""
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    """Resolve a whole-file alias: return the name *filename* maps to,
    or *filename* itself when it has no mapping."""
    # FIXME: this is wrong?  (kept from original -- verify mapping
    # direction against remap_file())
    if filename in file2file_remap:
        return file2file_remap[filename]
    return filename
def unmap_file_line(filename, line_number, reverse=False):
    """Map (*filename*, *line_number*) through any registered line
    remapping, returning the resulting (filename, line_number) pair.
    With *reverse* True each pair is applied in the other direction.
    Unmapped files come back unchanged."""
    remap_line_entry = file2file_remap_lines.get(filename)
    mapped_line_number = line_number
    if remap_line_entry:
        filename = remap_line_entry.mapped_path
        cache_entry = file_cache.get(filename, None)
        if cache_entry:
            line_max = maxline(filename)
        else:
            line_max = large_int
        last_t = (1, 1)
        # FIXME: use binary search
        # Note we assume from_line is increasing.
        # Add sentinel at end of from pairs to handle using the final
        # entry for line numbers greater than it.
        # Find the closest mapped line number equal or before line_number.
        for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
            if reverse:
                t = list(reversed(t))
            if t[1] == line_number:
                mapped_line_number = t[0]
                break
            elif t[1] > line_number:
                # Between two mapped pairs: extrapolate from the last
                # pair by the line offset.
                mapped_line_number = last_t[0] + (line_number - last_t[1] )
                break
            last_t = t
        pass
    return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
"""Update a cache entry. If something is wrong, return
*None*. Return *True* if the cache was updated and *False* if not. If
*use_linecache_lines* is *True*, use an existing cache entry as source
for the lines of the file."""
if not filename: return None
orig_filename = filename
filename = pyc2py(filename)
if filename in file_cache: del file_cache[filename]
path = os.path.abspath(filename)
stat = None
if get_option('use_linecache_lines', opts):
fname_list = [filename]
mapped_path = file2file_remap.get(path)
if mapped_path:
fname_list.append(mapped_path)
for filename in fname_list:
try:
stat = os.stat(filename)
plain_lines = linecache.getlines(filename)
trailing_nl = has_trailing_nl(plain_lines[-1])
lines = {
'plain' : plain_lines,
}
file_cache[filename] = LineCacheInfo(stat, None, lines,
path, None)
except:
pass
pass
if orig_filename != filename:
file2file_remap[orig_filename] = filename
file2file_remap[os.path.abspath(orig_filename)] = filename
pass
file2file_remap[path] = filename
return filename
pass
pass
if os.path.exists(path):
stat = os.stat(path)
elif module_globals and '__loader__' in module_globals:
name = module_globals.get('__name__')
loader = module_globals['__loader__']
get_source = getattr(loader, 'get_source', None)
if name and get_source:
try:
data = get_source(name)
except (ImportError, IOError):
pass
else:
if data is None:
# No luck, the PEP302 loader cannot find the source
# for this module.
return None
# FIXME: DRY with code below
lines = {'plain' : data.splitlines()}
raw_string = ''.join(lines['plain'])
trailing_nl = has_trailing_nl(raw_string)
if 'style' in opts:
key = opts['style']
highlight_opts = {'style': key}
else:
key = 'terminal'
highlight_opts = {}
lines[key] = highlight_array(raw_string.split('\n'),
trailing_nl, **highlight_opts)
file_cache[filename] = \
LineCacheInfo(None, None, lines, filename, None)
file2file_remap[path] = filename
return True
pass
pass
if not os.path.isabs(filename):
# Try looking through the module search path, which is only useful
# when handling a relative filename.
stat = None
for dirname in sys.path:
path = os.path.join(dirname, filename)
if os.path.exists(path):
stat = os.stat(path)
break
pass
if not stat: return False
pass
try:
mode = 'r' if PYTHON3 else 'rU'
with open(path, mode) as fp:
lines = {'plain' : fp.readlines()}
eols = fp.newlines
except:
return None
# FIXME: DRY with code above
raw_string = ''.join(lines['plain'])
trailing_nl = has_trailing_nl(raw_string)
if 'style' in opts:
key = opts['style'] or 'default'
highlight_opts = {'style': key}
else:
key = 'terminal'
highlight_opts = {}
lines[key] = highlight_array(raw_string.split('\n'),
trailing_nl, **highlight_opts)
if orig_filename != filename:
file2file_remap[orig_filename] = filename
file2file_remap[os.path.abspath(orig_filename)] = filename
pass
pass
file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
file2file_remap[path] = filename
return True
# example usage
if __name__ == '__main__':
def yes_no(var):
if var: return ""
else: return "not "
return # Not reached
# print(getline(__file__, 1, {'output': 'dark'}))
# print(getline(__file__, 2, {'output': 'light'}))
# from pygments.styles import STYLE_MAP
# opts = {'style': list(STYLE_MAP.keys())[0]}
# print(getline(__file__, 1, opts))
# update_cache('os')
# lines = getlines(__file__)
# print("%s has %s lines" % (__file__, len(lines['plain'])))
# lines = getlines(__file__, {'output': 'light'})
# i = 0
# for line in lines:
# i += 1
# print(line.rstrip('\n').rstrip('\n'))
# if i > 20: break
# pass
# line = getline(__file__, 6)
# print("The 6th line is\n%s" % line)
# line = remap_file(__file__, 'another_name')
# print(getline('another_name', 7))
# print("Files cached: %s" % cached_files())
# update_cache(__file__)
# checkcache(__file__)
# print("%s has %s lines" % (__file__, size(__file__)))
# print("%s trace line numbers:\n" % __file__)
# print("%s " % repr(trace_line_numbers(__file__)))
# print("%s is %scached." % (__file__,
# yes_no(is_cached(__file__))))
# print(stat(__file__))
# print("Full path: %s" % path(__file__))
# checkcache() # Check all files in the cache
# clear_file_format_cache()
# clear_file_cache()
# print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
# # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
# # # if digest is not None: print digest.first[0]
# line = getline(__file__, 7)
# print("The 7th line is\n%s" % line)
orig_path = __file__
mapped_path = 'test2'
start_line = 10
start_mapped = 6
remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
for l in (1,):
line = getline(mapped_path, l+start_mapped)
print("Remapped %s line %d should be line %d of %s. line is:\n%s"
% (mapped_path, start_mapped+l, start_line+l, orig_path, line))
# print("XXX", file2file_remap_lines)
|
rocky/python-filecache
|
pyficache/main.py
|
clear_file_cache
|
python
|
def clear_file_cache(filename=None):
    """Clear the file cache.  With no *filename* clear everything,
    including the remap tables; otherwise drop just that one entry."""
    global file_cache, file2file_remap, file2file_remap_lines
    if filename is None:
        file_cache = {}
        file2file_remap = {}
        file2file_remap_lines = {}
    elif filename in file_cache:
        del file_cache[filename]
|
Clear the file cache. If no filename is given, clear it entirely.
If a filename is given, clear just that filename.
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L198-L211
| null |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
PYTHON3 = (sys.version_info >= (3, 0))
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
large_int = sys.maxsize
else:
large_int = sys.maxint
# Module-wide defaults consulted by get_option() whenever a caller
# does not supply a value.
default_opts = {
    'reload_on_change' : False, # Check if file has changed since last
                                # time
    'use_linecache_lines' : True, # Take lines from linecache's cache
    'strip_nl' : True, # Strip trailing \n on line returned
    'output' : 'plain' # Do we want plain output?
                       # Set to 'terminal'
                       # for terminal syntax-colored output
}
def get_option(key, options):
    """Look up *key* in *options*, falling back to the module-wide
    default_opts when *options* is missing or lacks the key."""
    global default_opts
    if options and key in options:
        return options[key]
    return default_opts.get(key)
def has_trailing_nl(string):
    """Return True when *string* is non-empty and ends in a newline."""
    return string.endswith('\n')
def pyc2py(filename):
    """Return the .py source name corresponding to a .pyc/.pyo
    *filename*; any other name is returned unchanged."""
    if not re.match(".*py[co]$", filename):
        return filename
    if PYTHON3:
        return re.sub(r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER,
                      '\\1\\2.py',
                      filename)
    return filename[:-1]
class LineCacheInfo:
    """Cache record for one file.

    Fields: os.stat() result, trace line numbers, a dict of line
    arrays keyed by output format ('plain', 'terminal', or a Pygments
    style name), the full path, an optional memoized SHA1 hash object,
    and the detected end-of-line style(s)."""
    def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
        self.stat = stat
        self.line_numbers = line_numbers
        self.lines = lines
        self.path = path
        self.sha1 = sha1
        self.eols = eols
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the a full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is # the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number in as Python sees it. The second item is the
# line number in corresponding mapped_path. The the first entry of the
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But we not the other way
# around. That is we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work this.
def clear_file_format_cache():
    """Remove syntax-formatted lines in the cache.  Use this when you
    change the Pygments syntax or Token formatting and want to redo
    how files may have previously been syntax marked.  Only the
    'plain' (unformatted) lines are kept.

    Bug fix: formatted entries are now *deleted* rather than set to
    None.  A leftover None value meant the format key still existed,
    so getlines() skipped re-highlighting and returned None."""
    for fname, cache_info in file_cache.items():
        for fmt in [f for f in cache_info.lines if f != 'plain']:
            del cache_info.lines[fmt]
def cached_files():
    """Return the names of all files currently in the line cache."""
    return [name for name in file_cache]
def checkcache(filename=None, opts=False):
    """Discard cache entries that are out of date. If *filename* is *None*
    all entries in the file cache *file_cache* are checked. If we do not
    have stat information about a file it will be kept. Return a list of
    invalidated filenames. None is returned if a filename was given but
    not found cached."""
    # *opts* may be an option dict or a bare boolean standing in for
    # the 'use_linecache_lines' option.
    if isinstance(opts, dict):
        use_linecache_lines = opts['use_linecache_lines']
    else:
        use_linecache_lines = opts
        pass
    if not filename:
        filenames = list(file_cache.keys())
    elif filename in file_cache:
        filenames = [filename]
    else:
        return None
    result = []
    for filename in filenames:
        if filename not in file_cache: continue
        path = file_cache[filename].path
        if os.path.exists(path):
            cache_info = file_cache[filename].stat
            stat = os.stat(path)
            # A size or mtime change invalidates the entry.
            if stat and \
               (cache_info.st_size != stat.st_size or
                cache_info.st_mtime != stat.st_mtime):
                result.append(filename)
                # NOTE(review): update_cache() expects an opts dict as
                # its second argument; passing this bare value is fine
                # for False (get_option falls back to default_opts),
                # but a truthy non-dict would make get_option() raise
                # TypeError -- confirm intended.
                update_cache(filename, use_linecache_lines)
        else:
            # File vanished from disk; rebuild from whatever remains.
            result.append(filename)
            update_cache(filename)
            pass
        pass
    return result
def cache_script(script, text, opts={}):
    """Add *script* with source *text* to the script cache unless it
    is already present.  Return *script*."""
    global script_cache
    if script not in script_cache:
        update_script_cache(script, text, opts)
    return script
def uncache_script(script, opts={}):
    """Drop *script* from the script cache.  Return *script* when it
    was cached, otherwise None."""
    global script_cache
    if script not in script_cache:
        return None
    del script_cache[script]
    return script
def update_script_cache(script, text, opts={}):
    """Store source *text* for *script* in the script cache when it is
    not already present.  Return *script*.  (*opts* is accepted for
    interface symmetry and currently unused.)"""
    global script_cache
    script_cache.setdefault(script, text)
    return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
    """Cache *filename* if it is not already cached.

    Return the expanded path for it in the cache, or None if we can
    not find the file.

    Args:
        filename: name as Python sees it; .pyc/.pyo names are mapped
            to their .py source first.
        reload_on_change: when True and already cached, re-check
            whether the file changed on disk.
        opts: option dict (see default_opts).

    Bug fix: previously this wrote 'use_linecache_lines' directly into
    *opts*, silently mutating the caller's dict -- and, via the default
    argument, the module-wide default_opts.  A copy is mutated now.
    """
    filename = pyc2py(filename)
    if filename in file_cache:
        if reload_on_change:
            checkcache(filename)
    else:
        # Copy so the caller's dict (or default_opts) is untouched.
        opts = dict(opts)
        opts['use_linecache_lines'] = True
        update_cache(filename, opts)
    if filename in file_cache:
        return file_cache[filename].path
    return None
def is_cached(file_or_script):
    """Return True when *file_or_script* is present in the appropriate
    cache: the file cache for strings, the script cache otherwise."""
    if not isinstance(file_or_script, str):
        return is_cached_script(file_or_script)
    return unmap_file(file_or_script) in file_cache
def is_cached_script(filename):
    """Return True when *filename* (after alias unmapping) is in the
    script cache."""
    # Membership test directly on the dict; materializing the key list
    # with list(...keys()) was needless work.
    return unmap_file(filename) in script_cache
def is_empty(filename):
    """Return True when the cached plain lines of *filename* are empty."""
    resolved = unmap_file(filename)
    return not file_cache[resolved].lines['plain']
def getline(file_or_script, line_number, opts=default_opts):
    """Get line *line_number* from file named *file_or_script*. Return None if
    there was a problem or it is not found.
    Example:
    line = pyficache.getline("/tmp/myfile.py", 6)
    """
    # Resolve whole-file aliases first, then any per-line remapping.
    filename = unmap_file(file_or_script)
    filename, line_number = unmap_file_line(filename, line_number)
    lines = getlines(filename, opts)
    if lines and line_number >=1 and line_number <= maxline(filename):
        line = lines[line_number-1]
        if get_option('strip_nl', opts):
            return line.rstrip('\n')
        else:
            return line
        pass
    else:
        return None
    return # Not reached
def getlines(filename, opts=default_opts):
    """Read lines of *filename* and cache the results. However, if
    *filename* was previously cached use the results from the
    cache. Return *None* if we can not get lines
    """
    if get_option('reload_on_change', opts): checkcache(filename)
    fmt = get_option('output', opts)
    highlight_opts = {'bg': fmt}
    cs = opts.get('style')
    # The color STYLE of Terminal256Formatter takes precedence over
    # the light/dark color THEMES of TerminalFormatter.
    if cs:
        highlight_opts['style'] = cs
        fmt = cs
    if filename not in file_cache:
        update_cache(filename, opts)
        filename = pyc2py(filename)
        if filename not in file_cache: return None
        pass
    lines = file_cache[filename].lines
    # Formatted variants are computed lazily on first request for a
    # given format and memoized on the cache entry.
    if fmt not in lines.keys():
        lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
        pass
    return lines[fmt]
def highlight_array(array, trailing_nl=True,
                    bg='light', **options):
    """Syntax-highlight *array* (a list of source lines) and return
    the highlighted lines, each newline-terminated.  When
    *trailing_nl* is False the final newline is stripped."""
    colorized = highlight_string(''.join(array), bg, **options)
    result = [text + "\n" for text in colorized.split('\n')]
    if not trailing_nl:
        result[-1] = result[-1].rstrip('\n')
    return result
# Shared Pygments objects, built once at import time and reused by
# highlight_string() below.
python_lexer = PythonLexer()
# TerminalFormatter uses a color THEME with light and dark pairs.
# But Terminal256Formatter uses a color STYLE; highlight_string()
# chooses between the two kinds of formatter.
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
    """Return Python source *string* syntax-highlighted for a terminal.

    When options['style'] names a Pygments style, a Terminal256Formatter
    with that style is used (rebuilt only when the style changes);
    otherwise a light- or dark-themed TerminalFormatter is chosen
    according to *bg*."""
    global terminal_256_formatter
    if options.get('style'):
        # Rebuild the cached 256-color formatter only on a style change.
        if terminal_256_formatter.style != options['style']:
            terminal_256_formatter = \
                Terminal256Formatter(style=options['style'])
        # 'style' is consumed here; highlight() would not accept it.
        del options['style']
        return highlight(string, python_lexer, terminal_256_formatter,
                         **options)
    elif 'light' == bg:
        return highlight(string, python_lexer, light_terminal_formatter,
                         **options)
    else:
        return highlight(string, python_lexer, dark_terminal_formatter,
                         **options)
    pass
def path(filename):
    """Return the full path stored in the cache for *filename*, or
    None when the file is not cached."""
    resolved = unmap_file(filename)
    if resolved in file_cache:
        return file_cache[resolved].path
    return None
def remap_file(from_file, to_file):
    """Register *to_file* as an alias for *from_file*."""
    file2file_remap[to_file] = from_file
def remap_file_lines(from_path, to_path, line_map_list):
    """Add *line_map_list* to the line-number associations from
    *from_path* to *to_path*.

    Each entry of *line_map_list* is a pair (from_lineno, to_lineno).
    Existing pairs for *to_path* are kept; the merged list is stored
    sorted by the from-line number."""
    from_path = pyc2py(from_path)
    # Make sure the target file is in the cache so later lookups work.
    cache_file(to_path)
    remap_entry = file2file_remap_lines.get(to_path)
    if remap_entry:
        new_list = list(remap_entry.from_to_pairs) + list(line_map_list)
    else:
        new_list = line_map_list
    # FIXME: look for duplicates ?
    file2file_remap_lines[to_path] = RemapLineEntry(
        from_path,
        tuple(sorted(new_list, key=lambda t: t[0]))
    )
    return
def remove_remap_file(filename):
    """Delete any alias mapping for *filename* and return the name it
    mapped to, or None when there was no mapping."""
    return file2file_remap.pop(filename, None)
def sha1(filename):
    """Return the SHA1 hex digest of the cached plain lines of
    *filename*, caching the file (and memoizing the hash object on the
    cache entry) as needed.  Return None when the file can not be
    cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
    entry = file_cache[filename]
    if entry.sha1:
        return entry.sha1.hexdigest()
    # Fix: the local was previously also named `sha1`, shadowing this
    # function; renamed to `digest`.
    digest = hashlib.sha1()
    for line in entry.lines['plain']:
        digest.update(line.encode('utf-8'))
    entry.sha1 = digest
    return digest.hexdigest()
def size(filename, use_cache_only=False):
    """Return the number of plain lines cached for *filename*.  Unless
    *use_cache_only* is True, try to cache the file first.  Return
    None when the file can not be found."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return len(file_cache[filename].lines['plain'])
def maxline(filename, use_cache_only=False):
    """Return the maximum line number of *filename* after taking line
    remapping into account.  With no remapping this is the same as
    size()."""
    # One .get() replaces the previous `in` test followed by a
    # redundant second lookup of the same key.
    remap_line_entry = file2file_remap_lines.get(filename)
    if not remap_line_entry:
        return size(filename, use_cache_only)
    max_lineno = -1
    for pair in remap_line_entry.from_to_pairs:
        if pair[1] > max_lineno:
            max_lineno = pair[1]
    if max_lineno == -1:
        # No pairs recorded: fall back to the plain line count.
        return size(filename, use_cache_only)
    return max_lineno
def stat(filename, use_cache_only=False):
    """Return the cached os.stat() result for *filename*.  Unless
    *use_cache_only* is True, try to cache the file first.  Return
    None when the file can not be found."""
    filename = pyc2py(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return file_cache[filename].stat
def trace_line_numbers(filename, reload_on_change=False):
    """Return an Array of breakpoints in filename.
    The list will contain an entry for each distinct line event call
    so it is possible (and possibly useful) for a line number appear more
    than once."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname: return None
    e = file_cache[filename]
    if not e.line_numbers:
        # Computed lazily and memoized on the cache entry.  Old
        # coverage releases exposed analyze_morf(); newer ones use
        # coverage().analysis().
        if hasattr(coverage.coverage, 'analyze_morf'):
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
            pass
        pass
    return e.line_numbers
def is_mapped_file(filename):
    """Classify *filename*: 'file' when it has a whole-file alias,
    'file_line' when it has a line remapping, otherwise None."""
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    """Resolve a whole-file alias: return the name *filename* maps to,
    or *filename* itself when it has no mapping."""
    # FIXME: this is wrong?  (kept from original -- verify mapping
    # direction against remap_file())
    if filename in file2file_remap:
        return file2file_remap[filename]
    return filename
def unmap_file_line(filename, line_number, reverse=False):
    """Map (*filename*, *line_number*) through any registered line
    remapping, returning the resulting (filename, line_number) pair.
    With *reverse* True each pair is applied in the other direction.
    Unmapped files come back unchanged."""
    remap_line_entry = file2file_remap_lines.get(filename)
    mapped_line_number = line_number
    if remap_line_entry:
        filename = remap_line_entry.mapped_path
        cache_entry = file_cache.get(filename, None)
        if cache_entry:
            line_max = maxline(filename)
        else:
            line_max = large_int
        last_t = (1, 1)
        # FIXME: use binary search
        # Note we assume from_line is increasing.
        # Add sentinel at end of from pairs to handle using the final
        # entry for line numbers greater than it.
        # Find the closest mapped line number equal or before line_number.
        for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
            if reverse:
                t = list(reversed(t))
            if t[1] == line_number:
                mapped_line_number = t[0]
                break
            elif t[1] > line_number:
                # Between two mapped pairs: extrapolate from the last
                # pair by the line offset.
                mapped_line_number = last_t[0] + (line_number - last_t[1] )
                break
            last_t = t
        pass
    return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
    """Update a cache entry. If something is wrong, return
    *None*. Return *True* if the cache was updated and *False* if not. If
    *use_linecache_lines* is *True*, use an existing cache entry as source
    for the lines of the file.

    The lines may also be recovered from a PEP 302 loader found in
    *module_globals* when the file does not exist on disk.
    """
    if not filename: return None
    orig_filename = filename
    filename = pyc2py(filename)
    if filename in file_cache: del file_cache[filename]
    # NOTE: the locals `path` and `stat` shadow the module-level
    # functions of the same names inside this function.
    path = os.path.abspath(filename)
    stat = None
    if get_option('use_linecache_lines', opts):
        # Fast path: let the stdlib linecache supply the lines.
        fname_list = [filename]
        mapped_path = file2file_remap.get(path)
        if mapped_path:
            fname_list.append(mapped_path)
        for filename in fname_list:
            try:
                stat = os.stat(filename)
                plain_lines = linecache.getlines(filename)
                trailing_nl = has_trailing_nl(plain_lines[-1])
                lines = {
                    'plain' : plain_lines,
                }
                file_cache[filename] = LineCacheInfo(stat, None, lines,
                                                     path, None)
            except:
                # NOTE(review): bare except deliberately treats any
                # failure as "not cacheable via linecache".
                pass
            pass
        if orig_filename != filename:
            file2file_remap[orig_filename] = filename
            file2file_remap[os.path.abspath(orig_filename)] = filename
            pass
        file2file_remap[path] = filename
        return filename
        pass
    pass
    if os.path.exists(path):
        stat = os.stat(path)
    elif module_globals and '__loader__' in module_globals:
        # No file on disk: try to recover the source text from a
        # PEP 302 loader (e.g. zipimport).
        name = module_globals.get('__name__')
        loader = module_globals['__loader__']
        get_source = getattr(loader, 'get_source', None)
        if name and get_source:
            try:
                data = get_source(name)
            except (ImportError, IOError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return None
                # FIXME: DRY with code below
                lines = {'plain' : data.splitlines()}
                raw_string = ''.join(lines['plain'])
                trailing_nl = has_trailing_nl(raw_string)
                if 'style' in opts:
                    key = opts['style']
                    highlight_opts = {'style': key}
                else:
                    key = 'terminal'
                    highlight_opts = {}
                lines[key] = highlight_array(raw_string.split('\n'),
                                             trailing_nl, **highlight_opts)
                file_cache[filename] = \
                    LineCacheInfo(None, None, lines, filename, None)
                file2file_remap[path] = filename
                return True
            pass
        pass
    if not os.path.isabs(filename):
        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        stat = None
        for dirname in sys.path:
            path = os.path.join(dirname, filename)
            if os.path.exists(path):
                stat = os.stat(path)
                break
            pass
        if not stat: return False
        pass
    try:
        # 'rU' (universal newlines) is needed to fill fp.newlines on
        # Python 2; on Python 3 plain 'r' already does.
        mode = 'r' if PYTHON3 else 'rU'
        with open(path, mode) as fp:
            lines = {'plain' : fp.readlines()}
            eols = fp.newlines
    except:
        # NOTE(review): bare except -- any read failure means "cannot
        # cache this file".
        return None
    # FIXME: DRY with code above
    raw_string = ''.join(lines['plain'])
    trailing_nl = has_trailing_nl(raw_string)
    if 'style' in opts:
        key = opts['style'] or 'default'
        highlight_opts = {'style': key}
    else:
        key = 'terminal'
        highlight_opts = {}
    lines[key] = highlight_array(raw_string.split('\n'),
                                 trailing_nl, **highlight_opts)
    if orig_filename != filename:
        file2file_remap[orig_filename] = filename
        file2file_remap[os.path.abspath(orig_filename)] = filename
        pass
    pass
    file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
    file2file_remap[path] = filename
    return True
# example usage: exercises line remapping when the module is run as a
# script; the commented-out calls demonstrate the rest of the API.
if __name__ == '__main__':
    def yes_no(var):
        """Return '' for truthy *var* and 'not ' otherwise; used to
        build 'is cached' / 'is not cached' messages."""
        if var: return ""
        else: return "not "
        return # Not reached
    # print(getline(__file__, 1, {'output': 'dark'}))
    # print(getline(__file__, 2, {'output': 'light'}))
    # from pygments.styles import STYLE_MAP
    # opts = {'style': list(STYLE_MAP.keys())[0]}
    # print(getline(__file__, 1, opts))
    # update_cache('os')
    # lines = getlines(__file__)
    # print("%s has %s lines" % (__file__, len(lines['plain'])))
    # lines = getlines(__file__, {'output': 'light'})
    # i = 0
    # for line in lines:
    #     i += 1
    #     print(line.rstrip('\n').rstrip('\n'))
    #     if i > 20: break
    #     pass
    # line = getline(__file__, 6)
    # print("The 6th line is\n%s" % line)
    # line = remap_file(__file__, 'another_name')
    # print(getline('another_name', 7))
    # print("Files cached: %s" % cached_files())
    # update_cache(__file__)
    # checkcache(__file__)
    # print("%s has %s lines" % (__file__, size(__file__)))
    # print("%s trace line numbers:\n" % __file__)
    # print("%s " % repr(trace_line_numbers(__file__)))
    # print("%s is %scached." % (__file__,
    #       yes_no(is_cached(__file__))))
    # print(stat(__file__))
    # print("Full path: %s" % path(__file__))
    # checkcache() # Check all files in the cache
    # clear_file_format_cache()
    # clear_file_cache()
    # print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
    # # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
    # # # if digest is not None: print digest.first[0]
    # line = getline(__file__, 7)
    # print("The 7th line is\n%s" % line)
    # Demo: declare that line 10 of this file corresponds to line 6 of
    # the alias 'test2', then read it back through the alias.
    orig_path = __file__
    mapped_path = 'test2'
    start_line = 10
    start_mapped = 6
    remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
    for l in (1,):
        line = getline(mapped_path, l+start_mapped)
        print("Remapped %s line %d should be line %d of %s. line is:\n%s"
              % (mapped_path, start_mapped+l, start_line+l, orig_path, line))
    # print("XXX", file2file_remap_lines)
|
rocky/python-filecache
|
pyficache/main.py
|
clear_file_format_cache
|
python
|
def clear_file_format_cache():
    """Drop all syntax-highlighted line arrays, keeping only the
    'plain' lines, so files are re-highlighted on next access.

    Bug fix: entries are deleted instead of set to None; a leftover
    None value made getlines() skip re-highlighting and return None."""
    for fname, cache_info in file_cache.items():
        for fmt in [f for f in cache_info.lines if f != 'plain']:
            del cache_info.lines[fmt]
|
Remove syntax-formatted lines in the cache. Use this
when you change the Pygments syntax or Token formatting
and want to redo how files may have previously been
syntax marked.
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L213-L224
| null |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
PYTHON3 = (sys.version_info >= (3, 0))
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
large_int = sys.maxsize
else:
large_int = sys.maxint
# Default option settings consulted by get_option() when the caller's
# options dict is missing a key (or is falsy).
default_opts = {
    'reload_on_change' : False, # Check if file has changed since last
    # time
    'use_linecache_lines' : True,
    'strip_nl' : True, # Strip trailing \n on line returned
    'output' : 'plain' # Do we want plain output?
    # Set to 'terminal'
    # for terminal syntax-colored output
    }
def get_option(key, options):
    """Look up *key* in *options*, falling back to the module-wide
    *default_opts* when *options* is falsy or lacks the key."""
    global default_opts
    if options and key in options:
        return options[key]
    return default_opts.get(key)
def has_trailing_nl(string):
    """Return True when *string* ends with a newline character."""
    return string.endswith('\n')
def pyc2py(filename):
    """Return the corresponding .py name for a .pyc/.pyo *filename*;
    any other name is returned unchanged."""
    if not re.match(".*py[co]$", filename):
        return filename
    if PYTHON3:
        # Strip the __pycache__ directory and the interpreter tag,
        # e.g. __pycache__/mod.cpython-38.pyc -> mod.py
        return re.sub(r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER,
                      '\\1\\2.py',
                      filename)
    # Python 2: just drop the trailing 'c'/'o'.
    return filename[:-1]
class LineCacheInfo:
    """One cache entry: stat info, coverage line numbers, the lines
    themselves (a dict keyed by output format, 'plain' at minimum),
    the full path, a memoized SHA1 object, and the newline style."""

    def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
        self.stat = stat
        self.line_numbers = line_numbers
        self.lines = lines
        self.path = path
        self.sha1 = sha1
        self.eols = eols
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is # the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number in as Python sees it. The second item is the
# line number in corresponding mapped_path. The first entry of the
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But not the other way
# around. That is we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work this.
def clear_file_cache(filename=None):
    """Clear the entire file cache, or — when *filename* is given —
    only that file's entry."""
    global file_cache, file2file_remap, file2file_remap_lines
    if filename is None:
        file_cache = {}
        file2file_remap = {}
        file2file_remap_lines = {}
    elif filename in file_cache:
        del file_cache[filename]
    return
def cached_files():
    """Return a list of the file names currently cached."""
    return [fname for fname in file_cache]
def checkcache(filename=None, opts=False):
    """Discard cache entries that are out of date. If *filename* is *None*
    all entries in the file cache *file_cache* are checked. If we do not
    have stat information about a file it will be kept. Return a list of
    invalidated filenames. None is returned if a filename was given but
    not found cached.

    *opts* may be an options dict or a bare boolean meaning
    use_linecache_lines."""
    if isinstance(opts, dict):
        # .get() with the module default instead of opts[...], which
        # raised KeyError when the caller's dict lacked the key.
        use_linecache_lines = opts.get('use_linecache_lines',
                                       default_opts['use_linecache_lines'])
    else:
        use_linecache_lines = opts

    if not filename:
        filenames = list(file_cache.keys())
    elif filename in file_cache:
        filenames = [filename]
    else:
        return None

    result = []
    for filename in filenames:
        if filename not in file_cache: continue
        path = file_cache[filename].path
        if os.path.exists(path):
            cache_info = file_cache[filename].stat
            stat = os.stat(path)
            if stat and \
               (cache_info.st_size != stat.st_size or
                cache_info.st_mtime != stat.st_mtime):
                result.append(filename)
                # update_cache() expects an options dict.  The old code
                # passed the bare boolean, which made get_option() raise
                # TypeError ("in" on a bool) whenever the flag was truthy.
                update_cache(filename,
                             {'use_linecache_lines': use_linecache_lines})
        else:
            result.append(filename)
            update_cache(filename)
    return result
def cache_script(script, text, opts={}):
    """Store *text* for *script* unless it is already cached; return
    *script*."""
    global script_cache
    if script not in script_cache:
        update_script_cache(script, text, opts)
    return script
def uncache_script(script, opts={}):
    """Remove *script* from the script cache; return it, or None when it
    was not cached."""
    global script_cache
    if script not in script_cache:
        return None
    del script_cache[script]
    return script
def update_script_cache(script, text, opts={}):
    """Store *text* under *script* if no entry exists yet; always return
    *script*."""
    global script_cache
    script_cache.setdefault(script, text)
    return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
    """Cache filename if it is not already cached.
    Return the expanded filename for it in the cache
    or None if we can not find the file."""
    filename = pyc2py(filename)
    if filename in file_cache:
        if reload_on_change: checkcache(filename)
    else:
        # Work on a copy: the old code assigned into the caller's dict —
        # and, via the default argument, into the shared default_opts.
        opts = dict(opts)
        opts['use_linecache_lines'] = True
        update_cache(filename, opts)
    if filename in file_cache:
        return file_cache[filename].path
    return None
def is_cached(file_or_script):
    """Return True if *file_or_script* is in the file or script cache."""
    if not isinstance(file_or_script, str):
        return is_cached_script(file_or_script)
    return unmap_file(file_or_script) in file_cache
def is_cached_script(filename):
    """Return True when *filename* (after alias resolution) is in the
    script cache."""
    return unmap_file(filename) in script_cache
def is_empty(filename):
    """Return True when the cached plain text of *filename* has no lines."""
    return len(file_cache[unmap_file(filename)].lines['plain']) == 0
def getline(file_or_script, line_number, opts=default_opts):
    """Get line *line_number* from file named *file_or_script*. Return None
    if there was a problem or the line is not found.

    Example:
    line = pyficache.getline("/tmp/myfile.py", 6)
    """
    filename = unmap_file(file_or_script)
    filename, line_number = unmap_file_line(filename, line_number)
    lines = getlines(filename, opts)
    if not lines:
        return None
    if line_number < 1 or line_number > maxline(filename):
        return None
    line = lines[line_number - 1]
    if get_option('strip_nl', opts):
        line = line.rstrip('\n')
    return line
def getlines(filename, opts=default_opts):
    """Read lines of *filename* and cache the results. However, if
    *filename* was previously cached use the results from the
    cache. Return *None* if we can not get lines
    """
    if get_option('reload_on_change', opts):
        checkcache(filename)
    fmt = get_option('output', opts)
    highlight_opts = {'bg': fmt}
    style = opts.get('style')
    # A color style (Terminal256Formatter) takes precedence over the
    # light/dark color themes of TerminalFormatter.
    if style:
        highlight_opts['style'] = style
        fmt = style
    if filename not in file_cache:
        update_cache(filename, opts)
        filename = pyc2py(filename)
        if filename not in file_cache:
            return None
    lines = file_cache[filename].lines
    # Highlight lazily, once per requested format.
    if fmt not in lines:
        lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
    return lines[fmt]
def highlight_array(array, trailing_nl=True,
                    bg='light', **options):
    """Syntax-highlight *array* (a list of source lines) and return the
    highlighted lines, each newline-terminated. When *trailing_nl* is
    False the final newline is stripped."""
    highlighted = highlight_string(''.join(array), bg, **options)
    lines = [line + "\n" for line in highlighted.split('\n')]
    if not trailing_nl:
        lines[-1] = lines[-1].rstrip('\n')
    return lines
# Shared Pygments objects used by highlight_string().  The 256-color
# formatter is rebuilt (and cached here) whenever a different style is
# requested.
python_lexer = PythonLexer()
# TerminalFormatter uses a colorTHEME with light and dark pairs.
# But Terminal256Formatter uses a colorSTYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
    """Return *string* syntax-highlighted as Python source for a terminal.

    With a 'style' option the shared 256-color formatter is used
    (rebuilt and cached globally when the requested style changes);
    otherwise the plain terminal formatter for the *bg* ('light' or
    'dark') theme is used."""
    global terminal_256_formatter
    if options.get('style'):
        # Rebuild the shared 256-color formatter only on a style change.
        if terminal_256_formatter.style != options['style']:
            terminal_256_formatter = \
                Terminal256Formatter(style=options['style'])
        # 'style' is not a highlight() keyword; drop it before the call.
        del options['style']
        return highlight(string, python_lexer, terminal_256_formatter,
                         **options)
    elif 'light' == bg:
        return highlight(string, python_lexer, light_terminal_formatter,
                         **options)
    else:
        return highlight(string, python_lexer, dark_terminal_formatter,
                         **options)
    pass
def path(filename):
    """Return full filename path for filename"""
    entry = file_cache.get(unmap_file(filename))
    return entry.path if entry else None
def remap_file(from_file, to_file):
    """Make *to_file* be a synonym for *from_file*: future lookups of
    *to_file* resolve (via unmap_file) to *from_file*."""
    file2file_remap[to_file] = from_file
    return
def remap_file_lines(from_path, to_path, line_map_list):
    """Merge *line_map_list* — (from_line, to_line) pairs — into the line
    remapping that associates *from_path* with *to_path*."""
    from_path = pyc2py(from_path)
    cache_file(to_path)
    existing = file2file_remap_lines.get(to_path)
    if existing:
        pairs = list(existing.from_to_pairs) + list(line_map_list)
    else:
        pairs = line_map_list
    # FIXME: look for duplicates ?
    # Keep pairs sorted by the "from" line number.
    file2file_remap_lines[to_path] = RemapLineEntry(
        from_path,
        tuple(sorted(pairs, key=lambda pair: pair[0]))
    )
    return
def remove_remap_file(filename):
    """Drop any alias registered for *filename*; return the old target,
    or None when there was none."""
    global file2file_remap
    return file2file_remap.pop(filename, None)
def sha1(filename):
    """Return the SHA1 hex digest of the cached text of *filename*,
    caching it (or None when the file cannot be cached).  The hash
    object is memoized on the cache entry."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
    entry = file_cache[filename]
    if entry.sha1:
        return entry.sha1.hexdigest()
    digest = hashlib.sha1()
    for line in entry.lines['plain']:
        digest.update(line.encode('utf-8'))
    entry.sha1 = digest
    return digest.hexdigest()
def size(filename, use_cache_only=False):
    """Return the number of lines in filename. If *use_cache_only* is
    False, we'll try to fetch the file if it is not cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return len(file_cache[filename].lines['plain'])
def maxline(filename, use_cache_only=False):
    """Return the maximum line number of *filename* after taking line
    remapping into account; equal to size() when no remapping applies."""
    remap_line_entry = file2file_remap_lines.get(filename)
    if not remap_line_entry:
        return size(filename, use_cache_only)
    # Largest mapped ("to") line number, -1 when there are no pairs.
    max_lineno = max([-1] +
                     [pair[1] for pair in remap_line_entry.from_to_pairs])
    if max_lineno == -1:
        return size(filename, use_cache_only)
    return max_lineno
def stat(filename, use_cache_only=False):
    """Return the os.stat() info recorded for *filename*. If
    *use_cache_only* is *False*, try to cache the file first."""
    filename = pyc2py(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return file_cache[filename].stat
def trace_line_numbers(filename, reload_on_change=False):
    """Return an Array of breakpoints in filename.
    The list will contain an entry for each distinct line event call
    so it is possible (and possibly useful) for a line number appear more
    than once."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname: return None
    e = file_cache[filename]
    if not e.line_numbers:
        # Memoize on the cache entry.  Older coverage.py exposes
        # analyze_morf(); newer versions use coverage().analysis().
        if hasattr(coverage.coverage, 'analyze_morf'):
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            # Suppress coverage's "no data collected" warning.
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
            pass
        pass
    return e.line_numbers
def is_mapped_file(filename):
    """Classify *filename*: 'file' for a whole-file alias, 'file_line'
    for a line-remapped file, None when it is not mapped at all."""
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    """Resolve a file alias: return the path *filename* is remapped to,
    or *filename* itself when no alias is registered."""
    # FIXME: this is wrong?
    return file2file_remap.get(filename, filename)
def unmap_file_line(filename, line_number, reverse=False):
    """Translate (*filename*, *line_number*) through any registered
    line remapping; return a (filename, line_number) pair.  With
    *reverse* each (from, to) pair is applied in the opposite
    direction.  When *filename* has no remap entry the arguments are
    returned unchanged."""
    remap_line_entry = file2file_remap_lines.get(filename)
    mapped_line_number = line_number
    if remap_line_entry:
        filename = remap_line_entry.mapped_path
        cache_entry = file_cache.get(filename, None)
        if cache_entry:
            line_max = maxline(filename)
        else:
            # Target not cached: use an effectively unbounded sentinel.
            line_max = large_int
        last_t = (1, 1)
        # FIXME: use binary search
        # Note we assume from_line is increasing.
        # Add sentinel at end of from pairs to handle using the final
        # entry for line numbers greater than it.
        # Find the closest mapped line number equal or before line_number.
        for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
            if reverse:
                t = list(reversed(t))
            if t[1] == line_number:
                # Exact hit: take the paired line number.
                mapped_line_number = t[0]
                break
            elif t[1] > line_number:
                # Between pairs: interpolate from the previous pair.
                mapped_line_number = last_t[0] + (line_number - last_t[1] )
                break
            last_t = t
            pass
    return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
    """Update a cache entry. If something is wrong, return
    *None*. Return *True* if the cache was updated and *False* if not. If
    *use_linecache_lines* is *True*, use an existing cache entry as source
    for the lines of the file."""
    if not filename: return None
    orig_filename = filename
    filename = pyc2py(filename)
    # Drop any stale entry before rebuilding it.
    if filename in file_cache: del file_cache[filename]
    path = os.path.abspath(filename)
    stat = None
    if get_option('use_linecache_lines', opts):
        # Fast path: seed the entry from the stdlib linecache module.
        fname_list = [filename]
        mapped_path = file2file_remap.get(path)
        if mapped_path:
            fname_list.append(mapped_path)
        for filename in fname_list:
            try:
                stat = os.stat(filename)
                plain_lines = linecache.getlines(filename)
                trailing_nl = has_trailing_nl(plain_lines[-1])
                lines = {
                    'plain' : plain_lines,
                }
                file_cache[filename] = LineCacheInfo(stat, None, lines,
                                                     path, None)
            except:
                # Best-effort: an unreadable candidate is simply skipped.
                pass
            pass
            # Record alias mappings so later lookups resolve here.
            if orig_filename != filename:
                file2file_remap[orig_filename] = filename
                file2file_remap[os.path.abspath(orig_filename)] = filename
                pass
            file2file_remap[path] = filename
            return filename
        pass
        pass
    if os.path.exists(path):
        stat = os.stat(path)
    elif module_globals and '__loader__' in module_globals:
        # Try a PEP 302 loader (zip imports, frozen modules, ...).
        name = module_globals.get('__name__')
        loader = module_globals['__loader__']
        get_source = getattr(loader, 'get_source', None)
        if name and get_source:
            try:
                data = get_source(name)
            except (ImportError, IOError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return None
                # FIXME: DRY with code below
                lines = {'plain' : data.splitlines()}
                raw_string = ''.join(lines['plain'])
                trailing_nl = has_trailing_nl(raw_string)
                if 'style' in opts:
                    key = opts['style']
                    highlight_opts = {'style': key}
                else:
                    key = 'terminal'
                    highlight_opts = {}
                lines[key] = highlight_array(raw_string.split('\n'),
                                             trailing_nl, **highlight_opts)
                file_cache[filename] = \
                    LineCacheInfo(None, None, lines, filename, None)
                file2file_remap[path] = filename
                return True
            pass
        pass
    if not os.path.isabs(filename):
        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        stat = None
        for dirname in sys.path:
            path = os.path.join(dirname, filename)
            if os.path.exists(path):
                stat = os.stat(path)
                break
            pass
        if not stat: return False
        pass
    try:
        # 'rU' (universal newlines) is needed on Python 2 only.
        mode = 'r' if PYTHON3 else 'rU'
        with open(path, mode) as fp:
            lines = {'plain' : fp.readlines()}
            eols = fp.newlines
    except:
        return None
    # FIXME: DRY with code above
    raw_string = ''.join(lines['plain'])
    trailing_nl = has_trailing_nl(raw_string)
    if 'style' in opts:
        key = opts['style'] or 'default'
        highlight_opts = {'style': key}
    else:
        key = 'terminal'
        highlight_opts = {}
    lines[key] = highlight_array(raw_string.split('\n'),
                                 trailing_nl, **highlight_opts)
    if orig_filename != filename:
        file2file_remap[orig_filename] = filename
        file2file_remap[os.path.abspath(orig_filename)] = filename
        pass
    pass
    file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
    file2file_remap[path] = filename
    return True
# example usage
if __name__ == '__main__':
    def yes_no(var):
        # Return '' for truthy *var*, 'not ' otherwise (demo output helper).
        if var: return ""
        else: return "not "
        return # Not reached
    # print(getline(__file__, 1, {'output': 'dark'}))
    # print(getline(__file__, 2, {'output': 'light'}))
    # from pygments.styles import STYLE_MAP
    # opts = {'style': list(STYLE_MAP.keys())[0]}
    # print(getline(__file__, 1, opts))
    # update_cache('os')
    # lines = getlines(__file__)
    # print("%s has %s lines" % (__file__, len(lines['plain'])))
    # lines = getlines(__file__, {'output': 'light'})
    # i = 0
    # for line in lines:
    #     i += 1
    #     print(line.rstrip('\n').rstrip('\n'))
    #     if i > 20: break
    #     pass
    # line = getline(__file__, 6)
    # print("The 6th line is\n%s" % line)
    # line = remap_file(__file__, 'another_name')
    # print(getline('another_name', 7))
    # print("Files cached: %s" % cached_files())
    # update_cache(__file__)
    # checkcache(__file__)
    # print("%s has %s lines" % (__file__, size(__file__)))
    # print("%s trace line numbers:\n" % __file__)
    # print("%s " % repr(trace_line_numbers(__file__)))
    # print("%s is %scached." % (__file__,
    #                            yes_no(is_cached(__file__))))
    # print(stat(__file__))
    # print("Full path: %s" % path(__file__))
    # checkcache() # Check all files in the cache
    # clear_file_format_cache()
    # clear_file_cache()
    # print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
    # # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
    # # # if digest is not None: print digest.first[0]
    # line = getline(__file__, 7)
    # print("The 7th line is\n%s" % line)
    # Demo: map lines of this file onto alias 'test2', offset by 4 lines,
    # then read a line back through the alias.
    orig_path = __file__
    mapped_path = 'test2'
    start_line = 10
    start_mapped = 6
    remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
    for l in (1,):
        line = getline(mapped_path, l+start_mapped)
        print("Remapped %s line %d should be line %d of %s. line is:\n%s"
              % (mapped_path, start_mapped+l, start_line+l, orig_path, line))
    # print("XXX", file2file_remap_lines)
|
rocky/python-filecache
|
pyficache/main.py
|
checkcache
|
python
|
def checkcache(filename=None, opts=False):
if isinstance(opts, dict):
use_linecache_lines = opts['use_linecache_lines']
else:
use_linecache_lines = opts
pass
if not filename:
filenames = list(file_cache.keys())
elif filename in file_cache:
filenames = [filename]
else:
return None
result = []
for filename in filenames:
if filename not in file_cache: continue
path = file_cache[filename].path
if os.path.exists(path):
cache_info = file_cache[filename].stat
stat = os.stat(path)
if stat and \
(cache_info.st_size != stat.st_size or
cache_info.st_mtime != stat.st_mtime):
result.append(filename)
update_cache(filename, use_linecache_lines)
else:
result.append(filename)
update_cache(filename)
pass
pass
return result
|
Discard cache entries that are out of date. If *filename* is *None*
all entries in the file cache *file_cache* are checked. If we do not
have stat information about a file it will be kept. Return a list of
invalidated filenames. None is returned if a filename was given but
not found cached.
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L230-L267
| null |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
PYTHON3 = (sys.version_info >= (3, 0))
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
large_int = sys.maxsize
else:
large_int = sys.maxint
default_opts = {
'reload_on_change' : False, # Check if file has changed since last
# time
'use_linecache_lines' : True,
'strip_nl' : True, # Strip trailing \n on line returned
'output' : 'plain' # To we want plain output?
# Set to 'terminal'
# for terminal syntax-colored output
}
def get_option(key, options):
global default_opts
if not options or key not in options:
return default_opts.get(key)
else:
return options[key]
return None # Not reached
def has_trailing_nl(string):
return len(string) > 0 and '\n' == string[-1]
def pyc2py(filename):
"""
Find corresponding .py name given a .pyc or .pyo
"""
if re.match(".*py[co]$", filename):
if PYTHON3:
return re.sub(r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER,
'\\1\\2.py',
filename)
else:
return filename[:-1]
return filename
class LineCacheInfo:
def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
self.stat, self.lines, self.path, self.sha1 = (stat, lines, path, sha1)
self.line_numbers = line_numbers
self.eols = eols
return
pass
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the a full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is # the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number in as Python sees it. The second item is the
# line number in corresponding mapped_path. The the first entry of the
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But we not the other way
# around. That is we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work this.
def clear_file_cache(filename=None):
"""Clear the file cache. If no filename is given clear it entirely.
if a filename is given, clear just that filename."""
global file_cache, file2file_remap, file2file_remap_lines
if filename is not None:
if filename in file_cache:
del file_cache[filename]
pass
else:
file_cache = {}
file2file_remap = {}
file2file_remap_lines = {}
pass
return
def clear_file_format_cache():
    """Remove syntax-formatted lines in the cache. Use this
    when you change the Pygments syntax or Token formatting
    and want to redo how files may have previously been
    syntax marked."""
    for cache_info in file_cache.values():
        # Collect first: keys cannot be deleted while iterating the dict.
        stale = [fmt for fmt in cache_info.lines if fmt != 'plain']
        for fmt in stale:
            # Delete the key outright.  The old code set the value to
            # None but left the key present, so getlines()'s
            # "fmt not in lines" check passed and it returned None
            # instead of re-highlighting.
            del cache_info.lines[fmt]
def cached_files():
"""Return an array of cached file names"""
return list(file_cache.keys())
def cache_script(script, text, opts={}):
"""Cache script if it is not already cached."""
global script_cache
if script not in script_cache:
update_script_cache(script, text, opts)
pass
return script
def uncache_script(script, opts={}):
"""remove script from cache."""
global script_cache
if script in script_cache:
del script_cache[script]
return script
return None
def update_script_cache(script, text, opts={}):
"""Cache script if it is not already cached."""
global script_cache
if script not in script_cache:
script_cache[script] = text
return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
"""Cache filename if it is not already cached.
Return the expanded filename for it in the cache
or nil if we can not find the file."""
filename = pyc2py(filename)
if filename in file_cache:
if reload_on_change: checkcache(filename)
pass
else:
opts['use_linecache_lines'] = True
update_cache(filename, opts)
pass
if filename in file_cache:
return file_cache[filename].path
else: return None
return # Not reached
def is_cached(file_or_script):
"""Return True if file_or_script is cached"""
if isinstance(file_or_script, str):
return unmap_file(file_or_script) in file_cache
else:
return is_cached_script(file_or_script)
return
def is_cached_script(filename):
return unmap_file(filename) in list(script_cache.keys())
def is_empty(filename):
filename=unmap_file(filename)
return 0 == len(file_cache[filename].lines['plain'])
def getline(file_or_script, line_number, opts=default_opts):
"""Get line *line_number* from file named *file_or_script*. Return None if
there was a problem or it is not found.
Example:
lines = pyficache.getline("/tmp/myfile.py")
"""
filename = unmap_file(file_or_script)
filename, line_number = unmap_file_line(filename, line_number)
lines = getlines(filename, opts)
if lines and line_number >=1 and line_number <= maxline(filename):
line = lines[line_number-1]
if get_option('strip_nl', opts):
return line.rstrip('\n')
else:
return line
pass
else:
return None
return # Not reached
def getlines(filename, opts=default_opts):
"""Read lines of *filename* and cache the results. However, if
*filename* was previously cached use the results from the
cache. Return *None* if we can not get lines
"""
if get_option('reload_on_change', opts): checkcache(filename)
fmt = get_option('output', opts)
highlight_opts = {'bg': fmt}
cs = opts.get('style')
# Colorstyle of Terminal255Formatter takes precidence over
# light/dark colorthemes of TerminalFormatter
if cs:
highlight_opts['style'] = cs
fmt = cs
if filename not in file_cache:
update_cache(filename, opts)
filename = pyc2py(filename)
if filename not in file_cache: return None
pass
lines = file_cache[filename].lines
if fmt not in lines.keys():
lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
pass
return lines[fmt]
def highlight_array(array, trailing_nl=True,
bg='light', **options):
fmt_array = highlight_string(''.join(array),
bg, **options).split('\n')
lines = [ line + "\n" for line in fmt_array ]
if not trailing_nl: lines[-1] = lines[-1].rstrip('\n')
return lines
python_lexer = PythonLexer()
# TerminalFormatter uses a colorTHEME with light and dark pairs.
# But Terminal256Formatter uses a colorSTYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
global terminal_256_formatter
if options.get('style'):
if terminal_256_formatter.style != options['style']:
terminal_256_formatter = \
Terminal256Formatter(style=options['style'])
del options['style']
return highlight(string, python_lexer, terminal_256_formatter,
**options)
elif 'light' == bg:
return highlight(string, python_lexer, light_terminal_formatter,
**options)
else:
return highlight(string, python_lexer, dark_terminal_formatter,
**options)
pass
def path(filename):
    """Return the full path stored in the cache for *filename*, or None
    if it is not cached."""
    filename = unmap_file(filename)
    entry = file_cache.get(filename)
    if entry is None:
        return None
    return entry.path
def remap_file(from_file, to_file):
    """Record *to_file* as an alias that resolves to *from_file*."""
    file2file_remap[to_file] = from_file
    return
def remap_file_lines(from_path, to_path, line_map_list):
"""Adds line_map list to the list of association of from_file to
to to_file"""
from_path = pyc2py(from_path)
cache_file(to_path)
remap_entry = file2file_remap_lines.get(to_path)
if remap_entry:
new_list = list(remap_entry.from_to_pairs) + list(line_map_list)
else:
new_list = line_map_list
# FIXME: look for duplicates ?
file2file_remap_lines[to_path] = RemapLineEntry(
from_path,
tuple(sorted(new_list, key=lambda t: t[0]))
)
return
def remove_remap_file(filename):
    """Delete any remap alias for *filename*, returning the name it
    mapped to, or None if there was none."""
    global file2file_remap
    return file2file_remap.pop(filename, None)
def sha1(filename):
    """Return the SHA1 hex digest of the cached 'plain' lines of
    *filename*, caching the file (and memoizing the digest) as needed.
    Return None if the file can not be cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
    entry = file_cache[filename]
    if entry.sha1:
        return entry.sha1.hexdigest()
    digest = hashlib.sha1()
    for line in entry.lines['plain']:
        digest.update(line.encode('utf-8'))
    entry.sha1 = digest
    return digest.hexdigest()
def size(filename, use_cache_only=False):
    """Return the number of 'plain' lines cached for *filename*.
    Unless *use_cache_only* is set, try to cache the file first.
    Return None if the file can not be cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return len(file_cache[filename].lines['plain'])
def maxline(filename, use_cache_only=False):
    """Return the largest line number of *filename* after taking line
    remapping into account; identical to size() when no remapping is
    registered."""
    remap_line_entry = file2file_remap_lines.get(filename)
    if not remap_line_entry:
        return size(filename, use_cache_only)
    highest = max((pair[1] for pair in remap_line_entry.from_to_pairs),
                  default=-1)
    if highest == -1:
        return size(filename, use_cache_only)
    return highest
def stat(filename, use_cache_only=False):
    """Return the cached os.stat() info for *filename*, caching the
    file first unless *use_cache_only* is set. Return None if the file
    can not be cached."""
    filename = pyc2py(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return file_cache[filename].stat
def trace_line_numbers(filename, reload_on_change=False):
"""Return an Array of breakpoints in filename.
The list will contain an entry for each distinct line event call
so it is possible (and possibly useful) for a line number appear more
than once."""
fullname = cache_file(filename, reload_on_change)
if not fullname: return None
e = file_cache[filename]
if not e.line_numbers:
if hasattr(coverage.coverage, 'analyze_morf'):
e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
else:
cov = coverage.coverage()
cov._warn_no_data = False
e.line_numbers = cov.analysis(fullname)[1]
pass
pass
return e.line_numbers
def is_mapped_file(filename):
    """Classify *filename*: 'file' for a plain remap, 'file_line' for a
    line-level remap, None when it is not mapped at all."""
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    """Resolve *filename* through the file remap table; unmapped names
    come back unchanged."""
    # FIXME: this is wrong?
    if filename in file2file_remap:
        return file2file_remap[filename]
    return filename
def unmap_file_line(filename, line_number, reverse=False):
remap_line_entry = file2file_remap_lines.get(filename)
mapped_line_number = line_number
if remap_line_entry:
filename = remap_line_entry.mapped_path
cache_entry = file_cache.get(filename, None)
if cache_entry:
line_max = maxline(filename)
else:
line_max = large_int
last_t = (1, 1)
# FIXME: use binary search
# Note we assume assume from_line is increasing.
# Add sentinel at end of from pairs to handle using the final
# entry for line numbers greater than it.
# Find the closest mapped line number equal or before line_number.
for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
if reverse:
t = list(reversed(t))
if t[1] == line_number:
mapped_line_number = t[0]
break
elif t[1] > line_number:
mapped_line_number = last_t[0] + (line_number - last_t[1] )
break
last_t = t
pass
return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
"""Update a cache entry. If something is wrong, return
*None*. Return *True* if the cache was updated and *False* if not. If
*use_linecache_lines* is *True*, use an existing cache entry as source
for the lines of the file."""
if not filename: return None
orig_filename = filename
filename = pyc2py(filename)
if filename in file_cache: del file_cache[filename]
path = os.path.abspath(filename)
stat = None
if get_option('use_linecache_lines', opts):
fname_list = [filename]
mapped_path = file2file_remap.get(path)
if mapped_path:
fname_list.append(mapped_path)
for filename in fname_list:
try:
stat = os.stat(filename)
plain_lines = linecache.getlines(filename)
trailing_nl = has_trailing_nl(plain_lines[-1])
lines = {
'plain' : plain_lines,
}
file_cache[filename] = LineCacheInfo(stat, None, lines,
path, None)
except:
pass
pass
if orig_filename != filename:
file2file_remap[orig_filename] = filename
file2file_remap[os.path.abspath(orig_filename)] = filename
pass
file2file_remap[path] = filename
return filename
pass
pass
if os.path.exists(path):
stat = os.stat(path)
elif module_globals and '__loader__' in module_globals:
name = module_globals.get('__name__')
loader = module_globals['__loader__']
get_source = getattr(loader, 'get_source', None)
if name and get_source:
try:
data = get_source(name)
except (ImportError, IOError):
pass
else:
if data is None:
# No luck, the PEP302 loader cannot find the source
# for this module.
return None
# FIXME: DRY with code below
lines = {'plain' : data.splitlines()}
raw_string = ''.join(lines['plain'])
trailing_nl = has_trailing_nl(raw_string)
if 'style' in opts:
key = opts['style']
highlight_opts = {'style': key}
else:
key = 'terminal'
highlight_opts = {}
lines[key] = highlight_array(raw_string.split('\n'),
trailing_nl, **highlight_opts)
file_cache[filename] = \
LineCacheInfo(None, None, lines, filename, None)
file2file_remap[path] = filename
return True
pass
pass
if not os.path.isabs(filename):
# Try looking through the module search path, which is only useful
# when handling a relative filename.
stat = None
for dirname in sys.path:
path = os.path.join(dirname, filename)
if os.path.exists(path):
stat = os.stat(path)
break
pass
if not stat: return False
pass
try:
mode = 'r' if PYTHON3 else 'rU'
with open(path, mode) as fp:
lines = {'plain' : fp.readlines()}
eols = fp.newlines
except:
return None
# FIXME: DRY with code above
raw_string = ''.join(lines['plain'])
trailing_nl = has_trailing_nl(raw_string)
if 'style' in opts:
key = opts['style'] or 'default'
highlight_opts = {'style': key}
else:
key = 'terminal'
highlight_opts = {}
lines[key] = highlight_array(raw_string.split('\n'),
trailing_nl, **highlight_opts)
if orig_filename != filename:
file2file_remap[orig_filename] = filename
file2file_remap[os.path.abspath(orig_filename)] = filename
pass
pass
file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
file2file_remap[path] = filename
return True
# example usage
if __name__ == '__main__':
def yes_no(var):
if var: return ""
else: return "not "
return # Not reached
# print(getline(__file__, 1, {'output': 'dark'}))
# print(getline(__file__, 2, {'output': 'light'}))
# from pygments.styles import STYLE_MAP
# opts = {'style': list(STYLE_MAP.keys())[0]}
# print(getline(__file__, 1, opts))
# update_cache('os')
# lines = getlines(__file__)
# print("%s has %s lines" % (__file__, len(lines['plain'])))
# lines = getlines(__file__, {'output': 'light'})
# i = 0
# for line in lines:
# i += 1
# print(line.rstrip('\n').rstrip('\n'))
# if i > 20: break
# pass
# line = getline(__file__, 6)
# print("The 6th line is\n%s" % line)
# line = remap_file(__file__, 'another_name')
# print(getline('another_name', 7))
# print("Files cached: %s" % cached_files())
# update_cache(__file__)
# checkcache(__file__)
# print("%s has %s lines" % (__file__, size(__file__)))
# print("%s trace line numbers:\n" % __file__)
# print("%s " % repr(trace_line_numbers(__file__)))
# print("%s is %scached." % (__file__,
# yes_no(is_cached(__file__))))
# print(stat(__file__))
# print("Full path: %s" % path(__file__))
# checkcache() # Check all files in the cache
# clear_file_format_cache()
# clear_file_cache()
# print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
# # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
# # # if digest is not None: print digest.first[0]
# line = getline(__file__, 7)
# print("The 7th line is\n%s" % line)
orig_path = __file__
mapped_path = 'test2'
start_line = 10
start_mapped = 6
remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
for l in (1,):
line = getline(mapped_path, l+start_mapped)
print("Remapped %s line %d should be line %d of %s. line is:\n%s"
% (mapped_path, start_mapped+l, start_line+l, orig_path, line))
# print("XXX", file2file_remap_lines)
|
rocky/python-filecache
|
pyficache/main.py
|
cache_script
|
python
|
def cache_script(script, text, opts={}):
global script_cache
if script not in script_cache:
update_script_cache(script, text, opts)
pass
return script
|
Cache script if it is not already cached.
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L269-L275
|
[
"def update_script_cache(script, text, opts={}):\n \"\"\"Cache script if it is not already cached.\"\"\"\n global script_cache\n if script not in script_cache:\n script_cache[script] = text\n return script\n"
] |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
PYTHON3 = (sys.version_info >= (3, 0))
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
large_int = sys.maxsize
else:
large_int = sys.maxint
default_opts = {
'reload_on_change' : False, # Check if file has changed since last
# time
'use_linecache_lines' : True,
'strip_nl' : True, # Strip trailing \n on line returned
'output' : 'plain' # To we want plain output?
# Set to 'terminal'
# for terminal syntax-colored output
}
def get_option(key, options):
    """Return the value for *key* in *options*, falling back to the
    module-wide *default_opts* when *options* is not a dict or does not
    contain the key.

    checkcache() historically passed a bare bool as *options*, which made
    ``key not in options`` raise TypeError; guard with isinstance instead
    of truthiness so non-dict values fall back to the defaults.
    """
    if not isinstance(options, dict) or key not in options:
        return default_opts.get(key)
    return options[key]
def has_trailing_nl(string):
    """Return True if *string* is non-empty and ends with a newline."""
    if not string:
        return False
    return string.endswith('\n')
def pyc2py(filename):
    """Return the .py source name corresponding to a .pyc/.pyo
    *filename*; any other name is returned unchanged."""
    if not re.match(".*py[co]$", filename):
        return filename
    if PYTHON3:
        pattern = r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER
        return re.sub(pattern, '\\1\\2.py', filename)
    return filename[:-1]
class LineCacheInfo:
    """Record of one cached file: its stat info, coverage line numbers,
    per-format line arrays, full path, SHA1 digest object, and line
    endings."""

    def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
        self.stat = stat
        self.line_numbers = line_numbers
        self.lines = lines
        self.path = path
        self.sha1 = sha1
        self.eols = eols
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the a full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is # the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number in as Python sees it. The second item is the
# line number in corresponding mapped_path. The the first entry of the
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But we not the other way
# around. That is we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work this.
def clear_file_cache(filename=None):
    """Clear the file cache. With no *filename*, drop every entry and
    both remap tables; otherwise drop just that one cache entry."""
    global file_cache, file2file_remap, file2file_remap_lines
    if filename is None:
        file_cache = {}
        file2file_remap = {}
        file2file_remap_lines = {}
    elif filename in file_cache:
        del file_cache[filename]
    return
def clear_file_format_cache():
    """Drop syntax-highlighted renderings from every cache entry,
    keeping only the 'plain' lines. Use this after changing the
    Pygments style so files get re-highlighted on demand."""
    for fname, cache_info in file_cache.items():
        for fmt in cache_info.lines.keys():
            if fmt != 'plain':
                file_cache[fname].lines[fmt] = None
def cached_files():
    """Return a list of the names of all cached files."""
    return [name for name in file_cache]
def checkcache(filename=None, opts=False):
    """Discard cache entries that are out of date. If *filename* is *None*
    all entries in the file cache *file_cache* are checked. If we do not
    have stat information about a file it will be kept. Return a list of
    invalidated filenames. None is returned if a filename was given but
    not found cached."""
    if isinstance(opts, dict):
        # Tolerate opts dicts that omit the key instead of raising KeyError.
        use_linecache_lines = opts.get('use_linecache_lines', False)
    else:
        use_linecache_lines = opts

    if not filename:
        filenames = list(file_cache.keys())
    elif filename in file_cache:
        filenames = [filename]
    else:
        return None

    result = []
    for filename in filenames:
        if filename not in file_cache:
            continue
        path = file_cache[filename].path
        if os.path.exists(path):
            cache_info = file_cache[filename].stat
            stat = os.stat(path)
            # Entries without stat info (e.g. PEP 302-loaded sources) are
            # kept, as the docstring promises: we can not tell whether
            # they changed. The old code crashed on cache_info == None.
            if stat and cache_info and \
               (cache_info.st_size != stat.st_size or
                    cache_info.st_mtime != stat.st_mtime):
                result.append(filename)
                # update_cache() expects an opts mapping; the old code
                # passed the bare bool, which broke get_option().
                update_cache(filename,
                             {'use_linecache_lines': use_linecache_lines})
        else:
            # Underlying file vanished; drop and rebuild the entry.
            result.append(filename)
            update_cache(filename)
    return result
def uncache_script(script, opts={}):
    """Remove *script* from the script cache; return it if it was
    cached, else None."""
    global script_cache
    if script not in script_cache:
        return None
    del script_cache[script]
    return script
def update_script_cache(script, text, opts=None):
    """Cache the source *text* for *script* if it is not already cached.

    *opts* is accepted for interface symmetry with the other cache
    functions but is currently unused; its default was a shared mutable
    dict, replaced by None. Return *script*.
    """
    global script_cache
    if script not in script_cache:
        script_cache[script] = text
    return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
    """Cache *filename* if it is not already cached.

    Return the expanded filename for it in the cache or None if we can
    not find the file."""
    filename = pyc2py(filename)
    if filename in file_cache:
        if reload_on_change:
            checkcache(filename)
    else:
        # Copy before mutating: *opts* usually aliases the shared
        # module-level default_opts, which must not be changed in place.
        opts = dict(opts)
        opts['use_linecache_lines'] = True
        update_cache(filename, opts)
    if filename in file_cache:
        return file_cache[filename].path
    return None
def is_cached(file_or_script):
    """Return True if *file_or_script* is in the file or script cache."""
    if not isinstance(file_or_script, str):
        return is_cached_script(file_or_script)
    return unmap_file(file_or_script) in file_cache
def is_cached_script(filename):
    """Return True if *filename* (after remapping) is in the script
    cache."""
    # Membership test on the dict directly; no need to build list(keys()).
    return unmap_file(filename) in script_cache
def is_empty(filename):
    """Return True if the cached 'plain' lines of *filename* are empty.

    NOTE(review): assumes *filename* is already cached — raises KeyError
    otherwise; confirm callers guarantee this."""
    filename = unmap_file(filename)
    return not file_cache[filename].lines['plain']
def getline(file_or_script, line_number, opts=default_opts):
    """Return line *line_number* of *file_or_script*, or None when the
    file can not be read or the line is out of range.

    Example:
      line = pyficache.getline("/tmp/myfile.py", 6)
    """
    filename = unmap_file(file_or_script)
    filename, line_number = unmap_file_line(filename, line_number)
    lines = getlines(filename, opts)
    if not lines:
        return None
    if line_number < 1 or line_number > maxline(filename):
        return None
    line = lines[line_number - 1]
    if get_option('strip_nl', opts):
        line = line.rstrip('\n')
    return line
def getlines(filename, opts=default_opts):
    """Return the lines of *filename*, reading and caching them on
    first access. Return None if the lines can not be obtained."""
    if get_option('reload_on_change', opts):
        checkcache(filename)
    fmt = get_option('output', opts)
    highlight_opts = {'bg': fmt}
    style = opts.get('style')
    # A Terminal256Formatter color style takes precedence over the
    # light/dark color themes of TerminalFormatter.
    if style:
        highlight_opts['style'] = style
        fmt = style
    if filename not in file_cache:
        update_cache(filename, opts)
        filename = pyc2py(filename)
        if filename not in file_cache:
            return None
    lines = file_cache[filename].lines
    if fmt not in lines:
        lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
    return lines[fmt]
def highlight_array(array, trailing_nl=True, bg='light', **options):
    """Syntax-highlight *array* (a list of source lines) and return the
    highlighted lines, each newline-terminated; the last line loses its
    newline when *trailing_nl* is False."""
    highlighted = highlight_string(''.join(array), bg, **options)
    lines = ['%s\n' % chunk for chunk in highlighted.split('\n')]
    if not trailing_nl:
        lines[-1] = lines[-1].rstrip('\n')
    return lines
python_lexer = PythonLexer()
# TerminalFormatter uses a colorTHEME with light and dark pairs.
# But Terminal256Formatter uses a colorSTYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
    """Return *string* (Python source) syntax-highlighted via Pygments.

    A truthy options['style'] selects a Terminal256Formatter color
    style; otherwise *bg* ('light' or 'dark') picks a TerminalFormatter
    theme."""
    global terminal_256_formatter
    if options.get('style'):
        # Rebuild the module-wide 256-color formatter when the requested
        # style differs from the current one.
        # NOTE(review): .style on the formatter is a Pygments style
        # *class* while options['style'] is normally a string, so this
        # comparison may always be unequal and rebuild per call — confirm.
        if terminal_256_formatter.style != options['style']:
            terminal_256_formatter = \
                Terminal256Formatter(style=options['style'])
        # 'style' is consumed here; highlight() does not accept it.
        del options['style']
        return highlight(string, python_lexer, terminal_256_formatter,
                         **options)
    elif 'light' == bg:
        return highlight(string, python_lexer, light_terminal_formatter,
                         **options)
    else:
        return highlight(string, python_lexer, dark_terminal_formatter,
                         **options)
    pass
def path(filename):
    """Return the full path stored in the cache for *filename*, or None
    if it is not cached."""
    filename = unmap_file(filename)
    entry = file_cache.get(filename)
    if entry is None:
        return None
    return entry.path
def remap_file(from_file, to_file):
    """Record *to_file* as an alias that resolves to *from_file*."""
    file2file_remap[to_file] = from_file
    return
def remap_file_lines(from_path, to_path, line_map_list):
    """Add *line_map_list* — pairs of (from_line, to_line) — to the
    line associations that map *from_path* onto *to_path*."""
    from_path = pyc2py(from_path)
    cache_file(to_path)
    prior = file2file_remap_lines.get(to_path)
    pairs = list(line_map_list)
    if prior:
        pairs = list(prior.from_to_pairs) + pairs
    # FIXME: look for duplicates ?
    pairs.sort(key=lambda pair: pair[0])
    file2file_remap_lines[to_path] = RemapLineEntry(from_path,
                                                    tuple(pairs))
    return
def remove_remap_file(filename):
    """Delete any remap alias for *filename*, returning the name it
    mapped to, or None if there was none."""
    global file2file_remap
    return file2file_remap.pop(filename, None)
def sha1(filename):
    """Return the SHA1 hex digest of the cached 'plain' lines of
    *filename*, caching the file (and memoizing the digest) as needed.
    Return None if the file can not be cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
    entry = file_cache[filename]
    if entry.sha1:
        return entry.sha1.hexdigest()
    digest = hashlib.sha1()
    for line in entry.lines['plain']:
        digest.update(line.encode('utf-8'))
    entry.sha1 = digest
    return digest.hexdigest()
def size(filename, use_cache_only=False):
    """Return the number of 'plain' lines cached for *filename*.
    Unless *use_cache_only* is set, try to cache the file first.
    Return None if the file can not be cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return len(file_cache[filename].lines['plain'])
def maxline(filename, use_cache_only=False):
    """Return the largest line number of *filename* after taking line
    remapping into account; identical to size() when no remapping is
    registered."""
    remap_line_entry = file2file_remap_lines.get(filename)
    if not remap_line_entry:
        return size(filename, use_cache_only)
    highest = max((pair[1] for pair in remap_line_entry.from_to_pairs),
                  default=-1)
    if highest == -1:
        return size(filename, use_cache_only)
    return highest
def stat(filename, use_cache_only=False):
    """Return the cached os.stat() info for *filename*, caching the
    file first unless *use_cache_only* is set. Return None if the file
    can not be cached."""
    filename = pyc2py(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return file_cache[filename].stat
def trace_line_numbers(filename, reload_on_change=False):
    """Return an Array of breakpoints in filename.
    The list will contain an entry for each distinct line event call
    so it is possible (and possibly useful) for a line number appear more
    than once."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname: return None
    # NOTE(review): cache_file() keys the cache by the pyc2py()'d name;
    # indexing with the raw *filename* may KeyError for .pyc input — confirm.
    e = file_cache[filename]
    if not e.line_numbers:
        # Older coverage.py exposes analyze_morf on a global singleton;
        # newer versions use a Coverage instance's analysis().
        if hasattr(coverage.coverage, 'analyze_morf'):
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            # Suppress coverage's "no data collected" warning.
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
            pass
        pass
    return e.line_numbers
def is_mapped_file(filename):
    """Classify *filename*: 'file' for a plain remap, 'file_line' for a
    line-level remap, None when it is not mapped at all."""
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    """Resolve *filename* through the file remap table; unmapped names
    come back unchanged."""
    # FIXME: this is wrong?
    if filename in file2file_remap:
        return file2file_remap[filename]
    return filename
def unmap_file_line(filename, line_number, reverse=False):
    """Translate (*filename*, *line_number*) through any registered
    line-level remapping and return the possibly-remapped
    (filename, line_number) pair. With *reverse*, map in the opposite
    direction."""
    remap_line_entry = file2file_remap_lines.get(filename)
    mapped_line_number = line_number
    if remap_line_entry:
        filename = remap_line_entry.mapped_path
        cache_entry = file_cache.get(filename, None)
        if cache_entry:
            line_max = maxline(filename)
        else:
            # Not cached yet: no upper bound is known, use a huge sentinel.
            line_max = large_int
        last_t = (1, 1)
        # FIXME: use binary search
        # Note we assume assume from_line is increasing.
        # Add sentinel at end of from pairs to handle using the final
        # entry for line numbers greater than it.
        # Find the closest mapped line number equal or before line_number.
        for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
            if reverse:
                t = list(reversed(t))
            if t[1] == line_number:
                mapped_line_number = t[0]
                break
            elif t[1] > line_number:
                # Between two mapped pairs: extrapolate forward from the
                # last pair at or below line_number.
                mapped_line_number = last_t[0] + (line_number - last_t[1] )
                break
            last_t = t
            pass
    return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
    """Update a cache entry. If something is wrong, return
    *None*. Return *True* if the cache was updated and *False* if not. If
    *use_linecache_lines* is *True*, use an existing cache entry as source
    for the lines of the file."""
    if not filename: return None
    orig_filename = filename
    filename = pyc2py(filename)
    if filename in file_cache: del file_cache[filename]
    path = os.path.abspath(filename)
    stat = None
    if get_option('use_linecache_lines', opts):
        # Reuse whatever the stdlib linecache module can produce instead
        # of reading the file ourselves.
        fname_list = [filename]
        mapped_path = file2file_remap.get(path)
        if mapped_path:
            fname_list.append(mapped_path)
        for filename in fname_list:
            try:
                stat = os.stat(filename)
                plain_lines = linecache.getlines(filename)
                trailing_nl = has_trailing_nl(plain_lines[-1])
                lines = {
                    'plain' : plain_lines,
                }
                file_cache[filename] = LineCacheInfo(stat, None, lines,
                                                     path, None)
            except:
                pass
            pass
        # NOTE(review): *filename* here is the last name tried by the
        # loop above, whether or not caching it succeeded — confirm.
        if orig_filename != filename:
            file2file_remap[orig_filename] = filename
            file2file_remap[os.path.abspath(orig_filename)] = filename
            pass
        file2file_remap[path] = filename
        return filename
    if os.path.exists(path):
        stat = os.stat(path)
    elif module_globals and '__loader__' in module_globals:
        # Fall back to a PEP 302 loader's get_source() for sources that
        # are not on disk (zip imports, frozen modules, ...).
        name = module_globals.get('__name__')
        loader = module_globals['__loader__']
        get_source = getattr(loader, 'get_source', None)
        if name and get_source:
            try:
                data = get_source(name)
            except (ImportError, IOError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return None
                # FIXME: DRY with code below
                lines = {'plain' : data.splitlines()}
                raw_string = ''.join(lines['plain'])
                trailing_nl = has_trailing_nl(raw_string)
                if 'style' in opts:
                    key = opts['style']
                    highlight_opts = {'style': key}
                else:
                    key = 'terminal'
                    highlight_opts = {}
                lines[key] = highlight_array(raw_string.split('\n'),
                                             trailing_nl, **highlight_opts)
                file_cache[filename] = \
                    LineCacheInfo(None, None, lines, filename, None)
                file2file_remap[path] = filename
                return True
            pass
        pass
    if not os.path.isabs(filename):
        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        stat = None
        for dirname in sys.path:
            path = os.path.join(dirname, filename)
            if os.path.exists(path):
                stat = os.stat(path)
                break
            pass
        if not stat: return False
        pass
    try:
        mode = 'r' if PYTHON3 else 'rU'
        with open(path, mode) as fp:
            lines = {'plain' : fp.readlines()}
            eols = fp.newlines
    except:
        return None
    # FIXME: DRY with code above
    raw_string = ''.join(lines['plain'])
    trailing_nl = has_trailing_nl(raw_string)
    if 'style' in opts:
        key = opts['style'] or 'default'
        highlight_opts = {'style': key}
    else:
        key = 'terminal'
        highlight_opts = {}
    lines[key] = highlight_array(raw_string.split('\n'),
                                 trailing_nl, **highlight_opts)
    if orig_filename != filename:
        file2file_remap[orig_filename] = filename
        file2file_remap[os.path.abspath(orig_filename)] = filename
        pass
    pass
    file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
    file2file_remap[path] = filename
    return True
# example usage
if __name__ == '__main__':
    def yes_no(var):
        # Return "" when *var* is truthy and "not " otherwise, used to
        # build the demo status messages below.
        if var: return ""
        else: return "not "
        return # Not reached
# print(getline(__file__, 1, {'output': 'dark'}))
# print(getline(__file__, 2, {'output': 'light'}))
# from pygments.styles import STYLE_MAP
# opts = {'style': list(STYLE_MAP.keys())[0]}
# print(getline(__file__, 1, opts))
# update_cache('os')
# lines = getlines(__file__)
# print("%s has %s lines" % (__file__, len(lines['plain'])))
# lines = getlines(__file__, {'output': 'light'})
# i = 0
# for line in lines:
# i += 1
# print(line.rstrip('\n').rstrip('\n'))
# if i > 20: break
# pass
# line = getline(__file__, 6)
# print("The 6th line is\n%s" % line)
# line = remap_file(__file__, 'another_name')
# print(getline('another_name', 7))
# print("Files cached: %s" % cached_files())
# update_cache(__file__)
# checkcache(__file__)
# print("%s has %s lines" % (__file__, size(__file__)))
# print("%s trace line numbers:\n" % __file__)
# print("%s " % repr(trace_line_numbers(__file__)))
# print("%s is %scached." % (__file__,
# yes_no(is_cached(__file__))))
# print(stat(__file__))
# print("Full path: %s" % path(__file__))
# checkcache() # Check all files in the cache
# clear_file_format_cache()
# clear_file_cache()
# print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
# # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
# # # if digest is not None: print digest.first[0]
# line = getline(__file__, 7)
# print("The 7th line is\n%s" % line)
orig_path = __file__
mapped_path = 'test2'
start_line = 10
start_mapped = 6
remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
for l in (1,):
line = getline(mapped_path, l+start_mapped)
print("Remapped %s line %d should be line %d of %s. line is:\n%s"
% (mapped_path, start_mapped+l, start_line+l, orig_path, line))
# print("XXX", file2file_remap_lines)
|
rocky/python-filecache
|
pyficache/main.py
|
cache_file
|
python
|
def cache_file(filename, reload_on_change=False, opts=default_opts):
filename = pyc2py(filename)
if filename in file_cache:
if reload_on_change: checkcache(filename)
pass
else:
opts['use_linecache_lines'] = True
update_cache(filename, opts)
pass
if filename in file_cache:
return file_cache[filename].path
else: return None
return
|
Cache filename if it is not already cached.
Return the expanded filename for it in the cache
or nil if we can not find the file.
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L292-L307
|
[
"def checkcache(filename=None, opts=False):\n \"\"\"Discard cache entries that are out of date. If *filename* is *None*\n all entries in the file cache *file_cache* are checked. If we do not\n have stat information about a file it will be kept. Return a list of\n invalidated filenames. None is returned if a filename was given but\n not found cached.\"\"\"\n\n if isinstance(opts, dict):\n use_linecache_lines = opts['use_linecache_lines']\n else:\n use_linecache_lines = opts\n pass\n\n if not filename:\n filenames = list(file_cache.keys())\n elif filename in file_cache:\n filenames = [filename]\n else:\n return None\n\n result = []\n for filename in filenames:\n if filename not in file_cache: continue\n path = file_cache[filename].path\n if os.path.exists(path):\n cache_info = file_cache[filename].stat\n stat = os.stat(path)\n if stat and \\\n (cache_info.st_size != stat.st_size or\n cache_info.st_mtime != stat.st_mtime):\n result.append(filename)\n update_cache(filename, use_linecache_lines)\n else:\n result.append(filename)\n update_cache(filename)\n pass\n pass\n return result\n",
"def pyc2py(filename):\n \"\"\"\n Find corresponding .py name given a .pyc or .pyo\n \"\"\"\n if re.match(\".*py[co]$\", filename):\n if PYTHON3:\n return re.sub(r'(.*)__pycache__/(.+)\\.cpython-%s.py[co]$' % PYVER,\n '\\\\1\\\\2.py',\n filename)\n else:\n return filename[:-1]\n return filename\n",
"def update_cache(filename, opts=default_opts, module_globals=None):\n \"\"\"Update a cache entry. If something is wrong, return\n *None*. Return *True* if the cache was updated and *False* if not. If\n *use_linecache_lines* is *True*, use an existing cache entry as source\n for the lines of the file.\"\"\"\n\n if not filename: return None\n\n orig_filename = filename\n filename = pyc2py(filename)\n if filename in file_cache: del file_cache[filename]\n path = os.path.abspath(filename)\n stat = None\n if get_option('use_linecache_lines', opts):\n fname_list = [filename]\n mapped_path = file2file_remap.get(path)\n if mapped_path:\n fname_list.append(mapped_path)\n for filename in fname_list:\n try:\n stat = os.stat(filename)\n plain_lines = linecache.getlines(filename)\n trailing_nl = has_trailing_nl(plain_lines[-1])\n lines = {\n 'plain' : plain_lines,\n }\n file_cache[filename] = LineCacheInfo(stat, None, lines,\n path, None)\n except:\n pass\n pass\n if orig_filename != filename:\n file2file_remap[orig_filename] = filename\n file2file_remap[os.path.abspath(orig_filename)] = filename\n pass\n file2file_remap[path] = filename\n return filename\n pass\n pass\n\n if os.path.exists(path):\n stat = os.stat(path)\n elif module_globals and '__loader__' in module_globals:\n name = module_globals.get('__name__')\n loader = module_globals['__loader__']\n get_source = getattr(loader, 'get_source', None)\n if name and get_source:\n try:\n data = get_source(name)\n except (ImportError, IOError):\n pass\n else:\n if data is None:\n # No luck, the PEP302 loader cannot find the source\n # for this module.\n return None\n # FIXME: DRY with code below\n lines = {'plain' : data.splitlines()}\n raw_string = ''.join(lines['plain'])\n trailing_nl = has_trailing_nl(raw_string)\n if 'style' in opts:\n key = opts['style']\n highlight_opts = {'style': key}\n else:\n key = 'terminal'\n highlight_opts = {}\n\n lines[key] = highlight_array(raw_string.split('\\n'),\n trailing_nl, 
**highlight_opts)\n file_cache[filename] = \\\n LineCacheInfo(None, None, lines, filename, None)\n file2file_remap[path] = filename\n return True\n pass\n pass\n if not os.path.isabs(filename):\n # Try looking through the module search path, which is only useful\n # when handling a relative filename.\n stat = None\n for dirname in sys.path:\n path = os.path.join(dirname, filename)\n if os.path.exists(path):\n stat = os.stat(path)\n break\n pass\n if not stat: return False\n pass\n\n try:\n mode = 'r' if PYTHON3 else 'rU'\n with open(path, mode) as fp:\n lines = {'plain' : fp.readlines()}\n eols = fp.newlines\n except:\n return None\n\n # FIXME: DRY with code above\n raw_string = ''.join(lines['plain'])\n trailing_nl = has_trailing_nl(raw_string)\n if 'style' in opts:\n key = opts['style'] or 'default'\n highlight_opts = {'style': key}\n else:\n key = 'terminal'\n highlight_opts = {}\n\n lines[key] = highlight_array(raw_string.split('\\n'),\n trailing_nl, **highlight_opts)\n if orig_filename != filename:\n file2file_remap[orig_filename] = filename\n file2file_remap[os.path.abspath(orig_filename)] = filename\n pass\n pass\n\n file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)\n file2file_remap[path] = filename\n return True\n"
] |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
# True when running under Python 3; selects maxsize/maxint and file-open modes.
PYTHON3 = (sys.version_info >= (3, 0))
# Major/minor version tag, e.g. "36", as it appears in __pycache__ file names.
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
    large_int = sys.maxsize
else:
    large_int = sys.maxint
# Module-wide option defaults; most public functions accept an *opts*
# dict whose entries override these (see get_option()).
default_opts = {
    'reload_on_change'    : False,  # Check if file has changed since last
                                    # time
    'use_linecache_lines' : True,
    'strip_nl'            : True,   # Strip trailing \n on line returned
    'output'              : 'plain' # Do we want plain output?
                                    # Set to 'terminal'
                                    # for terminal syntax-colored output
}
def get_option(key, options):
    """Look up *key* in *options*, falling back to the module-wide
    default_opts when *options* is falsy or lacks the key."""
    global default_opts
    if options and key in options:
        return options[key]
    return default_opts.get(key)
def has_trailing_nl(string):
    """Return True when *string* is non-empty and ends with a newline."""
    return string.endswith('\n')
def pyc2py(filename):
    """Map a compiled-file name (.pyc/.pyo) back to its .py source name.

    Names that do not end in .pyc/.pyo are returned unchanged."""
    if not re.match(".*py[co]$", filename):
        return filename
    if PYTHON3:
        # Strip the __pycache__ path component and the cpython version tag.
        pattern = r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER
        return re.sub(pattern, '\\1\\2.py', filename)
    # Python 2: the source is simply the name minus the trailing c/o.
    return filename[:-1]
class LineCacheInfo:
    """Record describing one cached file: its os.stat() info, trace line
    numbers, lines (plain plus formatted variants), full path, SHA1
    digest object, and end-of-line style."""
    def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
        self.stat = stat
        self.line_numbers = line_numbers
        self.lines = lines
        self.path = path
        self.sha1 = sha1
        self.eols = eols
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
# Cache of script texts keyed by script name; see cache_script().
script_cache = {}

# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}

# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to map to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers in the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}

# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is the name of the source path.
#   For example this might not be a Python file
#   per se, but the thing from which the Python was extracted.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
#   item is a line number as Python sees it. The second item is the
#   line number in the corresponding mapped_path. The first entry of the
#   pair should always increase from the previous value. The second entry
#   doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But not the other way
# around. That is we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work this.
def clear_file_cache(filename=None):
    """Drop cached data.  With a *filename*, forget just that file;
    with no argument, empty the whole cache and both remap tables."""
    global file_cache, file2file_remap, file2file_remap_lines
    if filename is None:
        file_cache = {}
        file2file_remap = {}
        file2file_remap_lines = {}
    elif filename in file_cache:
        del file_cache[filename]
    return
def clear_file_format_cache():
    """Remove all syntax-formatted line variants from the cache,
    keeping only the 'plain' lines.  Use this after changing the
    Pygments style or token formatting so files get re-highlighted on
    their next access.

    Fix: formatted entries are now deleted outright.  They used to be
    set to None, which left the format key present, so getlines() would
    hand back None instead of re-highlighting.
    """
    for cache_info in file_cache.values():
        # Snapshot the keys: the dict must not change size mid-iteration.
        for fmt in list(cache_info.lines.keys()):
            if fmt != 'plain':
                del cache_info.lines[fmt]
def cached_files():
    """Return a list of the file names currently cached."""
    return [name for name in file_cache.keys()]
def checkcache(filename=None, opts=False):
    """Discard cache entries that are out of date.

    With *filename* None, every entry in *file_cache* is examined;
    otherwise only that file is.  Entries for which we have no stat
    information are kept.  Returns a list of invalidated filenames, or
    None when a filename was given but is not cached."""
    if isinstance(opts, dict):
        use_linecache_lines = opts['use_linecache_lines']
    else:
        use_linecache_lines = opts

    if not filename:
        to_check = list(file_cache.keys())
    elif filename in file_cache:
        to_check = [filename]
    else:
        return None

    stale = []
    for name in to_check:
        entry = file_cache.get(name)
        if entry is None:
            continue
        file_path = entry.path
        if os.path.exists(file_path):
            cached_stat = entry.stat
            current = os.stat(file_path)
            if current and (cached_stat.st_size != current.st_size or
                            cached_stat.st_mtime != current.st_mtime):
                stale.append(name)
                update_cache(name, use_linecache_lines)
        else:
            # File vanished: invalidate and try to rebuild the entry.
            stale.append(name)
            update_cache(name)
    return stale
def cache_script(script, text, opts={}):
    """Make sure *script* is in the script cache, storing *text* for it
    on first sight.  Returns *script*."""
    global script_cache
    if script not in script_cache:
        update_script_cache(script, text, opts)
    return script
def uncache_script(script, opts={}):
    """Drop *script* from the script cache.  Return *script* when an
    entry was removed, None when there was nothing to remove."""
    global script_cache
    if script in script_cache:
        del script_cache[script]
        return script
    return None
def update_script_cache(script, text, opts={}):
    """Store *text* as the cached contents of *script* unless an entry
    already exists; return *script*."""
    global script_cache
    if script not in script_cache:
        script_cache[script] = text
    return script
    # Not reached
def is_cached(file_or_script):
    """Return True if *file_or_script* is in the appropriate cache
    (file cache for strings, script cache otherwise)."""
    if isinstance(file_or_script, str):
        return unmap_file(file_or_script) in file_cache
    return is_cached_script(file_or_script)
def is_cached_script(filename):
    """Return True if *filename* (after alias unmapping) is in the
    script cache."""
    # Membership test against the dict directly; the old code
    # materialized list(script_cache.keys()) for no benefit.
    return unmap_file(filename) in script_cache
def is_empty(filename):
    """Return True when the cached plain text of *filename* has no
    lines.  The file must already be cached (KeyError otherwise)."""
    cached = file_cache[unmap_file(filename)]
    return not cached.lines['plain']
def getline(file_or_script, line_number, opts=default_opts):
    """Return line *line_number* of *file_or_script*, or None when
    there was a problem (file unknown, line out of range).

    Both whole-file aliases and line-level remapping are applied first.
    When the 'strip_nl' option is set, the trailing newline is removed
    from the returned line.

    Example:
        line = pyficache.getline("/tmp/myfile.py", 6)
    """
    filename = unmap_file(file_or_script)
    filename, line_number = unmap_file_line(filename, line_number)
    lines = getlines(filename, opts)
    if not lines or not (1 <= line_number <= maxline(filename)):
        return None
    line = lines[line_number - 1]
    return line.rstrip('\n') if get_option('strip_nl', opts) else line
def getlines(filename, opts=default_opts):
    """Read lines of *filename* and cache the results.  If *filename*
    was previously cached, the cached results are used.  Return *None*
    if we can not get lines.

    Relevant options:
      'reload_on_change' -- re-check the file's stat info first
      'output'           -- which formatted variant to return
      'style'            -- pygments style; takes precedence over 'output'
    """
    if get_option('reload_on_change', opts): checkcache(filename)
    fmt = get_option('output', opts)
    highlight_opts = {'bg': fmt}
    # NOTE(review): assumes *opts* is a dict here; a non-dict value
    # would raise on .get() -- confirm against callers.
    cs = opts.get('style')
    # A color *style* (Terminal256Formatter) takes precedence over the
    # light/dark color *theme* of TerminalFormatter.
    if cs:
        highlight_opts['style'] = cs
        fmt = cs
    if filename not in file_cache:
        update_cache(filename, opts)
        filename = pyc2py(filename)
        if filename not in file_cache: return None
        pass
    lines = file_cache[filename].lines
    # Highlight lazily: only when this format variant is not cached yet.
    if fmt not in lines.keys():
        lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
        pass
    return lines[fmt]
def highlight_array(array, trailing_nl=True,
                    bg='light', **options):
    """Syntax-highlight a list of source lines; return a list of lines,
    each newline-terminated except possibly the last (per *trailing_nl*)."""
    highlighted = highlight_string(''.join(array), bg, **options)
    result = [text + "\n" for text in highlighted.split('\n')]
    if not trailing_nl:
        result[-1] = result[-1].rstrip('\n')
    return result
# A single lexer instance shared by all highlighting calls.
python_lexer = PythonLexer()
# TerminalFormatter uses a color THEME with light and dark pairs,
# while Terminal256Formatter uses a color STYLE.
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
# Rebuilt on demand by highlight_string() when a specific style is requested.
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
    """Return *string* syntax-highlighted for a terminal.

    When options['style'] is given, a Terminal256Formatter with that
    pygments style is used (cached in the module-level
    terminal_256_formatter); otherwise a light or dark
    TerminalFormatter is chosen according to *bg*.
    """
    global terminal_256_formatter
    if options.get('style'):
        # Rebuild the 256-color formatter when the requested style
        # differs from the cached one.
        # NOTE(review): the formatter's .style attribute may be a style
        # *class* while options['style'] is a name string, in which case
        # this comparison never matches and the formatter is rebuilt on
        # every call -- confirm; harmless but wasteful if so.
        if terminal_256_formatter.style != options['style']:
            terminal_256_formatter = \
                Terminal256Formatter(style=options['style'])
        # 'style' is not an argument highlight() understands, so drop it
        # before passing the remaining options through.  Note this
        # mutates the caller's dict.
        del options['style']
        return highlight(string, python_lexer, terminal_256_formatter,
                         **options)
    elif 'light' == bg:
        return highlight(string, python_lexer, light_terminal_formatter,
                         **options)
    else:
        return highlight(string, python_lexer, dark_terminal_formatter,
                         **options)
    pass
def path(filename):
    """Return the full path recorded in the cache for *filename*, or
    None when the file is not cached."""
    key = unmap_file(filename)
    entry = file_cache.get(key)
    return None if entry is None else entry.path
def remap_file(from_file, to_file):
    """Install *to_file* as an alias: subsequent lookups of *to_file*
    resolve to *from_file*."""
    file2file_remap[to_file] = from_file
def remap_file_lines(from_path, to_path, line_map_list):
    """Add *line_map_list* -- (from_line, to_line) pairs -- to the line
    associations from *from_path* to *to_path*.

    *from_path* is normalized from .pyc/.pyo to .py and *to_path* is
    pulled into the file cache.  New pairs are merged with any existing
    RemapLineEntry for *to_path* and kept sorted by from-line.
    """
    from_path = pyc2py(from_path)
    cache_file(to_path)
    remap_entry = file2file_remap_lines.get(to_path)
    if remap_entry:
        new_list = list(remap_entry.from_to_pairs) + list(line_map_list)
    else:
        new_list = line_map_list
    # FIXME: look for duplicates ?
    file2file_remap_lines[to_path] = RemapLineEntry(
        from_path,
        tuple(sorted(new_list, key=lambda t: t[0]))
    )
    return
def remove_remap_file(filename):
    """Delete any alias mapping recorded for *filename*; return the
    value it mapped to, or None when there was no mapping."""
    global file2file_remap
    return file2file_remap.pop(filename, None)
def sha1(filename):
    """Return the SHA1 hex digest of the cached plain lines of
    *filename*, memoizing the digest object on the cache entry.
    None when the file cannot be cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
    entry = file_cache[filename]
    if entry.sha1:
        return entry.sha1.hexdigest()
    digest = hashlib.sha1()
    for line in entry.lines['plain']:
        digest.update(line.encode('utf-8'))
    entry.sha1 = digest
    return digest.hexdigest()
def size(filename, use_cache_only=False):
    """Return the number of plain lines of *filename*.  Unless
    *use_cache_only* is set, try to load the file when it is not
    already cached.  None when the file cannot be found."""
    filename = unmap_file(filename)
    if filename not in file_cache and not use_cache_only:
        cache_file(filename)
    if filename not in file_cache:
        return None
    return len(file_cache[filename].lines['plain'])
def maxline(filename, use_cache_only=False):
    """Return the largest usable line number of *filename*, taking any
    line-level remapping into account.  Without remapping this is just
    size()."""
    remap_line_entry = file2file_remap_lines.get(filename)
    if not remap_line_entry:
        return size(filename, use_cache_only)
    mapped_max = max((pair[1] for pair in remap_line_entry.from_to_pairs),
                     default=-1)
    if mapped_max == -1:
        return size(filename, use_cache_only)
    return mapped_max
def stat(filename, use_cache_only=False):
    """Return the cached os.stat() info for *filename*.  Unless
    *use_cache_only* is set, attempt to load the file first.  None when
    the file is unknown."""
    filename = pyc2py(filename)
    if filename not in file_cache and not use_cache_only:
        cache_file(filename)
    if filename not in file_cache:
        return None
    return file_cache[filename].stat
def trace_line_numbers(filename, reload_on_change=False):
    """Return a list of line numbers of *filename* that can generate
    'line' trace events (i.e. where breakpoints can be set).

    The list will contain an entry for each distinct line event call
    so it is possible (and possibly useful) for a line number to appear
    more than once.  Returns None when the file cannot be cached; the
    result is computed via the coverage package and memoized on the
    cache entry."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname: return None
    e = file_cache[filename]
    if not e.line_numbers:
        # Old coverage releases expose analyze_morf on a module-level
        # singleton; newer ones require a coverage() object.
        if hasattr(coverage.coverage, 'analyze_morf'):
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            # Silence the "no data collected" warning; we only want the
            # static analysis, not collected run data.
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
            pass
        pass
    return e.line_numbers
def is_mapped_file(filename):
    """Classify *filename*: 'file' for a whole-file alias, 'file_line'
    for a line-level remap, None for neither."""
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    """Resolve a whole-file alias: return the path *filename* maps to,
    or *filename* itself when no alias is recorded."""
    # FIXME: this is wrong?
    return file2file_remap.get(filename, filename)
def unmap_file_line(filename, line_number, reverse=False):
    """Translate (*filename*, *line_number*) through any line-level
    remapping recorded in file2file_remap_lines.

    Returns the (possibly different) filename and corresponding line
    number.  With *reverse*, each mapping pair is read in the opposite
    direction."""
    remap_line_entry = file2file_remap_lines.get(filename)
    mapped_line_number = line_number
    if remap_line_entry:
        filename = remap_line_entry.mapped_path
        cache_entry = file_cache.get(filename, None)
        if cache_entry:
            line_max = maxline(filename)
        else:
            line_max = large_int
        last_t = (1, 1)
        # FIXME: use binary search
        # Note we assume from_line is increasing.
        # Add a sentinel at the end of the from pairs to handle using the
        # final entry for line numbers greater than it.
        # Find the closest mapped line number equal to or before line_number.
        for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
            if reverse:
                t = list(reversed(t))
            if t[1] == line_number:
                mapped_line_number = t[0]
                break
            elif t[1] > line_number:
                # Fell between two recorded pairs: extrapolate from the
                # previous pair by the line offset.
                mapped_line_number = last_t[0] + (line_number - last_t[1] )
                break
            last_t = t
            pass
    return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
    """Update a cache entry.  If something is wrong, return *None*.
    Return *True* if the cache was updated and *False* if not.  If the
    'use_linecache_lines' option is set, use the linecache module's
    lines as the source for the file (and return the file name).

    Sources are tried in order: the linecache module, the file on
    disk, a PEP 302 loader found in *module_globals*, and finally the
    directories on sys.path (for relative names)."""

    if not filename: return None

    orig_filename = filename
    filename = pyc2py(filename)
    # Any stale entry is discarded up front and rebuilt below.
    if filename in file_cache: del file_cache[filename]
    path = os.path.abspath(filename)
    stat = None
    if get_option('use_linecache_lines', opts):
        # Candidate names: the file itself plus any recorded remap of
        # its absolute path.
        fname_list = [filename]
        mapped_path = file2file_remap.get(path)
        if mapped_path:
            fname_list.append(mapped_path)
        for filename in fname_list:
            try:
                stat = os.stat(filename)
                plain_lines = linecache.getlines(filename)
                trailing_nl = has_trailing_nl(plain_lines[-1])
                lines = {
                    'plain' : plain_lines,
                }
                file_cache[filename] = LineCacheInfo(stat, None, lines,
                                                     path, None)
            except:
                # Best effort: fall through and record remaps anyway.
                pass
            pass
            # NOTE(review): this returns inside the loop, so only the
            # first candidate name is ever tried -- confirm intended.
            if orig_filename != filename:
                file2file_remap[orig_filename] = filename
                file2file_remap[os.path.abspath(orig_filename)] = filename
                pass
            file2file_remap[path] = filename
            return filename
        pass
    pass

    if os.path.exists(path):
        stat = os.stat(path)
    elif module_globals and '__loader__' in module_globals:
        # Try a PEP 302 loader (zip imports, frozen modules, ...).
        name = module_globals.get('__name__')
        loader = module_globals['__loader__']
        get_source = getattr(loader, 'get_source', None)
        if name and get_source:
            try:
                data = get_source(name)
            except (ImportError, IOError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return None
                # FIXME: DRY with code below
                lines = {'plain' : data.splitlines()}
                raw_string = ''.join(lines['plain'])
                trailing_nl = has_trailing_nl(raw_string)
                if 'style' in opts:
                    key = opts['style']
                    highlight_opts = {'style': key}
                else:
                    key = 'terminal'
                    highlight_opts = {}

                lines[key] = highlight_array(raw_string.split('\n'),
                                             trailing_nl,
                                             **highlight_opts)
                # No stat info is available for loader-provided source.
                file_cache[filename] = \
                    LineCacheInfo(None, None, lines, filename, None)
                file2file_remap[path] = filename
                return True
            pass
        pass
    if not os.path.isabs(filename):
        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        stat = None
        for dirname in sys.path:
            path = os.path.join(dirname, filename)
            if os.path.exists(path):
                stat = os.stat(path)
                break
            pass
        if not stat: return False
        pass

    try:
        # 'rU' keeps universal-newline translation on Python 2.
        mode = 'r' if PYTHON3 else 'rU'
        with open(path, mode) as fp:
            lines = {'plain' : fp.readlines()}
            eols = fp.newlines
    except:
        return None

    # FIXME: DRY with code above
    raw_string = ''.join(lines['plain'])
    trailing_nl = has_trailing_nl(raw_string)
    if 'style' in opts:
        key = opts['style'] or 'default'
        highlight_opts = {'style': key}
    else:
        key = 'terminal'
        highlight_opts = {}

    lines[key] = highlight_array(raw_string.split('\n'),
                                 trailing_nl, **highlight_opts)
    if orig_filename != filename:
        file2file_remap[orig_filename] = filename
        file2file_remap[os.path.abspath(orig_filename)] = filename
        pass
    pass

    file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
    file2file_remap[path] = filename
    return True
# example usage / smoke test: run this module directly to exercise the
# line-remapping machinery (everything else is kept as commented-out demos).
if __name__ == '__main__':
    def yes_no(var):
        """Return '' when *var* is truthy, 'not ' otherwise; used to
        phrase 'is (not) cached' messages in the demos below."""
        if var: return ""
        else: return "not "
        return # Not reached
    # print(getline(__file__, 1, {'output': 'dark'}))
    # print(getline(__file__, 2, {'output': 'light'}))
    # from pygments.styles import STYLE_MAP
    # opts = {'style': list(STYLE_MAP.keys())[0]}
    # print(getline(__file__, 1, opts))
    # update_cache('os')
    # lines = getlines(__file__)
    # print("%s has %s lines" % (__file__, len(lines['plain'])))
    # lines = getlines(__file__, {'output': 'light'})
    # i = 0
    # for line in lines:
    #     i += 1
    #     print(line.rstrip('\n').rstrip('\n'))
    #     if i > 20: break
    #     pass
    # line = getline(__file__, 6)
    # print("The 6th line is\n%s" % line)
    # line = remap_file(__file__, 'another_name')
    # print(getline('another_name', 7))
    # print("Files cached: %s" % cached_files())
    # update_cache(__file__)
    # checkcache(__file__)
    # print("%s has %s lines" % (__file__, size(__file__)))
    # print("%s trace line numbers:\n" % __file__)
    # print("%s " % repr(trace_line_numbers(__file__)))
    # print("%s is %scached." % (__file__,
    #                            yes_no(is_cached(__file__))))
    # print(stat(__file__))
    # print("Full path: %s" % path(__file__))
    # checkcache() # Check all files in the cache
    # clear_file_format_cache()
    # clear_file_cache()
    # print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
    # # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
    # # # if digest is not None: print digest.first[0]
    # line = getline(__file__, 7)
    # print("The 7th line is\n%s" % line)
    # Demonstrate line remapping: declare that line 10 of this file
    # corresponds to line 6 of a (fictional) file named 'test2'.
    orig_path = __file__
    mapped_path = 'test2'
    start_line = 10
    start_mapped = 6
    remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
    for l in (1,):
        line = getline(mapped_path, l+start_mapped)
        print("Remapped %s line %d should be line %d of %s. line is:\n%s"
              % (mapped_path, start_mapped+l, start_line+l, orig_path, line))
    # print("XXX", file2file_remap_lines)
|
rocky/python-filecache
|
pyficache/main.py
|
is_cached
|
python
|
def is_cached(file_or_script):
if isinstance(file_or_script, str):
return unmap_file(file_or_script) in file_cache
else:
return is_cached_script(file_or_script)
return
|
Return True if file_or_script is cached
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L309-L315
|
[
"def is_cached_script(filename):\n return unmap_file(filename) in list(script_cache.keys())\n",
"def unmap_file(filename):\n # FIXME: this is wrong?\n return file2file_remap.get(filename, filename)\n"
] |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
PYTHON3 = (sys.version_info >= (3, 0))
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
large_int = sys.maxsize
else:
large_int = sys.maxint
default_opts = {
'reload_on_change' : False, # Check if file has changed since last
# time
'use_linecache_lines' : True,
'strip_nl' : True, # Strip trailing \n on line returned
'output' : 'plain' # To we want plain output?
# Set to 'terminal'
# for terminal syntax-colored output
}
def get_option(key, options):
    """Look up *key* in *options*, falling back to the module-wide
    default_opts when *options* is falsy or lacks the key."""
    global default_opts
    if options and key in options:
        return options[key]
    return default_opts.get(key)
def has_trailing_nl(string):
    """Return True when *string* is non-empty and ends with a newline."""
    return string.endswith('\n')
def pyc2py(filename):
    """Map a compiled-file name (.pyc/.pyo) back to its .py source name.

    Names that do not end in .pyc/.pyo are returned unchanged."""
    if not re.match(".*py[co]$", filename):
        return filename
    if PYTHON3:
        # Strip the __pycache__ path component and the cpython version tag.
        pattern = r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER
        return re.sub(pattern, '\\1\\2.py', filename)
    # Python 2: the source is simply the name minus the trailing c/o.
    return filename[:-1]
class LineCacheInfo:
    """Record describing one cached file: its os.stat() info, trace line
    numbers, lines (plain plus formatted variants), full path, SHA1
    digest object, and end-of-line style."""
    def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
        self.stat = stat
        self.line_numbers = line_numbers
        self.lines = lines
        self.path = path
        self.sha1 = sha1
        self.eols = eols
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers in the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is # the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number in as Python sees it. The second item is the
# line number in the corresponding mapped_path. The first entry of the
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But not the other way
# around. That is we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work this.
def clear_file_cache(filename=None):
    """Clear the file cache entirely, or just the entry for *filename*
    when one is given."""
    global file_cache, file2file_remap, file2file_remap_lines
    if filename is None:
        # Reset everything, including the path and line remap tables.
        file_cache = {}
        file2file_remap = {}
        file2file_remap_lines = {}
    elif filename in file_cache:
        del file_cache[filename]
    return
def clear_file_format_cache():
    """Remove syntax-formatted lines in the cache. Use this
    when you change the Pygments syntax or Token formatting
    and want to redo how files may have previously been
    syntax marked.
    Formatted entries are *deleted* rather than set to None:
    getlines() only tests key presence, so a leftover None entry
    would be handed back to callers instead of being regenerated.
    """
    for fname, cache_info in file_cache.items():
        # Snapshot the keys first; deleting while iterating a dict is an error.
        formats = [fmt for fmt in cache_info.lines.keys() if 'plain' != fmt]
        for fmt in formats:
            del cache_info.lines[fmt]
            pass
        pass
    return
def cached_files():
    """Return a list of the names of all cached files."""
    return [name for name in file_cache]
def checkcache(filename=None, opts=False):
    """Discard cache entries that are out of date. If *filename* is *None*
    all entries in the file cache *file_cache* are checked. If we do not
    have stat information about a file it will be kept. Return a list of
    invalidated filenames. None is returned if a filename was given but
    not found cached.
    *opts* may be a bool or an options dict supplying
    'use_linecache_lines'; that setting is forwarded to update_cache().
    """
    if isinstance(opts, dict):
        use_linecache_lines = opts['use_linecache_lines']
    else:
        use_linecache_lines = opts
    if not filename:
        filenames = list(file_cache.keys())
    elif filename in file_cache:
        filenames = [filename]
    else:
        return None
    result = []
    for filename in filenames:
        if filename not in file_cache:
            continue
        path = file_cache[filename].path
        if os.path.exists(path):
            cache_info = file_cache[filename].stat
            stat = os.stat(path)
            if stat and \
               (cache_info.st_size != stat.st_size or
                cache_info.st_mtime != stat.st_mtime):
                result.append(filename)
                # BUG FIX: update_cache() expects an options *dict*; the
                # bare boolean previously passed here made get_option()
                # raise "argument of type 'bool' is not iterable" when it
                # evaluated `key not in opts`.
                update_cache(filename,
                             {'use_linecache_lines': use_linecache_lines})
        else:
            # File vanished from disk; refresh the entry anyway.
            result.append(filename)
            update_cache(filename)
            pass
        pass
    return result
def cache_script(script, text, opts={}):
    """Add *script* with source *text* to the script cache unless it is
    already present; return *script*."""
    global script_cache
    if script not in script_cache:
        update_script_cache(script, text, opts)
    return script
def uncache_script(script, opts={}):
    """Remove *script* from the script cache; return it if it was
    cached, otherwise None."""
    global script_cache
    if script not in script_cache:
        return None
    del script_cache[script]
    return script
def update_script_cache(script, text, opts={}):
    """Record *text* as the cached source for *script*; an existing
    entry is left untouched. Return *script*."""
    global script_cache
    script_cache.setdefault(script, text)
    return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
    """Cache *filename* if it is not already cached.
    Return the expanded filename for it in the cache
    or None if we can not find the file.
    When *reload_on_change* is true, a stale entry for an
    already-cached file is refreshed first.
    """
    filename = pyc2py(filename)
    if filename in file_cache:
        if reload_on_change: checkcache(filename)
        pass
    else:
        # BUG FIX: copy before writing. *opts* defaults to the shared
        # module-level default_opts dict (and may be a caller's own dict);
        # assigning into it here used to permanently mutate it.
        opts = dict(opts)
        opts['use_linecache_lines'] = True
        update_cache(filename, opts)
        pass
    if filename in file_cache:
        return file_cache[filename].path
    return None
def is_cached_script(filename):
    """Return True if *filename* (after alias resolution) is in the
    script cache."""
    return unmap_file(filename) in script_cache
def is_empty(filename):
    """Return True when the cached plain-text for *filename* has no lines.
    The file must already be cached (KeyError otherwise)."""
    plain = file_cache[unmap_file(filename)].lines['plain']
    return len(plain) == 0
def getline(file_or_script, line_number, opts=default_opts):
    """Get line *line_number* from file named *file_or_script*. Return None if
    there was a problem or it is not found.
    Example:
    lines = pyficache.getline("/tmp/myfile.py")
    """
    # Resolve whole-file aliases, then any line-number remapping.
    filename = unmap_file(file_or_script)
    filename, line_number = unmap_file_line(filename, line_number)
    lines = getlines(filename, opts)
    # Guard clause: no lines, or the (remapped) number is out of range.
    if not lines or line_number < 1 or line_number > maxline(filename):
        return None
    line = lines[line_number - 1]
    if get_option('strip_nl', opts):
        line = line.rstrip('\n')
    return line
def getlines(filename, opts=default_opts):
    """Read lines of *filename* and cache the results. However, if
    *filename* was previously cached use the results from the
    cache. Return *None* if we can not get lines
    """
    if get_option('reload_on_change', opts): checkcache(filename)
    # The output format doubles as the key under which the (possibly
    # highlighted) lines are memoized on the cache entry.
    fmt = get_option('output', opts)
    highlight_opts = {'bg': fmt}
    cs = opts.get('style')
    # Color style of Terminal256Formatter takes precedence over
    # light/dark color themes of TerminalFormatter
    if cs:
        highlight_opts['style'] = cs
        fmt = cs
    if filename not in file_cache:
        update_cache(filename, opts)
        filename = pyc2py(filename)
        if filename not in file_cache: return None
        pass
    lines = file_cache[filename].lines
    if fmt not in lines.keys():
        # Highlight lazily, once per requested format.
        lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
        pass
    return lines[fmt]
def highlight_array(array, trailing_nl=True,
                    bg='light', **options):
    """Syntax-highlight *array* (a list of source lines) and return the
    result as a list of newline-terminated lines. When *trailing_nl* is
    false the last line keeps no trailing newline."""
    highlighted = highlight_string(''.join(array), bg, **options)
    lines = [chunk + "\n" for chunk in highlighted.split('\n')]
    if not trailing_nl:
        lines[-1] = lines[-1].rstrip('\n')
    return lines
# One shared lexer instance: this package only highlights Python source.
python_lexer = PythonLexer()
# TerminalFormatter uses a colorTHEME with light and dark pairs.
# But Terminal256Formatter uses a colorSTYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
# Rebuilt by highlight_string() whenever a different Pygments style is requested.
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
    """Return *string* syntax-highlighted for terminal display.
    A 'style' option selects a Pygments 256-color style; otherwise *bg*
    chooses between the shared light and dark 16-color formatters."""
    global terminal_256_formatter
    if options.get('style'):
        # Rebuild the shared 256-color formatter only when the requested
        # style differs from the one it currently uses.
        if terminal_256_formatter.style != options['style']:
            terminal_256_formatter = \
                Terminal256Formatter(style=options['style'])
        # 'style' is not a keyword highlight() accepts; remove it.
        del options['style']
        return highlight(string, python_lexer, terminal_256_formatter,
                         **options)
    elif 'light' == bg:
        return highlight(string, python_lexer, light_terminal_formatter,
                         **options)
    else:
        return highlight(string, python_lexer, dark_terminal_formatter,
                         **options)
    pass
def path(filename):
    """Return the full path stored in the cache for *filename*, or None
    if the file is not cached."""
    entry = file_cache.get(unmap_file(filename))
    return None if entry is None else entry.path
def remap_file(from_file, to_file):
    """Register *to_file* as an alias that resolves to *from_file*."""
    file2file_remap[to_file] = from_file
def remap_file_lines(from_path, to_path, line_map_list):
    """Associate line numbers of *to_path* with those of *from_path*,
    merging *line_map_list* into any mapping already recorded."""
    from_path = pyc2py(from_path)
    cache_file(to_path)
    prior = file2file_remap_lines.get(to_path)
    pairs = list(line_map_list)
    if prior:
        pairs = list(prior.from_to_pairs) + pairs
    # FIXME: look for duplicates ?
    # Keep the pairs sorted by the "from" line number.
    file2file_remap_lines[to_path] = RemapLineEntry(
        from_path,
        tuple(sorted(pairs, key=lambda pair: pair[0]))
    )
    return
def remove_remap_file(filename):
    """Remove any mapping for *filename*, returning it if one existed,
    else None."""
    global file2file_remap
    return file2file_remap.pop(filename, None)
def sha1(filename):
    """Return the SHA1 hex digest of the cached contents of *filename*,
    memoizing the hash object on the cache entry. Return None when the
    file cannot be cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
    entry = file_cache[filename]
    if entry.sha1:
        return entry.sha1.hexdigest()
    digest = hashlib.sha1()
    for line in entry.lines['plain']:
        digest.update(line.encode('utf-8'))
    entry.sha1 = digest
    return digest.hexdigest()
def size(filename, use_cache_only=False):
    """Return the number of lines in *filename*. If *use_cache_only* is
    False, we'll try to fetch the file if it is not cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return len(file_cache[filename].lines['plain'])
def maxline(filename, use_cache_only=False):
    """Return the maximum line number of *filename* after taking line
    remapping into account. With no remapping this equals size()."""
    remap_line_entry = file2file_remap_lines.get(filename)
    if not remap_line_entry:
        return size(filename, use_cache_only)
    pairs = remap_line_entry.from_to_pairs
    if not pairs:
        # An entry with no recorded pairs falls back to the plain size.
        return size(filename, use_cache_only)
    return max(pair[1] for pair in pairs)
def stat(filename, use_cache_only=False):
    """Return the os.stat() info recorded for *filename*. If
    *use_cache_only* is False, we will try to fetch the file if it is
    not cached."""
    filename = pyc2py(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return file_cache[filename].stat
def trace_line_numbers(filename, reload_on_change=False):
    """Return an Array of breakpoints in filename.
    The list will contain an entry for each distinct line event call
    so it is possible (and possibly useful) for a line number appear more
    than once."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname: return None
    # NOTE(review): indexed by *filename*, but cache_file() may store the
    # entry under the pyc2py()-translated name -- confirm for .pyc inputs.
    e = file_cache[filename]
    if not e.line_numbers:
        # Compute lazily and memoize on the cache entry. Older coverage
        # releases expose analyze_morf(); newer ones provide analysis().
        if hasattr(coverage.coverage, 'analyze_morf'):
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
            pass
        pass
    return e.line_numbers
def is_mapped_file(filename):
    """Classify how *filename* is remapped: 'file' for a whole-file
    alias, 'file_line' for a line-number mapping, otherwise None."""
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    """Resolve a whole-file alias; unmapped names are returned as-is."""
    # FIXME: this is wrong?
    if filename in file2file_remap:
        return file2file_remap[filename]
    return filename
def unmap_file_line(filename, line_number, reverse=False):
    """Translate (*filename*, *line_number*) through any recorded
    line-number remapping, returning the (possibly new) pair. With
    *reverse* true the pairs are applied in the opposite direction."""
    remap_line_entry = file2file_remap_lines.get(filename)
    mapped_line_number = line_number
    if remap_line_entry:
        filename = remap_line_entry.mapped_path
        cache_entry = file_cache.get(filename, None)
        if cache_entry:
            line_max = maxline(filename)
        else:
            # Unknown size: use an effectively unbounded sentinel bound.
            line_max = large_int
        last_t = (1, 1)
        # FIXME: use binary search
        # Note we assume from_line is increasing.
        # Add sentinel at end of from pairs to handle using the final
        # entry for line numbers greater than it.
        # Find the closest mapped line number equal or before line_number.
        for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
            if reverse:
                t = list(reversed(t))
            if t[1] == line_number:
                # Exact hit on a recorded pair.
                mapped_line_number = t[0]
                break
            elif t[1] > line_number:
                # Went past it: extrapolate from the previous pair's offset.
                mapped_line_number = last_t[0] + (line_number - last_t[1] )
                break
            last_t = t
            pass
    return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
    """Update a cache entry. If something is wrong, return
    *None*. Return *True* if the cache was updated and *False* if not. If
    *use_linecache_lines* is *True*, use an existing cache entry as source
    for the lines of the file."""
    if not filename: return None
    orig_filename = filename
    filename = pyc2py(filename)
    if filename in file_cache: del file_cache[filename]
    path = os.path.abspath(filename)
    stat = None
    if get_option('use_linecache_lines', opts):
        # Fast path: borrow the lines already held by the stdlib
        # linecache module instead of re-reading the file ourselves.
        fname_list = [filename]
        mapped_path = file2file_remap.get(path)
        if mapped_path:
            fname_list.append(mapped_path)
        for filename in fname_list:
            try:
                stat = os.stat(filename)
                plain_lines = linecache.getlines(filename)
                # NOTE(review): raises (and is swallowed below) when
                # linecache returns no lines, since plain_lines[-1] fails.
                trailing_nl = has_trailing_nl(plain_lines[-1])
                lines = {
                    'plain' : plain_lines,
                }
                file_cache[filename] = LineCacheInfo(stat, None, lines,
                                                     path, None)
            except:
                pass
            pass
        if orig_filename != filename:
            # Record the original (e.g. .pyc) spelling as an alias.
            file2file_remap[orig_filename] = filename
            file2file_remap[os.path.abspath(orig_filename)] = filename
            pass
        file2file_remap[path] = filename
        return filename
        pass
    pass
    if os.path.exists(path):
        stat = os.stat(path)
    elif module_globals and '__loader__' in module_globals:
        # Fall back to a PEP 302 loader's get_source() for files that do
        # not exist on disk (zip imports, frozen modules, ...).
        name = module_globals.get('__name__')
        loader = module_globals['__loader__']
        get_source = getattr(loader, 'get_source', None)
        if name and get_source:
            try:
                data = get_source(name)
            except (ImportError, IOError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return None
                # FIXME: DRY with code below
                lines = {'plain' : data.splitlines()}
                raw_string = ''.join(lines['plain'])
                trailing_nl = has_trailing_nl(raw_string)
                if 'style' in opts:
                    key = opts['style']
                    highlight_opts = {'style': key}
                else:
                    key = 'terminal'
                    highlight_opts = {}
                lines[key] = highlight_array(raw_string.split('\n'),
                                             trailing_nl, **highlight_opts)
                file_cache[filename] = \
                    LineCacheInfo(None, None, lines, filename, None)
                file2file_remap[path] = filename
                return True
            pass
        pass
    if not os.path.isabs(filename):
        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        stat = None
        for dirname in sys.path:
            path = os.path.join(dirname, filename)
            if os.path.exists(path):
                stat = os.stat(path)
                break
            pass
        if not stat: return False
        pass
    try:
        # 'U' (universal newlines) mode was removed in Python 3.
        mode = 'r' if PYTHON3 else 'rU'
        with open(path, mode) as fp:
            lines = {'plain' : fp.readlines()}
            eols = fp.newlines
    except:
        return None
    # FIXME: DRY with code above
    raw_string = ''.join(lines['plain'])
    trailing_nl = has_trailing_nl(raw_string)
    if 'style' in opts:
        key = opts['style'] or 'default'
        highlight_opts = {'style': key}
    else:
        key = 'terminal'
        highlight_opts = {}
    lines[key] = highlight_array(raw_string.split('\n'),
                                 trailing_nl, **highlight_opts)
    if orig_filename != filename:
        file2file_remap[orig_filename] = filename
        file2file_remap[os.path.abspath(orig_filename)] = filename
        pass
    pass
    file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
    file2file_remap[path] = filename
    return True
# example usage
if __name__ == '__main__':
    def yes_no(var):
        # Helper for the demo print()s below.
        if var: return ""
        else: return "not "
        return # Not reached
    # print(getline(__file__, 1, {'output': 'dark'}))
    # print(getline(__file__, 2, {'output': 'light'}))
    # from pygments.styles import STYLE_MAP
    # opts = {'style': list(STYLE_MAP.keys())[0]}
    # print(getline(__file__, 1, opts))
    # update_cache('os')
    # lines = getlines(__file__)
    # print("%s has %s lines" % (__file__, len(lines['plain'])))
    # lines = getlines(__file__, {'output': 'light'})
    # i = 0
    # for line in lines:
    #     i += 1
    #     print(line.rstrip('\n').rstrip('\n'))
    #     if i > 20: break
    #     pass
    # line = getline(__file__, 6)
    # print("The 6th line is\n%s" % line)
    # line = remap_file(__file__, 'another_name')
    # print(getline('another_name', 7))
    # print("Files cached: %s" % cached_files())
    # update_cache(__file__)
    # checkcache(__file__)
    # print("%s has %s lines" % (__file__, size(__file__)))
    # print("%s trace line numbers:\n" % __file__)
    # print("%s " % repr(trace_line_numbers(__file__)))
    # print("%s is %scached." % (__file__,
    #                            yes_no(is_cached(__file__))))
    # print(stat(__file__))
    # print("Full path: %s" % path(__file__))
    # checkcache() # Check all files in the cache
    # clear_file_format_cache()
    # clear_file_cache()
    # print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
    # # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
    # # # if digest is not None: print digest.first[0]
    # line = getline(__file__, 7)
    # print("The 7th line is\n%s" % line)
    # Exercise the line-remapping machinery end to end.
    orig_path = __file__
    mapped_path = 'test2'
    start_line = 10
    start_mapped = 6
    remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
    for l in (1,):
        line = getline(mapped_path, l+start_mapped)
        print("Remapped %s line %d should be line %d of %s. line is:\n%s"
              % (mapped_path, start_mapped+l, start_line+l, orig_path, line))
    # print("XXX", file2file_remap_lines)
|
rocky/python-filecache
|
pyficache/main.py
|
getline
|
python
|
def getline(file_or_script, line_number, opts=default_opts):
filename = unmap_file(file_or_script)
filename, line_number = unmap_file_line(filename, line_number)
lines = getlines(filename, opts)
if lines and line_number >=1 and line_number <= maxline(filename):
line = lines[line_number-1]
if get_option('strip_nl', opts):
return line.rstrip('\n')
else:
return line
pass
else:
return None
return
|
Get line *line_number* from file named *file_or_script*. Return None if
there was a problem or it is not found.
Example:
lines = pyficache.getline("/tmp/myfile.py")
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L324-L344
|
[
"def getlines(filename, opts=default_opts):\n \"\"\"Read lines of *filename* and cache the results. However, if\n *filename* was previously cached use the results from the\n cache. Return *None* if we can not get lines\n \"\"\"\n if get_option('reload_on_change', opts): checkcache(filename)\n fmt = get_option('output', opts)\n highlight_opts = {'bg': fmt}\n cs = opts.get('style')\n\n # Colorstyle of Terminal255Formatter takes precidence over\n # light/dark colorthemes of TerminalFormatter\n if cs:\n highlight_opts['style'] = cs\n fmt = cs\n\n if filename not in file_cache:\n update_cache(filename, opts)\n filename = pyc2py(filename)\n if filename not in file_cache: return None\n pass\n lines = file_cache[filename].lines\n if fmt not in lines.keys():\n lines[fmt] = highlight_array(lines['plain'], **highlight_opts)\n pass\n return lines[fmt]\n",
"def maxline(filename, use_cache_only=False):\n \"\"\"Return the maximum line number filename after taking into account\n line remapping. If no remapping then this is the same as size\"\"\"\n if filename not in file2file_remap_lines:\n return size(filename, use_cache_only)\n max_lineno = -1\n remap_line_entry = file2file_remap_lines.get(filename)\n if not remap_line_entry:\n return size(filename, use_cache_only)\n for t in remap_line_entry.from_to_pairs:\n max_lineno = max(max_lineno, t[1])\n if max_lineno == -1:\n return size(filename, use_cache_only)\n else:\n return max_lineno\n",
"def unmap_file(filename):\n # FIXME: this is wrong?\n return file2file_remap.get(filename, filename)\n",
"def unmap_file_line(filename, line_number, reverse=False):\n remap_line_entry = file2file_remap_lines.get(filename)\n mapped_line_number = line_number\n if remap_line_entry:\n filename = remap_line_entry.mapped_path\n cache_entry = file_cache.get(filename, None)\n if cache_entry:\n line_max = maxline(filename)\n else:\n line_max = large_int\n last_t = (1, 1)\n # FIXME: use binary search\n # Note we assume assume from_line is increasing.\n # Add sentinel at end of from pairs to handle using the final\n # entry for line numbers greater than it.\n # Find the closest mapped line number equal or before line_number.\n for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):\n if reverse:\n t = list(reversed(t))\n if t[1] == line_number:\n mapped_line_number = t[0]\n break\n elif t[1] > line_number:\n mapped_line_number = last_t[0] + (line_number - last_t[1] )\n break\n last_t = t\n pass\n return (filename, mapped_line_number)\n",
"def get_option(key, options):\n global default_opts\n if not options or key not in options:\n return default_opts.get(key)\n else:\n return options[key]\n return None # Not reached\n"
] |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
# True when running under Python 3; selects maxsize/maxint and .pyc handling.
PYTHON3 = (sys.version_info >= (3, 0))
# Major+minor version digits, e.g. "36"; used to match cpython cache-file tags.
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
    large_int = sys.maxsize
else:
    large_int = sys.maxint
default_opts = {
    'reload_on_change'    : False,      # Check if file has changed since last
                                        # time
    'use_linecache_lines' : True,
    'strip_nl'            : True,       # Strip trailing \n on line returned
    'output'              : 'plain'     # Do we want plain output?
                                        # Set to 'terminal'
                                        # for terminal syntax-colored output
    }
def get_option(key, options):
    """Look up *key* in *options*, falling back to the module-wide
    default_opts when *options* is missing or lacks the key."""
    global default_opts
    if options and key in options:
        return options[key]
    return default_opts.get(key)
def has_trailing_nl(string):
    """Return True when *string* is non-empty and ends with a newline."""
    return string.endswith('\n')
def pyc2py(filename):
    """
    Find corresponding .py name given a .pyc or .pyo
    """
    if not re.match(".*py[co]$", filename):
        return filename
    if PYTHON3:
        # Strip the __pycache__ directory and cpython tag, e.g.
        # pkg/__pycache__/mod.cpython-36.pyc -> pkg/mod.py
        pattern = r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER
        return re.sub(pattern, '\\1\\2.py', filename)
    # Python 2: foo.pyc -> foo.py by dropping the final character.
    return filename[:-1]
class LineCacheInfo:
    """Record for one cached file: its os.stat result, known trace line
    numbers, a dict of line lists keyed by output format, the file path,
    a memoized SHA1 hash object, and the newline convention(s) seen."""
    def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
        self.stat = stat
        self.line_numbers = line_numbers
        self.lines = lines
        self.path = path
        self.sha1 = sha1
        self.eols = eols
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
# Explicitly-registered script texts, keyed by script name.
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number in as Python sees it. The second item is the
# line number in corresponding mapped_path. The first entry of the
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But not the other way
# around. That is we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work this.
def clear_file_cache(filename=None):
    """Clear the file cache entirely, or just the entry for *filename*
    when one is given."""
    global file_cache, file2file_remap, file2file_remap_lines
    if filename is None:
        # Reset everything, including the path and line remap tables.
        file_cache = {}
        file2file_remap = {}
        file2file_remap_lines = {}
    elif filename in file_cache:
        del file_cache[filename]
    return
def clear_file_format_cache():
    """Remove syntax-formatted lines in the cache. Use this
    when you change the Pygments syntax or Token formatting
    and want to redo how files may have previously been
    syntax marked.
    Formatted entries are *deleted* rather than set to None:
    getlines() only tests key presence, so a leftover None entry
    would be handed back to callers instead of being regenerated.
    """
    for fname, cache_info in file_cache.items():
        # Snapshot the keys first; deleting while iterating a dict is an error.
        formats = [fmt for fmt in cache_info.lines.keys() if 'plain' != fmt]
        for fmt in formats:
            del cache_info.lines[fmt]
            pass
        pass
    return
def cached_files():
    """Return a list of the names of all cached files."""
    return [name for name in file_cache]
def checkcache(filename=None, opts=False):
    """Discard cache entries that are out of date. If *filename* is *None*
    all entries in the file cache *file_cache* are checked. If we do not
    have stat information about a file it will be kept. Return a list of
    invalidated filenames. None is returned if a filename was given but
    not found cached.
    *opts* may be a bool or an options dict supplying
    'use_linecache_lines'; that setting is forwarded to update_cache().
    """
    if isinstance(opts, dict):
        use_linecache_lines = opts['use_linecache_lines']
    else:
        use_linecache_lines = opts
    if not filename:
        filenames = list(file_cache.keys())
    elif filename in file_cache:
        filenames = [filename]
    else:
        return None
    result = []
    for filename in filenames:
        if filename not in file_cache:
            continue
        path = file_cache[filename].path
        if os.path.exists(path):
            cache_info = file_cache[filename].stat
            stat = os.stat(path)
            if stat and \
               (cache_info.st_size != stat.st_size or
                cache_info.st_mtime != stat.st_mtime):
                result.append(filename)
                # BUG FIX: update_cache() expects an options *dict*; the
                # bare boolean previously passed here made get_option()
                # raise "argument of type 'bool' is not iterable" when it
                # evaluated `key not in opts`.
                update_cache(filename,
                             {'use_linecache_lines': use_linecache_lines})
        else:
            # File vanished from disk; refresh the entry anyway.
            result.append(filename)
            update_cache(filename)
            pass
        pass
    return result
def cache_script(script, text, opts={}):
    """Add *script* with source *text* to the script cache unless it is
    already present; return *script*."""
    global script_cache
    if script not in script_cache:
        update_script_cache(script, text, opts)
    return script
def uncache_script(script, opts={}):
    """Remove *script* from the script cache; return it if it was
    cached, otherwise None."""
    global script_cache
    if script not in script_cache:
        return None
    del script_cache[script]
    return script
def update_script_cache(script, text, opts={}):
    """Record *text* as the cached source for *script*; an existing
    entry is left untouched. Return *script*."""
    global script_cache
    script_cache.setdefault(script, text)
    return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
    """Cache *filename* if it is not already cached.
    Return the expanded filename for it in the cache
    or None if we can not find the file.
    When *reload_on_change* is true, a stale entry for an
    already-cached file is refreshed first.
    """
    filename = pyc2py(filename)
    if filename in file_cache:
        if reload_on_change: checkcache(filename)
        pass
    else:
        # BUG FIX: copy before writing. *opts* defaults to the shared
        # module-level default_opts dict (and may be a caller's own dict);
        # assigning into it here used to permanently mutate it.
        opts = dict(opts)
        opts['use_linecache_lines'] = True
        update_cache(filename, opts)
        pass
    if filename in file_cache:
        return file_cache[filename].path
    return None
def is_cached(file_or_script):
    """Return True if *file_or_script* is cached. Strings are looked up
    in the file cache; anything else is checked against the script
    cache."""
    if isinstance(file_or_script, str):
        return unmap_file(file_or_script) in file_cache
    return is_cached_script(file_or_script)
def is_cached_script(filename):
    """Return True if *filename* (after alias resolution) is in the
    script cache."""
    return unmap_file(filename) in script_cache
def is_empty(filename):
    """Return True when the cached plain-text for *filename* has no lines.
    The file must already be cached (KeyError otherwise)."""
    plain = file_cache[unmap_file(filename)].lines['plain']
    return len(plain) == 0
# Not reached
def getlines(filename, opts=default_opts):
    """Read lines of *filename* and cache the results. However, if
    *filename* was previously cached use the results from the
    cache. Return *None* if we can not get lines
    """
    if get_option('reload_on_change', opts): checkcache(filename)
    # The output format doubles as the key under which the (possibly
    # highlighted) lines are memoized on the cache entry.
    fmt = get_option('output', opts)
    highlight_opts = {'bg': fmt}
    cs = opts.get('style')
    # Color style of Terminal256Formatter takes precedence over
    # light/dark color themes of TerminalFormatter
    if cs:
        highlight_opts['style'] = cs
        fmt = cs
    if filename not in file_cache:
        update_cache(filename, opts)
        filename = pyc2py(filename)
        if filename not in file_cache: return None
        pass
    lines = file_cache[filename].lines
    if fmt not in lines.keys():
        # Highlight lazily, once per requested format.
        lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
        pass
    return lines[fmt]
def highlight_array(array, trailing_nl=True,
                    bg='light', **options):
    """Syntax-highlight *array* (a list of source lines) and return the
    result as a list of newline-terminated lines. When *trailing_nl* is
    false the last line keeps no trailing newline."""
    highlighted = highlight_string(''.join(array), bg, **options)
    lines = [chunk + "\n" for chunk in highlighted.split('\n')]
    if not trailing_nl:
        lines[-1] = lines[-1].rstrip('\n')
    return lines
# One shared lexer instance: this package only highlights Python source.
python_lexer = PythonLexer()
# TerminalFormatter uses a colorTHEME with light and dark pairs.
# But Terminal256Formatter uses a colorSTYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
# Rebuilt by highlight_string() whenever a different Pygments style is requested.
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
    """Return *string* syntax-highlighted for terminal display.
    A 'style' option selects a Pygments 256-color style; otherwise *bg*
    chooses between the shared light and dark 16-color formatters."""
    global terminal_256_formatter
    if options.get('style'):
        # Rebuild the shared 256-color formatter only when the requested
        # style differs from the one it currently uses.
        if terminal_256_formatter.style != options['style']:
            terminal_256_formatter = \
                Terminal256Formatter(style=options['style'])
        # 'style' is not a keyword highlight() accepts; remove it.
        del options['style']
        return highlight(string, python_lexer, terminal_256_formatter,
                         **options)
    elif 'light' == bg:
        return highlight(string, python_lexer, light_terminal_formatter,
                         **options)
    else:
        return highlight(string, python_lexer, dark_terminal_formatter,
                         **options)
    pass
def path(filename):
    """Return the full path stored in the cache for *filename*, or None
    if the file is not cached."""
    entry = file_cache.get(unmap_file(filename))
    return None if entry is None else entry.path
def remap_file(from_file, to_file):
    """Register *to_file* as an alias that resolves to *from_file*."""
    file2file_remap[to_file] = from_file
def remap_file_lines(from_path, to_path, line_map_list):
    """Associate line numbers of *to_path* with those of *from_path*,
    merging *line_map_list* into any mapping already recorded."""
    from_path = pyc2py(from_path)
    cache_file(to_path)
    prior = file2file_remap_lines.get(to_path)
    pairs = list(line_map_list)
    if prior:
        pairs = list(prior.from_to_pairs) + pairs
    # FIXME: look for duplicates ?
    # Keep the pairs sorted by the "from" line number.
    file2file_remap_lines[to_path] = RemapLineEntry(
        from_path,
        tuple(sorted(pairs, key=lambda pair: pair[0]))
    )
    return
def remove_remap_file(filename):
    """Remove any mapping for *filename*, returning it if one existed,
    else None."""
    global file2file_remap
    return file2file_remap.pop(filename, None)
def sha1(filename):
    """Return the SHA1 hex digest of the cached contents of *filename*,
    memoizing the hash object on the cache entry. Return None when the
    file cannot be cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
    entry = file_cache[filename]
    if entry.sha1:
        return entry.sha1.hexdigest()
    digest = hashlib.sha1()
    for line in entry.lines['plain']:
        digest.update(line.encode('utf-8'))
    entry.sha1 = digest
    return digest.hexdigest()
def size(filename, use_cache_only=False):
    """Return the number of lines in *filename*. If *use_cache_only* is
    False, we'll try to fetch the file if it is not cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return len(file_cache[filename].lines['plain'])
def maxline(filename, use_cache_only=False):
    """Return the maximum line number of *filename* after taking line
    remapping into account. With no remapping this equals size()."""
    # The original tested membership in file2file_remap_lines and then
    # immediately re-tested via .get(); one lookup suffices.
    remap_line_entry = file2file_remap_lines.get(filename)
    if not remap_line_entry:
        return size(filename, use_cache_only)
    # Largest "to" line number among the remap pairs.
    max_lineno = -1
    for from_line, to_line in remap_line_entry.from_to_pairs:
        max_lineno = max(max_lineno, to_line)
    if max_lineno == -1:
        return size(filename, use_cache_only)
    return max_lineno
def stat(filename, use_cache_only=False):
    """Return the cached os.stat() result for *filename*, fetching the
    file first unless *use_cache_only* is True."""
    filename = pyc2py(filename)
    entry = file_cache.get(filename)
    if entry is None and not use_cache_only:
        cache_file(filename)
        entry = file_cache.get(filename)
    if entry is None:
        return None
    return entry.stat
def trace_line_numbers(filename, reload_on_change=False):
    """Return an Array of breakpoints in filename.
    The list will contain an entry for each distinct line event call
    so it is possible (and possibly useful) for a line number appear more
    than once."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname: return None
    # NOTE(review): indexes by *filename*, not *fullname*; if pyc2py
    # renamed the file inside cache_file this could KeyError -- confirm.
    e = file_cache[filename]
    if not e.line_numbers:
        # Older coverage.py (2.x) exposes analyze_morf on a module-level
        # singleton; newer releases use coverage().analysis() instead.
        # Presumably index [1] is the list of executable line numbers --
        # verify against the coverage.py version in use.
        if hasattr(coverage.coverage, 'analyze_morf'):
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            # Silence coverage's "no data collected" warning.
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
            pass
        pass
    return e.line_numbers
def is_mapped_file(filename):
    """Return 'file' for an aliased name, 'file_line' for a line-remapped
    one, or None when *filename* is not mapped at all."""
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    """Resolve *filename* through the alias table; unaliased names map
    to themselves."""
    # FIXME: this is wrong?
    try:
        return file2file_remap[filename]
    except KeyError:
        return filename
def unmap_file_line(filename, line_number, reverse=False):
    """Translate (*filename*, *line_number*) through the per-line remap
    table, returning the (possibly different) (filename, line_number)
    pair. With *reverse*, translate in the opposite direction."""
    remap_line_entry = file2file_remap_lines.get(filename)
    mapped_line_number = line_number
    if remap_line_entry:
        filename = remap_line_entry.mapped_path
        cache_entry = file_cache.get(filename, None)
        if cache_entry:
            line_max = maxline(filename)
        else:
            line_max = large_int
        last_t = (1, 1)
        # FIXME: use binary search
        # Note we assume assume from_line is increasing.
        # Add sentinel at end of from pairs to handle using the final
        # entry for line numbers greater than it.
        # Find the closest mapped line number equal or before line_number.
        for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
            if reverse:
                t = list(reversed(t))
            if t[1] == line_number:
                mapped_line_number = t[0]
                break
            elif t[1] > line_number:
                # Between two explicit pairs: interpolate by offsetting
                # from the last pair at or below line_number.
                mapped_line_number = last_t[0] + (line_number - last_t[1] )
                break
            last_t = t
            pass
        pass
    return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
    """Update a cache entry. If something is wrong, return
    *None*. Return *True* if the cache was updated and *False* if not. If
    *use_linecache_lines* is *True*, use an existing cache entry as source
    for the lines of the file."""
    if not filename: return None
    orig_filename = filename
    filename = pyc2py(filename)
    if filename in file_cache: del file_cache[filename]
    path = os.path.abspath(filename)
    stat = None
    # Branch 1: borrow the lines the linecache module already holds
    # instead of reading the file ourselves.
    if get_option('use_linecache_lines', opts):
        fname_list = [filename]
        mapped_path = file2file_remap.get(path)
        if mapped_path:
            fname_list.append(mapped_path)
        for filename in fname_list:
            try:
                stat = os.stat(filename)
                plain_lines = linecache.getlines(filename)
                trailing_nl = has_trailing_nl(plain_lines[-1])
                lines = {
                    'plain' : plain_lines,
                }
                file_cache[filename] = LineCacheInfo(stat, None, lines,
                                                     path, None)
            except:
                # NOTE(review): bare except hides stat/read errors (and
                # IndexError when linecache has no lines); the entry is
                # simply left uncached.
                pass
            pass
            if orig_filename != filename:
                file2file_remap[orig_filename] = filename
                file2file_remap[os.path.abspath(orig_filename)] = filename
                pass
            file2file_remap[path] = filename
            # NOTE(review): returns on the first candidate, and yields the
            # filename string rather than True/False as the docstring says.
            return filename
        pass
    pass
    if os.path.exists(path):
        stat = os.stat(path)
    elif module_globals and '__loader__' in module_globals:
        # Branch 2: no file on disk -- ask the module's PEP 302 loader
        # for the source text.
        name = module_globals.get('__name__')
        loader = module_globals['__loader__']
        get_source = getattr(loader, 'get_source', None)
        if name and get_source:
            try:
                data = get_source(name)
            except (ImportError, IOError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return None
                # FIXME: DRY with code below
                lines = {'plain' : data.splitlines()}
                raw_string = ''.join(lines['plain'])
                trailing_nl = has_trailing_nl(raw_string)
                if 'style' in opts:
                    key = opts['style']
                    highlight_opts = {'style': key}
                else:
                    key = 'terminal'
                    highlight_opts = {}
                lines[key] = highlight_array(raw_string.split('\n'),
                                             trailing_nl, **highlight_opts)
                file_cache[filename] = \
                    LineCacheInfo(None, None, lines, filename, None)
                file2file_remap[path] = filename
                return True
            pass
        pass
    if not os.path.isabs(filename):
        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        stat = None
        for dirname in sys.path:
            path = os.path.join(dirname, filename)
            if os.path.exists(path):
                stat = os.stat(path)
                break
            pass
        if not stat: return False
        pass
    # Branch 3: read the file from disk ('rU' keeps universal newlines
    # on Python 2).
    try:
        mode = 'r' if PYTHON3 else 'rU'
        with open(path, mode) as fp:
            lines = {'plain' : fp.readlines()}
            eols = fp.newlines
    except:
        return None
    # FIXME: DRY with code above
    raw_string = ''.join(lines['plain'])
    trailing_nl = has_trailing_nl(raw_string)
    if 'style' in opts:
        key = opts['style'] or 'default'
        highlight_opts = {'style': key}
    else:
        key = 'terminal'
        highlight_opts = {}
    lines[key] = highlight_array(raw_string.split('\n'),
                                 trailing_nl, **highlight_opts)
    if orig_filename != filename:
        file2file_remap[orig_filename] = filename
        file2file_remap[os.path.abspath(orig_filename)] = filename
        pass
    pass
    file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
    file2file_remap[path] = filename
    return True
# example usage
if __name__ == '__main__':
    # Ad-hoc demo / smoke test; most examples are kept commented out.
    def yes_no(var):
        """Return "" when *var* is truthy and "not " otherwise."""
        if var: return ""
        else: return "not "
        return # Not reached
    # print(getline(__file__, 1, {'output': 'dark'}))
    # print(getline(__file__, 2, {'output': 'light'}))
    # from pygments.styles import STYLE_MAP
    # opts = {'style': list(STYLE_MAP.keys())[0]}
    # print(getline(__file__, 1, opts))
    # update_cache('os')
    # lines = getlines(__file__)
    # print("%s has %s lines" % (__file__, len(lines['plain'])))
    # lines = getlines(__file__, {'output': 'light'})
    # i = 0
    # for line in lines:
    #     i += 1
    #     print(line.rstrip('\n').rstrip('\n'))
    #     if i > 20: break
    #     pass
    # line = getline(__file__, 6)
    # print("The 6th line is\n%s" % line)
    # line = remap_file(__file__, 'another_name')
    # print(getline('another_name', 7))
    # print("Files cached: %s" % cached_files())
    # update_cache(__file__)
    # checkcache(__file__)
    # print("%s has %s lines" % (__file__, size(__file__)))
    # print("%s trace line numbers:\n" % __file__)
    # print("%s " % repr(trace_line_numbers(__file__)))
    # print("%s is %scached." % (__file__,
    #                            yes_no(is_cached(__file__))))
    # print(stat(__file__))
    # print("Full path: %s" % path(__file__))
    # checkcache() # Check all files in the cache
    # clear_file_format_cache()
    # clear_file_cache()
    # print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
    # # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
    # # # if digest is not None: print digest.first[0]
    # line = getline(__file__, 7)
    # print("The 7th line is\n%s" % line)
    # Exercise line remapping end to end.
    orig_path = __file__
    mapped_path = 'test2'
    start_line = 10
    start_mapped = 6
    remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
    for l in (1,):
        line = getline(mapped_path, l+start_mapped)
        print("Remapped %s line %d should be line %d of %s. line is:\n%s"
              % (mapped_path, start_mapped+l, start_line+l, orig_path, line))
    # print("XXX", file2file_remap_lines)
|
rocky/python-filecache
|
pyficache/main.py
|
getlines
|
python
|
def getlines(filename, opts=default_opts):
    """Return the cached lines of *filename* (highlighted per *opts*),
    caching the file on first access. Return None if the lines cannot
    be obtained."""
    if get_option('reload_on_change', opts):
        checkcache(filename)
    fmt = get_option('output', opts)
    highlight_opts = {'bg': fmt}
    style = opts.get('style')
    # A Terminal256Formatter color style takes precedence over the
    # light/dark color themes of TerminalFormatter.
    if style:
        highlight_opts['style'] = style
        fmt = style
    if filename not in file_cache:
        update_cache(filename, opts)
        filename = pyc2py(filename)
        if filename not in file_cache:
            return None
    lines = file_cache[filename].lines
    if fmt not in lines:
        lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
    return lines[fmt]
|
Read lines of *filename* and cache the results. However, if
*filename* was previously cached use the results from the
cache. Return *None* if we can not get lines
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L346-L371
|
[
"def checkcache(filename=None, opts=False):\n \"\"\"Discard cache entries that are out of date. If *filename* is *None*\n all entries in the file cache *file_cache* are checked. If we do not\n have stat information about a file it will be kept. Return a list of\n invalidated filenames. None is returned if a filename was given but\n not found cached.\"\"\"\n\n if isinstance(opts, dict):\n use_linecache_lines = opts['use_linecache_lines']\n else:\n use_linecache_lines = opts\n pass\n\n if not filename:\n filenames = list(file_cache.keys())\n elif filename in file_cache:\n filenames = [filename]\n else:\n return None\n\n result = []\n for filename in filenames:\n if filename not in file_cache: continue\n path = file_cache[filename].path\n if os.path.exists(path):\n cache_info = file_cache[filename].stat\n stat = os.stat(path)\n if stat and \\\n (cache_info.st_size != stat.st_size or\n cache_info.st_mtime != stat.st_mtime):\n result.append(filename)\n update_cache(filename, use_linecache_lines)\n else:\n result.append(filename)\n update_cache(filename)\n pass\n pass\n return result\n",
"def highlight_array(array, trailing_nl=True,\n bg='light', **options):\n fmt_array = highlight_string(''.join(array),\n bg, **options).split('\\n')\n lines = [ line + \"\\n\" for line in fmt_array ]\n if not trailing_nl: lines[-1] = lines[-1].rstrip('\\n')\n return lines\n",
"def pyc2py(filename):\n \"\"\"\n Find corresponding .py name given a .pyc or .pyo\n \"\"\"\n if re.match(\".*py[co]$\", filename):\n if PYTHON3:\n return re.sub(r'(.*)__pycache__/(.+)\\.cpython-%s.py[co]$' % PYVER,\n '\\\\1\\\\2.py',\n filename)\n else:\n return filename[:-1]\n return filename\n",
"def update_cache(filename, opts=default_opts, module_globals=None):\n \"\"\"Update a cache entry. If something is wrong, return\n *None*. Return *True* if the cache was updated and *False* if not. If\n *use_linecache_lines* is *True*, use an existing cache entry as source\n for the lines of the file.\"\"\"\n\n if not filename: return None\n\n orig_filename = filename\n filename = pyc2py(filename)\n if filename in file_cache: del file_cache[filename]\n path = os.path.abspath(filename)\n stat = None\n if get_option('use_linecache_lines', opts):\n fname_list = [filename]\n mapped_path = file2file_remap.get(path)\n if mapped_path:\n fname_list.append(mapped_path)\n for filename in fname_list:\n try:\n stat = os.stat(filename)\n plain_lines = linecache.getlines(filename)\n trailing_nl = has_trailing_nl(plain_lines[-1])\n lines = {\n 'plain' : plain_lines,\n }\n file_cache[filename] = LineCacheInfo(stat, None, lines,\n path, None)\n except:\n pass\n pass\n if orig_filename != filename:\n file2file_remap[orig_filename] = filename\n file2file_remap[os.path.abspath(orig_filename)] = filename\n pass\n file2file_remap[path] = filename\n return filename\n pass\n pass\n\n if os.path.exists(path):\n stat = os.stat(path)\n elif module_globals and '__loader__' in module_globals:\n name = module_globals.get('__name__')\n loader = module_globals['__loader__']\n get_source = getattr(loader, 'get_source', None)\n if name and get_source:\n try:\n data = get_source(name)\n except (ImportError, IOError):\n pass\n else:\n if data is None:\n # No luck, the PEP302 loader cannot find the source\n # for this module.\n return None\n # FIXME: DRY with code below\n lines = {'plain' : data.splitlines()}\n raw_string = ''.join(lines['plain'])\n trailing_nl = has_trailing_nl(raw_string)\n if 'style' in opts:\n key = opts['style']\n highlight_opts = {'style': key}\n else:\n key = 'terminal'\n highlight_opts = {}\n\n lines[key] = highlight_array(raw_string.split('\\n'),\n trailing_nl, 
**highlight_opts)\n file_cache[filename] = \\\n LineCacheInfo(None, None, lines, filename, None)\n file2file_remap[path] = filename\n return True\n pass\n pass\n if not os.path.isabs(filename):\n # Try looking through the module search path, which is only useful\n # when handling a relative filename.\n stat = None\n for dirname in sys.path:\n path = os.path.join(dirname, filename)\n if os.path.exists(path):\n stat = os.stat(path)\n break\n pass\n if not stat: return False\n pass\n\n try:\n mode = 'r' if PYTHON3 else 'rU'\n with open(path, mode) as fp:\n lines = {'plain' : fp.readlines()}\n eols = fp.newlines\n except:\n return None\n\n # FIXME: DRY with code above\n raw_string = ''.join(lines['plain'])\n trailing_nl = has_trailing_nl(raw_string)\n if 'style' in opts:\n key = opts['style'] or 'default'\n highlight_opts = {'style': key}\n else:\n key = 'terminal'\n highlight_opts = {}\n\n lines[key] = highlight_array(raw_string.split('\\n'),\n trailing_nl, **highlight_opts)\n if orig_filename != filename:\n file2file_remap[orig_filename] = filename\n file2file_remap[os.path.abspath(orig_filename)] = filename\n pass\n pass\n\n file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)\n file2file_remap[path] = filename\n return True\n",
"def get_option(key, options):\n global default_opts\n if not options or key not in options:\n return default_opts.get(key)\n else:\n return options[key]\n return None # Not reached\n"
] |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
PYTHON3 = (sys.version_info >= (3, 0))
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
large_int = sys.maxsize
else:
large_int = sys.maxint
default_opts = {
'reload_on_change' : False, # Check if file has changed since last
# time
'use_linecache_lines' : True,
'strip_nl' : True, # Strip trailing \n on line returned
'output' : 'plain' # To we want plain output?
# Set to 'terminal'
# for terminal syntax-colored output
}
def get_option(key, options):
    """Fetch *key* from *options*, falling back to the module-wide
    default_opts when *options* is missing or lacks the key."""
    if options and key in options:
        return options[key]
    return default_opts.get(key)
def has_trailing_nl(string):
    """Return True if *string* is non-empty and ends with a newline."""
    return string.endswith('\n')
def pyc2py(filename):
    """Map a .pyc/.pyo path back to its corresponding .py source path;
    other names are returned unchanged."""
    if not re.match(".*py[co]$", filename):
        return filename
    if PYTHON3:
        return re.sub(r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER,
                      '\\1\\2.py',
                      filename)
    return filename[:-1]
class LineCacheInfo:
    """One cached file: its stat record, trace line numbers, lines by
    format ('plain', 'terminal', style name, ...), full path, sha1
    digest object, and end-of-line convention."""
    def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
        self.stat = stat
        self.line_numbers = line_numbers
        self.lines = lines
        self.path = path
        self.sha1 = sha1
        self.eols = eols
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the a full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is # the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number in as Python sees it. The second item is the
# line number in corresponding mapped_path. The the first entry of the
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But we not the other way
# around. That is we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work this.
def clear_file_cache(filename=None):
    """Drop cached data. With *filename*, forget only that file;
    otherwise reset the whole cache including both remap tables."""
    global file_cache, file2file_remap, file2file_remap_lines
    if filename is None:
        file_cache = {}
        file2file_remap = {}
        file2file_remap_lines = {}
    elif filename in file_cache:
        del file_cache[filename]
def clear_file_format_cache():
    """Drop every syntax-highlighted rendering so files get re-colorized
    with the current Pygments settings; 'plain' lines are kept."""
    for cache_info in file_cache.values():
        for fmt in cache_info.lines:
            if fmt != 'plain':
                cache_info.lines[fmt] = None
def cached_files():
    """Return the names of all currently cached files."""
    return list(file_cache)
def checkcache(filename=None, opts=False):
    """Discard cache entries that are out of date. If *filename* is *None*
    all entries in the file cache *file_cache* are checked. If we do not
    have stat information about a file it will be kept. Return a list of
    invalidated filenames. None is returned if a filename was given but
    not found cached."""
    # *opts* may be a dict or a plain boolean standing in for
    # use_linecache_lines.
    if isinstance(opts, dict):
        use_linecache_lines = opts['use_linecache_lines']
    else:
        use_linecache_lines = opts
        pass
    if not filename:
        filenames = list(file_cache.keys())
    elif filename in file_cache:
        filenames = [filename]
    else:
        return None
    result = []
    for filename in filenames:
        if filename not in file_cache: continue
        path = file_cache[filename].path
        if os.path.exists(path):
            cache_info = file_cache[filename].stat
            stat = os.stat(path)
            # Stale when size or mtime changed; entries with no stat
            # info are kept as-is.
            if stat and \
               (cache_info.st_size != stat.st_size or
                cache_info.st_mtime != stat.st_mtime):
                result.append(filename)
                # NOTE(review): passes a bool where update_cache expects
                # an opts dict. get_option treats a falsy value as "use
                # defaults", but a truthy non-dict here would raise
                # TypeError inside get_option -- confirm intent.
                update_cache(filename, use_linecache_lines)
        else:
            # Backing file vanished: invalidate and rebuild the entry.
            result.append(filename)
            update_cache(filename)
            pass
        pass
    return result
def cache_script(script, text, opts={}):
    """Add *script* with contents *text* to the script cache if it is
    not already there; return *script*."""
    global script_cache
    if script not in script_cache:
        update_script_cache(script, text, opts)
    return script
def uncache_script(script, opts={}):
    """Remove *script* from the script cache, returning it if it was
    present and None otherwise."""
    global script_cache
    if script not in script_cache:
        return None
    del script_cache[script]
    return script
def update_script_cache(script, text, opts={}):
    """Record *text* as the cached content of *script* unless already
    cached; return *script*."""
    global script_cache
    script_cache.setdefault(script, text)
    return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
    """Cache *filename* if it is not already cached.

    Return the expanded path of the cached file, or None if the file
    cannot be found. With *reload_on_change*, refresh a stale entry
    first."""
    filename = pyc2py(filename)
    if filename in file_cache:
        if reload_on_change:
            checkcache(filename)
    else:
        # Copy before setting the flag so we never mutate the caller's
        # dict -- or the shared module-level default_opts, which is what
        # the original in-place assignment did.
        opts = dict(opts)
        opts['use_linecache_lines'] = True
        update_cache(filename, opts)
    if filename in file_cache:
        return file_cache[filename].path
    return None
def is_cached(file_or_script):
    """Return True if *file_or_script* is present in the file cache
    (for strings) or the script cache (otherwise)."""
    if not isinstance(file_or_script, str):
        return is_cached_script(file_or_script)
    return unmap_file(file_or_script) in file_cache
def is_cached_script(filename):
    """Return True if *filename* (after alias resolution) is in the
    script cache."""
    # Membership test directly on the dict; materializing the key list
    # was a needless O(n) copy.
    return unmap_file(filename) in script_cache
def is_empty(filename):
    """Return True if the cached plain text of *filename* has no lines.
    NOTE(review): assumes *filename* is already cached -- raises
    KeyError otherwise; confirm callers guarantee this."""
    filename=unmap_file(filename)
    return 0 == len(file_cache[filename].lines['plain'])
def getline(file_or_script, line_number, opts=default_opts):
    """Get line *line_number* from file named *file_or_script*. Return None if
    there was a problem or it is not found.

    Example:

        line = pyficache.getline("/tmp/myfile.py", 6)
    """
    # Resolve aliases, then translate the line number through any
    # per-line remap table registered for this file.
    filename = unmap_file(file_or_script)
    filename, line_number = unmap_file_line(filename, line_number)
    lines = getlines(filename, opts)
    # Bounds-check against maxline(), which accounts for remapping.
    # NOTE(review): for remapped files maxline() can exceed len(lines),
    # so the index below could still raise IndexError -- confirm.
    if lines and line_number >=1 and line_number <= maxline(filename):
        line = lines[line_number-1]
        # 'strip_nl' (default True) drops the trailing newline.
        if get_option('strip_nl', opts):
            return line.rstrip('\n')
        else:
            return line
        pass
    else:
        return None
    return # Not reached
def highlight_array(array, trailing_nl=True,
                    bg='light', **options):
    """Syntax-highlight the joined *array* of source lines and return
    the result re-split into newline-terminated lines; the final newline
    is dropped unless *trailing_nl*."""
    colorized = highlight_string(''.join(array), bg, **options)
    lines = [frag + "\n" for frag in colorized.split('\n')]
    if not trailing_nl:
        lines[-1] = lines[-1].rstrip('\n')
    return lines
python_lexer = PythonLexer()
# TerminalFormatter uses a colorTHEME with light and dark pairs.
# But Terminal256Formatter uses a colorSTYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
    """Return *string* syntax-highlighted as Python source. A 'style'
    option selects a Terminal256Formatter color style; otherwise *bg*
    picks the light or dark TerminalFormatter theme."""
    global terminal_256_formatter
    if options.get('style'):
        # Rebuild the cached 256-color formatter when the style changes.
        # NOTE(review): terminal_256_formatter.style is a Pygments style
        # class, so comparing it to the style *name* string may always
        # differ and rebuild on every call -- confirm.
        if terminal_256_formatter.style != options['style']:
            terminal_256_formatter = \
                Terminal256Formatter(style=options['style'])
        # 'style' is not a highlight() kwarg; drop it before forwarding.
        del options['style']
        return highlight(string, python_lexer, terminal_256_formatter,
                         **options)
    elif 'light' == bg:
        return highlight(string, python_lexer, light_terminal_formatter,
                         **options)
    else:
        return highlight(string, python_lexer, dark_terminal_formatter,
                         **options)
    pass
def path(filename):
    """Return full filename path for *filename*, or None if not cached."""
    entry = file_cache.get(unmap_file(filename))
    return None if entry is None else entry.path
def remap_file(from_file, to_file):
    """Register *to_file* as a synonym for *from_file*."""
    file2file_remap[to_file] = from_file
def remap_file_lines(from_path, to_path, line_map_list):
    """Merge *line_map_list* into the line-number mapping that associates
    *from_path* with *to_path*."""
    from_path = pyc2py(from_path)
    cache_file(to_path)
    existing = file2file_remap_lines.get(to_path)
    pairs = list(line_map_list)
    if existing:
        pairs = list(existing.from_to_pairs) + pairs
    # FIXME: look for duplicates ?
    file2file_remap_lines[to_path] = RemapLineEntry(
        from_path,
        tuple(sorted(pairs, key=lambda pair: pair[0]))
    )
def remove_remap_file(filename):
    """Forget any alias recorded for *filename*; return the old target,
    or None if there was none."""
    return file2file_remap.pop(filename, None)
def sha1(filename):
    """Return the SHA1 hex digest of *filename*'s cached plain lines,
    caching the file (and the digest) on first use."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
    entry = file_cache[filename]
    if entry.sha1:
        return entry.sha1.hexdigest()
    digest = hashlib.sha1()
    for line in entry.lines['plain']:
        digest.update(line.encode('utf-8'))
    entry.sha1 = digest
    return digest.hexdigest()
def size(filename, use_cache_only=False):
    """Return the number of plain-text lines cached for *filename*,
    fetching the file first unless *use_cache_only* is True."""
    filename = unmap_file(filename)
    entry = file_cache.get(filename)
    if entry is None and not use_cache_only:
        cache_file(filename)
        entry = file_cache.get(filename)
    if entry is None:
        return None
    return len(entry.lines['plain'])
def maxline(filename, use_cache_only=False):
    """Return the maximum line number of *filename* after taking line
    remapping into account. With no remapping this equals size()."""
    # The original tested membership in file2file_remap_lines and then
    # immediately re-tested via .get(); one lookup suffices.
    remap_line_entry = file2file_remap_lines.get(filename)
    if not remap_line_entry:
        return size(filename, use_cache_only)
    # Largest "to" line number among the remap pairs.
    max_lineno = -1
    for from_line, to_line in remap_line_entry.from_to_pairs:
        max_lineno = max(max_lineno, to_line)
    if max_lineno == -1:
        return size(filename, use_cache_only)
    return max_lineno
def stat(filename, use_cache_only=False):
    """Return the cached os.stat() result for *filename*, fetching the
    file first unless *use_cache_only* is True."""
    filename = pyc2py(filename)
    entry = file_cache.get(filename)
    if entry is None and not use_cache_only:
        cache_file(filename)
        entry = file_cache.get(filename)
    if entry is None:
        return None
    return entry.stat
def trace_line_numbers(filename, reload_on_change=False):
    """Return an Array of breakpoints in filename.
    The list will contain an entry for each distinct line event call
    so it is possible (and possibly useful) for a line number appear more
    than once."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname: return None
    # NOTE(review): indexes by *filename*, not *fullname*; if pyc2py
    # renamed the file inside cache_file this could KeyError -- confirm.
    e = file_cache[filename]
    if not e.line_numbers:
        # Older coverage.py (2.x) exposes analyze_morf on a module-level
        # singleton; newer releases use coverage().analysis() instead.
        # Presumably index [1] is the list of executable line numbers --
        # verify against the coverage.py version in use.
        if hasattr(coverage.coverage, 'analyze_morf'):
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            # Silence coverage's "no data collected" warning.
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
            pass
        pass
    return e.line_numbers
def is_mapped_file(filename):
    """Return 'file' for an aliased name, 'file_line' for a line-remapped
    one, or None when *filename* is not mapped at all."""
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    """Resolve *filename* through the alias table; unaliased names map
    to themselves."""
    # FIXME: this is wrong?
    try:
        return file2file_remap[filename]
    except KeyError:
        return filename
def unmap_file_line(filename, line_number, reverse=False):
    """Translate (*filename*, *line_number*) through the per-line remap
    table, returning the (possibly different) (filename, line_number)
    pair. With *reverse*, translate in the opposite direction."""
    remap_line_entry = file2file_remap_lines.get(filename)
    mapped_line_number = line_number
    if remap_line_entry:
        filename = remap_line_entry.mapped_path
        cache_entry = file_cache.get(filename, None)
        if cache_entry:
            line_max = maxline(filename)
        else:
            line_max = large_int
        last_t = (1, 1)
        # FIXME: use binary search
        # Note we assume assume from_line is increasing.
        # Add sentinel at end of from pairs to handle using the final
        # entry for line numbers greater than it.
        # Find the closest mapped line number equal or before line_number.
        for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
            if reverse:
                t = list(reversed(t))
            if t[1] == line_number:
                mapped_line_number = t[0]
                break
            elif t[1] > line_number:
                # Between two explicit pairs: interpolate by offsetting
                # from the last pair at or below line_number.
                mapped_line_number = last_t[0] + (line_number - last_t[1] )
                break
            last_t = t
            pass
        pass
    return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
    """Update a cache entry. If something is wrong, return
    *None*. Return *True* if the cache was updated and *False* if not. If
    *use_linecache_lines* is *True*, use an existing cache entry as source
    for the lines of the file."""
    if not filename: return None
    orig_filename = filename
    filename = pyc2py(filename)
    if filename in file_cache: del file_cache[filename]
    path = os.path.abspath(filename)
    stat = None
    # Branch 1: borrow the lines the linecache module already holds
    # instead of reading the file ourselves.
    if get_option('use_linecache_lines', opts):
        fname_list = [filename]
        mapped_path = file2file_remap.get(path)
        if mapped_path:
            fname_list.append(mapped_path)
        for filename in fname_list:
            try:
                stat = os.stat(filename)
                plain_lines = linecache.getlines(filename)
                trailing_nl = has_trailing_nl(plain_lines[-1])
                lines = {
                    'plain' : plain_lines,
                }
                file_cache[filename] = LineCacheInfo(stat, None, lines,
                                                     path, None)
            except:
                # NOTE(review): bare except hides stat/read errors (and
                # IndexError when linecache has no lines); the entry is
                # simply left uncached.
                pass
            pass
            if orig_filename != filename:
                file2file_remap[orig_filename] = filename
                file2file_remap[os.path.abspath(orig_filename)] = filename
                pass
            file2file_remap[path] = filename
            # NOTE(review): returns on the first candidate, and yields the
            # filename string rather than True/False as the docstring says.
            return filename
        pass
    pass
    if os.path.exists(path):
        stat = os.stat(path)
    elif module_globals and '__loader__' in module_globals:
        # Branch 2: no file on disk -- ask the module's PEP 302 loader
        # for the source text.
        name = module_globals.get('__name__')
        loader = module_globals['__loader__']
        get_source = getattr(loader, 'get_source', None)
        if name and get_source:
            try:
                data = get_source(name)
            except (ImportError, IOError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return None
                # FIXME: DRY with code below
                lines = {'plain' : data.splitlines()}
                raw_string = ''.join(lines['plain'])
                trailing_nl = has_trailing_nl(raw_string)
                if 'style' in opts:
                    key = opts['style']
                    highlight_opts = {'style': key}
                else:
                    key = 'terminal'
                    highlight_opts = {}
                lines[key] = highlight_array(raw_string.split('\n'),
                                             trailing_nl, **highlight_opts)
                file_cache[filename] = \
                    LineCacheInfo(None, None, lines, filename, None)
                file2file_remap[path] = filename
                return True
            pass
        pass
    if not os.path.isabs(filename):
        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        stat = None
        for dirname in sys.path:
            path = os.path.join(dirname, filename)
            if os.path.exists(path):
                stat = os.stat(path)
                break
            pass
        if not stat: return False
        pass
    # Branch 3: read the file from disk ('rU' keeps universal newlines
    # on Python 2).
    try:
        mode = 'r' if PYTHON3 else 'rU'
        with open(path, mode) as fp:
            lines = {'plain' : fp.readlines()}
            eols = fp.newlines
    except:
        return None
    # FIXME: DRY with code above
    raw_string = ''.join(lines['plain'])
    trailing_nl = has_trailing_nl(raw_string)
    if 'style' in opts:
        key = opts['style'] or 'default'
        highlight_opts = {'style': key}
    else:
        key = 'terminal'
        highlight_opts = {}
    lines[key] = highlight_array(raw_string.split('\n'),
                                 trailing_nl, **highlight_opts)
    if orig_filename != filename:
        file2file_remap[orig_filename] = filename
        file2file_remap[os.path.abspath(orig_filename)] = filename
        pass
    pass
    file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
    file2file_remap[path] = filename
    return True
# example usage
if __name__ == '__main__':
    # Ad-hoc demo / smoke test; most examples are kept commented out.
    def yes_no(var):
        """Return "" when *var* is truthy and "not " otherwise."""
        if var: return ""
        else: return "not "
        return # Not reached
    # print(getline(__file__, 1, {'output': 'dark'}))
    # print(getline(__file__, 2, {'output': 'light'}))
    # from pygments.styles import STYLE_MAP
    # opts = {'style': list(STYLE_MAP.keys())[0]}
    # print(getline(__file__, 1, opts))
    # update_cache('os')
    # lines = getlines(__file__)
    # print("%s has %s lines" % (__file__, len(lines['plain'])))
    # lines = getlines(__file__, {'output': 'light'})
    # i = 0
    # for line in lines:
    #     i += 1
    #     print(line.rstrip('\n').rstrip('\n'))
    #     if i > 20: break
    #     pass
    # line = getline(__file__, 6)
    # print("The 6th line is\n%s" % line)
    # line = remap_file(__file__, 'another_name')
    # print(getline('another_name', 7))
    # print("Files cached: %s" % cached_files())
    # update_cache(__file__)
    # checkcache(__file__)
    # print("%s has %s lines" % (__file__, size(__file__)))
    # print("%s trace line numbers:\n" % __file__)
    # print("%s " % repr(trace_line_numbers(__file__)))
    # print("%s is %scached." % (__file__,
    #                            yes_no(is_cached(__file__))))
    # print(stat(__file__))
    # print("Full path: %s" % path(__file__))
    # checkcache() # Check all files in the cache
    # clear_file_format_cache()
    # clear_file_cache()
    # print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
    # # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
    # # # if digest is not None: print digest.first[0]
    # line = getline(__file__, 7)
    # print("The 7th line is\n%s" % line)
    # Exercise line remapping end to end.
    orig_path = __file__
    mapped_path = 'test2'
    start_line = 10
    start_mapped = 6
    remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
    for l in (1,):
        line = getline(mapped_path, l+start_mapped)
        print("Remapped %s line %d should be line %d of %s. line is:\n%s"
              % (mapped_path, start_mapped+l, start_line+l, orig_path, line))
    # print("XXX", file2file_remap_lines)
|
rocky/python-filecache
|
pyficache/main.py
|
path
|
python
|
def path(filename):
filename = unmap_file(filename)
if filename not in file_cache:
return None
return file_cache[filename].path
|
Return full filename path for filename
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L407-L412
|
[
"def unmap_file(filename):\n # FIXME: this is wrong?\n return file2file_remap.get(filename, filename)\n"
] |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
PYTHON3 = (sys.version_info >= (3, 0))
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
large_int = sys.maxsize
else:
large_int = sys.maxint
# Module-wide option defaults consulted by get_option() when a caller's
# options dict is missing (or lacks) a key.
default_opts = {
    'reload_on_change' : False, # Check if file has changed since last
    # time
    'use_linecache_lines' : True,
    'strip_nl' : True, # Strip trailing \n on line returned
    'output' : 'plain' # Do we want plain output?
    # Set to 'terminal'
    # for terminal syntax-colored output
    }
def get_option(key, options):
    """Look up *key* in *options*, falling back to the module-level
    default_opts when *options* is falsy or does not contain the key.
    Returns None for keys unknown to both."""
    global default_opts
    # Guard-clause form; the original had an unreachable `return None`
    # after an if/else in which both branches returned.
    if options and key in options:
        return options[key]
    return default_opts.get(key)
def has_trailing_nl(string):
    """Return True if *string* is nonempty and ends with a newline."""
    return string.endswith('\n')
def pyc2py(filename):
    """Map a compiled-file name (.pyc/.pyo) to its .py source name.

    Names that do not end in .pyc or .pyo are returned unchanged.
    """
    if not re.match(".*py[co]$", filename):
        return filename
    if PYTHON3:
        pycache_pat = r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER
        return re.sub(pycache_pat, '\\1\\2.py', filename)
    # Python 2: just drop the trailing 'c'/'o'.
    return filename[:-1]
class LineCacheInfo:
    """One file-cache record: stat info, traceable line numbers, the
    lines keyed by output format, the file path, a memoized sha1 object,
    and the detected end-of-line convention(s)."""
    def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
        # Plain attributes; callers read and mutate them directly.
        self.stat = stat
        self.line_numbers = line_numbers
        self.lines = lines
        self.path = path
        self.sha1 = sha1
        self.eols = eols
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the a full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is # the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number as Python sees it. The second item is the
# line number in the corresponding mapped_path. The first entry of each
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But not the other way
# around. That is, we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will address this.
def clear_file_cache(filename=None):
    """Drop cached data.  With no *filename*, wipe the entire file cache
    and both remap tables; with a *filename*, drop just that entry."""
    global file_cache, file2file_remap, file2file_remap_lines
    if filename is None:
        file_cache = {}
        file2file_remap = {}
        file2file_remap_lines = {}
    elif filename in file_cache:
        del file_cache[filename]
    return
def clear_file_format_cache():
    """Remove syntax-formatted lines from the cache, keeping only the
    'plain' lines.  Use this after changing the Pygments syntax or Token
    formatting so files get re-highlighted on next access.

    Bug fix: formatted entries are now *deleted* rather than set to None.
    getlines() only re-highlights when the format key is absent from the
    lines dict, so a None entry would have been handed back to callers
    instead of triggering a re-highlight.
    """
    for cache_info in file_cache.values():
        # Materialize the key list first; we delete while traversing.
        for fmt in [f for f in cache_info.lines if f != 'plain']:
            del cache_info.lines[fmt]
def cached_files():
    """Return the names of all files currently cached, as a list."""
    return [name for name in file_cache]
def checkcache(filename=None, opts=False):
    """Discard cache entries that are out of date.

    If *filename* is None, every entry in *file_cache* is checked.
    Entries without stat information are kept.  Returns a list of
    invalidated filenames, or None when *filename* was given but is not
    cached.

    *opts* may be a dict (its 'use_linecache_lines' entry is honored) or
    a bare boolean standing for use_linecache_lines itself.
    """
    if isinstance(opts, dict):
        # Robustness: tolerate dicts that omit the key (was a KeyError).
        use_linecache_lines = opts.get('use_linecache_lines', False)
    else:
        use_linecache_lines = opts

    if not filename:
        filenames = list(file_cache.keys())
    elif filename in file_cache:
        filenames = [filename]
    else:
        return None

    result = []
    for filename in filenames:
        if filename not in file_cache:
            continue
        path = file_cache[filename].path
        if os.path.exists(path):
            cache_info = file_cache[filename].stat
            stat = os.stat(path)
            if stat and (cache_info.st_size != stat.st_size or
                         cache_info.st_mtime != stat.st_mtime):
                result.append(filename)
                # Bug fix: update_cache() expects an options *dict*;
                # passing the bare boolean made get_option() raise
                # TypeError ("in" on a bool) whenever it was True.
                update_cache(filename,
                             {'use_linecache_lines': use_linecache_lines})
        else:
            # File vanished from disk; invalidate and recache.
            result.append(filename)
            update_cache(filename)
    return result
def cache_script(script, text, opts={}):
    """Add *script* with contents *text* to the script cache unless it
    is already present.  Returns *script*."""
    global script_cache
    already_present = script in script_cache
    if not already_present:
        update_script_cache(script, text, opts)
    return script
def uncache_script(script, opts={}):
    """Remove *script* from the script cache.  Returns *script* when it
    was cached, None otherwise."""
    global script_cache
    try:
        del script_cache[script]
    except KeyError:
        return None
    return script
def update_script_cache(script, text, opts={}):
    """Record *text* for *script* in the script cache; an existing entry
    is left untouched (first writer wins).  Returns *script*."""
    global script_cache
    script_cache.setdefault(script, text)
    return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
    """Cache *filename* if it is not already cached.

    Return the expanded filename for it in the cache, or None if we can
    not find the file.  When *reload_on_change* is true, a stale cache
    entry is refreshed first.
    """
    filename = pyc2py(filename)
    if filename in file_cache:
        if reload_on_change:
            checkcache(filename)
    else:
        # Bug fix: copy before modifying.  The old code wrote the
        # 'use_linecache_lines' flag into the caller's dict -- and, with
        # the default argument, into the shared module-level default_opts,
        # silently changing behavior for every later caller.
        opts = dict(opts)
        opts['use_linecache_lines'] = True
        update_cache(filename, opts)
    if filename in file_cache:
        return file_cache[filename].path
    return None
def is_cached(file_or_script):
    """Return True if *file_or_script* is in the file cache (strings) or
    the script cache (anything else)."""
    if not isinstance(file_or_script, str):
        return is_cached_script(file_or_script)
    return unmap_file(file_or_script) in file_cache
def is_cached_script(filename):
    """Return True if *filename*, after alias resolution, is in the
    script cache."""
    return unmap_file(filename) in script_cache
def is_empty(filename):
    """Return True if the cached plain-text lines of *filename* are
    empty.  The file must already be cached."""
    plain_lines = file_cache[unmap_file(filename)].lines['plain']
    return len(plain_lines) == 0
def getline(file_or_script, line_number, opts=default_opts):
    """Return line *line_number* from file named *file_or_script*, or
    None if there was a problem or the line is not found.

    The name is first resolved through the file and line remap tables,
    and the file is (re)cached as needed.  The trailing newline is
    stripped unless the 'strip_nl' option is false.

    Example:
    line = pyficache.getline("/tmp/myfile.py", 6)
    """
    # Resolve aliases, then translate the line number through any
    # line-level remapping.
    filename = unmap_file(file_or_script)
    filename, line_number = unmap_file_line(filename, line_number)
    lines = getlines(filename, opts)
    if lines and line_number >=1 and line_number <= maxline(filename):
        line = lines[line_number-1]
        if get_option('strip_nl', opts):
            return line.rstrip('\n')
        else:
            return line
        pass
    else:
        return None
    return # Not reached
def getlines(filename, opts=default_opts):
    """Read lines of *filename* and cache the results. However, if
    *filename* was previously cached use the results from the
    cache. Return *None* if we can not get lines.

    The 'output' option (or a 'style' option) selects which formatted
    variant of the lines is returned; 'plain' is the raw text.
    """
    if get_option('reload_on_change', opts): checkcache(filename)
    fmt = get_option('output', opts)
    highlight_opts = {'bg': fmt}
    cs = opts.get('style')
    # A color style (Terminal256Formatter) takes precedence over the
    # light/dark color themes of TerminalFormatter.
    if cs:
        highlight_opts['style'] = cs
        fmt = cs
    if filename not in file_cache:
        update_cache(filename, opts)
        filename = pyc2py(filename)
        if filename not in file_cache: return None
        pass
    lines = file_cache[filename].lines
    # Highlight lazily: a format is rendered the first time it is asked for.
    if fmt not in lines.keys():
        lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
        pass
    return lines[fmt]
def highlight_array(array, trailing_nl=True,
                    bg='light', **options):
    """Syntax-highlight the joined source lines in *array* and return the
    result as a list of newline-terminated lines.  The newline on the
    final line is dropped unless *trailing_nl* is true."""
    highlighted = highlight_string(''.join(array), bg, **options)
    result = ["%s\n" % chunk for chunk in highlighted.split('\n')]
    if not trailing_nl:
        result[-1] = result[-1].rstrip('\n')
    return result
# Shared lexer/formatter instances reused by highlight_string().
python_lexer = PythonLexer()
# TerminalFormatter uses a colorTHEME with light and dark pairs.
# But Terminal256Formatter uses a colorSTYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
# Rebuilt by highlight_string() whenever a different style is requested.
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
    """Return *string* syntax-highlighted as Python source.

    With a 'style' option, a Terminal256Formatter using that Pygments
    style is used (the shared module-level formatter is rebuilt when the
    style differs); otherwise a TerminalFormatter with the 'light' or
    'dark' color scheme selected by *bg* is used.
    """
    global terminal_256_formatter
    if options.get('style'):
        # Rebuild the shared 256-color formatter only on a style change.
        if terminal_256_formatter.style != options['style']:
            terminal_256_formatter = \
                Terminal256Formatter(style=options['style'])
        # 'style' is consumed here; pygments' highlight() must not see it.
        del options['style']
        return highlight(string, python_lexer, terminal_256_formatter,
                         **options)
    elif 'light' == bg:
        return highlight(string, python_lexer, light_terminal_formatter,
                         **options)
    else:
        return highlight(string, python_lexer, dark_terminal_formatter,
                         **options)
    pass
def remap_file(from_file, to_file):
    """Make *to_file* an alias for *from_file*: subsequent lookups of
    *to_file* in this module resolve to *from_file*."""
    file2file_remap[to_file] = from_file
    return
def remap_file_lines(from_path, to_path, line_map_list):
    """Add *line_map_list* to the line-number associations mapping
    *from_path* to *to_path*.

    Each element of *line_map_list* is a (from_line, to_line) pair.  The
    combined pairs are stored, sorted by from_line, in a RemapLineEntry
    keyed by *to_path* in file2file_remap_lines.
    """
    from_path = pyc2py(from_path)
    cache_file(to_path)
    remap_entry = file2file_remap_lines.get(to_path)
    if remap_entry:
        # Merge with any pairs recorded earlier for this target.
        new_list = list(remap_entry.from_to_pairs) + list(line_map_list)
    else:
        new_list = line_map_list
    # FIXME: look for duplicates ?
    file2file_remap_lines[to_path] = RemapLineEntry(
        from_path,
        tuple(sorted(new_list, key=lambda t: t[0]))
    )
    return
def remove_remap_file(filename):
    """Delete any alias mapping for *filename*, returning the path it
    mapped to, or None when no mapping existed."""
    global file2file_remap
    return file2file_remap.pop(filename, None)
def sha1(filename):
    """Return the SHA1 hex digest of the cached plain lines of
    *filename*, computing and memoizing it on first use.  Returns None
    when the file can not be cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
    entry = file_cache[filename]
    if entry.sha1:
        return entry.sha1.hexdigest()
    # Renamed local so it no longer shadows this function's own name.
    digest = hashlib.sha1()
    for line in entry.lines['plain']:
        digest.update(line.encode('utf-8'))
    entry.sha1 = digest
    return digest.hexdigest()
def size(filename, use_cache_only=False):
    """Return the number of plain-text lines cached for *filename*.
    Unless *use_cache_only* is true, try to cache the file first.
    Returns None when the file can not be cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return len(file_cache[filename].lines['plain'])
def maxline(filename, use_cache_only=False):
    """Return the maximum usable line number for *filename*, honoring
    any line remapping.  Without a remap entry this equals size()."""
    # A single .get() covers both the "no key" and "falsy entry" cases
    # that the original tested separately.
    remap_line_entry = file2file_remap_lines.get(filename)
    if not remap_line_entry:
        return size(filename, use_cache_only)
    max_lineno = max(
        [-1] + [pair[1] for pair in remap_line_entry.from_to_pairs])
    if max_lineno == -1:
        return size(filename, use_cache_only)
    return max_lineno
def stat(filename, use_cache_only=False):
    """Return the os.stat() info recorded for *filename*.  Unless
    *use_cache_only* is true, try to cache the file first.  Returns None
    when the file can not be cached."""
    filename = pyc2py(filename)
    if filename in file_cache:
        return file_cache[filename].stat
    if not use_cache_only:
        cache_file(filename)
    if filename in file_cache:
        return file_cache[filename].stat
    return None
def trace_line_numbers(filename, reload_on_change=False):
    """Return an Array of breakpoints in filename.
    The list will contain an entry for each distinct line event call
    so it is possible (and possibly useful) for a line number appear more
    than once.  Results are memoized on the cache entry."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname: return None
    e = file_cache[filename]
    if not e.line_numbers:
        # Compute lazily via the coverage package; support both the old
        # (analyze_morf) and the newer (analysis) coverage APIs.
        if hasattr(coverage.coverage, 'analyze_morf'):
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            # Suppress coverage's "no data collected" warning.
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
            pass
        pass
    return e.line_numbers
def is_mapped_file(filename):
    """Classify how *filename* is remapped: 'file' for a whole-file
    alias, 'file_line' for a line-level remap, None for no mapping."""
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    """Resolve a file alias created by remap_file(); *filename* itself
    is returned when no alias exists."""
    # FIXME: this is wrong?  NOTE(review): the direction of the mapping
    # may be inverted -- confirm against remap_file() and its callers.
    return file2file_remap.get(filename, filename)
def unmap_file_line(filename, line_number, reverse=False):
    """Translate (*filename*, *line_number*) through any line-level
    remapping recorded in file2file_remap_lines.

    Returns a (filename, line_number) tuple; with no remap entry the
    input is returned unchanged.  When *reverse* is true the pairs are
    interpreted in the opposite direction.
    """
    remap_line_entry = file2file_remap_lines.get(filename)
    mapped_line_number = line_number
    if remap_line_entry:
        filename = remap_line_entry.mapped_path
        cache_entry = file_cache.get(filename, None)
        if cache_entry:
            line_max = maxline(filename)
        else:
            # Target not cached; fall back to a huge sentinel bound.
            line_max = large_int
        last_t = (1, 1)
        # FIXME: use binary search
        # Note: we assume from_line is increasing.
        # Add sentinel at end of from pairs to handle using the final
        # entry for line numbers greater than it.
        # Find the closest mapped line number equal or before line_number.
        for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
            if reverse:
                t = list(reversed(t))
            if t[1] == line_number:
                mapped_line_number = t[0]
                break
            elif t[1] > line_number:
                # Between recorded pairs: extrapolate from the previous
                # pair by the line-number offset.
                mapped_line_number = last_t[0] + (line_number - last_t[1] )
                break
            last_t = t
            pass
    return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
    """Update a cache entry. If something is wrong, return
    *None*. Return *True* if the cache was updated and *False* if not. If
    *use_linecache_lines* is *True*, use an existing cache entry as source
    for the lines of the file.

    Source lookup order: (1) Python's linecache module when the
    'use_linecache_lines' option is set (this path returns the filename,
    not True); (2) the file on disk; (3) a PEP-302 loader found in
    *module_globals*; (4) sys.path for relative names.
    """
    if not filename: return None
    orig_filename = filename
    # Normalize .pyc/.pyo names to the .py source name.
    filename = pyc2py(filename)
    if filename in file_cache: del file_cache[filename]
    path = os.path.abspath(filename)
    stat = None
    if get_option('use_linecache_lines', opts):
        # Borrow whatever lines Python's own linecache already has.
        fname_list = [filename]
        mapped_path = file2file_remap.get(path)
        if mapped_path:
            fname_list.append(mapped_path)
        for filename in fname_list:
            try:
                stat = os.stat(filename)
                plain_lines = linecache.getlines(filename)
                trailing_nl = has_trailing_nl(plain_lines[-1])
                lines = {
                    'plain' : plain_lines,
                }
                file_cache[filename] = LineCacheInfo(stat, None, lines,
                                                     path, None)
            except:
                # NOTE(review): bare except -- unreadable or empty files
                # are silently skipped here; looks like deliberate
                # best-effort behavior.
                pass
            pass
            if orig_filename != filename:
                file2file_remap[orig_filename] = filename
                file2file_remap[os.path.abspath(orig_filename)] = filename
                pass
            file2file_remap[path] = filename
            # Note: this path returns the filename rather than True.
            return filename
        pass
    pass
    if os.path.exists(path):
        stat = os.stat(path)
    elif module_globals and '__loader__' in module_globals:
        # No file on disk; try to obtain source text via a PEP-302 loader.
        name = module_globals.get('__name__')
        loader = module_globals['__loader__']
        get_source = getattr(loader, 'get_source', None)
        if name and get_source:
            try:
                data = get_source(name)
            except (ImportError, IOError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return None
                # FIXME: DRY with code below
                lines = {'plain' : data.splitlines()}
                raw_string = ''.join(lines['plain'])
                trailing_nl = has_trailing_nl(raw_string)
                if 'style' in opts:
                    key = opts['style']
                    highlight_opts = {'style': key}
                else:
                    key = 'terminal'
                    highlight_opts = {}
                lines[key] = highlight_array(raw_string.split('\n'),
                                             trailing_nl, **highlight_opts)
                # No stat info is available for loader-provided source.
                file_cache[filename] = \
                    LineCacheInfo(None, None, lines, filename, None)
                file2file_remap[path] = filename
                return True
            pass
        pass
    if not os.path.isabs(filename):
        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        stat = None
        for dirname in sys.path:
            path = os.path.join(dirname, filename)
            if os.path.exists(path):
                stat = os.stat(path)
                break
            pass
        if not stat: return False
        pass
    try:
        mode = 'r' if PYTHON3 else 'rU'
        with open(path, mode) as fp:
            lines = {'plain' : fp.readlines()}
            eols = fp.newlines
    except:
        # NOTE(review): bare except -- any open/read/decode error
        # quietly yields None.
        return None
    # FIXME: DRY with code above
    raw_string = ''.join(lines['plain'])
    trailing_nl = has_trailing_nl(raw_string)
    if 'style' in opts:
        key = opts['style'] or 'default'
        highlight_opts = {'style': key}
    else:
        key = 'terminal'
        highlight_opts = {}
    lines[key] = highlight_array(raw_string.split('\n'),
                                 trailing_nl, **highlight_opts)
    if orig_filename != filename:
        file2file_remap[orig_filename] = filename
        file2file_remap[os.path.abspath(orig_filename)] = filename
        pass
    pass
    file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
    file2file_remap[path] = filename
    return True
# example usage
if __name__ == '__main__':
    def yes_no(var):
        # Demo helper: "" for a truthy value, "not " otherwise.
        if var: return ""
        else: return "not "
        return # Not reached
    # print(getline(__file__, 1, {'output': 'dark'}))
    # print(getline(__file__, 2, {'output': 'light'}))
    # from pygments.styles import STYLE_MAP
    # opts = {'style': list(STYLE_MAP.keys())[0]}
    # print(getline(__file__, 1, opts))
    # update_cache('os')
    # lines = getlines(__file__)
    # print("%s has %s lines" % (__file__, len(lines['plain'])))
    # lines = getlines(__file__, {'output': 'light'})
    # i = 0
    # for line in lines:
    #     i += 1
    #     print(line.rstrip('\n').rstrip('\n'))
    #     if i > 20: break
    #     pass
    # line = getline(__file__, 6)
    # print("The 6th line is\n%s" % line)
    # line = remap_file(__file__, 'another_name')
    # print(getline('another_name', 7))
    # print("Files cached: %s" % cached_files())
    # update_cache(__file__)
    # checkcache(__file__)
    # print("%s has %s lines" % (__file__, size(__file__)))
    # print("%s trace line numbers:\n" % __file__)
    # print("%s " % repr(trace_line_numbers(__file__)))
    # print("%s is %scached." % (__file__,
    #                            yes_no(is_cached(__file__))))
    # print(stat(__file__))
    # print("Full path: %s" % path(__file__))
    # checkcache() # Check all files in the cache
    # clear_file_format_cache()
    # clear_file_cache()
    # print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
    # # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
    # # # if digest is not None: print digest.first[0]
    # line = getline(__file__, 7)
    # print("The 7th line is\n%s" % line)
    # Demo: remap lines 10.. of this file onto lines 6.. of alias 'test2'.
    orig_path = __file__
    mapped_path = 'test2'
    start_line = 10
    start_mapped = 6
    remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
    for l in (1,):
        line = getline(mapped_path, l+start_mapped)
        print("Remapped %s line %d should be line %d of %s. line is:\n%s"
              % (mapped_path, start_mapped+l, start_line+l, orig_path, line))
    # print("XXX", file2file_remap_lines)
|
rocky/python-filecache
|
pyficache/main.py
|
remap_file_lines
|
python
|
def remap_file_lines(from_path, to_path, line_map_list):
from_path = pyc2py(from_path)
cache_file(to_path)
remap_entry = file2file_remap_lines.get(to_path)
if remap_entry:
new_list = list(remap_entry.from_to_pairs) + list(line_map_list)
else:
new_list = line_map_list
# FIXME: look for duplicates ?
file2file_remap_lines[to_path] = RemapLineEntry(
from_path,
tuple(sorted(new_list, key=lambda t: t[0]))
)
return
|
Add line_map_list to the list of line-number associations mapping
from_file to to_file.
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L419-L434
|
[
"def cache_file(filename, reload_on_change=False, opts=default_opts):\n \"\"\"Cache filename if it is not already cached.\n Return the expanded filename for it in the cache\n or nil if we can not find the file.\"\"\"\n filename = pyc2py(filename)\n if filename in file_cache:\n if reload_on_change: checkcache(filename)\n pass\n else:\n opts['use_linecache_lines'] = True\n update_cache(filename, opts)\n pass\n if filename in file_cache:\n return file_cache[filename].path\n else: return None\n return # Not reached\n",
"def pyc2py(filename):\n \"\"\"\n Find corresponding .py name given a .pyc or .pyo\n \"\"\"\n if re.match(\".*py[co]$\", filename):\n if PYTHON3:\n return re.sub(r'(.*)__pycache__/(.+)\\.cpython-%s.py[co]$' % PYVER,\n '\\\\1\\\\2.py',\n filename)\n else:\n return filename[:-1]\n return filename\n"
] |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
PYTHON3 = (sys.version_info >= (3, 0))
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
large_int = sys.maxsize
else:
large_int = sys.maxint
default_opts = {
'reload_on_change' : False, # Check if file has changed since last
# time
'use_linecache_lines' : True,
'strip_nl' : True, # Strip trailing \n on line returned
'output' : 'plain' # To we want plain output?
# Set to 'terminal'
# for terminal syntax-colored output
}
def get_option(key, options):
    """Look up *key* in *options*, falling back to the module-level
    default_opts when *options* is falsy or does not contain the key.
    Returns None for keys unknown to both."""
    global default_opts
    # Guard-clause form; the original had an unreachable `return None`
    # after an if/else in which both branches returned.
    if options and key in options:
        return options[key]
    return default_opts.get(key)
def has_trailing_nl(string):
    """Return True if *string* is nonempty and ends with a newline."""
    return string.endswith('\n')
def pyc2py(filename):
    """Map a compiled-file name (.pyc/.pyo) to its .py source name.

    Names that do not end in .pyc or .pyo are returned unchanged.
    """
    if not re.match(".*py[co]$", filename):
        return filename
    if PYTHON3:
        pycache_pat = r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER
        return re.sub(pycache_pat, '\\1\\2.py', filename)
    # Python 2: just drop the trailing 'c'/'o'.
    return filename[:-1]
class LineCacheInfo:
    """One file-cache record: stat info, traceable line numbers, the
    lines keyed by output format, the file path, a memoized sha1 object,
    and the detected end-of-line convention(s)."""
    def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
        # Plain attributes; callers read and mutate them directly.
        self.stat = stat
        self.line_numbers = line_numbers
        self.lines = lines
        self.path = path
        self.sha1 = sha1
        self.eols = eols
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the a full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is # the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number as Python sees it. The second item is the
# line number in the corresponding mapped_path. The first entry of each
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But not the other way
# around. That is, we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will address this.
def clear_file_cache(filename=None):
    """Clear the file cache.  With no *filename* the whole cache and
    both remap tables are reset; with a *filename* only that file's
    entry is dropped (a miss is a no-op)."""
    global file_cache, file2file_remap, file2file_remap_lines
    if filename is None:
        # Full reset: remap tables only make sense relative to cached files.
        file_cache = {}
        file2file_remap = {}
        file2file_remap_lines = {}
    elif filename in file_cache:
        del file_cache[filename]
    return
def clear_file_format_cache():
    """Drop every syntax-highlighted rendering from the cache while
    keeping the 'plain' lines.  Use this after changing the Pygments
    style or token formatting so files are re-highlighted on demand."""
    for fname, cache_info in file_cache.items():
        for fmt in cache_info.lines:
            if fmt != 'plain':
                # Setting to None (not deleting) keeps iteration safe.
                file_cache[fname].lines[fmt] = None
    return
def cached_files():
    """Return a list of the file names currently in the file cache."""
    return [name for name in file_cache]
def checkcache(filename=None, opts=False):
    """Discard cache entries that are out of date. If *filename* is *None*
    all entries in the file cache *file_cache* are checked. If we do not
    have stat information about a file it will be kept. Return a list of
    invalidated filenames. None is returned if a filename was given but
    not found cached.

    *opts* may be an options dict (only 'use_linecache_lines' is
    consulted) or a bare boolean meaning use_linecache_lines itself."""
    if isinstance(opts, dict):
        # .get(): previously a dict lacking the key raised KeyError.
        use_linecache_lines = opts.get('use_linecache_lines', False)
    else:
        use_linecache_lines = opts
        pass
    if not filename:
        filenames = list(file_cache.keys())
    elif filename in file_cache:
        filenames = [filename]
    else:
        return None
    result = []
    for filename in filenames:
        if filename not in file_cache: continue
        path = file_cache[filename].path
        if os.path.exists(path):
            cache_info = file_cache[filename].stat
            stat = os.stat(path)
            # A change in size or mtime means the cached lines are stale.
            if stat and \
                   (cache_info.st_size != stat.st_size or
                    cache_info.st_mtime != stat.st_mtime):
                result.append(filename)
                # BUG FIX: update_cache() expects an options *dict*;
                # passing the bare flag made get_option() raise
                # TypeError whenever the flag was True.
                update_cache(filename,
                             {'use_linecache_lines': use_linecache_lines})
        else:
            # File no longer exists; refresh (drops) its entry.
            result.append(filename)
            update_cache(filename)
            pass
        pass
    return result
def cache_script(script, text, opts={}):
    """Cache the source *text* for *script* unless an entry already
    exists.  Return *script* unchanged."""
    global script_cache
    if script in script_cache:
        return script
    update_script_cache(script, text, opts)
    return script
def uncache_script(script, opts={}):
    """Remove *script* from the script cache.  Return *script* if an
    entry was removed, otherwise None."""
    global script_cache
    if script not in script_cache:
        return None
    del script_cache[script]
    return script
def update_script_cache(script, text, opts={}):
    """Store *text* as the cached source for *script*, keeping any
    existing entry.  Return *script* unchanged."""
    global script_cache
    # setdefault(): only writes when the key is absent, like the
    # original membership test.
    script_cache.setdefault(script, text)
    return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
    """Cache *filename* if it is not already cached.
    Return the expanded filename for it in the cache
    or None if we can not find the file."""
    filename = pyc2py(filename)
    if filename in file_cache:
        if reload_on_change: checkcache(filename)
        pass
    else:
        # BUG FIX: copy before modifying -- the default argument is the
        # shared module-level ``default_opts`` dict, and callers' dicts
        # must not be mutated as a side effect either.
        opts = dict(opts)
        opts['use_linecache_lines'] = True
        update_cache(filename, opts)
        pass
    if filename in file_cache:
        return file_cache[filename].path
    return None
def is_cached(file_or_script):
    """Return True if *file_or_script* (a file name string, or a script
    object) is present in the corresponding cache."""
    if not isinstance(file_or_script, str):
        return is_cached_script(file_or_script)
    return unmap_file(file_or_script) in file_cache
def is_cached_script(filename):
    """Return True if *filename* (after alias unmapping) is in the
    script cache."""
    return unmap_file(filename) in script_cache
def is_empty(filename):
    """Return True if the cached file *filename* has no lines.  The
    file must already be cached; a miss raises KeyError."""
    plain = file_cache[unmap_file(filename)].lines['plain']
    return len(plain) == 0
def getline(file_or_script, line_number, opts=default_opts):
    """Return line *line_number* of *file_or_script*, after applying
    any file and line remapping, or None if the file cannot be read or
    the line is out of range.

    Example:
      line = pyficache.getline("/tmp/myfile.py", 6)
    """
    filename = unmap_file(file_or_script)
    filename, line_number = unmap_file_line(filename, line_number)
    lines = getlines(filename, opts)
    if not lines:
        return None
    if line_number < 1 or line_number > maxline(filename):
        return None
    line = lines[line_number - 1]
    return line.rstrip('\n') if get_option('strip_nl', opts) else line
def getlines(filename, opts=default_opts):
    """Read lines of *filename* and cache the results. However, if
    *filename* was previously cached use the results from the
    cache. Return *None* if we can not get lines.

    The list returned is the rendering selected by the 'output' option,
    or by the Pygments style name in opts['style']; each rendering is
    memoized per format key on the file's cache entry.
    """
    if get_option('reload_on_change', opts): checkcache(filename)
    fmt = get_option('output', opts)
    highlight_opts = {'bg': fmt}
    cs = opts.get('style')
    # Color style of Terminal256Formatter takes precedence over
    # light/dark color themes of TerminalFormatter.
    if cs:
        highlight_opts['style'] = cs
        fmt = cs
    if filename not in file_cache:
        update_cache(filename, opts)
        # update_cache() may have stored the entry under the .py name.
        filename = pyc2py(filename)
        if filename not in file_cache: return None
        pass
    lines = file_cache[filename].lines
    if fmt not in lines.keys():
        # Highlight lazily and memoize under this format key.
        lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
        pass
    return lines[fmt]
def highlight_array(array, trailing_nl=True,
                    bg='light', **options):
    """Syntax-highlight a list of source lines and return a new list
    with one newline-terminated string per line.  When *trailing_nl*
    is False, the final line loses its trailing newline."""
    highlighted = highlight_string(''.join(array), bg, **options)
    lines = [chunk + "\n" for chunk in highlighted.split('\n')]
    if not trailing_nl:
        lines[-1] = lines[-1].rstrip('\n')
    return lines
# Shared Pygments objects, built once at import time and reused by
# highlight_string().
python_lexer = PythonLexer()
# TerminalFormatter uses a color THEME with light and dark pairs,
# but Terminal256Formatter uses a color STYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
# Rebuilt by highlight_string() whenever a different style is requested.
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
    """Return *string* highlighted as Python source.  With a 'style'
    keyword a (cached) Terminal256Formatter for that style is used;
    otherwise the light or dark TerminalFormatter is chosen per *bg*."""
    global terminal_256_formatter
    if options.get('style'):
        # Rebuild the shared 256-color formatter only when the
        # requested style actually differs from the cached one.
        if terminal_256_formatter.style != options['style']:
            terminal_256_formatter = \
                Terminal256Formatter(style=options['style'])
        # Drop 'style' so it is not forwarded to highlight(); *options*
        # is this call's own kwargs dict, so the caller is unaffected.
        del options['style']
        return highlight(string, python_lexer, terminal_256_formatter,
                         **options)
    elif 'light' == bg:
        return highlight(string, python_lexer, light_terminal_formatter,
                         **options)
    else:
        return highlight(string, python_lexer, dark_terminal_formatter,
                         **options)
    pass
def path(filename):
    """Return the full path recorded in the cache for *filename*, or
    None if the file is not cached."""
    entry = file_cache.get(unmap_file(filename))
    return entry.path if entry else None
def remap_file(from_file, to_file):
    """Record *to_file* as an alias that resolves to *from_file*."""
    file2file_remap[to_file] = from_file
    return
def remove_remap_file(filename):
    """Delete any alias mapping for *filename*, returning the file it
    mapped to, or None if no mapping existed."""
    global file2file_remap
    # pop() with a default combines the membership test, the lookup
    # and the deletion of the original version.
    return file2file_remap.pop(filename, None)
def sha1(filename):
    """Return the SHA1 hex digest of the cached plain lines of
    *filename*, memoizing the hash object on the cache entry.  Return
    None if the file cannot be cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
    entry = file_cache[filename]
    if entry.sha1:
        return entry.sha1.hexdigest()
    digest = hashlib.sha1()
    for line in entry.lines['plain']:
        digest.update(line.encode('utf-8'))
    entry.sha1 = digest
    return digest.hexdigest()
def size(filename, use_cache_only=False):
    """Return the number of plain lines cached for *filename*.  Unless
    *use_cache_only* is True, try to load the file first.  Return None
    if the file is unavailable."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return len(file_cache[filename].lines['plain'])
def maxline(filename, use_cache_only=False):
    """Return the maximum line number of *filename* after applying any
    line remapping; with no remap information this is just size()."""
    remap_entry = file2file_remap_lines.get(filename)
    if not remap_entry:
        return size(filename, use_cache_only)
    top = -1
    for pair in remap_entry.from_to_pairs:
        if pair[1] > top:
            top = pair[1]
    # No usable to-line found: fall back to the plain line count.
    return size(filename, use_cache_only) if top == -1 else top
def stat(filename, use_cache_only=False):
    """Return the cached os.stat() record for *filename*.  Unless
    *use_cache_only* is True, try to load the file first.  Return None
    if the file is unavailable."""
    filename = pyc2py(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return file_cache[filename].stat
def trace_line_numbers(filename, reload_on_change=False):
    """Return an Array of breakpoints in filename.
    The list will contain an entry for each distinct line event call
    so it is possible (and possibly useful) for a line number appear more
    than once."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname: return None
    # NOTE(review): the entry is looked up under *filename* while the
    # analysis below runs on *fullname*; confirm these always coincide.
    e = file_cache[filename]
    if not e.line_numbers:
        # Older coverage.py exposed analyze_morf(); newer versions use
        # coverage().analysis() instead.
        if hasattr(coverage.coverage, 'analyze_morf'):
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
            pass
        pass
    return e.line_numbers
def is_mapped_file(filename):
    """Classify *filename*: 'file' when it has a whole-file alias,
    'file_line' when it has line-level remap info, else None."""
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    """Resolve *filename* through the file alias table; names with no
    alias map to themselves."""
    # FIXME: this is wrong?
    if filename in file2file_remap:
        return file2file_remap[filename]
    return filename
def unmap_file_line(filename, line_number, reverse=False):
    """Translate (*filename*, *line_number*) through any recorded
    line-level remapping, returning the possibly-changed (filename,
    line_number) pair.  With *reverse* the mapping pairs are applied
    in the opposite direction."""
    remap_line_entry = file2file_remap_lines.get(filename)
    mapped_line_number = line_number
    if remap_line_entry:
        filename = remap_line_entry.mapped_path
        cache_entry = file_cache.get(filename, None)
        if cache_entry:
            line_max = maxline(filename)
        else:
            line_max = large_int
        last_t = (1, 1)
        # FIXME: use binary search
        # Note we assume from_line is increasing.
        # Add sentinel at end of from pairs to handle using the final
        # entry for line numbers greater than it.
        # Find the closest mapped line number equal or before line_number.
        for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
            if reverse:
                t = list(reversed(t))
            if t[1] == line_number:
                # Exact hit on a recorded pair.
                mapped_line_number = t[0]
                break
            elif t[1] > line_number:
                # Between two recorded pairs: extrapolate from the
                # previous pair by the line offset.
                mapped_line_number = last_t[0] + (line_number - last_t[1] )
                break
            last_t = t
            pass
    return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
    """Update a cache entry.  If something is wrong, return
    *None*. Return *True* if the cache was updated and *False* if not. If
    *use_linecache_lines* is *True*, use an existing cache entry as source
    for the lines of the file."""
    if not filename: return None
    orig_filename = filename
    # Normalize .pyc/.pyo names to the .py source name.
    filename = pyc2py(filename)
    if filename in file_cache: del file_cache[filename]
    path = os.path.abspath(filename)
    stat = None
    if get_option('use_linecache_lines', opts):
        # Pull the lines from Python's own linecache instead of
        # reading the file ourselves.
        fname_list = [filename]
        mapped_path = file2file_remap.get(path)
        if mapped_path:
            fname_list.append(mapped_path)
        for filename in fname_list:
            try:
                stat = os.stat(filename)
                plain_lines = linecache.getlines(filename)
                trailing_nl = has_trailing_nl(plain_lines[-1])
                lines = {
                    'plain' : plain_lines,
                    }
                file_cache[filename] = LineCacheInfo(stat, None, lines,
                                                     path, None)
            except:
                # Best effort: a file we cannot stat/read is skipped.
                pass
            pass
        # Record alias entries so future lookups under the original
        # (possibly .pyc) name resolve to the cached name.
        if orig_filename != filename:
            file2file_remap[orig_filename] = filename
            file2file_remap[os.path.abspath(orig_filename)] = filename
            pass
        file2file_remap[path] = filename
        return filename
        pass
    pass
    if os.path.exists(path):
        stat = os.stat(path)
    elif module_globals and '__loader__' in module_globals:
        # Fall back to the PEP 302 loader protocol for module sources
        # that do not exist as plain files (zip imports etc.).
        name = module_globals.get('__name__')
        loader = module_globals['__loader__']
        get_source = getattr(loader, 'get_source', None)
        if name and get_source:
            try:
                data = get_source(name)
            except (ImportError, IOError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return None
                # FIXME: DRY with code below
                lines = {'plain' : data.splitlines()}
                raw_string = ''.join(lines['plain'])
                trailing_nl = has_trailing_nl(raw_string)
                if 'style' in opts:
                    key = opts['style']
                    highlight_opts = {'style': key}
                else:
                    key = 'terminal'
                    highlight_opts = {}
                lines[key] = highlight_array(raw_string.split('\n'),
                                             trailing_nl, **highlight_opts)
                file_cache[filename] = \
                    LineCacheInfo(None, None, lines, filename, None)
                file2file_remap[path] = filename
                return True
            pass
        pass
    if not os.path.isabs(filename):
        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        stat = None
        for dirname in sys.path:
            path = os.path.join(dirname, filename)
            if os.path.exists(path):
                stat = os.stat(path)
                break
            pass
        if not stat: return False
        pass
    try:
        # 'U' (universal newlines) is gone in Python 3.
        mode = 'r' if PYTHON3 else 'rU'
        with open(path, mode) as fp:
            lines = {'plain' : fp.readlines()}
            eols = fp.newlines
    except:
        return None
    # FIXME: DRY with code above
    raw_string = ''.join(lines['plain'])
    trailing_nl = has_trailing_nl(raw_string)
    if 'style' in opts:
        key = opts['style'] or 'default'
        highlight_opts = {'style': key}
    else:
        key = 'terminal'
        highlight_opts = {}
    lines[key] = highlight_array(raw_string.split('\n'),
                                 trailing_nl, **highlight_opts)
    if orig_filename != filename:
        file2file_remap[orig_filename] = filename
        file2file_remap[os.path.abspath(orig_filename)] = filename
        pass
    pass
    file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
    file2file_remap[path] = filename
    return True
# example usage: a small smoke test of the line-remapping API, run only
# when this module is executed directly.
if __name__ == '__main__':
    def yes_no(var):
        # Helper for the commented-out demo prints below.
        if var: return ""
        else: return "not "
        return # Not reached
    # print(getline(__file__, 1, {'output': 'dark'}))
    # print(getline(__file__, 2, {'output': 'light'}))
    # from pygments.styles import STYLE_MAP
    # opts = {'style': list(STYLE_MAP.keys())[0]}
    # print(getline(__file__, 1, opts))
    # update_cache('os')
    # lines = getlines(__file__)
    # print("%s has %s lines" % (__file__, len(lines['plain'])))
    # lines = getlines(__file__, {'output': 'light'})
    # i = 0
    # for line in lines:
    #     i += 1
    #     print(line.rstrip('\n').rstrip('\n'))
    #     if i > 20: break
    #     pass
    # line = getline(__file__, 6)
    # print("The 6th line is\n%s" % line)
    # line = remap_file(__file__, 'another_name')
    # print(getline('another_name', 7))
    # print("Files cached: %s" % cached_files())
    # update_cache(__file__)
    # checkcache(__file__)
    # print("%s has %s lines" % (__file__, size(__file__)))
    # print("%s trace line numbers:\n" % __file__)
    # print("%s " % repr(trace_line_numbers(__file__)))
    # print("%s is %scached." % (__file__,
    #                            yes_no(is_cached(__file__))))
    # print(stat(__file__))
    # print("Full path: %s" % path(__file__))
    # checkcache()  # Check all files in the cache
    # clear_file_format_cache()
    # clear_file_cache()
    # print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
    # # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
    # # # if digest is not None: print digest.first[0]
    # line = getline(__file__, 7)
    # print("The 7th line is\n%s" % line)
    # Map line 10 of this file to line 6 of the alias 'test2', then read
    # back through the alias to check the translation.
    # NOTE(review): remap_file_lines is not defined in this chunk of the
    # file -- confirm it exists elsewhere in the full module.
    orig_path = __file__
    mapped_path = 'test2'
    start_line = 10
    start_mapped = 6
    remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
    for l in (1,):
        line = getline(mapped_path, l+start_mapped)
        print("Remapped %s line %d should be line %d of %s. line is:\n%s"
              % (mapped_path, start_mapped+l, start_line+l, orig_path, line))
    # print("XXX", file2file_remap_lines)
|
rocky/python-filecache
|
pyficache/main.py
|
remove_remap_file
|
python
|
def remove_remap_file(filename):
global file2file_remap
if filename in file2file_remap:
retval = file2file_remap[filename]
del file2file_remap[filename]
return retval
return None
|
Remove any mapping for *filename* and return that if it exists
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L436-L443
| null |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
PYTHON3 = (sys.version_info >= (3, 0))
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
large_int = sys.maxsize
else:
large_int = sys.maxint
default_opts = {
'reload_on_change' : False, # Check if file has changed since last
# time
'use_linecache_lines' : True,
'strip_nl' : True, # Strip trailing \n on line returned
'output' : 'plain' # To we want plain output?
# Set to 'terminal'
# for terminal syntax-colored output
}
def get_option(key, options):
    """Return the value for *key* in the *options* dict, falling back
    to the module-wide *default_opts* when *options* is falsy, is not
    a dict, or does not contain *key*."""
    global default_opts
    # isinstance(): a non-dict opts value (e.g. a bare True) used to
    # raise TypeError on the ``in`` test; treat it as "no options".
    if not isinstance(options, dict) or key not in options:
        return default_opts.get(key)
    return options[key]
def has_trailing_nl(string):
    """Return True if *string* is non-empty and ends with a newline."""
    return string.endswith('\n')
def pyc2py(filename):
    """
    Find the corresponding .py name given a .pyc or .pyo
    """
    if not re.match(".*py[co]$", filename):
        # Not a compiled file name: leave it alone.
        return filename
    if PYTHON3:
        # Undo the __pycache__/name.cpython-XY.pyc layout.
        return re.sub(r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER,
                      '\\1\\2.py',
                      filename)
    # Python 2: just drop the trailing 'c'/'o'.
    return filename[:-1]
class LineCacheInfo:
    """Everything cached about one file: its os.stat() record, the
    coverage line numbers, the line renderings keyed by format, the
    full path, a lazily-computed SHA1 hash object, and the newline
    convention observed when reading the file."""
    def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
        self.stat = stat
        self.line_numbers = line_numbers
        self.lines = lines
        self.path = path
        self.sha1 = sha1
        self.eols = eols
        return
    pass
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the a full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number as Python sees it. The second item is the
# line number in the corresponding mapped_path. The first entry of the
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But not the other way
# around. That is, we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work on this.
def clear_file_cache(filename=None):
    """Clear the file cache.  With no *filename* the whole cache and
    both remap tables are reset; with a *filename* only that file's
    entry is dropped (a miss is a no-op)."""
    global file_cache, file2file_remap, file2file_remap_lines
    if filename is None:
        # Full reset: remap tables only make sense relative to cached files.
        file_cache = {}
        file2file_remap = {}
        file2file_remap_lines = {}
    elif filename in file_cache:
        del file_cache[filename]
    return
def clear_file_format_cache():
    """Drop every syntax-highlighted rendering from the cache while
    keeping the 'plain' lines.  Use this after changing the Pygments
    style or token formatting so files are re-highlighted on demand."""
    for fname, cache_info in file_cache.items():
        for fmt in cache_info.lines:
            if fmt != 'plain':
                # Setting to None (not deleting) keeps iteration safe.
                file_cache[fname].lines[fmt] = None
    return
def cached_files():
    """Return a list of the file names currently in the file cache."""
    return [name for name in file_cache]
def checkcache(filename=None, opts=False):
    """Discard cache entries that are out of date. If *filename* is *None*
    all entries in the file cache *file_cache* are checked. If we do not
    have stat information about a file it will be kept. Return a list of
    invalidated filenames. None is returned if a filename was given but
    not found cached.

    *opts* may be an options dict (only 'use_linecache_lines' is
    consulted) or a bare boolean meaning use_linecache_lines itself."""
    if isinstance(opts, dict):
        # .get(): previously a dict lacking the key raised KeyError.
        use_linecache_lines = opts.get('use_linecache_lines', False)
    else:
        use_linecache_lines = opts
        pass
    if not filename:
        filenames = list(file_cache.keys())
    elif filename in file_cache:
        filenames = [filename]
    else:
        return None
    result = []
    for filename in filenames:
        if filename not in file_cache: continue
        path = file_cache[filename].path
        if os.path.exists(path):
            cache_info = file_cache[filename].stat
            stat = os.stat(path)
            # A change in size or mtime means the cached lines are stale.
            if stat and \
                   (cache_info.st_size != stat.st_size or
                    cache_info.st_mtime != stat.st_mtime):
                result.append(filename)
                # BUG FIX: update_cache() expects an options *dict*;
                # passing the bare flag made get_option() raise
                # TypeError whenever the flag was True.
                update_cache(filename,
                             {'use_linecache_lines': use_linecache_lines})
        else:
            # File no longer exists; refresh (drops) its entry.
            result.append(filename)
            update_cache(filename)
            pass
        pass
    return result
def cache_script(script, text, opts={}):
    """Cache the source *text* for *script* unless an entry already
    exists.  Return *script* unchanged."""
    global script_cache
    if script in script_cache:
        return script
    update_script_cache(script, text, opts)
    return script
def uncache_script(script, opts={}):
    """Remove *script* from the script cache.  Return *script* if an
    entry was removed, otherwise None."""
    global script_cache
    if script not in script_cache:
        return None
    del script_cache[script]
    return script
def update_script_cache(script, text, opts={}):
    """Store *text* as the cached source for *script*, keeping any
    existing entry.  Return *script* unchanged."""
    global script_cache
    # setdefault(): only writes when the key is absent, like the
    # original membership test.
    script_cache.setdefault(script, text)
    return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
    """Cache *filename* if it is not already cached.
    Return the expanded filename for it in the cache
    or None if we can not find the file."""
    filename = pyc2py(filename)
    if filename in file_cache:
        if reload_on_change: checkcache(filename)
        pass
    else:
        # BUG FIX: copy before modifying -- the default argument is the
        # shared module-level ``default_opts`` dict, and callers' dicts
        # must not be mutated as a side effect either.
        opts = dict(opts)
        opts['use_linecache_lines'] = True
        update_cache(filename, opts)
        pass
    if filename in file_cache:
        return file_cache[filename].path
    return None
def is_cached(file_or_script):
    """Return True if *file_or_script* (a file name string, or a script
    object) is present in the corresponding cache."""
    if not isinstance(file_or_script, str):
        return is_cached_script(file_or_script)
    return unmap_file(file_or_script) in file_cache
def is_cached_script(filename):
    """Return True if *filename* (after alias unmapping) is in the
    script cache."""
    return unmap_file(filename) in script_cache
def is_empty(filename):
    """Return True if the cached file *filename* has no lines.  The
    file must already be cached; a miss raises KeyError."""
    plain = file_cache[unmap_file(filename)].lines['plain']
    return len(plain) == 0
def getline(file_or_script, line_number, opts=default_opts):
    """Return line *line_number* of *file_or_script*, after applying
    any file and line remapping, or None if the file cannot be read or
    the line is out of range.

    Example:
      line = pyficache.getline("/tmp/myfile.py", 6)
    """
    filename = unmap_file(file_or_script)
    filename, line_number = unmap_file_line(filename, line_number)
    lines = getlines(filename, opts)
    if not lines:
        return None
    if line_number < 1 or line_number > maxline(filename):
        return None
    line = lines[line_number - 1]
    return line.rstrip('\n') if get_option('strip_nl', opts) else line
def getlines(filename, opts=default_opts):
    """Read lines of *filename* and cache the results. However, if
    *filename* was previously cached use the results from the
    cache. Return *None* if we can not get lines.

    The list returned is the rendering selected by the 'output' option,
    or by the Pygments style name in opts['style']; each rendering is
    memoized per format key on the file's cache entry.
    """
    if get_option('reload_on_change', opts): checkcache(filename)
    fmt = get_option('output', opts)
    highlight_opts = {'bg': fmt}
    cs = opts.get('style')
    # Color style of Terminal256Formatter takes precedence over
    # light/dark color themes of TerminalFormatter.
    if cs:
        highlight_opts['style'] = cs
        fmt = cs
    if filename not in file_cache:
        update_cache(filename, opts)
        # update_cache() may have stored the entry under the .py name.
        filename = pyc2py(filename)
        if filename not in file_cache: return None
        pass
    lines = file_cache[filename].lines
    if fmt not in lines.keys():
        # Highlight lazily and memoize under this format key.
        lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
        pass
    return lines[fmt]
def highlight_array(array, trailing_nl=True,
                    bg='light', **options):
    """Syntax-highlight a list of source lines and return a new list
    with one newline-terminated string per line.  When *trailing_nl*
    is False, the final line loses its trailing newline."""
    highlighted = highlight_string(''.join(array), bg, **options)
    lines = [chunk + "\n" for chunk in highlighted.split('\n')]
    if not trailing_nl:
        lines[-1] = lines[-1].rstrip('\n')
    return lines
# Shared Pygments objects, built once at import time and reused by
# highlight_string().
python_lexer = PythonLexer()
# TerminalFormatter uses a color THEME with light and dark pairs,
# but Terminal256Formatter uses a color STYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
# Rebuilt by highlight_string() whenever a different style is requested.
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
    """Return *string* highlighted as Python source.  With a 'style'
    keyword a (cached) Terminal256Formatter for that style is used;
    otherwise the light or dark TerminalFormatter is chosen per *bg*."""
    global terminal_256_formatter
    if options.get('style'):
        # Rebuild the shared 256-color formatter only when the
        # requested style actually differs from the cached one.
        if terminal_256_formatter.style != options['style']:
            terminal_256_formatter = \
                Terminal256Formatter(style=options['style'])
        # Drop 'style' so it is not forwarded to highlight(); *options*
        # is this call's own kwargs dict, so the caller is unaffected.
        del options['style']
        return highlight(string, python_lexer, terminal_256_formatter,
                         **options)
    elif 'light' == bg:
        return highlight(string, python_lexer, light_terminal_formatter,
                         **options)
    else:
        return highlight(string, python_lexer, dark_terminal_formatter,
                         **options)
    pass
def path(filename):
    """Return the full path recorded in the cache for *filename*, or
    None if the file is not cached."""
    entry = file_cache.get(unmap_file(filename))
    return entry.path if entry else None
def remap_file(from_file, to_file):
    """Record *to_file* as an alias that resolves to *from_file*."""
    file2file_remap[to_file] = from_file
    return
def remap_file_lines(from_path, to_path, line_map_list):
    """Merge *line_map_list* -- pairs of (from_line, to_line) -- into
    the line associations recorded from *from_path* to *to_path*."""
    from_path = pyc2py(from_path)
    # Make sure the target is cached so later lookups can resolve it.
    cache_file(to_path)
    existing = file2file_remap_lines.get(to_path)
    if existing:
        combined = list(existing.from_to_pairs) + list(line_map_list)
    else:
        combined = line_map_list
    # FIXME: look for duplicates ?
    pairs = tuple(sorted(combined, key=lambda pair: pair[0]))
    file2file_remap_lines[to_path] = RemapLineEntry(from_path, pairs)
    return
def sha1(filename):
    """Return the SHA1 hex digest of the cached plain lines of
    *filename*, memoizing the hash object on the cache entry.  Return
    None if the file cannot be cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
    entry = file_cache[filename]
    if entry.sha1:
        return entry.sha1.hexdigest()
    digest = hashlib.sha1()
    for line in entry.lines['plain']:
        digest.update(line.encode('utf-8'))
    entry.sha1 = digest
    return digest.hexdigest()
def size(filename, use_cache_only=False):
    """Return the number of plain lines cached for *filename*.  Unless
    *use_cache_only* is True, try to load the file first.  Return None
    if the file is unavailable."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return len(file_cache[filename].lines['plain'])
def maxline(filename, use_cache_only=False):
    """Return the maximum line number of *filename* after applying any
    line remapping; with no remap information this is just size()."""
    remap_entry = file2file_remap_lines.get(filename)
    if not remap_entry:
        return size(filename, use_cache_only)
    top = -1
    for pair in remap_entry.from_to_pairs:
        if pair[1] > top:
            top = pair[1]
    # No usable to-line found: fall back to the plain line count.
    return size(filename, use_cache_only) if top == -1 else top
def stat(filename, use_cache_only=False):
    """Return the cached os.stat() record for *filename*.  Unless
    *use_cache_only* is True, try to load the file first.  Return None
    if the file is unavailable."""
    filename = pyc2py(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return file_cache[filename].stat
def trace_line_numbers(filename, reload_on_change=False):
    """Return an Array of breakpoints in filename.
    The list will contain an entry for each distinct line event call
    so it is possible (and possibly useful) for a line number appear more
    than once."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname: return None
    # NOTE(review): the entry is looked up under *filename* while the
    # analysis below runs on *fullname*; confirm these always coincide.
    e = file_cache[filename]
    if not e.line_numbers:
        # Older coverage.py exposed analyze_morf(); newer versions use
        # coverage().analysis() instead.
        if hasattr(coverage.coverage, 'analyze_morf'):
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
            pass
        pass
    return e.line_numbers
def is_mapped_file(filename):
    """Classify *filename*: 'file' when it has a whole-file alias,
    'file_line' when it has line-level remap info, else None."""
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    """Resolve *filename* through the file alias table; names with no
    alias map to themselves."""
    # FIXME: this is wrong?
    if filename in file2file_remap:
        return file2file_remap[filename]
    return filename
def unmap_file_line(filename, line_number, reverse=False):
    """Translate (*filename*, *line_number*) through any recorded
    line-level remapping, returning the possibly-changed (filename,
    line_number) pair.  With *reverse* the mapping pairs are applied
    in the opposite direction."""
    remap_line_entry = file2file_remap_lines.get(filename)
    mapped_line_number = line_number
    if remap_line_entry:
        filename = remap_line_entry.mapped_path
        cache_entry = file_cache.get(filename, None)
        if cache_entry:
            line_max = maxline(filename)
        else:
            line_max = large_int
        last_t = (1, 1)
        # FIXME: use binary search
        # Note we assume from_line is increasing.
        # Add sentinel at end of from pairs to handle using the final
        # entry for line numbers greater than it.
        # Find the closest mapped line number equal or before line_number.
        for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
            if reverse:
                t = list(reversed(t))
            if t[1] == line_number:
                # Exact hit on a recorded pair.
                mapped_line_number = t[0]
                break
            elif t[1] > line_number:
                # Between two recorded pairs: extrapolate from the
                # previous pair by the line offset.
                mapped_line_number = last_t[0] + (line_number - last_t[1] )
                break
            last_t = t
            pass
    return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
"""Update a cache entry. If something is wrong, return
*None*. Return *True* if the cache was updated and *False* if not. If
*use_linecache_lines* is *True*, use an existing cache entry as source
for the lines of the file."""
if not filename: return None
orig_filename = filename
filename = pyc2py(filename)
if filename in file_cache: del file_cache[filename]
path = os.path.abspath(filename)
stat = None
if get_option('use_linecache_lines', opts):
fname_list = [filename]
mapped_path = file2file_remap.get(path)
if mapped_path:
fname_list.append(mapped_path)
for filename in fname_list:
try:
stat = os.stat(filename)
plain_lines = linecache.getlines(filename)
trailing_nl = has_trailing_nl(plain_lines[-1])
lines = {
'plain' : plain_lines,
}
file_cache[filename] = LineCacheInfo(stat, None, lines,
path, None)
except:
pass
pass
if orig_filename != filename:
file2file_remap[orig_filename] = filename
file2file_remap[os.path.abspath(orig_filename)] = filename
pass
file2file_remap[path] = filename
return filename
pass
pass
if os.path.exists(path):
stat = os.stat(path)
elif module_globals and '__loader__' in module_globals:
name = module_globals.get('__name__')
loader = module_globals['__loader__']
get_source = getattr(loader, 'get_source', None)
if name and get_source:
try:
data = get_source(name)
except (ImportError, IOError):
pass
else:
if data is None:
# No luck, the PEP302 loader cannot find the source
# for this module.
return None
# FIXME: DRY with code below
lines = {'plain' : data.splitlines()}
raw_string = ''.join(lines['plain'])
trailing_nl = has_trailing_nl(raw_string)
if 'style' in opts:
key = opts['style']
highlight_opts = {'style': key}
else:
key = 'terminal'
highlight_opts = {}
lines[key] = highlight_array(raw_string.split('\n'),
trailing_nl, **highlight_opts)
file_cache[filename] = \
LineCacheInfo(None, None, lines, filename, None)
file2file_remap[path] = filename
return True
pass
pass
if not os.path.isabs(filename):
# Try looking through the module search path, which is only useful
# when handling a relative filename.
stat = None
for dirname in sys.path:
path = os.path.join(dirname, filename)
if os.path.exists(path):
stat = os.stat(path)
break
pass
if not stat: return False
pass
try:
mode = 'r' if PYTHON3 else 'rU'
with open(path, mode) as fp:
lines = {'plain' : fp.readlines()}
eols = fp.newlines
except:
return None
# FIXME: DRY with code above
raw_string = ''.join(lines['plain'])
trailing_nl = has_trailing_nl(raw_string)
if 'style' in opts:
key = opts['style'] or 'default'
highlight_opts = {'style': key}
else:
key = 'terminal'
highlight_opts = {}
lines[key] = highlight_array(raw_string.split('\n'),
trailing_nl, **highlight_opts)
if orig_filename != filename:
file2file_remap[orig_filename] = filename
file2file_remap[os.path.abspath(orig_filename)] = filename
pass
pass
file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
file2file_remap[path] = filename
return True
# example usage
if __name__ == '__main__':
def yes_no(var):
if var: return ""
else: return "not "
return # Not reached
# print(getline(__file__, 1, {'output': 'dark'}))
# print(getline(__file__, 2, {'output': 'light'}))
# from pygments.styles import STYLE_MAP
# opts = {'style': list(STYLE_MAP.keys())[0]}
# print(getline(__file__, 1, opts))
# update_cache('os')
# lines = getlines(__file__)
# print("%s has %s lines" % (__file__, len(lines['plain'])))
# lines = getlines(__file__, {'output': 'light'})
# i = 0
# for line in lines:
# i += 1
# print(line.rstrip('\n').rstrip('\n'))
# if i > 20: break
# pass
# line = getline(__file__, 6)
# print("The 6th line is\n%s" % line)
# line = remap_file(__file__, 'another_name')
# print(getline('another_name', 7))
# print("Files cached: %s" % cached_files())
# update_cache(__file__)
# checkcache(__file__)
# print("%s has %s lines" % (__file__, size(__file__)))
# print("%s trace line numbers:\n" % __file__)
# print("%s " % repr(trace_line_numbers(__file__)))
# print("%s is %scached." % (__file__,
# yes_no(is_cached(__file__))))
# print(stat(__file__))
# print("Full path: %s" % path(__file__))
# checkcache() # Check all files in the cache
# clear_file_format_cache()
# clear_file_cache()
# print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
# # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
# # # if digest is not None: print digest.first[0]
# line = getline(__file__, 7)
# print("The 7th line is\n%s" % line)
orig_path = __file__
mapped_path = 'test2'
start_line = 10
start_mapped = 6
remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
for l in (1,):
line = getline(mapped_path, l+start_mapped)
print("Remapped %s line %d should be line %d of %s. line is:\n%s"
% (mapped_path, start_mapped+l, start_line+l, orig_path, line))
# print("XXX", file2file_remap_lines)
|
rocky/python-filecache
|
pyficache/main.py
|
sha1
|
python
|
def sha1(filename):
filename = unmap_file(filename)
if filename not in file_cache:
cache_file(filename)
if filename not in file_cache:
return None
pass
if file_cache[filename].sha1:
return file_cache[filename].sha1.hexdigest()
sha1 = hashlib.sha1()
for line in file_cache[filename].lines['plain']:
sha1.update(line.encode('utf-8'))
pass
file_cache[filename].sha1 = sha1
return sha1.hexdigest()
|
Return SHA1 of filename.
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L445-L460
|
[
"def cache_file(filename, reload_on_change=False, opts=default_opts):\n \"\"\"Cache filename if it is not already cached.\n Return the expanded filename for it in the cache\n or nil if we can not find the file.\"\"\"\n filename = pyc2py(filename)\n if filename in file_cache:\n if reload_on_change: checkcache(filename)\n pass\n else:\n opts['use_linecache_lines'] = True\n update_cache(filename, opts)\n pass\n if filename in file_cache:\n return file_cache[filename].path\n else: return None\n return # Not reached\n",
"def unmap_file(filename):\n # FIXME: this is wrong?\n return file2file_remap.get(filename, filename)\n"
] |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
PYTHON3 = (sys.version_info >= (3, 0))
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
large_int = sys.maxsize
else:
large_int = sys.maxint
default_opts = {
'reload_on_change' : False, # Check if file has changed since last
# time
'use_linecache_lines' : True,
'strip_nl' : True, # Strip trailing \n on line returned
'output' : 'plain' # To we want plain output?
# Set to 'terminal'
# for terminal syntax-colored output
}
def get_option(key, options):
global default_opts
if not options or key not in options:
return default_opts.get(key)
else:
return options[key]
return None # Not reached
def has_trailing_nl(string):
return len(string) > 0 and '\n' == string[-1]
def pyc2py(filename):
"""
Find corresponding .py name given a .pyc or .pyo
"""
if re.match(".*py[co]$", filename):
if PYTHON3:
return re.sub(r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER,
'\\1\\2.py',
filename)
else:
return filename[:-1]
return filename
class LineCacheInfo:
def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
self.stat, self.lines, self.path, self.sha1 = (stat, lines, path, sha1)
self.line_numbers = line_numbers
self.eols = eols
return
pass
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the a full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is # the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number in as Python sees it. The second item is the
# line number in corresponding mapped_path. The the first entry of the
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But we not the other way
# around. That is we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work this.
def clear_file_cache(filename=None):
"""Clear the file cache. If no filename is given clear it entirely.
if a filename is given, clear just that filename."""
global file_cache, file2file_remap, file2file_remap_lines
if filename is not None:
if filename in file_cache:
del file_cache[filename]
pass
else:
file_cache = {}
file2file_remap = {}
file2file_remap_lines = {}
pass
return
def clear_file_format_cache():
"""Remove syntax-formatted lines in the cache. Use this
when you change the Pygments syntax or Token formatting
and want to redo how files may have previously been
syntax marked."""
for fname, cache_info in file_cache.items():
for format, lines in cache_info.lines.items():
if 'plain' == format: continue
file_cache[fname].lines[format] = None
pass
pass
pass
def cached_files():
"""Return an array of cached file names"""
return list(file_cache.keys())
def checkcache(filename=None, opts=False):
"""Discard cache entries that are out of date. If *filename* is *None*
all entries in the file cache *file_cache* are checked. If we do not
have stat information about a file it will be kept. Return a list of
invalidated filenames. None is returned if a filename was given but
not found cached."""
if isinstance(opts, dict):
use_linecache_lines = opts['use_linecache_lines']
else:
use_linecache_lines = opts
pass
if not filename:
filenames = list(file_cache.keys())
elif filename in file_cache:
filenames = [filename]
else:
return None
result = []
for filename in filenames:
if filename not in file_cache: continue
path = file_cache[filename].path
if os.path.exists(path):
cache_info = file_cache[filename].stat
stat = os.stat(path)
if stat and \
(cache_info.st_size != stat.st_size or
cache_info.st_mtime != stat.st_mtime):
result.append(filename)
update_cache(filename, use_linecache_lines)
else:
result.append(filename)
update_cache(filename)
pass
pass
return result
def cache_script(script, text, opts={}):
"""Cache script if it is not already cached."""
global script_cache
if script not in script_cache:
update_script_cache(script, text, opts)
pass
return script
def uncache_script(script, opts={}):
"""remove script from cache."""
global script_cache
if script in script_cache:
del script_cache[script]
return script
return None
def update_script_cache(script, text, opts={}):
"""Cache script if it is not already cached."""
global script_cache
if script not in script_cache:
script_cache[script] = text
return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
"""Cache filename if it is not already cached.
Return the expanded filename for it in the cache
or nil if we can not find the file."""
filename = pyc2py(filename)
if filename in file_cache:
if reload_on_change: checkcache(filename)
pass
else:
opts['use_linecache_lines'] = True
update_cache(filename, opts)
pass
if filename in file_cache:
return file_cache[filename].path
else: return None
return # Not reached
def is_cached(file_or_script):
"""Return True if file_or_script is cached"""
if isinstance(file_or_script, str):
return unmap_file(file_or_script) in file_cache
else:
return is_cached_script(file_or_script)
return
def is_cached_script(filename):
return unmap_file(filename) in list(script_cache.keys())
def is_empty(filename):
filename=unmap_file(filename)
return 0 == len(file_cache[filename].lines['plain'])
def getline(file_or_script, line_number, opts=default_opts):
"""Get line *line_number* from file named *file_or_script*. Return None if
there was a problem or it is not found.
Example:
lines = pyficache.getline("/tmp/myfile.py")
"""
filename = unmap_file(file_or_script)
filename, line_number = unmap_file_line(filename, line_number)
lines = getlines(filename, opts)
if lines and line_number >=1 and line_number <= maxline(filename):
line = lines[line_number-1]
if get_option('strip_nl', opts):
return line.rstrip('\n')
else:
return line
pass
else:
return None
return # Not reached
def getlines(filename, opts=default_opts):
"""Read lines of *filename* and cache the results. However, if
*filename* was previously cached use the results from the
cache. Return *None* if we can not get lines
"""
if get_option('reload_on_change', opts): checkcache(filename)
fmt = get_option('output', opts)
highlight_opts = {'bg': fmt}
cs = opts.get('style')
# Colorstyle of Terminal255Formatter takes precidence over
# light/dark colorthemes of TerminalFormatter
if cs:
highlight_opts['style'] = cs
fmt = cs
if filename not in file_cache:
update_cache(filename, opts)
filename = pyc2py(filename)
if filename not in file_cache: return None
pass
lines = file_cache[filename].lines
if fmt not in lines.keys():
lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
pass
return lines[fmt]
def highlight_array(array, trailing_nl=True,
bg='light', **options):
fmt_array = highlight_string(''.join(array),
bg, **options).split('\n')
lines = [ line + "\n" for line in fmt_array ]
if not trailing_nl: lines[-1] = lines[-1].rstrip('\n')
return lines
python_lexer = PythonLexer()
# TerminalFormatter uses a colorTHEME with light and dark pairs.
# But Terminal256Formatter uses a colorSTYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
global terminal_256_formatter
if options.get('style'):
if terminal_256_formatter.style != options['style']:
terminal_256_formatter = \
Terminal256Formatter(style=options['style'])
del options['style']
return highlight(string, python_lexer, terminal_256_formatter,
**options)
elif 'light' == bg:
return highlight(string, python_lexer, light_terminal_formatter,
**options)
else:
return highlight(string, python_lexer, dark_terminal_formatter,
**options)
pass
def path(filename):
"""Return full filename path for filename"""
filename = unmap_file(filename)
if filename not in file_cache:
return None
return file_cache[filename].path
def remap_file(from_file, to_file):
"""Make *to_file* be a synonym for *from_file*"""
file2file_remap[to_file] = from_file
return
def remap_file_lines(from_path, to_path, line_map_list):
"""Adds line_map list to the list of association of from_file to
to to_file"""
from_path = pyc2py(from_path)
cache_file(to_path)
remap_entry = file2file_remap_lines.get(to_path)
if remap_entry:
new_list = list(remap_entry.from_to_pairs) + list(line_map_list)
else:
new_list = line_map_list
# FIXME: look for duplicates ?
file2file_remap_lines[to_path] = RemapLineEntry(
from_path,
tuple(sorted(new_list, key=lambda t: t[0]))
)
return
def remove_remap_file(filename):
"""Remove any mapping for *filename* and return that if it exists"""
global file2file_remap
if filename in file2file_remap:
retval = file2file_remap[filename]
del file2file_remap[filename]
return retval
return None
def size(filename, use_cache_only=False):
"""Return the number of lines in filename. If `use_cache_only' is False,
we'll try to fetch the file if it is not cached."""
filename = unmap_file(filename)
if filename not in file_cache:
if not use_cache_only: cache_file(filename)
if filename not in file_cache:
return None
pass
return len(file_cache[filename].lines['plain'])
def maxline(filename, use_cache_only=False):
"""Return the maximum line number filename after taking into account
line remapping. If no remapping then this is the same as size"""
if filename not in file2file_remap_lines:
return size(filename, use_cache_only)
max_lineno = -1
remap_line_entry = file2file_remap_lines.get(filename)
if not remap_line_entry:
return size(filename, use_cache_only)
for t in remap_line_entry.from_to_pairs:
max_lineno = max(max_lineno, t[1])
if max_lineno == -1:
return size(filename, use_cache_only)
else:
return max_lineno
def stat(filename, use_cache_only=False):
"""Return stat() info for *filename*. If *use_cache_only* is *False*,
we will try to fetch the file if it is not cached."""
filename = pyc2py(filename)
if filename not in file_cache:
if not use_cache_only: cache_file(filename)
if filename not in file_cache:
return None
pass
return file_cache[filename].stat
def trace_line_numbers(filename, reload_on_change=False):
"""Return an Array of breakpoints in filename.
The list will contain an entry for each distinct line event call
so it is possible (and possibly useful) for a line number appear more
than once."""
fullname = cache_file(filename, reload_on_change)
if not fullname: return None
e = file_cache[filename]
if not e.line_numbers:
if hasattr(coverage.coverage, 'analyze_morf'):
e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
else:
cov = coverage.coverage()
cov._warn_no_data = False
e.line_numbers = cov.analysis(fullname)[1]
pass
pass
return e.line_numbers
def is_mapped_file(filename):
if filename in file2file_remap :
return 'file'
elif file2file_remap_lines.get(filename):
return 'file_line'
else:
return None
def unmap_file(filename):
# FIXME: this is wrong?
return file2file_remap.get(filename, filename)
def unmap_file_line(filename, line_number, reverse=False):
remap_line_entry = file2file_remap_lines.get(filename)
mapped_line_number = line_number
if remap_line_entry:
filename = remap_line_entry.mapped_path
cache_entry = file_cache.get(filename, None)
if cache_entry:
line_max = maxline(filename)
else:
line_max = large_int
last_t = (1, 1)
# FIXME: use binary search
# Note we assume assume from_line is increasing.
# Add sentinel at end of from pairs to handle using the final
# entry for line numbers greater than it.
# Find the closest mapped line number equal or before line_number.
for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
if reverse:
t = list(reversed(t))
if t[1] == line_number:
mapped_line_number = t[0]
break
elif t[1] > line_number:
mapped_line_number = last_t[0] + (line_number - last_t[1] )
break
last_t = t
pass
return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
"""Update a cache entry. If something is wrong, return
*None*. Return *True* if the cache was updated and *False* if not. If
*use_linecache_lines* is *True*, use an existing cache entry as source
for the lines of the file."""
if not filename: return None
orig_filename = filename
filename = pyc2py(filename)
if filename in file_cache: del file_cache[filename]
path = os.path.abspath(filename)
stat = None
if get_option('use_linecache_lines', opts):
fname_list = [filename]
mapped_path = file2file_remap.get(path)
if mapped_path:
fname_list.append(mapped_path)
for filename in fname_list:
try:
stat = os.stat(filename)
plain_lines = linecache.getlines(filename)
trailing_nl = has_trailing_nl(plain_lines[-1])
lines = {
'plain' : plain_lines,
}
file_cache[filename] = LineCacheInfo(stat, None, lines,
path, None)
except:
pass
pass
if orig_filename != filename:
file2file_remap[orig_filename] = filename
file2file_remap[os.path.abspath(orig_filename)] = filename
pass
file2file_remap[path] = filename
return filename
pass
pass
if os.path.exists(path):
stat = os.stat(path)
elif module_globals and '__loader__' in module_globals:
name = module_globals.get('__name__')
loader = module_globals['__loader__']
get_source = getattr(loader, 'get_source', None)
if name and get_source:
try:
data = get_source(name)
except (ImportError, IOError):
pass
else:
if data is None:
# No luck, the PEP302 loader cannot find the source
# for this module.
return None
# FIXME: DRY with code below
lines = {'plain' : data.splitlines()}
raw_string = ''.join(lines['plain'])
trailing_nl = has_trailing_nl(raw_string)
if 'style' in opts:
key = opts['style']
highlight_opts = {'style': key}
else:
key = 'terminal'
highlight_opts = {}
lines[key] = highlight_array(raw_string.split('\n'),
trailing_nl, **highlight_opts)
file_cache[filename] = \
LineCacheInfo(None, None, lines, filename, None)
file2file_remap[path] = filename
return True
pass
pass
if not os.path.isabs(filename):
# Try looking through the module search path, which is only useful
# when handling a relative filename.
stat = None
for dirname in sys.path:
path = os.path.join(dirname, filename)
if os.path.exists(path):
stat = os.stat(path)
break
pass
if not stat: return False
pass
try:
mode = 'r' if PYTHON3 else 'rU'
with open(path, mode) as fp:
lines = {'plain' : fp.readlines()}
eols = fp.newlines
except:
return None
# FIXME: DRY with code above
raw_string = ''.join(lines['plain'])
trailing_nl = has_trailing_nl(raw_string)
if 'style' in opts:
key = opts['style'] or 'default'
highlight_opts = {'style': key}
else:
key = 'terminal'
highlight_opts = {}
lines[key] = highlight_array(raw_string.split('\n'),
trailing_nl, **highlight_opts)
if orig_filename != filename:
file2file_remap[orig_filename] = filename
file2file_remap[os.path.abspath(orig_filename)] = filename
pass
pass
file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
file2file_remap[path] = filename
return True
# example usage
if __name__ == '__main__':
def yes_no(var):
if var: return ""
else: return "not "
return # Not reached
# print(getline(__file__, 1, {'output': 'dark'}))
# print(getline(__file__, 2, {'output': 'light'}))
# from pygments.styles import STYLE_MAP
# opts = {'style': list(STYLE_MAP.keys())[0]}
# print(getline(__file__, 1, opts))
# update_cache('os')
# lines = getlines(__file__)
# print("%s has %s lines" % (__file__, len(lines['plain'])))
# lines = getlines(__file__, {'output': 'light'})
# i = 0
# for line in lines:
# i += 1
# print(line.rstrip('\n').rstrip('\n'))
# if i > 20: break
# pass
# line = getline(__file__, 6)
# print("The 6th line is\n%s" % line)
# line = remap_file(__file__, 'another_name')
# print(getline('another_name', 7))
# print("Files cached: %s" % cached_files())
# update_cache(__file__)
# checkcache(__file__)
# print("%s has %s lines" % (__file__, size(__file__)))
# print("%s trace line numbers:\n" % __file__)
# print("%s " % repr(trace_line_numbers(__file__)))
# print("%s is %scached." % (__file__,
# yes_no(is_cached(__file__))))
# print(stat(__file__))
# print("Full path: %s" % path(__file__))
# checkcache() # Check all files in the cache
# clear_file_format_cache()
# clear_file_cache()
# print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
# # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
# # # if digest is not None: print digest.first[0]
# line = getline(__file__, 7)
# print("The 7th line is\n%s" % line)
orig_path = __file__
mapped_path = 'test2'
start_line = 10
start_mapped = 6
remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
for l in (1,):
line = getline(mapped_path, l+start_mapped)
print("Remapped %s line %d should be line %d of %s. line is:\n%s"
% (mapped_path, start_mapped+l, start_line+l, orig_path, line))
# print("XXX", file2file_remap_lines)
|
rocky/python-filecache
|
pyficache/main.py
|
size
|
python
|
def size(filename, use_cache_only=False):
filename = unmap_file(filename)
if filename not in file_cache:
if not use_cache_only: cache_file(filename)
if filename not in file_cache:
return None
pass
return len(file_cache[filename].lines['plain'])
|
Return the number of lines in filename. If `use_cache_only' is False,
we'll try to fetch the file if it is not cached.
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L462-L471
|
[
"def cache_file(filename, reload_on_change=False, opts=default_opts):\n \"\"\"Cache filename if it is not already cached.\n Return the expanded filename for it in the cache\n or nil if we can not find the file.\"\"\"\n filename = pyc2py(filename)\n if filename in file_cache:\n if reload_on_change: checkcache(filename)\n pass\n else:\n opts['use_linecache_lines'] = True\n update_cache(filename, opts)\n pass\n if filename in file_cache:\n return file_cache[filename].path\n else: return None\n return # Not reached\n",
"def unmap_file(filename):\n # FIXME: this is wrong?\n return file2file_remap.get(filename, filename)\n"
] |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
PYTHON3 = (sys.version_info >= (3, 0))
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
large_int = sys.maxsize
else:
large_int = sys.maxint
default_opts = {
'reload_on_change' : False, # Check if file has changed since last
# time
'use_linecache_lines' : True,
'strip_nl' : True, # Strip trailing \n on line returned
'output' : 'plain' # To we want plain output?
# Set to 'terminal'
# for terminal syntax-colored output
}
def get_option(key, options):
global default_opts
if not options or key not in options:
return default_opts.get(key)
else:
return options[key]
return None # Not reached
def has_trailing_nl(string):
return len(string) > 0 and '\n' == string[-1]
def pyc2py(filename):
"""
Find corresponding .py name given a .pyc or .pyo
"""
if re.match(".*py[co]$", filename):
if PYTHON3:
return re.sub(r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER,
'\\1\\2.py',
filename)
else:
return filename[:-1]
return filename
class LineCacheInfo:
    """Cache record for one file.

    Holds the stat() result taken when the file was read, the known
    trace line numbers (or None), the dict of rendered line arrays
    keyed by output format, the full path, an optional SHA1 hash
    object, and the newline convention observed in the file.
    """
    def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
        self.stat = stat
        self.line_numbers = line_numbers
        self.lines = lines
        self.path = path
        self.sha1 = sha1
        self.eols = eols
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the a full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is # the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number in as Python sees it. The second item is the
# line number in corresponding mapped_path. The the first entry of the
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But we not the other way
# around. That is we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work this.
def clear_file_cache(filename=None):
    """Clear the file cache.  With no *filename*, drop everything,
    remap tables included; with a *filename*, drop just that entry."""
    global file_cache, file2file_remap, file2file_remap_lines
    if filename is None:
        file_cache = {}
        file2file_remap = {}
        file2file_remap_lines = {}
    elif filename in file_cache:
        del file_cache[filename]
def clear_file_format_cache():
    """Remove syntax-formatted lines in the cache. Use this
    when you change the Pygments syntax or Token formatting
    and want to redo how files may have previously been
    syntax marked.

    Formatted entries are *deleted* rather than set to None:
    getlines() only re-renders a format when its key is absent from
    the entry's lines dict, so a None placeholder would be handed
    back to callers as-is instead of being regenerated.
    """
    for cache_info in file_cache.values():
        # Snapshot the keys: we mutate the dict while walking it.
        for fmt in list(cache_info.lines.keys()):
            if 'plain' == fmt:
                continue
            del cache_info.lines[fmt]
def cached_files():
    """Return a list of the names of all currently cached files."""
    return [name for name in file_cache]
def checkcache(filename=None, opts=False):
    """Discard cache entries that are out of date. If *filename* is *None*
    all entries in the file cache *file_cache* are checked. If we do not
    have stat information about a file it will be kept. Return a list of
    invalidated filenames. None is returned if a filename was given but
    not found cached."""
    # *opts* may be a full options dict or a bare boolean standing in
    # for the 'use_linecache_lines' flag.
    if isinstance(opts, dict):
        use_linecache_lines = opts['use_linecache_lines']
    else:
        use_linecache_lines = opts
        pass
    if not filename:
        filenames = list(file_cache.keys())
    elif filename in file_cache:
        filenames = [filename]
    else:
        return None
    result = []
    for filename in filenames:
        if filename not in file_cache: continue
        path = file_cache[filename].path
        if os.path.exists(path):
            # Compare the stat() recorded at cache time against the file
            # now on disk; size or mtime drift marks the entry stale.
            cache_info = file_cache[filename].stat
            stat = os.stat(path)
            if stat and \
               (cache_info.st_size != stat.st_size or
                cache_info.st_mtime != stat.st_mtime):
                result.append(filename)
                # NOTE(review): update_cache's second parameter is an
                # options dict elsewhere, but here receives the bare
                # use_linecache_lines flag -- confirm intended.
                update_cache(filename, use_linecache_lines)
        else:
            # File vanished from disk: invalidate and recache what we can.
            result.append(filename)
            update_cache(filename)
            pass
        pass
    return result
def cache_script(script, text, opts={}):
    """Record *text* for *script* in the script cache unless an entry
    already exists; return *script* either way."""
    global script_cache
    if script in script_cache:
        return script
    update_script_cache(script, text, opts)
    return script
def uncache_script(script, opts={}):
    """Drop *script* from the script cache.  Return *script* when an
    entry was removed, None when nothing was cached under that name."""
    global script_cache
    if script not in script_cache:
        return None
    del script_cache[script]
    return script
def update_script_cache(script, text, opts={}):
    """Store *text* for *script* unless an entry is already present;
    always return *script*.  *opts* is accepted but unused."""
    global script_cache
    script_cache.setdefault(script, text)
    return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
    """Cache filename if it is not already cached.
    Return the expanded filename for it in the cache
    or None if we can not find the file.

    :param filename: path (possibly a .pyc/.pyo name) to cache
    :param reload_on_change: when True, re-validate an already-cached
        entry against the file on disk
    :param opts: option dict consulted by update_cache()
    """
    filename = pyc2py(filename)
    if filename in file_cache:
        if reload_on_change:
            checkcache(filename)
    else:
        # Copy before tweaking: the original wrote into the caller's
        # dict (and into the shared module default_opts when the
        # parameter was defaulted), leaking the flag to later calls.
        opts = dict(opts)
        opts['use_linecache_lines'] = True
        update_cache(filename, opts)
    if filename in file_cache:
        return file_cache[filename].path
    return None
def is_cached(file_or_script):
    """Return True if file_or_script is cached"""
    if not isinstance(file_or_script, str):
        return is_cached_script(file_or_script)
    return unmap_file(file_or_script) in file_cache
def is_cached_script(filename):
    """Return True when the (alias-resolved) name is in the script cache."""
    return unmap_file(filename) in script_cache
def is_empty(filename):
    """Return True when the cached plain-text lines for *filename* are
    empty.  The file must already be present in file_cache."""
    entry = file_cache[unmap_file(filename)]
    return len(entry.lines['plain']) == 0
def getline(file_or_script, line_number, opts=default_opts):
    """Get line *line_number* from file named *file_or_script*. Return None if
    there was a problem or it is not found.
    Example:
    lines = pyficache.getline("/tmp/myfile.py")
    """
    # Resolve aliases and any line remapping before reading the cache.
    filename = unmap_file(file_or_script)
    filename, line_number = unmap_file_line(filename, line_number)
    lines = getlines(filename, opts)
    if not lines:
        return None
    if line_number < 1 or line_number > maxline(filename):
        return None
    line = lines[line_number - 1]
    return line.rstrip('\n') if get_option('strip_nl', opts) else line
def getlines(filename, opts=default_opts):
    """Read lines of *filename* and cache the results. However, if
    *filename* was previously cached use the results from the
    cache. Return *None* if we can not get lines
    """
    if get_option('reload_on_change', opts): checkcache(filename)
    fmt = get_option('output', opts)
    highlight_opts = {'bg': fmt}
    cs = opts.get('style')
    # Colorstyle of Terminal256Formatter takes precedence over
    # light/dark colorthemes of TerminalFormatter
    if cs:
        # The pygments style name doubles as the cache key under which
        # the formatted lines are memoized.
        highlight_opts['style'] = cs
        fmt = cs
    if filename not in file_cache:
        update_cache(filename, opts)
        # update_cache may have stored the entry under the .py name.
        filename = pyc2py(filename)
        if filename not in file_cache: return None
        pass
    lines = file_cache[filename].lines
    if fmt not in lines.keys():
        # First request for this format: render from 'plain' and memoize.
        lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
        pass
    return lines[fmt]
def highlight_array(array, trailing_nl=True,
                    bg='light', **options):
    """Syntax-highlight a list of source lines; return a new list of
    highlighted lines, each newline-terminated (the last one only when
    *trailing_nl* is true)."""
    joined = ''.join(array)
    pieces = highlight_string(joined, bg, **options).split('\n')
    result = ["%s\n" % piece for piece in pieces]
    if not trailing_nl:
        result[-1] = result[-1].rstrip('\n')
    return result
python_lexer = PythonLexer()
# TerminalFormatter uses a colorTHEME with light and dark pairs.
# But Terminal256Formatter uses a colorSTYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
    """Return *string* highlighted with the Python lexer.  A 'style'
    option selects a pygments 256-color style; otherwise *bg* chooses
    between the light and dark basic terminal formatters."""
    global terminal_256_formatter
    if options.get('style'):
        # Rebuild the cached 256-color formatter only when the requested
        # style differs from the one it was last built with.
        if terminal_256_formatter.style != options['style']:
            terminal_256_formatter = \
                Terminal256Formatter(style=options['style'])
        # 'style' is not a highlight() keyword; drop it before the call.
        del options['style']
        return highlight(string, python_lexer, terminal_256_formatter,
                         **options)
    elif 'light' == bg:
        return highlight(string, python_lexer, light_terminal_formatter,
                         **options)
    else:
        return highlight(string, python_lexer, dark_terminal_formatter,
                         **options)
    pass
def path(filename):
    """Return full filename path for filename"""
    entry = file_cache.get(unmap_file(filename))
    if entry is None:
        return None
    return entry.path
def remap_file(from_file, to_file):
    """Make *to_file* be a synonym for *from_file*"""
    file2file_remap.update({to_file: from_file})
def remap_file_lines(from_path, to_path, line_map_list):
    """Merge *line_map_list* into the table of line associations from
    *from_path* (the name Python sees) to *to_path* (the mapped file)."""
    from_path = pyc2py(from_path)
    cache_file(to_path)
    existing = file2file_remap_lines.get(to_path)
    if existing:
        pairs = list(existing.from_to_pairs) + list(line_map_list)
    else:
        pairs = line_map_list
    # FIXME: look for duplicates ?
    ordered = sorted(pairs, key=lambda pair: pair[0])
    file2file_remap_lines[to_path] = RemapLineEntry(from_path,
                                                    tuple(ordered))
def remove_remap_file(filename):
    """Remove any mapping for *filename* and return that if it exists"""
    global file2file_remap
    # dict.pop with a default does the lookup/delete/return in one step.
    return file2file_remap.pop(filename, None)
def sha1(filename):
    """Return the hex SHA1 digest of the plain lines of *filename*,
    memoizing the hash object on the cache entry.  None when the file
    cannot be cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
    entry = file_cache[filename]
    if entry.sha1:
        return entry.sha1.hexdigest()
    digest = hashlib.sha1()
    for line in entry.lines['plain']:
        digest.update(line.encode('utf-8'))
    entry.sha1 = digest
    return digest.hexdigest()
def maxline(filename, use_cache_only=False):
    """Return the maximum line number filename after taking into account
    line remapping. If no remapping then this is the same as size.

    :param filename: name as Python sees it (a co_filename-style key)
    :param use_cache_only: passed through to size(); when False the
        file may be fetched and cached as a side effect
    """
    # The original tested membership, then .get(), then falsiness --
    # three checks of the same condition; one .get() suffices.
    remap_line_entry = file2file_remap_lines.get(filename)
    if not remap_line_entry:
        return size(filename, use_cache_only)
    # Largest mapped-to line number; stays -1 when there are no pairs.
    max_lineno = -1
    for pair in remap_line_entry.from_to_pairs:
        max_lineno = max(max_lineno, pair[1])
    if max_lineno == -1:
        return size(filename, use_cache_only)
    return max_lineno
def stat(filename, use_cache_only=False):
    """Return stat() info for *filename*. If *use_cache_only* is *False*,
    we will try to fetch the file if it is not cached."""
    filename = pyc2py(filename)
    if filename not in file_cache and not use_cache_only:
        cache_file(filename)
    if filename not in file_cache:
        return None
    return file_cache[filename].stat
def trace_line_numbers(filename, reload_on_change=False):
    """Return an Array of breakpoints in filename.
    The list will contain an entry for each distinct line event call
    so it is possible (and possibly useful) for a line number appear more
    than once."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname: return None
    e = file_cache[filename]
    if not e.line_numbers:
        # Compute once and memoize on the cache entry.
        if hasattr(coverage.coverage, 'analyze_morf'):
            # Very old coverage.py API.
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            # Silence coverage's "no data collected" warning; we only
            # want the static analysis, not collected data.
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
            pass
        pass
    return e.line_numbers
def is_mapped_file(filename):
    """Classify *filename*: 'file' when a whole-file alias exists,
    'file_line' when line remapping is recorded, None otherwise."""
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    """Resolve *filename* through the file2file_remap alias table,
    returning the name unchanged when no alias is recorded."""
    # FIXME: this is wrong?
    return file2file_remap.get(filename, filename)
def unmap_file_line(filename, line_number, reverse=False):
    """Translate (*filename*, *line_number*) through file2file_remap_lines.
    Return a (filename, line_number) pair; both come back unchanged when
    no line remapping is recorded for *filename*.  With *reverse*, each
    pair's from/to direction is swapped before matching."""
    remap_line_entry = file2file_remap_lines.get(filename)
    mapped_line_number = line_number
    if remap_line_entry:
        filename = remap_line_entry.mapped_path
        cache_entry = file_cache.get(filename, None)
        if cache_entry:
            line_max = maxline(filename)
        else:
            # Target not cached: use an effectively unbounded maximum.
            line_max = large_int
        last_t = (1, 1)
        # FIXME: use binary search
        # Note we assume from_line is increasing.
        # Add sentinel at end of from pairs to handle using the final
        # entry for line numbers greater than it.
        # Find the closest mapped line number equal or before line_number.
        for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
            if reverse:
                t = list(reversed(t))
            if t[1] == line_number:
                # Exact hit on a recorded pair.
                mapped_line_number = t[0]
                break
            elif t[1] > line_number:
                # Passed it: extrapolate from the last pair at or before
                # line_number, keeping the same offset.
                mapped_line_number = last_t[0] + (line_number - last_t[1] )
                break
            last_t = t
            pass
    return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
    """Update a cache entry. If something is wrong, return
    *None*. Return *True* if the cache was updated and *False* if not. If
    *use_linecache_lines* is *True*, use an existing cache entry as source
    for the lines of the file."""
    if not filename: return None
    orig_filename = filename
    filename = pyc2py(filename)
    if filename in file_cache: del file_cache[filename]
    path = os.path.abspath(filename)
    stat = None
    if get_option('use_linecache_lines', opts):
        # Fast path: let the stdlib linecache supply the lines.
        fname_list = [filename]
        mapped_path = file2file_remap.get(path)
        if mapped_path:
            fname_list.append(mapped_path)
        for filename in fname_list:
            try:
                stat = os.stat(filename)
                plain_lines = linecache.getlines(filename)
                # NOTE(review): trailing_nl is computed but unused here,
                # and plain_lines[-1] raises IndexError on an empty file,
                # which the bare except below swallows -- confirm intended.
                trailing_nl = has_trailing_nl(plain_lines[-1])
                lines = {
                    'plain' : plain_lines,
                    }
                file_cache[filename] = LineCacheInfo(stat, None, lines,
                                                     path, None)
            except:
                pass
            pass
            # Record alias entries so later lookups under the original
            # (.pyc) name find this cache entry.
            if orig_filename != filename:
                file2file_remap[orig_filename] = filename
                file2file_remap[os.path.abspath(orig_filename)] = filename
                pass
            file2file_remap[path] = filename
            # First candidate wins; returns the name cached under.
            return filename
        pass
        pass
    if os.path.exists(path):
        stat = os.stat(path)
    elif module_globals and '__loader__' in module_globals:
        # PEP 302: ask the module's loader for the source (e.g. zipimport).
        name = module_globals.get('__name__')
        loader = module_globals['__loader__']
        get_source = getattr(loader, 'get_source', None)
        if name and get_source:
            try:
                data = get_source(name)
            except (ImportError, IOError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return None
                # FIXME: DRY with code below
                lines = {'plain' : data.splitlines()}
                raw_string = ''.join(lines['plain'])
                trailing_nl = has_trailing_nl(raw_string)
                if 'style' in opts:
                    key = opts['style']
                    highlight_opts = {'style': key}
                else:
                    key = 'terminal'
                    highlight_opts = {}
                # Pre-render one highlighted format alongside 'plain'.
                lines[key] = highlight_array(raw_string.split('\n'),
                                             trailing_nl, **highlight_opts)
                file_cache[filename] = \
                    LineCacheInfo(None, None, lines, filename, None)
                file2file_remap[path] = filename
                return True
            pass
        pass
    if not os.path.isabs(filename):
        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        stat = None
        for dirname in sys.path:
            path = os.path.join(dirname, filename)
            if os.path.exists(path):
                stat = os.stat(path)
                break
            pass
        if not stat: return False
        pass
    try:
        # Universal-newline mode on Python 2; default text mode on 3.
        mode = 'r' if PYTHON3 else 'rU'
        with open(path, mode) as fp:
            lines = {'plain' : fp.readlines()}
            eols = fp.newlines
    except:
        return None
    # FIXME: DRY with code above
    raw_string = ''.join(lines['plain'])
    trailing_nl = has_trailing_nl(raw_string)
    if 'style' in opts:
        key = opts['style'] or 'default'
        highlight_opts = {'style': key}
    else:
        key = 'terminal'
        highlight_opts = {}
    lines[key] = highlight_array(raw_string.split('\n'),
                                 trailing_nl, **highlight_opts)
    if orig_filename != filename:
        # Alias the original (.pyc) spelling to the cached .py name.
        file2file_remap[orig_filename] = filename
        file2file_remap[os.path.abspath(orig_filename)] = filename
        pass
    pass
    file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
    file2file_remap[path] = filename
    return True
# example usage
if __name__ == '__main__':
    def yes_no(var):
        # Demo helper: "" for truthy, "not " for falsy, to splice into
        # "... is %scached" messages.
        if var: return ""
        else: return "not "
        return # Not reached
    # print(getline(__file__, 1, {'output': 'dark'}))
    # print(getline(__file__, 2, {'output': 'light'}))
    # from pygments.styles import STYLE_MAP
    # opts = {'style': list(STYLE_MAP.keys())[0]}
    # print(getline(__file__, 1, opts))
    # update_cache('os')
    # lines = getlines(__file__)
    # print("%s has %s lines" % (__file__, len(lines['plain'])))
    # lines = getlines(__file__, {'output': 'light'})
    # i = 0
    # for line in lines:
    #     i += 1
    #     print(line.rstrip('\n').rstrip('\n'))
    #     if i > 20: break
    #     pass
    # line = getline(__file__, 6)
    # print("The 6th line is\n%s" % line)
    # line = remap_file(__file__, 'another_name')
    # print(getline('another_name', 7))
    # print("Files cached: %s" % cached_files())
    # update_cache(__file__)
    # checkcache(__file__)
    # print("%s has %s lines" % (__file__, size(__file__)))
    # print("%s trace line numbers:\n" % __file__)
    # print("%s " % repr(trace_line_numbers(__file__)))
    # print("%s is %scached." % (__file__,
    #       yes_no(is_cached(__file__))))
    # print(stat(__file__))
    # print("Full path: %s" % path(__file__))
    # checkcache() # Check all files in the cache
    # clear_file_format_cache()
    # clear_file_cache()
    # print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
    # # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
    # # # if digest is not None: print digest.first[0]
    # line = getline(__file__, 7)
    # print("The 7th line is\n%s" % line)
    # Live demo: map line 10 of this file to line 6 of the alias 'test2',
    # then read back through the alias to exercise remap_file_lines.
    orig_path = __file__
    mapped_path = 'test2'
    start_line = 10
    start_mapped = 6
    remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
    for l in (1,):
        line = getline(mapped_path, l+start_mapped)
        print("Remapped %s line %d should be line %d of %s. line is:\n%s"
              % (mapped_path, start_mapped+l, start_line+l, orig_path, line))
    # print("XXX", file2file_remap_lines)
|
rocky/python-filecache
|
pyficache/main.py
|
maxline
|
python
|
def maxline(filename, use_cache_only=False):
if filename not in file2file_remap_lines:
return size(filename, use_cache_only)
max_lineno = -1
remap_line_entry = file2file_remap_lines.get(filename)
if not remap_line_entry:
return size(filename, use_cache_only)
for t in remap_line_entry.from_to_pairs:
max_lineno = max(max_lineno, t[1])
if max_lineno == -1:
return size(filename, use_cache_only)
else:
return max_lineno
|
Return the maximum line number filename after taking into account
line remapping. If no remapping then this is the same as size
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L473-L487
|
[
"def size(filename, use_cache_only=False):\n \"\"\"Return the number of lines in filename. If `use_cache_only' is False,\n we'll try to fetch the file if it is not cached.\"\"\"\n filename = unmap_file(filename)\n if filename not in file_cache:\n if not use_cache_only: cache_file(filename)\n if filename not in file_cache:\n return None\n pass\n return len(file_cache[filename].lines['plain'])\n"
] |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
PYTHON3 = (sys.version_info >= (3, 0))
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
large_int = sys.maxsize
else:
large_int = sys.maxint
default_opts = {
'reload_on_change' : False, # Check if file has changed since last
# time
'use_linecache_lines' : True,
'strip_nl' : True, # Strip trailing \n on line returned
'output' : 'plain' # To we want plain output?
# Set to 'terminal'
# for terminal syntax-colored output
}
def get_option(key, options):
global default_opts
if not options or key not in options:
return default_opts.get(key)
else:
return options[key]
return None # Not reached
def has_trailing_nl(string):
return len(string) > 0 and '\n' == string[-1]
def pyc2py(filename):
"""
Find corresponding .py name given a .pyc or .pyo
"""
if re.match(".*py[co]$", filename):
if PYTHON3:
return re.sub(r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER,
'\\1\\2.py',
filename)
else:
return filename[:-1]
return filename
class LineCacheInfo:
def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
self.stat, self.lines, self.path, self.sha1 = (stat, lines, path, sha1)
self.line_numbers = line_numbers
self.eols = eols
return
pass
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the a full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is # the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number in as Python sees it. The second item is the
# line number in corresponding mapped_path. The the first entry of the
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But we not the other way
# around. That is we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work this.
def clear_file_cache(filename=None):
"""Clear the file cache. If no filename is given clear it entirely.
if a filename is given, clear just that filename."""
global file_cache, file2file_remap, file2file_remap_lines
if filename is not None:
if filename in file_cache:
del file_cache[filename]
pass
else:
file_cache = {}
file2file_remap = {}
file2file_remap_lines = {}
pass
return
def clear_file_format_cache():
"""Remove syntax-formatted lines in the cache. Use this
when you change the Pygments syntax or Token formatting
and want to redo how files may have previously been
syntax marked."""
for fname, cache_info in file_cache.items():
for format, lines in cache_info.lines.items():
if 'plain' == format: continue
file_cache[fname].lines[format] = None
pass
pass
pass
def cached_files():
"""Return an array of cached file names"""
return list(file_cache.keys())
def checkcache(filename=None, opts=False):
"""Discard cache entries that are out of date. If *filename* is *None*
all entries in the file cache *file_cache* are checked. If we do not
have stat information about a file it will be kept. Return a list of
invalidated filenames. None is returned if a filename was given but
not found cached."""
if isinstance(opts, dict):
use_linecache_lines = opts['use_linecache_lines']
else:
use_linecache_lines = opts
pass
if not filename:
filenames = list(file_cache.keys())
elif filename in file_cache:
filenames = [filename]
else:
return None
result = []
for filename in filenames:
if filename not in file_cache: continue
path = file_cache[filename].path
if os.path.exists(path):
cache_info = file_cache[filename].stat
stat = os.stat(path)
if stat and \
(cache_info.st_size != stat.st_size or
cache_info.st_mtime != stat.st_mtime):
result.append(filename)
update_cache(filename, use_linecache_lines)
else:
result.append(filename)
update_cache(filename)
pass
pass
return result
def cache_script(script, text, opts={}):
"""Cache script if it is not already cached."""
global script_cache
if script not in script_cache:
update_script_cache(script, text, opts)
pass
return script
def uncache_script(script, opts={}):
"""remove script from cache."""
global script_cache
if script in script_cache:
del script_cache[script]
return script
return None
def update_script_cache(script, text, opts={}):
"""Cache script if it is not already cached."""
global script_cache
if script not in script_cache:
script_cache[script] = text
return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
"""Cache filename if it is not already cached.
Return the expanded filename for it in the cache
or nil if we can not find the file."""
filename = pyc2py(filename)
if filename in file_cache:
if reload_on_change: checkcache(filename)
pass
else:
opts['use_linecache_lines'] = True
update_cache(filename, opts)
pass
if filename in file_cache:
return file_cache[filename].path
else: return None
return # Not reached
def is_cached(file_or_script):
"""Return True if file_or_script is cached"""
if isinstance(file_or_script, str):
return unmap_file(file_or_script) in file_cache
else:
return is_cached_script(file_or_script)
return
def is_cached_script(filename):
return unmap_file(filename) in list(script_cache.keys())
def is_empty(filename):
filename=unmap_file(filename)
return 0 == len(file_cache[filename].lines['plain'])
def getline(file_or_script, line_number, opts=default_opts):
"""Get line *line_number* from file named *file_or_script*. Return None if
there was a problem or it is not found.
Example:
lines = pyficache.getline("/tmp/myfile.py")
"""
filename = unmap_file(file_or_script)
filename, line_number = unmap_file_line(filename, line_number)
lines = getlines(filename, opts)
if lines and line_number >=1 and line_number <= maxline(filename):
line = lines[line_number-1]
if get_option('strip_nl', opts):
return line.rstrip('\n')
else:
return line
pass
else:
return None
return # Not reached
def getlines(filename, opts=default_opts):
"""Read lines of *filename* and cache the results. However, if
*filename* was previously cached use the results from the
cache. Return *None* if we can not get lines
"""
if get_option('reload_on_change', opts): checkcache(filename)
fmt = get_option('output', opts)
highlight_opts = {'bg': fmt}
cs = opts.get('style')
# Colorstyle of Terminal255Formatter takes precidence over
# light/dark colorthemes of TerminalFormatter
if cs:
highlight_opts['style'] = cs
fmt = cs
if filename not in file_cache:
update_cache(filename, opts)
filename = pyc2py(filename)
if filename not in file_cache: return None
pass
lines = file_cache[filename].lines
if fmt not in lines.keys():
lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
pass
return lines[fmt]
def highlight_array(array, trailing_nl=True,
bg='light', **options):
fmt_array = highlight_string(''.join(array),
bg, **options).split('\n')
lines = [ line + "\n" for line in fmt_array ]
if not trailing_nl: lines[-1] = lines[-1].rstrip('\n')
return lines
python_lexer = PythonLexer()
# TerminalFormatter uses a colorTHEME with light and dark pairs.
# But Terminal256Formatter uses a colorSTYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
global terminal_256_formatter
if options.get('style'):
if terminal_256_formatter.style != options['style']:
terminal_256_formatter = \
Terminal256Formatter(style=options['style'])
del options['style']
return highlight(string, python_lexer, terminal_256_formatter,
**options)
elif 'light' == bg:
return highlight(string, python_lexer, light_terminal_formatter,
**options)
else:
return highlight(string, python_lexer, dark_terminal_formatter,
**options)
pass
def path(filename):
"""Return full filename path for filename"""
filename = unmap_file(filename)
if filename not in file_cache:
return None
return file_cache[filename].path
def remap_file(from_file, to_file):
"""Make *to_file* be a synonym for *from_file*"""
file2file_remap[to_file] = from_file
return
def remap_file_lines(from_path, to_path, line_map_list):
    """Register that lines of *from_path* map onto lines of *to_path*,
    extending any mapping previously recorded for *to_path*."""
    from_path = pyc2py(from_path)
    cache_file(to_path)
    existing = file2file_remap_lines.get(to_path)
    if existing:
        pairs = list(existing.from_to_pairs) + list(line_map_list)
    else:
        pairs = line_map_list
    # FIXME: look for duplicates ?
    ordered = tuple(sorted(pairs, key=lambda pair: pair[0]))
    file2file_remap_lines[to_path] = RemapLineEntry(from_path, ordered)
    return
def remove_remap_file(filename):
    """Remove any mapping for *filename* and return that if it exists"""
    global file2file_remap
    # dict.pop with a default does the lookup-delete-return in one step.
    return file2file_remap.pop(filename, None)
def sha1(filename):
    """Return SHA1 of filename."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
        pass
    # Reuse a previously computed digest when available.
    if file_cache[filename].sha1:
        return file_cache[filename].sha1.hexdigest()
    sha1 = hashlib.sha1()
    for line in file_cache[filename].lines['plain']:
        sha1.update(line.encode('utf-8'))
        pass
    # Cache the hash object (not the hex string) for later calls.
    file_cache[filename].sha1 = sha1
    return sha1.hexdigest()
def size(filename, use_cache_only=False):
    """Return the number of lines in *filename*.  If *use_cache_only* is
    False, try to fetch the file when it is not already cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return len(file_cache[filename].lines['plain'])
def stat(filename, use_cache_only=False):
    """Return the os.stat() info recorded for *filename*.  If
    *use_cache_only* is False, try to fetch the file when it is not
    already cached."""
    filename = pyc2py(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return file_cache[filename].stat
def trace_line_numbers(filename, reload_on_change=False):
    """Return an Array of breakpoints in filename.
    The list will contain an entry for each distinct line event call
    so it is possible (and possibly useful) for a line number appear more
    than once."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname: return None
    # NOTE(review): the lookup uses *filename*, but cache_file() may have
    # stored the entry under its pyc2py() name -- confirm this cannot
    # raise KeyError for .pyc/.pyo inputs.
    e = file_cache[filename]
    if not e.line_numbers:
        # Older coverage releases expose analyze_morf(); newer ones use
        # coverage().analysis().
        if hasattr(coverage.coverage, 'analyze_morf'):
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
            pass
        pass
    return e.line_numbers
def is_mapped_file(filename):
    """Classify how *filename* is remapped: 'file' for a whole-file
    remap, 'file_line' for a line-level remap, else None."""
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    """Resolve *filename* through the whole-file remap table, returning
    it unchanged when no mapping exists."""
    # FIXME: this is wrong?
    try:
        return file2file_remap[filename]
    except KeyError:
        return filename
def unmap_file_line(filename, line_number, reverse=False):
    """Translate (*filename*, *line_number*) through any registered
    line-level remapping and return the resulting (filename, line_number)
    pair.  When *reverse* is true the mapping is applied backwards."""
    remap_line_entry = file2file_remap_lines.get(filename)
    mapped_line_number = line_number
    if remap_line_entry:
        filename = remap_line_entry.mapped_path
        cache_entry = file_cache.get(filename, None)
        if cache_entry:
            line_max = maxline(filename)
        else:
            line_max = large_int
        last_t = (1, 1)
        # FIXME: use binary search
        # Note we assume assume from_line is increasing.
        # Add sentinel at end of from pairs to handle using the final
        # entry for line numbers greater than it.
        # Find the closest mapped line number equal or before line_number.
        for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
            if reverse:
                t = list(reversed(t))
            if t[1] == line_number:
                mapped_line_number = t[0]
                break
            elif t[1] > line_number:
                # Between recorded pairs: offset from the previous pair.
                mapped_line_number = last_t[0] + (line_number - last_t[1] )
                break
            last_t = t
            pass
    return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
    """Update a cache entry. If something is wrong, return
    *None*. Return *True* if the cache was updated and *False* if not. If
    *use_linecache_lines* is *True*, use an existing cache entry as source
    for the lines of the file."""
    if not filename: return None
    orig_filename = filename
    filename = pyc2py(filename)
    # Drop any stale entry; it is rebuilt below.
    if filename in file_cache: del file_cache[filename]
    path = os.path.abspath(filename)
    stat = None
    if get_option('use_linecache_lines', opts):
        # Fast path: take the lines from Python's linecache module.
        fname_list = [filename]
        mapped_path = file2file_remap.get(path)
        if mapped_path:
            fname_list.append(mapped_path)
        for filename in fname_list:
            try:
                stat = os.stat(filename)
                plain_lines = linecache.getlines(filename)
                trailing_nl = has_trailing_nl(plain_lines[-1])
                lines = {
                    'plain' : plain_lines,
                    }
                file_cache[filename] = LineCacheInfo(stat, None, lines,
                                                     path, None)
            except:
                pass
            pass
        if orig_filename != filename:
            file2file_remap[orig_filename] = filename
            file2file_remap[os.path.abspath(orig_filename)] = filename
            pass
        file2file_remap[path] = filename
        # NOTE(review): this branch returns the filename rather than the
        # True/False documented above -- confirm callers depend on it.
        return filename
        pass
    pass
    if os.path.exists(path):
        stat = os.stat(path)
    elif module_globals and '__loader__' in module_globals:
        # Fall back to a PEP 302 loader (zip imports, frozen modules, ...).
        name = module_globals.get('__name__')
        loader = module_globals['__loader__']
        get_source = getattr(loader, 'get_source', None)
        if name and get_source:
            try:
                data = get_source(name)
            except (ImportError, IOError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return None
                # FIXME: DRY with code below
                lines = {'plain' : data.splitlines()}
                raw_string = ''.join(lines['plain'])
                trailing_nl = has_trailing_nl(raw_string)
                if 'style' in opts:
                    key = opts['style']
                    highlight_opts = {'style': key}
                else:
                    key = 'terminal'
                    highlight_opts = {}
                lines[key] = highlight_array(raw_string.split('\n'),
                                             trailing_nl, **highlight_opts)
                file_cache[filename] = \
                    LineCacheInfo(None, None, lines, filename, None)
                file2file_remap[path] = filename
                return True
            pass
        pass
    if not os.path.isabs(filename):
        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        stat = None
        for dirname in sys.path:
            path = os.path.join(dirname, filename)
            if os.path.exists(path):
                stat = os.stat(path)
                break
            pass
        if not stat: return False
        pass
    try:
        # 'rU' keeps universal-newline behavior on Python 2.
        mode = 'r' if PYTHON3 else 'rU'
        with open(path, mode) as fp:
            lines = {'plain' : fp.readlines()}
            eols = fp.newlines
    except:
        return None
    # FIXME: DRY with code above
    raw_string = ''.join(lines['plain'])
    trailing_nl = has_trailing_nl(raw_string)
    if 'style' in opts:
        key = opts['style'] or 'default'
        highlight_opts = {'style': key}
    else:
        key = 'terminal'
        highlight_opts = {}
    lines[key] = highlight_array(raw_string.split('\n'),
                                 trailing_nl, **highlight_opts)
    if orig_filename != filename:
        file2file_remap[orig_filename] = filename
        file2file_remap[os.path.abspath(orig_filename)] = filename
        pass
    pass
    file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
    file2file_remap[path] = filename
    return True
# example usage
if __name__ == '__main__':
    def yes_no(var):
        """Return '' when *var* is truthy, 'not ' otherwise."""
        if var:
            return ""
        return "not "

    # Demonstrate line remapping: declare that line 10 of this file is
    # line 6 of the alias 'test2', then fetch through the alias.
    orig_path = __file__
    mapped_path = 'test2'
    start_line = 10
    start_mapped = 6
    remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
    for l in (1,):
        line = getline(mapped_path, l+start_mapped)
        print("Remapped %s line %d should be line %d of %s. line is:\n%s"
              % (mapped_path, start_mapped+l, start_line+l, orig_path, line))
|
rocky/python-filecache
|
pyficache/main.py
|
stat
|
python
|
def stat(filename, use_cache_only=False):
filename = pyc2py(filename)
if filename not in file_cache:
if not use_cache_only: cache_file(filename)
if filename not in file_cache:
return None
pass
return file_cache[filename].stat
|
Return stat() info for *filename*. If *use_cache_only* is *False*,
we will try to fetch the file if it is not cached.
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L489-L498
|
[
"def cache_file(filename, reload_on_change=False, opts=default_opts):\n \"\"\"Cache filename if it is not already cached.\n Return the expanded filename for it in the cache\n or nil if we can not find the file.\"\"\"\n filename = pyc2py(filename)\n if filename in file_cache:\n if reload_on_change: checkcache(filename)\n pass\n else:\n opts['use_linecache_lines'] = True\n update_cache(filename, opts)\n pass\n if filename in file_cache:\n return file_cache[filename].path\n else: return None\n return # Not reached\n",
"def pyc2py(filename):\n \"\"\"\n Find corresponding .py name given a .pyc or .pyo\n \"\"\"\n if re.match(\".*py[co]$\", filename):\n if PYTHON3:\n return re.sub(r'(.*)__pycache__/(.+)\\.cpython-%s.py[co]$' % PYVER,\n '\\\\1\\\\2.py',\n filename)\n else:\n return filename[:-1]\n return filename\n"
] |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
# True when running under Python 3; selects sys.maxsize vs sys.maxint.
PYTHON3 = (sys.version_info >= (3, 0))
# e.g. "38" -- used by pyc2py() to match cpython bytecode file names.
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
    large_int = sys.maxsize
else:
    large_int = sys.maxint
# Defaults consulted by get_option() when a caller omits an option.
default_opts = {
    'reload_on_change'    : False, # Check if file has changed since last
                                   # time
    'use_linecache_lines' : True,
    'strip_nl'            : True,  # Strip trailing \n on line returned
    'output'              : 'plain' # To we want plain output?
                                    # Set to 'terminal'
                                    # for terminal syntax-colored output
    }
def get_option(key, options):
    """Look up *key* in *options*, falling back to default_opts.

    *options* may be None -- or any non-dict value, which some callers
    (e.g. checkcache()) pass -- in which case the module default for
    *key* is returned.
    """
    global default_opts
    # isinstance guard: `key not in options` raises TypeError when a
    # caller passes a bare bool instead of an options dict.
    if not isinstance(options, dict) or key not in options:
        return default_opts.get(key)
    return options[key]
def has_trailing_nl(string):
    """Return True when *string* is nonempty and ends with a newline."""
    return bool(string) and string[-1] == '\n'
def pyc2py(filename):
    """
    Find corresponding .py name given a .pyc or .pyo
    """
    if not re.match(".*py[co]$", filename):
        # Not a bytecode file name; hand it back untouched.
        return filename
    if PYTHON3:
        pattern = r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER
        return re.sub(pattern, '\\1\\2.py', filename)
    # Python 2 bytecode lives next to the source: just drop the 'c'/'o'.
    return filename[:-1]
class LineCacheInfo:
    """Cache record for one file.

    Fields: os.stat() info, known trace line numbers, a dict of line
    lists keyed by output format, the full path, a SHA1 hash object
    (or None), and the end-of-line convention seen (or None).
    """

    def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
        self.stat = stat
        self.line_numbers = line_numbers
        self.lines = lines
        self.path = path
        self.sha1 = sha1
        self.eols = eols
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the a full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is # the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number in as Python sees it. The second item is the
# line number in corresponding mapped_path. The the first entry of the
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But we not the other way
# around. That is we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work this.
def clear_file_cache(filename=None):
    """Clear the file cache. If no filename is given clear it entirely.
    if a filename is given, clear just that filename."""
    global file_cache, file2file_remap, file2file_remap_lines
    if filename is None:
        # Full reset: drop the cache and both remap tables.
        file_cache = {}
        file2file_remap = {}
        file2file_remap_lines = {}
    elif filename in file_cache:
        del file_cache[filename]
    return
def clear_file_format_cache():
    """Remove syntax-formatted lines in the cache. Use this
    when you change the Pygments syntax or Token formatting
    and want to redo how files may have previously been
    syntax marked."""
    for fname, cache_info in file_cache.items():
        # Collect the keys first: the dict must not be mutated while
        # being iterated.
        formats = [fmt for fmt in cache_info.lines if fmt != 'plain']
        for fmt in formats:
            # Delete the entry outright.  The previous code assigned
            # None, which left the format key present, so getlines()'s
            # `fmt not in lines.keys()` test passed and None was returned
            # instead of re-highlighting.
            del cache_info.lines[fmt]
def cached_files():
    """Return an array of cached file names"""
    return [name for name in file_cache]
def checkcache(filename=None, opts=False):
    """Discard cache entries that are out of date. If *filename* is *None*
    all entries in the file cache *file_cache* are checked. If we do not
    have stat information about a file it will be kept. Return a list of
    invalidated filenames. None is returned if a filename was given but
    not found cached."""
    if isinstance(opts, dict):
        use_linecache_lines = opts['use_linecache_lines']
    else:
        # A non-dict *opts* is treated as the use_linecache_lines flag.
        use_linecache_lines = opts
        pass
    if not filename:
        filenames = list(file_cache.keys())
    elif filename in file_cache:
        filenames = [filename]
    else:
        return None
    result = []
    for filename in filenames:
        if filename not in file_cache: continue
        path = file_cache[filename].path
        if os.path.exists(path):
            cache_info = file_cache[filename].stat
            stat = os.stat(path)
            # Invalidate when size or mtime changed on disk.
            if stat and \
               (cache_info.st_size != stat.st_size or
                cache_info.st_mtime != stat.st_mtime):
                result.append(filename)
                # NOTE(review): a bare bool is passed where update_cache()
                # expects an options dict; get_option() raises TypeError on
                # a truthy non-dict -- confirm and pass a dict instead.
                update_cache(filename, use_linecache_lines)
        else:
            # File vanished from disk; rebuild from what is available.
            result.append(filename)
            update_cache(filename)
            pass
        pass
    return result
def cache_script(script, text, opts={}):
    """Cache script if it is not already cached."""
    global script_cache
    # setdefault only stores when the key is absent, matching the
    # "cache if not already cached" contract.
    script_cache.setdefault(script, text)
    return script
def uncache_script(script, opts={}):
    """remove script from cache."""
    global script_cache
    if script not in script_cache:
        return None
    del script_cache[script]
    return script
def update_script_cache(script, text, opts={}):
    """Cache script if it is not already cached."""
    global script_cache
    # Only store when no entry exists yet; existing text is kept.
    script_cache.setdefault(script, text)
    return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
    """Cache filename if it is not already cached.
    Return the expanded filename for it in the cache
    or nil if we can not find the file."""
    filename = pyc2py(filename)
    if filename in file_cache:
        if reload_on_change:
            checkcache(filename)
    else:
        # Copy before mutating: the previous code wrote into the caller's
        # dict -- and, via the default argument, permanently into the
        # module-wide default_opts -- as a side effect.
        opts = dict(opts)
        opts['use_linecache_lines'] = True
        update_cache(filename, opts)
    if filename in file_cache:
        return file_cache[filename].path
    return None
def is_cached(file_or_script):
    """Return True if file_or_script is cached"""
    if not isinstance(file_or_script, str):
        # Non-string keys are script-cache lookups.
        return is_cached_script(file_or_script)
    return unmap_file(file_or_script) in file_cache
def is_cached_script(filename):
    """Return True when *filename* (after unmapping) is in the script
    cache."""
    return unmap_file(filename) in script_cache
def is_empty(filename):
    """Return True when the cached plain text of *filename* has no lines.
    NOTE(review): raises KeyError when *filename* is not cached --
    confirm callers always cache first."""
    filename=unmap_file(filename)
    return 0 == len(file_cache[filename].lines['plain'])
def getline(file_or_script, line_number, opts=default_opts):
    """Get line *line_number* from file named *file_or_script*. Return None if
    there was a problem or it is not found.
    Example:
    line = pyficache.getline("/tmp/myfile.py", 6)
    """
    filename = unmap_file(file_or_script)
    # Apply any line-level remapping before fetching.
    filename, line_number = unmap_file_line(filename, line_number)
    lines = getlines(filename, opts)
    if lines and line_number >=1 and line_number <= maxline(filename):
        # Lines are stored 0-origin; the API is 1-origin.
        line = lines[line_number-1]
        if get_option('strip_nl', opts):
            return line.rstrip('\n')
        else:
            return line
        pass
    else:
        return None
    return # Not reached
def getlines(filename, opts=default_opts):
    """Read lines of *filename* and cache the results. However, if
    *filename* was previously cached use the results from the
    cache. Return *None* if we can not get lines
    """
    if get_option('reload_on_change', opts): checkcache(filename)
    fmt = get_option('output', opts)
    highlight_opts = {'bg': fmt}
    # NOTE(review): opts.get assumes *opts* is a dict, unlike get_option().
    cs = opts.get('style')
    # Colorstyle of Terminal256Formatter takes precedence over
    # light/dark colorthemes of TerminalFormatter
    if cs:
        highlight_opts['style'] = cs
        fmt = cs
    if filename not in file_cache:
        update_cache(filename, opts)
        filename = pyc2py(filename)
        if filename not in file_cache: return None
        pass
    lines = file_cache[filename].lines
    # Lazily build (and memoize) the requested output format.
    if fmt not in lines.keys():
        lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
        pass
    return lines[fmt]
def highlight_array(array, trailing_nl=True, bg='light', **options):
    """Syntax-highlight *array* (a list of source lines) and return the
    result as a list of lines, each terminated with a newline.  When
    *trailing_nl* is false, the final line keeps no trailing newline."""
    highlighted = highlight_string(''.join(array), bg, **options)
    result = ["%s\n" % text for text in highlighted.split('\n')]
    if not trailing_nl:
        result[-1] = result[-1].rstrip('\n')
    return result
# Shared Pygments objects, created once and reused by highlight_string().
python_lexer = PythonLexer()
# TerminalFormatter uses a colorTHEME with light and dark pairs.
# But Terminal256Formatter uses a colorSTYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
    """Return *string* syntax-highlighted for a terminal.

    If a 'style' option is given, a Terminal256Formatter built from that
    Pygments style is used; otherwise a TerminalFormatter is used with
    the 'light' or 'dark' color theme selected by *bg*.
    """
    global terminal_256_formatter
    if options.get('style'):
        # NOTE(review): terminal_256_formatter.style is presumably a
        # Pygments style *class* while options['style'] is a style *name*
        # string, so this comparison may never match and the formatter
        # would be rebuilt on every call -- confirm against Pygments docs.
        if terminal_256_formatter.style != options['style']:
            terminal_256_formatter = \
                Terminal256Formatter(style=options['style'])
        # 'style' must not be forwarded to highlight().
        del options['style']
        return highlight(string, python_lexer, terminal_256_formatter,
                         **options)
    elif 'light' == bg:
        return highlight(string, python_lexer, light_terminal_formatter,
                         **options)
    else:
        return highlight(string, python_lexer, dark_terminal_formatter,
                         **options)
    pass
def path(filename):
    """Return the full path recorded in the cache for *filename*, or
    None when the file is not cached."""
    filename = unmap_file(filename)
    entry = file_cache.get(filename)
    return entry.path if entry is not None else None
def remap_file(from_file, to_file):
    """Make *to_file* be a synonym for *from_file*"""
    file2file_remap.update({to_file: from_file})
    return
def remap_file_lines(from_path, to_path, line_map_list):
    """Register that lines of *from_path* map onto lines of *to_path*,
    extending any mapping previously recorded for *to_path*."""
    from_path = pyc2py(from_path)
    cache_file(to_path)
    existing = file2file_remap_lines.get(to_path)
    if existing:
        pairs = list(existing.from_to_pairs) + list(line_map_list)
    else:
        pairs = line_map_list
    # FIXME: look for duplicates ?
    ordered = tuple(sorted(pairs, key=lambda pair: pair[0]))
    file2file_remap_lines[to_path] = RemapLineEntry(from_path, ordered)
    return
def remove_remap_file(filename):
    """Remove any mapping for *filename* and return that if it exists"""
    global file2file_remap
    # dict.pop with a default does the lookup-delete-return in one step.
    return file2file_remap.pop(filename, None)
def sha1(filename):
    """Return SHA1 of filename."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
    entry = file_cache[filename]
    # Reuse a previously computed digest when available.
    if entry.sha1:
        return entry.sha1.hexdigest()
    digest = hashlib.sha1()
    for text in entry.lines['plain']:
        digest.update(text.encode('utf-8'))
    # Cache the hash object (not the hex string) for later calls.
    entry.sha1 = digest
    return digest.hexdigest()
def size(filename, use_cache_only=False):
    """Return the number of lines in *filename*.  If *use_cache_only* is
    False, try to fetch the file when it is not already cached."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        if not use_cache_only:
            cache_file(filename)
        if filename not in file_cache:
            return None
    return len(file_cache[filename].lines['plain'])
def maxline(filename, use_cache_only=False):
    """Return the maximum line number filename after taking into account
    line remapping. If no remapping then this is the same as size"""
    remap_line_entry = file2file_remap_lines.get(filename)
    if not remap_line_entry:
        return size(filename, use_cache_only)
    # Largest target line number recorded in the remap pairs.
    best = -1
    for pair in remap_line_entry.from_to_pairs:
        if pair[1] > best:
            best = pair[1]
    if best == -1:
        return size(filename, use_cache_only)
    return best
def trace_line_numbers(filename, reload_on_change=False):
    """Return an Array of breakpoints in filename.
    The list will contain an entry for each distinct line event call
    so it is possible (and possibly useful) for a line number appear more
    than once."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname: return None
    # NOTE(review): the lookup uses *filename*, but cache_file() may have
    # stored the entry under its pyc2py() name -- confirm this cannot
    # raise KeyError for .pyc/.pyo inputs.
    e = file_cache[filename]
    if not e.line_numbers:
        # Older coverage releases expose analyze_morf(); newer ones use
        # coverage().analysis().
        if hasattr(coverage.coverage, 'analyze_morf'):
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
            pass
        pass
    return e.line_numbers
def is_mapped_file(filename):
    """Classify how *filename* is remapped: 'file' for a whole-file
    remap, 'file_line' for a line-level remap, else None."""
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    """Resolve *filename* through the whole-file remap table, returning
    it unchanged when no mapping exists."""
    # FIXME: this is wrong?
    try:
        return file2file_remap[filename]
    except KeyError:
        return filename
def unmap_file_line(filename, line_number, reverse=False):
    """Translate (*filename*, *line_number*) through any registered
    line-level remapping and return the resulting (filename, line_number)
    pair.  When *reverse* is true the mapping is applied backwards."""
    remap_line_entry = file2file_remap_lines.get(filename)
    mapped_line_number = line_number
    if remap_line_entry:
        filename = remap_line_entry.mapped_path
        cache_entry = file_cache.get(filename, None)
        if cache_entry:
            line_max = maxline(filename)
        else:
            line_max = large_int
        last_t = (1, 1)
        # FIXME: use binary search
        # Note we assume assume from_line is increasing.
        # Add sentinel at end of from pairs to handle using the final
        # entry for line numbers greater than it.
        # Find the closest mapped line number equal or before line_number.
        for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
            if reverse:
                t = list(reversed(t))
            if t[1] == line_number:
                mapped_line_number = t[0]
                break
            elif t[1] > line_number:
                # Between recorded pairs: offset from the previous pair.
                mapped_line_number = last_t[0] + (line_number - last_t[1] )
                break
            last_t = t
            pass
    return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
    """Update a cache entry. If something is wrong, return
    *None*. Return *True* if the cache was updated and *False* if not. If
    *use_linecache_lines* is *True*, use an existing cache entry as source
    for the lines of the file."""
    if not filename: return None
    orig_filename = filename
    filename = pyc2py(filename)
    # Drop any stale entry; it is rebuilt below.
    if filename in file_cache: del file_cache[filename]
    path = os.path.abspath(filename)
    stat = None
    if get_option('use_linecache_lines', opts):
        # Fast path: take the lines from Python's linecache module.
        fname_list = [filename]
        mapped_path = file2file_remap.get(path)
        if mapped_path:
            fname_list.append(mapped_path)
        for filename in fname_list:
            try:
                stat = os.stat(filename)
                plain_lines = linecache.getlines(filename)
                trailing_nl = has_trailing_nl(plain_lines[-1])
                lines = {
                    'plain' : plain_lines,
                    }
                file_cache[filename] = LineCacheInfo(stat, None, lines,
                                                     path, None)
            except:
                pass
            pass
        if orig_filename != filename:
            file2file_remap[orig_filename] = filename
            file2file_remap[os.path.abspath(orig_filename)] = filename
            pass
        file2file_remap[path] = filename
        # NOTE(review): this branch returns the filename rather than the
        # True/False documented above -- confirm callers depend on it.
        return filename
        pass
    pass
    if os.path.exists(path):
        stat = os.stat(path)
    elif module_globals and '__loader__' in module_globals:
        # Fall back to a PEP 302 loader (zip imports, frozen modules, ...).
        name = module_globals.get('__name__')
        loader = module_globals['__loader__']
        get_source = getattr(loader, 'get_source', None)
        if name and get_source:
            try:
                data = get_source(name)
            except (ImportError, IOError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return None
                # FIXME: DRY with code below
                lines = {'plain' : data.splitlines()}
                raw_string = ''.join(lines['plain'])
                trailing_nl = has_trailing_nl(raw_string)
                if 'style' in opts:
                    key = opts['style']
                    highlight_opts = {'style': key}
                else:
                    key = 'terminal'
                    highlight_opts = {}
                lines[key] = highlight_array(raw_string.split('\n'),
                                             trailing_nl, **highlight_opts)
                file_cache[filename] = \
                    LineCacheInfo(None, None, lines, filename, None)
                file2file_remap[path] = filename
                return True
            pass
        pass
    if not os.path.isabs(filename):
        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        stat = None
        for dirname in sys.path:
            path = os.path.join(dirname, filename)
            if os.path.exists(path):
                stat = os.stat(path)
                break
            pass
        if not stat: return False
        pass
    try:
        # 'rU' keeps universal-newline behavior on Python 2.
        mode = 'r' if PYTHON3 else 'rU'
        with open(path, mode) as fp:
            lines = {'plain' : fp.readlines()}
            eols = fp.newlines
    except:
        return None
    # FIXME: DRY with code above
    raw_string = ''.join(lines['plain'])
    trailing_nl = has_trailing_nl(raw_string)
    if 'style' in opts:
        key = opts['style'] or 'default'
        highlight_opts = {'style': key}
    else:
        key = 'terminal'
        highlight_opts = {}
    lines[key] = highlight_array(raw_string.split('\n'),
                                 trailing_nl, **highlight_opts)
    if orig_filename != filename:
        file2file_remap[orig_filename] = filename
        file2file_remap[os.path.abspath(orig_filename)] = filename
        pass
    pass
    file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
    file2file_remap[path] = filename
    return True
# example usage
if __name__ == '__main__':
    def yes_no(var):
        """Return '' when *var* is truthy, 'not ' otherwise."""
        if var:
            return ""
        return "not "

    # Demonstrate line remapping: declare that line 10 of this file is
    # line 6 of the alias 'test2', then fetch through the alias.
    orig_path = __file__
    mapped_path = 'test2'
    start_line = 10
    start_mapped = 6
    remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
    for l in (1,):
        line = getline(mapped_path, l+start_mapped)
        print("Remapped %s line %d should be line %d of %s. line is:\n%s"
              % (mapped_path, start_mapped+l, start_line+l, orig_path, line))
|
rocky/python-filecache
|
pyficache/main.py
|
trace_line_numbers
|
python
|
def trace_line_numbers(filename, reload_on_change=False):
fullname = cache_file(filename, reload_on_change)
if not fullname: return None
e = file_cache[filename]
if not e.line_numbers:
if hasattr(coverage.coverage, 'analyze_morf'):
e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
else:
cov = coverage.coverage()
cov._warn_no_data = False
e.line_numbers = cov.analysis(fullname)[1]
pass
pass
return e.line_numbers
|
Return an Array of breakpoints in filename.
The list will contain an entry for each distinct line event call
so it is possible (and possibly useful) for a line number appear more
than once.
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L500-L517
|
[
"def cache_file(filename, reload_on_change=False, opts=default_opts):\n \"\"\"Cache filename if it is not already cached.\n Return the expanded filename for it in the cache\n or nil if we can not find the file.\"\"\"\n filename = pyc2py(filename)\n if filename in file_cache:\n if reload_on_change: checkcache(filename)\n pass\n else:\n opts['use_linecache_lines'] = True\n update_cache(filename, opts)\n pass\n if filename in file_cache:\n return file_cache[filename].path\n else: return None\n return # Not reached\n"
] |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
PYTHON3 = (sys.version_info >= (3, 0))
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
large_int = sys.maxsize
else:
large_int = sys.maxint
default_opts = {
'reload_on_change' : False, # Check if file has changed since last
# time
'use_linecache_lines' : True,
'strip_nl' : True, # Strip trailing \n on line returned
'output' : 'plain' # To we want plain output?
# Set to 'terminal'
# for terminal syntax-colored output
}
def get_option(key, options):
global default_opts
if not options or key not in options:
return default_opts.get(key)
else:
return options[key]
return None # Not reached
def has_trailing_nl(string):
return len(string) > 0 and '\n' == string[-1]
def pyc2py(filename):
"""
Find corresponding .py name given a .pyc or .pyo
"""
if re.match(".*py[co]$", filename):
if PYTHON3:
return re.sub(r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER,
'\\1\\2.py',
filename)
else:
return filename[:-1]
return filename
class LineCacheInfo:
def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
self.stat, self.lines, self.path, self.sha1 = (stat, lines, path, sha1)
self.line_numbers = line_numbers
self.eols = eols
return
pass
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the a full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is # the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number in as Python sees it. The second item is the
# line number in corresponding mapped_path. The the first entry of the
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But we not the other way
# around. That is we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work this.
def clear_file_cache(filename=None):
"""Clear the file cache. If no filename is given clear it entirely.
if a filename is given, clear just that filename."""
global file_cache, file2file_remap, file2file_remap_lines
if filename is not None:
if filename in file_cache:
del file_cache[filename]
pass
else:
file_cache = {}
file2file_remap = {}
file2file_remap_lines = {}
pass
return
def clear_file_format_cache():
"""Remove syntax-formatted lines in the cache. Use this
when you change the Pygments syntax or Token formatting
and want to redo how files may have previously been
syntax marked."""
for fname, cache_info in file_cache.items():
for format, lines in cache_info.lines.items():
if 'plain' == format: continue
file_cache[fname].lines[format] = None
pass
pass
pass
def cached_files():
"""Return an array of cached file names"""
return list(file_cache.keys())
def checkcache(filename=None, opts=False):
"""Discard cache entries that are out of date. If *filename* is *None*
all entries in the file cache *file_cache* are checked. If we do not
have stat information about a file it will be kept. Return a list of
invalidated filenames. None is returned if a filename was given but
not found cached."""
if isinstance(opts, dict):
use_linecache_lines = opts['use_linecache_lines']
else:
use_linecache_lines = opts
pass
if not filename:
filenames = list(file_cache.keys())
elif filename in file_cache:
filenames = [filename]
else:
return None
result = []
for filename in filenames:
if filename not in file_cache: continue
path = file_cache[filename].path
if os.path.exists(path):
cache_info = file_cache[filename].stat
stat = os.stat(path)
if stat and \
(cache_info.st_size != stat.st_size or
cache_info.st_mtime != stat.st_mtime):
result.append(filename)
update_cache(filename, use_linecache_lines)
else:
result.append(filename)
update_cache(filename)
pass
pass
return result
def cache_script(script, text, opts={}):
"""Cache script if it is not already cached."""
global script_cache
if script not in script_cache:
update_script_cache(script, text, opts)
pass
return script
def uncache_script(script, opts={}):
"""remove script from cache."""
global script_cache
if script in script_cache:
del script_cache[script]
return script
return None
def update_script_cache(script, text, opts={}):
"""Cache script if it is not already cached."""
global script_cache
if script not in script_cache:
script_cache[script] = text
return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
"""Cache filename if it is not already cached.
Return the expanded filename for it in the cache
or nil if we can not find the file."""
filename = pyc2py(filename)
if filename in file_cache:
if reload_on_change: checkcache(filename)
pass
else:
opts['use_linecache_lines'] = True
update_cache(filename, opts)
pass
if filename in file_cache:
return file_cache[filename].path
else: return None
return # Not reached
def is_cached(file_or_script):
"""Return True if file_or_script is cached"""
if isinstance(file_or_script, str):
return unmap_file(file_or_script) in file_cache
else:
return is_cached_script(file_or_script)
return
def is_cached_script(filename):
return unmap_file(filename) in list(script_cache.keys())
def is_empty(filename):
filename=unmap_file(filename)
return 0 == len(file_cache[filename].lines['plain'])
def getline(file_or_script, line_number, opts=default_opts):
"""Get line *line_number* from file named *file_or_script*. Return None if
there was a problem or it is not found.
Example:
lines = pyficache.getline("/tmp/myfile.py")
"""
filename = unmap_file(file_or_script)
filename, line_number = unmap_file_line(filename, line_number)
lines = getlines(filename, opts)
if lines and line_number >=1 and line_number <= maxline(filename):
line = lines[line_number-1]
if get_option('strip_nl', opts):
return line.rstrip('\n')
else:
return line
pass
else:
return None
return # Not reached
def getlines(filename, opts=default_opts):
"""Read lines of *filename* and cache the results. However, if
*filename* was previously cached use the results from the
cache. Return *None* if we can not get lines
"""
if get_option('reload_on_change', opts): checkcache(filename)
fmt = get_option('output', opts)
highlight_opts = {'bg': fmt}
cs = opts.get('style')
# Colorstyle of Terminal255Formatter takes precidence over
# light/dark colorthemes of TerminalFormatter
if cs:
highlight_opts['style'] = cs
fmt = cs
if filename not in file_cache:
update_cache(filename, opts)
filename = pyc2py(filename)
if filename not in file_cache: return None
pass
lines = file_cache[filename].lines
if fmt not in lines.keys():
lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
pass
return lines[fmt]
def highlight_array(array, trailing_nl=True,
bg='light', **options):
fmt_array = highlight_string(''.join(array),
bg, **options).split('\n')
lines = [ line + "\n" for line in fmt_array ]
if not trailing_nl: lines[-1] = lines[-1].rstrip('\n')
return lines
python_lexer = PythonLexer()
# TerminalFormatter uses a colorTHEME with light and dark pairs.
# But Terminal256Formatter uses a colorSTYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
global terminal_256_formatter
if options.get('style'):
if terminal_256_formatter.style != options['style']:
terminal_256_formatter = \
Terminal256Formatter(style=options['style'])
del options['style']
return highlight(string, python_lexer, terminal_256_formatter,
**options)
elif 'light' == bg:
return highlight(string, python_lexer, light_terminal_formatter,
**options)
else:
return highlight(string, python_lexer, dark_terminal_formatter,
**options)
pass
def path(filename):
"""Return full filename path for filename"""
filename = unmap_file(filename)
if filename not in file_cache:
return None
return file_cache[filename].path
def remap_file(from_file, to_file):
"""Make *to_file* be a synonym for *from_file*"""
file2file_remap[to_file] = from_file
return
def remap_file_lines(from_path, to_path, line_map_list):
"""Adds line_map list to the list of association of from_file to
to to_file"""
from_path = pyc2py(from_path)
cache_file(to_path)
remap_entry = file2file_remap_lines.get(to_path)
if remap_entry:
new_list = list(remap_entry.from_to_pairs) + list(line_map_list)
else:
new_list = line_map_list
# FIXME: look for duplicates ?
file2file_remap_lines[to_path] = RemapLineEntry(
from_path,
tuple(sorted(new_list, key=lambda t: t[0]))
)
return
def remove_remap_file(filename):
"""Remove any mapping for *filename* and return that if it exists"""
global file2file_remap
if filename in file2file_remap:
retval = file2file_remap[filename]
del file2file_remap[filename]
return retval
return None
def sha1(filename):
"""Return SHA1 of filename."""
filename = unmap_file(filename)
if filename not in file_cache:
cache_file(filename)
if filename not in file_cache:
return None
pass
if file_cache[filename].sha1:
return file_cache[filename].sha1.hexdigest()
sha1 = hashlib.sha1()
for line in file_cache[filename].lines['plain']:
sha1.update(line.encode('utf-8'))
pass
file_cache[filename].sha1 = sha1
return sha1.hexdigest()
def size(filename, use_cache_only=False):
"""Return the number of lines in filename. If `use_cache_only' is False,
we'll try to fetch the file if it is not cached."""
filename = unmap_file(filename)
if filename not in file_cache:
if not use_cache_only: cache_file(filename)
if filename not in file_cache:
return None
pass
return len(file_cache[filename].lines['plain'])
def maxline(filename, use_cache_only=False):
"""Return the maximum line number filename after taking into account
line remapping. If no remapping then this is the same as size"""
if filename not in file2file_remap_lines:
return size(filename, use_cache_only)
max_lineno = -1
remap_line_entry = file2file_remap_lines.get(filename)
if not remap_line_entry:
return size(filename, use_cache_only)
for t in remap_line_entry.from_to_pairs:
max_lineno = max(max_lineno, t[1])
if max_lineno == -1:
return size(filename, use_cache_only)
else:
return max_lineno
def stat(filename, use_cache_only=False):
"""Return stat() info for *filename*. If *use_cache_only* is *False*,
we will try to fetch the file if it is not cached."""
filename = pyc2py(filename)
if filename not in file_cache:
if not use_cache_only: cache_file(filename)
if filename not in file_cache:
return None
pass
return file_cache[filename].stat
def is_mapped_file(filename):
if filename in file2file_remap :
return 'file'
elif file2file_remap_lines.get(filename):
return 'file_line'
else:
return None
def unmap_file(filename):
# FIXME: this is wrong?
return file2file_remap.get(filename, filename)
def unmap_file_line(filename, line_number, reverse=False):
remap_line_entry = file2file_remap_lines.get(filename)
mapped_line_number = line_number
if remap_line_entry:
filename = remap_line_entry.mapped_path
cache_entry = file_cache.get(filename, None)
if cache_entry:
line_max = maxline(filename)
else:
line_max = large_int
last_t = (1, 1)
# FIXME: use binary search
# Note we assume assume from_line is increasing.
# Add sentinel at end of from pairs to handle using the final
# entry for line numbers greater than it.
# Find the closest mapped line number equal or before line_number.
for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
if reverse:
t = list(reversed(t))
if t[1] == line_number:
mapped_line_number = t[0]
break
elif t[1] > line_number:
mapped_line_number = last_t[0] + (line_number - last_t[1] )
break
last_t = t
pass
return (filename, mapped_line_number)
def update_cache(filename, opts=default_opts, module_globals=None):
"""Update a cache entry. If something is wrong, return
*None*. Return *True* if the cache was updated and *False* if not. If
*use_linecache_lines* is *True*, use an existing cache entry as source
for the lines of the file."""
if not filename: return None
orig_filename = filename
filename = pyc2py(filename)
if filename in file_cache: del file_cache[filename]
path = os.path.abspath(filename)
stat = None
if get_option('use_linecache_lines', opts):
fname_list = [filename]
mapped_path = file2file_remap.get(path)
if mapped_path:
fname_list.append(mapped_path)
for filename in fname_list:
try:
stat = os.stat(filename)
plain_lines = linecache.getlines(filename)
trailing_nl = has_trailing_nl(plain_lines[-1])
lines = {
'plain' : plain_lines,
}
file_cache[filename] = LineCacheInfo(stat, None, lines,
path, None)
except:
pass
pass
if orig_filename != filename:
file2file_remap[orig_filename] = filename
file2file_remap[os.path.abspath(orig_filename)] = filename
pass
file2file_remap[path] = filename
return filename
pass
pass
if os.path.exists(path):
stat = os.stat(path)
elif module_globals and '__loader__' in module_globals:
name = module_globals.get('__name__')
loader = module_globals['__loader__']
get_source = getattr(loader, 'get_source', None)
if name and get_source:
try:
data = get_source(name)
except (ImportError, IOError):
pass
else:
if data is None:
# No luck, the PEP302 loader cannot find the source
# for this module.
return None
# FIXME: DRY with code below
lines = {'plain' : data.splitlines()}
raw_string = ''.join(lines['plain'])
trailing_nl = has_trailing_nl(raw_string)
if 'style' in opts:
key = opts['style']
highlight_opts = {'style': key}
else:
key = 'terminal'
highlight_opts = {}
lines[key] = highlight_array(raw_string.split('\n'),
trailing_nl, **highlight_opts)
file_cache[filename] = \
LineCacheInfo(None, None, lines, filename, None)
file2file_remap[path] = filename
return True
pass
pass
if not os.path.isabs(filename):
# Try looking through the module search path, which is only useful
# when handling a relative filename.
stat = None
for dirname in sys.path:
path = os.path.join(dirname, filename)
if os.path.exists(path):
stat = os.stat(path)
break
pass
if not stat: return False
pass
try:
mode = 'r' if PYTHON3 else 'rU'
with open(path, mode) as fp:
lines = {'plain' : fp.readlines()}
eols = fp.newlines
except:
return None
# FIXME: DRY with code above
raw_string = ''.join(lines['plain'])
trailing_nl = has_trailing_nl(raw_string)
if 'style' in opts:
key = opts['style'] or 'default'
highlight_opts = {'style': key}
else:
key = 'terminal'
highlight_opts = {}
lines[key] = highlight_array(raw_string.split('\n'),
trailing_nl, **highlight_opts)
if orig_filename != filename:
file2file_remap[orig_filename] = filename
file2file_remap[os.path.abspath(orig_filename)] = filename
pass
pass
file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
file2file_remap[path] = filename
return True
# example usage
if __name__ == '__main__':
def yes_no(var):
if var: return ""
else: return "not "
return # Not reached
# print(getline(__file__, 1, {'output': 'dark'}))
# print(getline(__file__, 2, {'output': 'light'}))
# from pygments.styles import STYLE_MAP
# opts = {'style': list(STYLE_MAP.keys())[0]}
# print(getline(__file__, 1, opts))
# update_cache('os')
# lines = getlines(__file__)
# print("%s has %s lines" % (__file__, len(lines['plain'])))
# lines = getlines(__file__, {'output': 'light'})
# i = 0
# for line in lines:
# i += 1
# print(line.rstrip('\n').rstrip('\n'))
# if i > 20: break
# pass
# line = getline(__file__, 6)
# print("The 6th line is\n%s" % line)
# line = remap_file(__file__, 'another_name')
# print(getline('another_name', 7))
# print("Files cached: %s" % cached_files())
# update_cache(__file__)
# checkcache(__file__)
# print("%s has %s lines" % (__file__, size(__file__)))
# print("%s trace line numbers:\n" % __file__)
# print("%s " % repr(trace_line_numbers(__file__)))
# print("%s is %scached." % (__file__,
# yes_no(is_cached(__file__))))
# print(stat(__file__))
# print("Full path: %s" % path(__file__))
# checkcache() # Check all files in the cache
# clear_file_format_cache()
# clear_file_cache()
# print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
# # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
# # # if digest is not None: print digest.first[0]
# line = getline(__file__, 7)
# print("The 7th line is\n%s" % line)
orig_path = __file__
mapped_path = 'test2'
start_line = 10
start_mapped = 6
remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
for l in (1,):
line = getline(mapped_path, l+start_mapped)
print("Remapped %s line %d should be line %d of %s. line is:\n%s"
% (mapped_path, start_mapped+l, start_line+l, orig_path, line))
# print("XXX", file2file_remap_lines)
|
rocky/python-filecache
|
pyficache/main.py
|
update_cache
|
python
|
def update_cache(filename, opts=default_opts, module_globals=None):
if not filename: return None
orig_filename = filename
filename = pyc2py(filename)
if filename in file_cache: del file_cache[filename]
path = os.path.abspath(filename)
stat = None
if get_option('use_linecache_lines', opts):
fname_list = [filename]
mapped_path = file2file_remap.get(path)
if mapped_path:
fname_list.append(mapped_path)
for filename in fname_list:
try:
stat = os.stat(filename)
plain_lines = linecache.getlines(filename)
trailing_nl = has_trailing_nl(plain_lines[-1])
lines = {
'plain' : plain_lines,
}
file_cache[filename] = LineCacheInfo(stat, None, lines,
path, None)
except:
pass
pass
if orig_filename != filename:
file2file_remap[orig_filename] = filename
file2file_remap[os.path.abspath(orig_filename)] = filename
pass
file2file_remap[path] = filename
return filename
pass
pass
if os.path.exists(path):
stat = os.stat(path)
elif module_globals and '__loader__' in module_globals:
name = module_globals.get('__name__')
loader = module_globals['__loader__']
get_source = getattr(loader, 'get_source', None)
if name and get_source:
try:
data = get_source(name)
except (ImportError, IOError):
pass
else:
if data is None:
# No luck, the PEP302 loader cannot find the source
# for this module.
return None
# FIXME: DRY with code below
lines = {'plain' : data.splitlines()}
raw_string = ''.join(lines['plain'])
trailing_nl = has_trailing_nl(raw_string)
if 'style' in opts:
key = opts['style']
highlight_opts = {'style': key}
else:
key = 'terminal'
highlight_opts = {}
lines[key] = highlight_array(raw_string.split('\n'),
trailing_nl, **highlight_opts)
file_cache[filename] = \
LineCacheInfo(None, None, lines, filename, None)
file2file_remap[path] = filename
return True
pass
pass
if not os.path.isabs(filename):
# Try looking through the module search path, which is only useful
# when handling a relative filename.
stat = None
for dirname in sys.path:
path = os.path.join(dirname, filename)
if os.path.exists(path):
stat = os.stat(path)
break
pass
if not stat: return False
pass
try:
mode = 'r' if PYTHON3 else 'rU'
with open(path, mode) as fp:
lines = {'plain' : fp.readlines()}
eols = fp.newlines
except:
return None
# FIXME: DRY with code above
raw_string = ''.join(lines['plain'])
trailing_nl = has_trailing_nl(raw_string)
if 'style' in opts:
key = opts['style'] or 'default'
highlight_opts = {'style': key}
else:
key = 'terminal'
highlight_opts = {}
lines[key] = highlight_array(raw_string.split('\n'),
trailing_nl, **highlight_opts)
if orig_filename != filename:
file2file_remap[orig_filename] = filename
file2file_remap[os.path.abspath(orig_filename)] = filename
pass
pass
file_cache[filename] = LineCacheInfo(stat, None, lines, path, None, eols)
file2file_remap[path] = filename
return True
|
Update a cache entry. If something is wrong, return
*None*. Return *True* if the cache was updated and *False* if not. If
*use_linecache_lines* is *True*, use an existing cache entry as source
for the lines of the file.
|
train
|
https://github.com/rocky/python-filecache/blob/60709ccd837ef5df001faf3cb02d4979ba342a23/pyficache/main.py#L560-L676
|
[
"def highlight_array(array, trailing_nl=True,\n bg='light', **options):\n fmt_array = highlight_string(''.join(array),\n bg, **options).split('\\n')\n lines = [ line + \"\\n\" for line in fmt_array ]\n if not trailing_nl: lines[-1] = lines[-1].rstrip('\\n')\n return lines\n",
"def pyc2py(filename):\n \"\"\"\n Find corresponding .py name given a .pyc or .pyo\n \"\"\"\n if re.match(\".*py[co]$\", filename):\n if PYTHON3:\n return re.sub(r'(.*)__pycache__/(.+)\\.cpython-%s.py[co]$' % PYVER,\n '\\\\1\\\\2.py',\n filename)\n else:\n return filename[:-1]\n return filename\n",
"def get_option(key, options):\n global default_opts\n if not options or key not in options:\n return default_opts.get(key)\n else:\n return options[key]\n return None # Not reached\n",
"def has_trailing_nl(string):\n return len(string) > 0 and '\\n' == string[-1]\n"
] |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2012-2013, 2015-2016, 2018
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Read and cache lines of a Python program.
Module to get line from any file, caching lines of the file on
first access to the file. Although the file may be any file, this
package is more tailored to the case where the file is a Python script.
Synopsis
--------
import pyficache
filename = __file__ # e.g. '/tmp/myprogram'
# return all lines of filename as an array
lines = pyficache.getlines(filename)
# return line 6, and reload all lines if the file has changed.
line = pyficache.getline(filename, 6, {'reload_on_change': True})
# return line 6 syntax highlighted via pygments using style 'emacs'
line = pyficache.getline(filename, 6, {'style': 'emacs'})
pyficache.remap_file('/tmp/myprogram.py', 'another-name')
line_from_alias = pyficache.getline('another-name', 6)
assert __file__, pyficache.remove_remap_file('another-name')
# another-name is no longer an alias for /tmp/myprogram
assert None, pyficache.remove_remap_file('another-name')
# Clear cache for __file__
pyficache.clear_file_cache(__file__)
# Clear all cached files.
pyficache.clear_file_cache()
# Check for modifications of all cached files.
pyficache.update_cache()
"""
import coverage, hashlib, linecache, os, re, sys
from collections import namedtuple
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter
PYTHON3 = (sys.version_info >= (3, 0))
PYVER = "%s%s" % sys.version_info[0:2]
if PYTHON3:
large_int = sys.maxsize
else:
large_int = sys.maxint
default_opts = {
'reload_on_change' : False, # Check if file has changed since last
# time
'use_linecache_lines' : True,
'strip_nl' : True, # Strip trailing \n on line returned
'output' : 'plain' # To we want plain output?
# Set to 'terminal'
# for terminal syntax-colored output
}
def get_option(key, options):
global default_opts
if not options or key not in options:
return default_opts.get(key)
else:
return options[key]
return None # Not reached
def has_trailing_nl(string):
return len(string) > 0 and '\n' == string[-1]
def pyc2py(filename):
"""
Find corresponding .py name given a .pyc or .pyo
"""
if re.match(".*py[co]$", filename):
if PYTHON3:
return re.sub(r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER,
'\\1\\2.py',
filename)
else:
return filename[:-1]
return filename
class LineCacheInfo:
def __init__(self, stat, line_numbers, lines, path, sha1, eols=None):
self.stat, self.lines, self.path, self.sha1 = (stat, lines, path, sha1)
self.line_numbers = line_numbers
self.eols = eols
return
pass
# The file cache. The key is a name as would be given by co_filename
# or __file__. The value is a LineCacheInfo object.
file_cache = {}
script_cache = {}
# `file2file_remap` maps a path (a string) to another path key in file_cache (a
# string).
#
# One important use of file2file_remap is mapping the a full path of a
# file into the name stored in file_cache or given by a Python
# __file__. Applications such as those that get input from users, may
# want to canonicalize the path before looking it up. This map gives a
# way to canonicalize.
#
file2file_remap = {}
# `file2file_remap_lines` goes further than `file2file_remap` and
# allows a path and line number to another path and line number.
#
# One use of this is in translation systems where Python is embedded
# in some other source code. Or perhaps you used uncompyle6 with
# the --linemap option to recreate the source code and want to
# keep the associations between the line numbers in the code
# with line numbers is the recreated source.
#
# The key of `file2file_remap_lines` is the name of the path as Python
# sees it. For example it would be the name that would be found inside a code
# object co_filename. The value of the dictionary is a RemapLineEntry
# described below
file2file_remap_lines = {}
# `RemapLineEntry` is an entry in file2file_remap_lines with fields:
#
# * `mapped_path` is # the name of the source path.
# For example this might not be a Python file
# per se, but the thing from which Python was extracted from.
#
# * `from_to_pairs` is a tuple of integer pairs. For each pair, the first
# item is a line number in as Python sees it. The second item is the
# line number in corresponding mapped_path. The the first entry of the
# pair should always increase from the previous value. The second entry
# doesn't have to, although in practice it will.
RemapLineEntry = namedtuple("RemapLineEntry", 'mapped_path from_to_pairs')
# Example. File "unmapped.template" contains:
# x = 1; y = 2 # line 1
# # a comment # line 2
# # another comment # line 3
# z = 4 # line 4
# a=5 # line 5
# File "mapped_file.py" contains:
# # This file was recreated from foo.template # line 1
# # and is reformatted according to PEP8 # line 2
# x = 1 # line 3
# y = 2 # line 4
# z = 4 # line 5
# a = 5 # line 6
# file2file_remap_lines = {
# 'foo.template = RemapLineEntry("mapped_file.py", ((1, 3), (4, 5)))
# }
# In this example, line 1 of foo.template corresponds to line 3 of
# mapped_file.py. There is no line recorded in foo.template that corresponds
# to line 4 of mapped_file. Line 4 of foo.template corresponds to line 5 of
# mapped_file.py. And line 5 of foo.template implicitly corresponds to line 6
# of mapped_file.py since there is nothing to indicate contrary and since that
# line exists in mapped_file.
# Note that this scheme allows the possibility that several co_filenames can be
# mapped to a single file. So a templating system could break a single template
# into several Python files and we can track that. But we not the other way
# around. That is we don't support tracking several templated files which got
# built into a single Python module.
# At such time as the need arises, we will work this.
def clear_file_cache(filename=None):
"""Clear the file cache. If no filename is given clear it entirely.
if a filename is given, clear just that filename."""
global file_cache, file2file_remap, file2file_remap_lines
if filename is not None:
if filename in file_cache:
del file_cache[filename]
pass
else:
file_cache = {}
file2file_remap = {}
file2file_remap_lines = {}
pass
return
def clear_file_format_cache():
"""Remove syntax-formatted lines in the cache. Use this
when you change the Pygments syntax or Token formatting
and want to redo how files may have previously been
syntax marked."""
for fname, cache_info in file_cache.items():
for format, lines in cache_info.lines.items():
if 'plain' == format: continue
file_cache[fname].lines[format] = None
pass
pass
pass
def cached_files():
"""Return an array of cached file names"""
return list(file_cache.keys())
def checkcache(filename=None, opts=False):
    """Discard cache entries that are out of date. If *filename* is *None*
    all entries in the file cache *file_cache* are checked. If we do not
    have stat information about a file it will be kept. Return a list of
    invalidated filenames. None is returned if a filename was given but
    not found cached."""
    # *opts* may be a full options dict or just the bare
    # use_linecache_lines boolean (legacy calling convention).
    if isinstance(opts, dict):
        use_linecache_lines = opts['use_linecache_lines']
    else:
        use_linecache_lines = opts
        pass
    if not filename:
        filenames = list(file_cache.keys())
    elif filename in file_cache:
        filenames = [filename]
    else:
        return None
    result = []
    for filename in filenames:
        if filename not in file_cache: continue
        path = file_cache[filename].path
        if os.path.exists(path):
            cache_info = file_cache[filename].stat
            stat = os.stat(path)
            # A size or mtime change means the cached copy is stale;
            # record it and re-read the file.
            if stat and \
               (cache_info.st_size != stat.st_size or
                cache_info.st_mtime != stat.st_mtime):
                result.append(filename)
                update_cache(filename, use_linecache_lines)
        else:
            # File vanished from disk: still invalidate and rebuild
            # whatever state update_cache can derive.
            result.append(filename)
            update_cache(filename)
            pass
        pass
    return result
def cache_script(script, text, opts={}):
    """Ensure *script* is present in the script cache; return *script*."""
    global script_cache
    # Only populate on a miss; existing entries are left untouched.
    if script not in script_cache:
        update_script_cache(script, text, opts)
    return script
def uncache_script(script, opts={}):
    """Drop *script* from the script cache.

    Return *script* when an entry was removed, otherwise None.
    """
    global script_cache
    if script not in script_cache:
        return None
    del script_cache[script]
    return script
def update_script_cache(script, text, opts={}):
    """Store *text* for *script* unless already cached; return *script*."""
    global script_cache
    # setdefault inserts only on a miss, matching the original
    # "if not present: assign" sequence.
    script_cache.setdefault(script, text)
    return script
def cache_file(filename, reload_on_change=False, opts=default_opts):
    """Cache filename if it is not already cached.

    Return the expanded filename for it in the cache
    or None if we can not find the file.
    """
    filename = pyc2py(filename)
    if filename in file_cache:
        if reload_on_change:
            checkcache(filename)
    else:
        # Work on a copy: the previous code mutated *opts* in place,
        # which permanently altered the shared default_opts dict (a
        # classic mutable-default-argument bug) as well as any
        # caller-supplied options dict.
        opts = dict(opts)
        opts['use_linecache_lines'] = True
        update_cache(filename, opts)
    if filename in file_cache:
        return file_cache[filename].path
    return None
def is_cached(file_or_script):
    """Return True if *file_or_script* has a cache entry."""
    # Non-string arguments are treated as scripts.
    if not isinstance(file_or_script, str):
        return is_cached_script(file_or_script)
    return unmap_file(file_or_script) in file_cache
def is_cached_script(filename):
    """Return True if *filename* (after unmapping) is in the script cache."""
    # Test membership on the dict directly; building a key list first
    # was an O(n) detour for an O(1) lookup.
    return unmap_file(filename) in script_cache
def is_empty(filename):
    # True when the cached file has no source lines at all.
    # NOTE(review): raises KeyError if *filename* is not already cached --
    # callers appear to be expected to cache it first; confirm.
    filename=unmap_file(filename)
    return 0 == len(file_cache[filename].lines['plain'])
def getline(file_or_script, line_number, opts=default_opts):
    """Get line *line_number* from file named *file_or_script*. Return None if
    there was a problem or it is not found.
    Example:
    lines = pyficache.getline("/tmp/myfile.py")
    """
    # Resolve any file alias, then translate the line number through the
    # registered line remapping for that file.
    filename = unmap_file(file_or_script)
    filename, line_number = unmap_file_line(filename, line_number)
    lines = getlines(filename, opts)
    # maxline() (rather than len(lines)) so remapped line numbers are
    # honored when validating the requested line.
    if lines and line_number >=1 and line_number <= maxline(filename):
        line = lines[line_number-1]
        if get_option('strip_nl', opts):
            return line.rstrip('\n')
        else:
            return line
        pass
    else:
        return None
    return # Not reached
def getlines(filename, opts=default_opts):
    """Read lines of *filename* and cache the results. However, if
    *filename* was previously cached use the results from the
    cache. Return *None* if we can not get lines
    """
    if get_option('reload_on_change', opts): checkcache(filename)
    fmt = get_option('output', opts)
    highlight_opts = {'bg': fmt}
    cs = opts.get('style')
    # Colorstyle of Terminal256Formatter takes precedence over
    # light/dark colorthemes of TerminalFormatter
    if cs:
        highlight_opts['style'] = cs
        fmt = cs
    if filename not in file_cache:
        update_cache(filename, opts)
        filename = pyc2py(filename)
        if filename not in file_cache: return None
        pass
    lines = file_cache[filename].lines
    # Lazily highlight: each output format is rendered at most once per
    # file and memoized under its format key.
    if fmt not in lines.keys():
        lines[fmt] = highlight_array(lines['plain'], **highlight_opts)
        pass
    return lines[fmt]
def highlight_array(array, trailing_nl=True,
                    bg='light', **options):
    """Syntax-highlight a list of source lines.

    Joins *array* into a single string, colorizes it, and returns it
    split back into newline-terminated lines.  When *trailing_nl* is
    false, the final line is returned without its newline.
    """
    colorized = highlight_string(''.join(array), bg, **options)
    result = ['%s\n' % piece for piece in colorized.split('\n')]
    if not trailing_nl:
        result[-1] = result[-1].rstrip('\n')
    return result
# Shared Pygments lexer/formatter instances so repeated highlighting
# calls do not rebuild the machinery each time.
python_lexer = PythonLexer()
# TerminalFormatter uses a colorTHEME with light and dark pairs.
# But Terminal256Formatter uses a colorSTYLE. Ugh
dark_terminal_formatter = TerminalFormatter(bg = 'dark')
light_terminal_formatter = TerminalFormatter(bg = 'light')
# Rebuilt on demand by highlight_string() when a specific style is requested.
terminal_256_formatter = Terminal256Formatter()
def highlight_string(string, bg='light', **options):
    # Colorize *string* as Python source.  A 'style' option selects the
    # 256-color formatter (rebuilding the shared module-level instance
    # only when the requested style actually differs); otherwise *bg*
    # picks the light or dark terminal theme formatter.
    global terminal_256_formatter
    if options.get('style'):
        if terminal_256_formatter.style != options['style']:
            terminal_256_formatter = \
                Terminal256Formatter(style=options['style'])
        # 'style' is consumed here; highlight() would reject it.
        del options['style']
        return highlight(string, python_lexer, terminal_256_formatter,
                         **options)
    elif 'light' == bg:
        return highlight(string, python_lexer, light_terminal_formatter,
                         **options)
    else:
        return highlight(string, python_lexer, dark_terminal_formatter,
                         **options)
    pass
def path(filename):
    """Return the full path stored in the cache for *filename*, or None
    when the (unmapped) name has no cache entry."""
    entry = file_cache.get(unmap_file(filename))
    return entry.path if entry is not None else None
def remap_file(from_file, to_file):
    """Make *to_file* be a synonym for *from_file*."""
    # Later unmap_file(to_file) lookups will resolve to from_file.
    file2file_remap.update({to_file: from_file})
    return
def remap_file_lines(from_path, to_path, line_map_list):
    """Add *line_map_list* to the line-number associations mapping
    *from_path* to *to_path*."""
    from_path = pyc2py(from_path)
    # Make sure the target is cached so later lookups can resolve it.
    cache_file(to_path)
    remap_entry = file2file_remap_lines.get(to_path)
    if remap_entry:
        # Extend any existing mapping rather than replacing it.
        new_list = list(remap_entry.from_to_pairs) + list(line_map_list)
    else:
        new_list = line_map_list
    # FIXME: look for duplicates ?
    # Pairs are kept sorted by from-line so unmap_file_line() can scan
    # them in increasing order.
    file2file_remap_lines[to_path] = RemapLineEntry(
        from_path,
        tuple(sorted(new_list, key=lambda t: t[0]))
    )
    return
def remove_remap_file(filename):
    """Remove any mapping for *filename* and return that if it exists"""
    global file2file_remap
    # dict.pop collapses the membership test, lookup and delete into a
    # single operation; the None default preserves the old return value
    # for unmapped names.
    return file2file_remap.pop(filename, None)
def sha1(filename):
    """Return SHA1 of filename."""
    filename = unmap_file(filename)
    if filename not in file_cache:
        cache_file(filename)
        if filename not in file_cache:
            return None
        pass
    # Memoized: reuse a digest previously computed for this entry.
    if file_cache[filename].sha1:
        return file_cache[filename].sha1.hexdigest()
    # NOTE: local name deliberately shadows this function for the rest
    # of the body.
    sha1 = hashlib.sha1()
    for line in file_cache[filename].lines['plain']:
        sha1.update(line.encode('utf-8'))
        pass
    file_cache[filename].sha1 = sha1
    return sha1.hexdigest()
def size(filename, use_cache_only=False):
    """Return the number of lines in filename. If `use_cache_only' is False,
    we'll try to fetch the file if it is not cached."""
    name = unmap_file(filename)
    if name not in file_cache:
        if not use_cache_only:
            cache_file(name)
        # Still missing after the (optional) fetch attempt.
        if name not in file_cache:
            return None
    return len(file_cache[name].lines['plain'])
def maxline(filename, use_cache_only=False):
    """Return the maximum line number filename after taking into account
    line remapping. If no remapping then this is the same as size"""
    # A single .get() replaces the original's redundant membership test
    # followed by a second lookup of the same key.
    remap_line_entry = file2file_remap_lines.get(filename)
    if not remap_line_entry:
        return size(filename, use_cache_only)
    # Largest to-line among the remap pairs; an empty pair list (the
    # -1 default) falls back to the plain file size, as before.
    max_lineno = max((t[1] for t in remap_line_entry.from_to_pairs),
                     default=-1)
    if max_lineno == -1:
        return size(filename, use_cache_only)
    return max_lineno
def stat(filename, use_cache_only=False):
    """Return stat() info for *filename*. If *use_cache_only* is *False*,
    we will try to fetch the file if it is not cached."""
    # NOTE(review): unlike size()/sha1(), this resolves .pyc names via
    # pyc2py() but does not go through unmap_file() -- confirm intended.
    filename = pyc2py(filename)
    if filename not in file_cache:
        if not use_cache_only: cache_file(filename)
        if filename not in file_cache:
            return None
        pass
    return file_cache[filename].stat
def trace_line_numbers(filename, reload_on_change=False):
    """Return an Array of breakpoints in filename.
    The list will contain an entry for each distinct line event call
    so it is possible (and possibly useful) for a line number appear more
    than once."""
    fullname = cache_file(filename, reload_on_change)
    if not fullname: return None
    e = file_cache[filename]
    # Computed once and memoized on the cache entry.
    if not e.line_numbers:
        # Older coverage.py exposed analyze_morf(); newer releases use a
        # coverage() object with analysis().
        if hasattr(coverage.coverage, 'analyze_morf'):
            e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1]
        else:
            cov = coverage.coverage()
            cov._warn_no_data = False
            e.line_numbers = cov.analysis(fullname)[1]
            pass
        pass
    return e.line_numbers
def is_mapped_file(filename):
    """Classify how *filename* is remapped.

    Returns 'file' for a whole-file alias, 'file_line' for a line-number
    remapping, or None when no mapping exists.
    """
    if filename in file2file_remap:
        return 'file'
    if file2file_remap_lines.get(filename):
        return 'file_line'
    return None
def unmap_file(filename):
    # Resolve a remapped (alias) name back to its underlying file name;
    # names without a mapping pass through unchanged.
    # FIXME: this is wrong?
    return file2file_remap.get(filename, filename)
def unmap_file_line(filename, line_number, reverse=False):
    # Translate (filename, line_number) through any registered line
    # remapping; with reverse=True the pairs are read in the opposite
    # direction.  Returns the (possibly new) filename and line number.
    remap_line_entry = file2file_remap_lines.get(filename)
    mapped_line_number = line_number
    if remap_line_entry:
        filename = remap_line_entry.mapped_path
        cache_entry = file_cache.get(filename, None)
        if cache_entry:
            line_max = maxline(filename)
        else:
            # Uncached target: no known upper bound, so use a huge one.
            line_max = large_int
        last_t = (1, 1)
        # FIXME: use binary search
        # Note we assume from_line is increasing.
        # Add sentinel at end of from pairs to handle using the final
        # entry for line numbers greater than it.
        # Find the closest mapped line number equal or before line_number.
        for t in remap_line_entry.from_to_pairs + ((large_int, line_max),):
            if reverse:
                t = list(reversed(t))
            if t[1] == line_number:
                mapped_line_number = t[0]
                break
            elif t[1] > line_number:
                # Between two anchors: offset from the preceding pair.
                mapped_line_number = last_t[0] + (line_number - last_t[1] )
                break
            last_t = t
            pass
    return (filename, mapped_line_number)
# example usage
if __name__ == '__main__':
    def yes_no(var):
        # Demo helper: "" for truthy values, "not " otherwise.
        if var: return ""
        else: return "not "
        return # Not reached
    # print(getline(__file__, 1, {'output': 'dark'}))
    # print(getline(__file__, 2, {'output': 'light'}))
    # from pygments.styles import STYLE_MAP
    # opts = {'style': list(STYLE_MAP.keys())[0]}
    # print(getline(__file__, 1, opts))
    # update_cache('os')
    # lines = getlines(__file__)
    # print("%s has %s lines" % (__file__, len(lines['plain'])))
    # lines = getlines(__file__, {'output': 'light'})
    # i = 0
    # for line in lines:
    #     i += 1
    #     print(line.rstrip('\n').rstrip('\n'))
    #     if i > 20: break
    #     pass
    # line = getline(__file__, 6)
    # print("The 6th line is\n%s" % line)
    # line = remap_file(__file__, 'another_name')
    # print(getline('another_name', 7))
    # print("Files cached: %s" % cached_files())
    # update_cache(__file__)
    # checkcache(__file__)
    # print("%s has %s lines" % (__file__, size(__file__)))
    # print("%s trace line numbers:\n" % __file__)
    # print("%s " % repr(trace_line_numbers(__file__)))
    # print("%s is %scached." % (__file__,
    #                            yes_no(is_cached(__file__))))
    # print(stat(__file__))
    # print("Full path: %s" % path(__file__))
    # checkcache() # Check all files in the cache
    # clear_file_format_cache()
    # clear_file_cache()
    # print(("%s is now %scached." % (__file__, yes_no(is_cached(__file__)))))
    # # # digest = SCRIPT_LINES__.select{|k,v| k =~ /digest.rb$/}
    # # # if digest is not None: print digest.first[0]
    # line = getline(__file__, 7)
    # print("The 7th line is\n%s" % line)
    # Live demo: map line 10 of this file to line 6 of 'test2' and show
    # that getline() follows the remapping.
    orig_path = __file__
    mapped_path = 'test2'
    start_line = 10
    start_mapped = 6
    remap_file_lines(orig_path, mapped_path, ((start_line, start_mapped),))
    for l in (1,):
        line = getline(mapped_path, l+start_mapped)
        print("Remapped %s line %d should be line %d of %s. line is:\n%s"
              % (mapped_path, start_mapped+l, start_line+l, orig_path, line))
    # print("XXX", file2file_remap_lines)
|
Min-ops/cruddy
|
cruddy/__init__.py
|
CRUD.describe
|
python
|
def describe(self, **kwargs):
response = self._new_response()
description = {
'cruddy_version': __version__,
'table_name': self.table_name,
'supported_operations': copy.copy(self.supported_ops),
'prototype': copy.deepcopy(self.prototype),
'operations': {}
}
for name, method in inspect.getmembers(self, inspect.ismethod):
if not name.startswith('_'):
argspec = inspect.getargspec(method)
if argspec.defaults is None:
defaults = None
else:
defaults = list(argspec.defaults)
method_info = {
'docs': inspect.getdoc(method),
'argspec': {
'args': argspec.args,
'varargs': argspec.varargs,
'keywords': argspec.keywords,
'defaults': defaults
}
}
description['operations'][name] = method_info
response.data = description
return response
|
Returns descriptive information about this cruddy handler and the
methods supported by it.
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/__init__.py#L181-L212
|
[
"def _new_response(self):\n return CRUDResponse(self._debug)\n"
] |
class CRUD(object):
    """CRUD handler backed by a single DynamoDB table.

    Wraps boto3 DynamoDB calls behind uniform operations (create, get,
    update, delete, search, ...) that all return a CRUDResponse.
    """

    # Default operation set; callers may restrict it with the
    # ``supported_ops`` keyword argument.
    SupportedOps = ["create", "update", "get", "delete", "bulk_delete",
                    "list", "search", "increment_counter",
                    "describe", "ping"]

    def __init__(self, **kwargs):
        """
        Create a new CRUD handler. The CRUD handler accepts the following
        parameters:
        * table_name - name of the backing DynamoDB table (required)
        * profile_name - name of the AWS credential profile to use when
        creating the boto3 Session
        * region_name - name of the AWS region to use when creating the
        boto3 Session
        * prototype - a dictionary of name/value pairs that will be used to
        initialize newly created items
        * supported_ops - a list of operations supported by the CRUD handler
        (choices are list, get, create, update, delete, search,
        increment_counter, describe, help, ping)
        * encrypted_attributes - a list of tuples where the first item in the
        tuple is the name of the attribute that should be encrypted and the
        second item in the tuple is the KMS master key ID to use for
        encrypting/decrypting the value
        * debug - if not False this will cause the raw_response to be left
        in the response dictionary
        """
        self.table_name = kwargs['table_name']
        profile_name = kwargs.get('profile_name')
        region_name = kwargs.get('region_name')
        placebo = kwargs.get('placebo')
        placebo_dir = kwargs.get('placebo_dir')
        placebo_mode = kwargs.get('placebo_mode', 'record')
        self.prototype = kwargs.get('prototype', dict())
        self._prototype_handler = PrototypeHandler(self.prototype)
        # Copy before modifying: the previous code appended 'describe'
        # directly to the caller's list -- or to the shared class-level
        # SupportedOps list, which grew by one on every instantiation.
        self.supported_ops = list(kwargs.get('supported_ops',
                                             self.SupportedOps))
        if 'describe' not in self.supported_ops:
            self.supported_ops.append('describe')
        self.encrypted_attributes = kwargs.get('encrypted_attributes', list())
        session = boto3.Session(profile_name=profile_name,
                                region_name=region_name)
        if placebo and placebo_dir:
            # Optional request record/playback support (testing).
            self.pill = placebo.attach(session, placebo_dir, debug=True)
            if placebo_mode == 'record':
                self.pill.record()
            else:
                self.pill.playback()
        else:
            self.pill = None
        ddb_resource = session.resource('dynamodb')
        self.table = ddb_resource.Table(self.table_name)
        self._indexes = {}
        self._analyze_table()
        self._debug = kwargs.get('debug', False)
        # Only create a KMS client when attribute encryption is in use.
        if self.encrypted_attributes:
            self._kms_client = session.client('kms')
        else:
            self._kms_client = None

    def _analyze_table(self):
        """Record the table's HASH key and each GSI HASH key in
        self._indexes (value is the GSI name, or None for the table key)."""
        # First check the Key Schema
        if len(self.table.key_schema) != 1:
            LOG.info('cruddy does not support RANGE keys')
        else:
            self._indexes[self.table.key_schema[0]['AttributeName']] = None
        # Now process any GSI's
        if self.table.global_secondary_indexes:
            for gsi in self.table.global_secondary_indexes:
                # find HASH of GSI, that's all we support for now
                # if the GSI has a RANGE, we ignore it for now
                if len(gsi['KeySchema']) == 1:
                    gsi_hash = gsi['KeySchema'][0]['AttributeName']
                    self._indexes[gsi_hash] = gsi['IndexName']

    # Because the Boto3 DynamoDB client turns all numeric types into Decimals
    # (which is actually the right thing to do) we need to convert those
    # Decimal values back into integers or floats before serializing to JSON.
    def _replace_decimals(self, obj):
        """Recursively convert decimal.Decimal values in *obj* (in place for
        containers) to int or float; return the converted object."""
        if isinstance(obj, list):
            # Py3 fix: the original used Python 2's xrange(), which does
            # not exist on Python 3.
            for i in range(len(obj)):
                obj[i] = self._replace_decimals(obj[i])
            return obj
        elif isinstance(obj, dict):
            # Py3 fix: dict.iterkeys() was removed in Python 3; iterating
            # the dict directly is safe here since only values change.
            for k in obj:
                obj[k] = self._replace_decimals(obj[k])
            return obj
        elif isinstance(obj, decimal.Decimal):
            # Whole numbers become ints, everything else floats.
            if obj % 1 == 0:
                return int(obj)
            else:
                return float(obj)
        else:
            return obj

    def _encrypt(self, item):
        """KMS-encrypt (in place) each configured attribute present in item."""
        for encrypted_attr, master_key_id in self.encrypted_attributes:
            if encrypted_attr in item:
                response = self._kms_client.encrypt(
                    KeyId=master_key_id,
                    Plaintext=item[encrypted_attr])
                blob = response['CiphertextBlob']
                item[encrypted_attr] = base64.b64encode(blob)

    def _decrypt(self, item):
        """KMS-decrypt (in place) each configured attribute present in item."""
        for encrypted_attr, master_key_id in self.encrypted_attributes:
            if encrypted_attr in item:
                response = self._kms_client.decrypt(
                    CiphertextBlob=base64.b64decode(item[encrypted_attr]))
                item[encrypted_attr] = response['Plaintext']

    def _check_supported_op(self, op_name, response):
        """Return True if op_name is enabled; otherwise mark *response* as
        an UnsupportedOperation error and return False."""
        if op_name not in self.supported_ops:
            response.status = 'error'
            response.error_type = 'UnsupportedOperation'
            response.error_message = 'Unsupported operation: {}'.format(
                op_name)
            return False
        return True

    def _call_ddb_method(self, method, kwargs, response):
        """Invoke a boto3 table method, storing its result (or the error
        details) on *response* instead of letting exceptions propagate."""
        try:
            response.raw_response = method(**kwargs)
        except ClientError as e:
            LOG.debug(e)
            response.status = 'error'
            response.error_message = e.response['Error'].get('Message')
            response.error_code = e.response['Error'].get('Code')
            response.error_type = e.response['Error'].get('Type')
        except Exception as e:
            response.status = 'error'
            response.error_type = e.__class__.__name__
            response.error_code = None
            response.error_message = str(e)

    def _new_response(self):
        """Create a fresh CRUDResponse honoring the debug flag."""
        return CRUDResponse(self._debug)

    def ping(self, **kwargs):
        """
        A no-op method that simply returns a successful response.
        """
        response = self._new_response()
        return response

    def search(self, query, **kwargs):
        """
        Cruddy provides a limited but useful interface to search GSI indexes in
        DynamoDB with the following limitations (hopefully some of these will
        be expanded or eliminated in the future.
        * The GSI must be configured with a only HASH and not a RANGE.
        * The only operation supported in the query is equality
        To use the ``search`` operation you must pass in a query string of this
        form:
        <attribute_name>=<value>
        As stated above, the only operation currently supported is equality (=)
        but other operations will be added over time. Also, the
        ``attribute_name`` must be an attribute which is configured as the
        ``HASH`` of a GSI in the DynamoDB table. If all of the above
        conditions are met, the ``query`` operation will return a list
        (possibly empty) of all items matching the query and the ``status`` of
        the response will be ``success``. Otherwise, the ``status`` will be
        ``error`` and the ``error_type`` and ``error_message`` will provide
        further information about the error.
        """
        response = self._new_response()
        if self._check_supported_op('search', response):
            if '=' not in query:
                response.status = 'error'
                response.error_type = 'InvalidQuery'
                msg = 'Only the = operation is supported'
                response.error_message = msg
            else:
                # Split only on the first '=' so values that themselves
                # contain '=' no longer raise ValueError.
                key, value = query.split('=', 1)
                if key not in self._indexes:
                    response.status = 'error'
                    response.error_type = 'InvalidQuery'
                    msg = 'Attribute {} is not indexed'.format(key)
                    response.error_message = msg
                else:
                    params = {'KeyConditionExpression': Key(key).eq(value)}
                    index_name = self._indexes[key]
                    # A None index name means the table's own HASH key.
                    if index_name:
                        params['IndexName'] = index_name
                    pe = kwargs.get('projection_expression')
                    if pe:
                        params['ProjectionExpression'] = pe
                    self._call_ddb_method(self.table.query,
                                          params, response)
                    if response.status == 'success':
                        response.data = self._replace_decimals(
                            response.raw_response['Items'])
            response.prepare()
        return response

    def list(self, **kwargs):
        """
        Returns a list of items in the database. Encrypted attributes are not
        decrypted when listing items.
        """
        response = self._new_response()
        if self._check_supported_op('list', response):
            self._call_ddb_method(self.table.scan, {}, response)
            if response.status == 'success':
                response.data = self._replace_decimals(
                    response.raw_response['Items'])
            response.prepare()
        return response

    def get(self, id, decrypt=False, id_name='id', **kwargs):
        """
        Returns the item corresponding to ``id``. If the ``decrypt`` param is
        not False (the default) any encrypted attributes in the item will be
        decrypted before the item is returned. If not, the encrypted
        attributes will contain the encrypted value.
        """
        response = self._new_response()
        if self._check_supported_op('get', response):
            if id is None:
                response.status = 'error'
                response.error_type = 'IDRequired'
                response.error_message = 'Get requires an id'
            else:
                params = {'Key': {id_name: id},
                          'ConsistentRead': True}
                self._call_ddb_method(self.table.get_item,
                                      params, response)
                if response.status == 'success':
                    if 'Item' in response.raw_response:
                        item = response.raw_response['Item']
                        if decrypt:
                            self._decrypt(item)
                        response.data = self._replace_decimals(item)
                    else:
                        response.status = 'error'
                        response.error_type = 'NotFound'
                        msg = 'item ({}) not found'.format(id)
                        response.error_message = msg
            response.prepare()
        return response

    def create(self, item, **kwargs):
        """
        Creates a new item. You pass in an item containing initial values.
        Any attribute names defined in ``prototype`` that are missing from the
        item will be added using the default value defined in ``prototype``.
        """
        # NOTE(review): unlike the other operations, create does not call
        # _check_supported_op -- it cannot be disabled via supported_ops.
        # Preserved as-is; confirm whether this is intentional.
        response = self._new_response()
        if self._prototype_handler.check(item, 'create', response):
            self._encrypt(item)
            params = {'Item': item}
            self._call_ddb_method(self.table.put_item,
                                  params, response)
            if response.status == 'success':
                response.data = item
        response.prepare()
        return response

    def update(self, item, encrypt=True, **kwargs):
        """
        Updates the item based on the current values of the dictionary passed
        in.
        """
        response = self._new_response()
        if self._check_supported_op('update', response):
            if self._prototype_handler.check(item, 'update', response):
                if encrypt:
                    self._encrypt(item)
                params = {'Item': item}
                self._call_ddb_method(self.table.put_item,
                                      params, response)
                if response.status == 'success':
                    response.data = item
            response.prepare()
        return response

    def increment_counter(self, id, counter_name, increment=1,
                          id_name='id', **kwargs):
        """
        Atomically increments a counter attribute in the item identified by
        ``id``. You must specify the name of the attribute as ``counter_name``
        and, optionally, the ``increment`` which defaults to ``1``.
        """
        response = self._new_response()
        if self._check_supported_op('increment_counter', response):
            params = {
                'Key': {id_name: id},
                'UpdateExpression': 'set #ctr = #ctr + :val',
                'ExpressionAttributeNames': {"#ctr": counter_name},
                'ExpressionAttributeValues': {
                    ':val': decimal.Decimal(increment)},
                'ReturnValues': 'UPDATED_NEW'
            }
            self._call_ddb_method(self.table.update_item, params, response)
            if response.status == 'success':
                if 'Attributes' in response.raw_response:
                    self._replace_decimals(response.raw_response)
                    attr = response.raw_response['Attributes'][counter_name]
                    response.data = attr
            response.prepare()
        return response

    def delete(self, id, id_name='id', **kwargs):
        """
        Deletes the item corresponding to ``id``.
        """
        response = self._new_response()
        if self._check_supported_op('delete', response):
            params = {'Key': {id_name: id}}
            self._call_ddb_method(self.table.delete_item, params, response)
            response.data = 'true'
            response.prepare()
        return response

    def bulk_delete(self, query, **kwargs):
        """
        Perform a search and delete all items that match.
        """
        response = self._new_response()
        if self._check_supported_op('search', response):
            n = 0
            pe = 'id'
            response = self.search(query, projection_expression=pe, **kwargs)
            while response.status == 'success' and response.data:
                failed = False
                for item in response.data:
                    delete_response = self.delete(item['id'])
                    # Bug fix: the old code re-tested the (successful)
                    # search response here, so delete failures were
                    # silently ignored and the loop never surfaced them.
                    if delete_response.status != 'success':
                        response = delete_response
                        failed = True
                        break
                    n += 1
                if failed:
                    # Stop without overwriting the error response.
                    break
                response = self.search(
                    query, projection_expression=pe, **kwargs)
            if response.status == 'success':
                response.data = {'deleted': n}
        return response

    def handler(self, operation=None, **kwargs):
        """
        In addition to the methods described above, cruddy also provides a
        generic handler interface. This is mainly useful when you want to wrap
        a cruddy handler in a Lambda function and then call that Lambda
        function to access the CRUD capabilities.
        To call the handler, you simply put all necessary parameters into a
        Python dictionary and then call the handler with that dict.
        ```
        params = {
            'operation': 'create',
            'item': {'foo': 'bar', 'fie': 'baz'}
        }
        response = crud.handler(**params)
        ```
        """
        response = self._new_response()
        if operation is None:
            response.status = 'error'
            response.error_type = 'MissingOperation'
            response.error_message = 'You must pass an operation'
            return response
        operation = operation.lower()
        self._check_supported_op(operation, response)
        if response.status == 'success':
            method = getattr(self, operation, None)
            if callable(method):
                response = method(**kwargs)
            else:
                # Bug fix: this used '==' (a no-op comparison), so an
                # unimplemented operation kept its 'success' status.
                response.status = 'error'
                response.error_type = 'NotImplemented'
                msg = 'Operation: {} is not implemented'.format(operation)
                response.error_message = msg
        return response
|
Min-ops/cruddy
|
cruddy/__init__.py
|
CRUD.search
|
python
|
def search(self, query, **kwargs):
response = self._new_response()
if self._check_supported_op('search', response):
if '=' not in query:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Only the = operation is supported'
response.error_message = msg
else:
key, value = query.split('=')
if key not in self._indexes:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Attribute {} is not indexed'.format(key)
response.error_message = msg
else:
params = {'KeyConditionExpression': Key(key).eq(value)}
index_name = self._indexes[key]
if index_name:
params['IndexName'] = index_name
pe = kwargs.get('projection_expression')
if pe:
params['ProjectionExpression'] = pe
self._call_ddb_method(self.table.query,
params, response)
if response.status == 'success':
response.data = self._replace_decimals(
response.raw_response['Items'])
response.prepare()
return response
|
Cruddy provides a limited but useful interface to search GSI indexes in
DynamoDB with the following limitations (hopefully some of these will
be expanded or eliminated in the future.
* The GSI must be configured with a only HASH and not a RANGE.
* The only operation supported in the query is equality
To use the ``search`` operation you must pass in a query string of this
form:
<attribute_name>=<value>
As stated above, the only operation currently supported is equality (=)
but other operations will be added over time. Also, the
``attribute_name`` must be an attribute which is configured as the
``HASH`` of a GSI in the DynamoDB table. If all of the above
conditions are met, the ``query`` operation will return a list
(possibly empty) of all items matching the query and the ``status`` of
the response will be ``success``. Otherwise, the ``status`` will be
``error`` and the ``error_type`` and ``error_message`` will provide
further information about the error.
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/__init__.py#L214-L266
|
[
"def _check_supported_op(self, op_name, response):\n if op_name not in self.supported_ops:\n response.status = 'error'\n response.error_type = 'UnsupportedOperation'\n response.error_message = 'Unsupported operation: {}'.format(\n op_name)\n return False\n return True\n",
"def _new_response(self):\n return CRUDResponse(self._debug)\n"
] |
class CRUD(object):
SupportedOps = ["create", "update", "get", "delete", "bulk_delete",
"list", "search", "increment_counter",
"describe", "ping"]
def __init__(self, **kwargs):
"""
Create a new CRUD handler. The CRUD handler accepts the following
parameters:
* table_name - name of the backing DynamoDB table (required)
* profile_name - name of the AWS credential profile to use when
creating the boto3 Session
* region_name - name of the AWS region to use when creating the
boto3 Session
* prototype - a dictionary of name/value pairs that will be used to
initialize newly created items
* supported_ops - a list of operations supported by the CRUD handler
(choices are list, get, create, update, delete, search,
increment_counter, describe, help, ping)
* encrypted_attributes - a list of tuples where the first item in the
tuple is the name of the attribute that should be encrypted and the
second item in the tuple is the KMS master key ID to use for
encrypting/decrypting the value
* debug - if not False this will cause the raw_response to be left
in the response dictionary
"""
self.table_name = kwargs['table_name']
profile_name = kwargs.get('profile_name')
region_name = kwargs.get('region_name')
placebo = kwargs.get('placebo')
placebo_dir = kwargs.get('placebo_dir')
placebo_mode = kwargs.get('placebo_mode', 'record')
self.prototype = kwargs.get('prototype', dict())
self._prototype_handler = PrototypeHandler(self.prototype)
self.supported_ops = kwargs.get('supported_ops', self.SupportedOps)
self.supported_ops.append('describe')
self.encrypted_attributes = kwargs.get('encrypted_attributes', list())
session = boto3.Session(profile_name=profile_name,
region_name=region_name)
if placebo and placebo_dir:
self.pill = placebo.attach(session, placebo_dir, debug=True)
if placebo_mode == 'record':
self.pill.record()
else:
self.pill.playback()
else:
self.pill = None
ddb_resource = session.resource('dynamodb')
self.table = ddb_resource.Table(self.table_name)
self._indexes = {}
self._analyze_table()
self._debug = kwargs.get('debug', False)
if self.encrypted_attributes:
self._kms_client = session.client('kms')
else:
self._kms_client = None
def _analyze_table(self):
# First check the Key Schema
if len(self.table.key_schema) != 1:
LOG.info('cruddy does not support RANGE keys')
else:
self._indexes[self.table.key_schema[0]['AttributeName']] = None
# Now process any GSI's
if self.table.global_secondary_indexes:
for gsi in self.table.global_secondary_indexes:
# find HASH of GSI, that's all we support for now
# if the GSI has a RANGE, we ignore it for now
if len(gsi['KeySchema']) == 1:
gsi_hash = gsi['KeySchema'][0]['AttributeName']
self._indexes[gsi_hash] = gsi['IndexName']
# Because the Boto3 DynamoDB client turns all numeric types into Decimals
# (which is actually the right thing to do) we need to convert those
# Decimal values back into integers or floats before serializing to JSON.
def _replace_decimals(self, obj):
if isinstance(obj, list):
for i in xrange(len(obj)):
obj[i] = self._replace_decimals(obj[i])
return obj
elif isinstance(obj, dict):
for k in obj.iterkeys():
obj[k] = self._replace_decimals(obj[k])
return obj
elif isinstance(obj, decimal.Decimal):
if obj % 1 == 0:
return int(obj)
else:
return float(obj)
else:
return obj
def _encrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.encrypt(
KeyId=master_key_id,
Plaintext=item[encrypted_attr])
blob = response['CiphertextBlob']
item[encrypted_attr] = base64.b64encode(blob)
def _decrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.decrypt(
CiphertextBlob=base64.b64decode(item[encrypted_attr]))
item[encrypted_attr] = response['Plaintext']
def _check_supported_op(self, op_name, response):
if op_name not in self.supported_ops:
response.status = 'error'
response.error_type = 'UnsupportedOperation'
response.error_message = 'Unsupported operation: {}'.format(
op_name)
return False
return True
def _call_ddb_method(self, method, kwargs, response):
try:
response.raw_response = method(**kwargs)
except ClientError as e:
LOG.debug(e)
response.status = 'error'
response.error_message = e.response['Error'].get('Message')
response.error_code = e.response['Error'].get('Code')
response.error_type = e.response['Error'].get('Type')
except Exception as e:
response.status = 'error'
response.error_type = e.__class__.__name__
response.error_code = None
response.error_message = str(e)
def _new_response(self):
return CRUDResponse(self._debug)
def ping(self, **kwargs):
"""
A no-op method that simply returns a successful response.
"""
response = self._new_response()
return response
def describe(self, **kwargs):
"""
Returns descriptive information about this cruddy handler and the
methods supported by it.
"""
response = self._new_response()
description = {
'cruddy_version': __version__,
'table_name': self.table_name,
'supported_operations': copy.copy(self.supported_ops),
'prototype': copy.deepcopy(self.prototype),
'operations': {}
}
for name, method in inspect.getmembers(self, inspect.ismethod):
if not name.startswith('_'):
argspec = inspect.getargspec(method)
if argspec.defaults is None:
defaults = None
else:
defaults = list(argspec.defaults)
method_info = {
'docs': inspect.getdoc(method),
'argspec': {
'args': argspec.args,
'varargs': argspec.varargs,
'keywords': argspec.keywords,
'defaults': defaults
}
}
description['operations'][name] = method_info
response.data = description
return response
def list(self, **kwargs):
"""
Returns a list of items in the database. Encrypted attributes are not
decrypted when listing items.
"""
response = self._new_response()
if self._check_supported_op('list', response):
self._call_ddb_method(self.table.scan, {}, response)
if response.status == 'success':
response.data = self._replace_decimals(
response.raw_response['Items'])
response.prepare()
return response
def get(self, id, decrypt=False, id_name='id', **kwargs):
"""
Returns the item corresponding to ``id``. If the ``decrypt`` param is
not False (the default) any encrypted attributes in the item will be
decrypted before the item is returned. If not, the encrypted
attributes will contain the encrypted value.
"""
response = self._new_response()
if self._check_supported_op('get', response):
if id is None:
response.status = 'error'
response.error_type = 'IDRequired'
response.error_message = 'Get requires an id'
else:
params = {'Key': {id_name: id},
'ConsistentRead': True}
self._call_ddb_method(self.table.get_item,
params, response)
if response.status == 'success':
if 'Item' in response.raw_response:
item = response.raw_response['Item']
if decrypt:
self._decrypt(item)
response.data = self._replace_decimals(item)
else:
response.status = 'error'
response.error_type = 'NotFound'
msg = 'item ({}) not found'.format(id)
response.error_message = msg
response.prepare()
return response
def create(self, item, **kwargs):
"""
Creates a new item. You pass in an item containing initial values.
Any attribute names defined in ``prototype`` that are missing from the
item will be added using the default value defined in ``prototype``.
"""
response = self._new_response()
if self._prototype_handler.check(item, 'create', response):
self._encrypt(item)
params = {'Item': item}
self._call_ddb_method(self.table.put_item,
params, response)
if response.status == 'success':
response.data = item
response.prepare()
return response
def update(self, item, encrypt=True, **kwargs):
"""
Updates the item based on the current values of the dictionary passed
in.
"""
response = self._new_response()
if self._check_supported_op('update', response):
if self._prototype_handler.check(item, 'update', response):
if encrypt:
self._encrypt(item)
params = {'Item': item}
self._call_ddb_method(self.table.put_item,
params, response)
if response.status == 'success':
response.data = item
response.prepare()
return response
def increment_counter(self, id, counter_name, increment=1,
id_name='id', **kwargs):
"""
Atomically increments a counter attribute in the item identified by
``id``. You must specify the name of the attribute as ``counter_name``
and, optionally, the ``increment`` which defaults to ``1``.
"""
response = self._new_response()
if self._check_supported_op('increment_counter', response):
params = {
'Key': {id_name: id},
'UpdateExpression': 'set #ctr = #ctr + :val',
'ExpressionAttributeNames': {"#ctr": counter_name},
'ExpressionAttributeValues': {
':val': decimal.Decimal(increment)},
'ReturnValues': 'UPDATED_NEW'
}
self._call_ddb_method(self.table.update_item, params, response)
if response.status == 'success':
if 'Attributes' in response.raw_response:
self._replace_decimals(response.raw_response)
attr = response.raw_response['Attributes'][counter_name]
response.data = attr
response.prepare()
return response
def delete(self, id, id_name='id', **kwargs):
"""
Deletes the item corresponding to ``id``.
"""
response = self._new_response()
if self._check_supported_op('delete', response):
params = {'Key': {id_name: id}}
self._call_ddb_method(self.table.delete_item, params, response)
response.data = 'true'
response.prepare()
return response
def bulk_delete(self, query, **kwargs):
"""
Perform a search and delete all items that match.
"""
response = self._new_response()
if self._check_supported_op('search', response):
n = 0
pe = 'id'
response = self.search(query, projection_expression=pe, **kwargs)
while response.status == 'success' and response.data:
for item in response.data:
delete_response = self.delete(item['id'])
if response.status != 'success':
response = delete_response
break
n += 1
response = self.search(
query, projection_expression=pe, **kwargs)
if response.status == 'success':
response.data = {'deleted': n}
return response
def handler(self, operation=None, **kwargs):
"""
In addition to the methods described above, cruddy also provides a
generic handler interface. This is mainly useful when you want to wrap
a cruddy handler in a Lambda function and then call that Lambda
function to access the CRUD capabilities.
To call the handler, you simply put all necessary parameters into a
Python dictionary and then call the handler with that dict.
```
params = {
'operation': 'create',
'item': {'foo': 'bar', 'fie': 'baz'}
}
response = crud.handler(**params)
```
"""
response = self._new_response()
if operation is None:
response.status = 'error'
response.error_type = 'MissingOperation'
response.error_message = 'You must pass an operation'
return response
operation = operation.lower()
self._check_supported_op(operation, response)
if response.status == 'success':
method = getattr(self, operation, None)
if callable(method):
response = method(**kwargs)
else:
response.status == 'error'
response.error_type = 'NotImplemented'
msg = 'Operation: {} is not implemented'.format(operation)
response.error_message = msg
return response
|
Min-ops/cruddy
|
cruddy/__init__.py
|
CRUD.list
|
python
|
def list(self, **kwargs):
response = self._new_response()
if self._check_supported_op('list', response):
self._call_ddb_method(self.table.scan, {}, response)
if response.status == 'success':
response.data = self._replace_decimals(
response.raw_response['Items'])
response.prepare()
return response
|
Returns a list of items in the database. Encrypted attributes are not
decrypted when listing items.
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/__init__.py#L268-L280
|
[
"def _check_supported_op(self, op_name, response):\n if op_name not in self.supported_ops:\n response.status = 'error'\n response.error_type = 'UnsupportedOperation'\n response.error_message = 'Unsupported operation: {}'.format(\n op_name)\n return False\n return True\n",
"def _new_response(self):\n return CRUDResponse(self._debug)\n"
] |
class CRUD(object):
SupportedOps = ["create", "update", "get", "delete", "bulk_delete",
"list", "search", "increment_counter",
"describe", "ping"]
def __init__(self, **kwargs):
"""
Create a new CRUD handler. The CRUD handler accepts the following
parameters:
* table_name - name of the backing DynamoDB table (required)
* profile_name - name of the AWS credential profile to use when
creating the boto3 Session
* region_name - name of the AWS region to use when creating the
boto3 Session
* prototype - a dictionary of name/value pairs that will be used to
initialize newly created items
* supported_ops - a list of operations supported by the CRUD handler
(choices are list, get, create, update, delete, search,
increment_counter, describe, help, ping)
* encrypted_attributes - a list of tuples where the first item in the
tuple is the name of the attribute that should be encrypted and the
second item in the tuple is the KMS master key ID to use for
encrypting/decrypting the value
* debug - if not False this will cause the raw_response to be left
in the response dictionary
"""
self.table_name = kwargs['table_name']
profile_name = kwargs.get('profile_name')
region_name = kwargs.get('region_name')
placebo = kwargs.get('placebo')
placebo_dir = kwargs.get('placebo_dir')
placebo_mode = kwargs.get('placebo_mode', 'record')
self.prototype = kwargs.get('prototype', dict())
self._prototype_handler = PrototypeHandler(self.prototype)
self.supported_ops = kwargs.get('supported_ops', self.SupportedOps)
self.supported_ops.append('describe')
self.encrypted_attributes = kwargs.get('encrypted_attributes', list())
session = boto3.Session(profile_name=profile_name,
region_name=region_name)
if placebo and placebo_dir:
self.pill = placebo.attach(session, placebo_dir, debug=True)
if placebo_mode == 'record':
self.pill.record()
else:
self.pill.playback()
else:
self.pill = None
ddb_resource = session.resource('dynamodb')
self.table = ddb_resource.Table(self.table_name)
self._indexes = {}
self._analyze_table()
self._debug = kwargs.get('debug', False)
if self.encrypted_attributes:
self._kms_client = session.client('kms')
else:
self._kms_client = None
def _analyze_table(self):
# First check the Key Schema
if len(self.table.key_schema) != 1:
LOG.info('cruddy does not support RANGE keys')
else:
self._indexes[self.table.key_schema[0]['AttributeName']] = None
# Now process any GSI's
if self.table.global_secondary_indexes:
for gsi in self.table.global_secondary_indexes:
# find HASH of GSI, that's all we support for now
# if the GSI has a RANGE, we ignore it for now
if len(gsi['KeySchema']) == 1:
gsi_hash = gsi['KeySchema'][0]['AttributeName']
self._indexes[gsi_hash] = gsi['IndexName']
# Because the Boto3 DynamoDB client turns all numeric types into Decimals
# (which is actually the right thing to do) we need to convert those
# Decimal values back into integers or floats before serializing to JSON.
def _replace_decimals(self, obj):
if isinstance(obj, list):
for i in xrange(len(obj)):
obj[i] = self._replace_decimals(obj[i])
return obj
elif isinstance(obj, dict):
for k in obj.iterkeys():
obj[k] = self._replace_decimals(obj[k])
return obj
elif isinstance(obj, decimal.Decimal):
if obj % 1 == 0:
return int(obj)
else:
return float(obj)
else:
return obj
def _encrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.encrypt(
KeyId=master_key_id,
Plaintext=item[encrypted_attr])
blob = response['CiphertextBlob']
item[encrypted_attr] = base64.b64encode(blob)
def _decrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.decrypt(
CiphertextBlob=base64.b64decode(item[encrypted_attr]))
item[encrypted_attr] = response['Plaintext']
def _check_supported_op(self, op_name, response):
if op_name not in self.supported_ops:
response.status = 'error'
response.error_type = 'UnsupportedOperation'
response.error_message = 'Unsupported operation: {}'.format(
op_name)
return False
return True
def _call_ddb_method(self, method, kwargs, response):
try:
response.raw_response = method(**kwargs)
except ClientError as e:
LOG.debug(e)
response.status = 'error'
response.error_message = e.response['Error'].get('Message')
response.error_code = e.response['Error'].get('Code')
response.error_type = e.response['Error'].get('Type')
except Exception as e:
response.status = 'error'
response.error_type = e.__class__.__name__
response.error_code = None
response.error_message = str(e)
def _new_response(self):
return CRUDResponse(self._debug)
def ping(self, **kwargs):
"""
A no-op method that simply returns a successful response.
"""
response = self._new_response()
return response
def describe(self, **kwargs):
"""
Returns descriptive information about this cruddy handler and the
methods supported by it.
"""
response = self._new_response()
description = {
'cruddy_version': __version__,
'table_name': self.table_name,
'supported_operations': copy.copy(self.supported_ops),
'prototype': copy.deepcopy(self.prototype),
'operations': {}
}
for name, method in inspect.getmembers(self, inspect.ismethod):
if not name.startswith('_'):
argspec = inspect.getargspec(method)
if argspec.defaults is None:
defaults = None
else:
defaults = list(argspec.defaults)
method_info = {
'docs': inspect.getdoc(method),
'argspec': {
'args': argspec.args,
'varargs': argspec.varargs,
'keywords': argspec.keywords,
'defaults': defaults
}
}
description['operations'][name] = method_info
response.data = description
return response
def search(self, query, **kwargs):
"""
Cruddy provides a limited but useful interface to search GSI indexes in
DynamoDB with the following limitations (hopefully some of these will
be expanded or eliminated in the future.
* The GSI must be configured with a only HASH and not a RANGE.
* The only operation supported in the query is equality
To use the ``search`` operation you must pass in a query string of this
form:
<attribute_name>=<value>
As stated above, the only operation currently supported is equality (=)
but other operations will be added over time. Also, the
``attribute_name`` must be an attribute which is configured as the
``HASH`` of a GSI in the DynamoDB table. If all of the above
conditions are met, the ``query`` operation will return a list
(possibly empty) of all items matching the query and the ``status`` of
the response will be ``success``. Otherwise, the ``status`` will be
``error`` and the ``error_type`` and ``error_message`` will provide
further information about the error.
"""
response = self._new_response()
if self._check_supported_op('search', response):
if '=' not in query:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Only the = operation is supported'
response.error_message = msg
else:
key, value = query.split('=')
if key not in self._indexes:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Attribute {} is not indexed'.format(key)
response.error_message = msg
else:
params = {'KeyConditionExpression': Key(key).eq(value)}
index_name = self._indexes[key]
if index_name:
params['IndexName'] = index_name
pe = kwargs.get('projection_expression')
if pe:
params['ProjectionExpression'] = pe
self._call_ddb_method(self.table.query,
params, response)
if response.status == 'success':
response.data = self._replace_decimals(
response.raw_response['Items'])
response.prepare()
return response
def get(self, id, decrypt=False, id_name='id', **kwargs):
"""
Returns the item corresponding to ``id``. If the ``decrypt`` param is
not False (the default) any encrypted attributes in the item will be
decrypted before the item is returned. If not, the encrypted
attributes will contain the encrypted value.
"""
response = self._new_response()
if self._check_supported_op('get', response):
if id is None:
response.status = 'error'
response.error_type = 'IDRequired'
response.error_message = 'Get requires an id'
else:
params = {'Key': {id_name: id},
'ConsistentRead': True}
self._call_ddb_method(self.table.get_item,
params, response)
if response.status == 'success':
if 'Item' in response.raw_response:
item = response.raw_response['Item']
if decrypt:
self._decrypt(item)
response.data = self._replace_decimals(item)
else:
response.status = 'error'
response.error_type = 'NotFound'
msg = 'item ({}) not found'.format(id)
response.error_message = msg
response.prepare()
return response
def create(self, item, **kwargs):
"""
Creates a new item. You pass in an item containing initial values.
Any attribute names defined in ``prototype`` that are missing from the
item will be added using the default value defined in ``prototype``.
"""
response = self._new_response()
if self._prototype_handler.check(item, 'create', response):
self._encrypt(item)
params = {'Item': item}
self._call_ddb_method(self.table.put_item,
params, response)
if response.status == 'success':
response.data = item
response.prepare()
return response
def update(self, item, encrypt=True, **kwargs):
"""
Updates the item based on the current values of the dictionary passed
in.
"""
response = self._new_response()
if self._check_supported_op('update', response):
if self._prototype_handler.check(item, 'update', response):
if encrypt:
self._encrypt(item)
params = {'Item': item}
self._call_ddb_method(self.table.put_item,
params, response)
if response.status == 'success':
response.data = item
response.prepare()
return response
def increment_counter(self, id, counter_name, increment=1,
id_name='id', **kwargs):
"""
Atomically increments a counter attribute in the item identified by
``id``. You must specify the name of the attribute as ``counter_name``
and, optionally, the ``increment`` which defaults to ``1``.
"""
response = self._new_response()
if self._check_supported_op('increment_counter', response):
params = {
'Key': {id_name: id},
'UpdateExpression': 'set #ctr = #ctr + :val',
'ExpressionAttributeNames': {"#ctr": counter_name},
'ExpressionAttributeValues': {
':val': decimal.Decimal(increment)},
'ReturnValues': 'UPDATED_NEW'
}
self._call_ddb_method(self.table.update_item, params, response)
if response.status == 'success':
if 'Attributes' in response.raw_response:
self._replace_decimals(response.raw_response)
attr = response.raw_response['Attributes'][counter_name]
response.data = attr
response.prepare()
return response
def delete(self, id, id_name='id', **kwargs):
"""
Deletes the item corresponding to ``id``.
"""
response = self._new_response()
if self._check_supported_op('delete', response):
params = {'Key': {id_name: id}}
self._call_ddb_method(self.table.delete_item, params, response)
response.data = 'true'
response.prepare()
return response
def bulk_delete(self, query, **kwargs):
"""
Perform a search and delete all items that match.
"""
response = self._new_response()
if self._check_supported_op('search', response):
n = 0
pe = 'id'
response = self.search(query, projection_expression=pe, **kwargs)
while response.status == 'success' and response.data:
for item in response.data:
delete_response = self.delete(item['id'])
if response.status != 'success':
response = delete_response
break
n += 1
response = self.search(
query, projection_expression=pe, **kwargs)
if response.status == 'success':
response.data = {'deleted': n}
return response
def handler(self, operation=None, **kwargs):
"""
In addition to the methods described above, cruddy also provides a
generic handler interface. This is mainly useful when you want to wrap
a cruddy handler in a Lambda function and then call that Lambda
function to access the CRUD capabilities.
To call the handler, you simply put all necessary parameters into a
Python dictionary and then call the handler with that dict.
```
params = {
'operation': 'create',
'item': {'foo': 'bar', 'fie': 'baz'}
}
response = crud.handler(**params)
```
"""
response = self._new_response()
if operation is None:
response.status = 'error'
response.error_type = 'MissingOperation'
response.error_message = 'You must pass an operation'
return response
operation = operation.lower()
self._check_supported_op(operation, response)
if response.status == 'success':
method = getattr(self, operation, None)
if callable(method):
response = method(**kwargs)
else:
response.status == 'error'
response.error_type = 'NotImplemented'
msg = 'Operation: {} is not implemented'.format(operation)
response.error_message = msg
return response
|
Min-ops/cruddy
|
cruddy/__init__.py
|
CRUD.get
|
python
|
def get(self, id, decrypt=False, id_name='id', **kwargs):
response = self._new_response()
if self._check_supported_op('get', response):
if id is None:
response.status = 'error'
response.error_type = 'IDRequired'
response.error_message = 'Get requires an id'
else:
params = {'Key': {id_name: id},
'ConsistentRead': True}
self._call_ddb_method(self.table.get_item,
params, response)
if response.status == 'success':
if 'Item' in response.raw_response:
item = response.raw_response['Item']
if decrypt:
self._decrypt(item)
response.data = self._replace_decimals(item)
else:
response.status = 'error'
response.error_type = 'NotFound'
msg = 'item ({}) not found'.format(id)
response.error_message = msg
response.prepare()
return response
|
Returns the item corresponding to ``id``. If the ``decrypt`` param is
not False (the default) any encrypted attributes in the item will be
decrypted before the item is returned. If not, the encrypted
attributes will contain the encrypted value.
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/__init__.py#L282-L313
|
[
"def _check_supported_op(self, op_name, response):\n if op_name not in self.supported_ops:\n response.status = 'error'\n response.error_type = 'UnsupportedOperation'\n response.error_message = 'Unsupported operation: {}'.format(\n op_name)\n return False\n return True\n",
"def _new_response(self):\n return CRUDResponse(self._debug)\n"
] |
class CRUD(object):
SupportedOps = ["create", "update", "get", "delete", "bulk_delete",
"list", "search", "increment_counter",
"describe", "ping"]
def __init__(self, **kwargs):
"""
Create a new CRUD handler. The CRUD handler accepts the following
parameters:
* table_name - name of the backing DynamoDB table (required)
* profile_name - name of the AWS credential profile to use when
creating the boto3 Session
* region_name - name of the AWS region to use when creating the
boto3 Session
* prototype - a dictionary of name/value pairs that will be used to
initialize newly created items
* supported_ops - a list of operations supported by the CRUD handler
(choices are list, get, create, update, delete, search,
increment_counter, describe, help, ping)
* encrypted_attributes - a list of tuples where the first item in the
tuple is the name of the attribute that should be encrypted and the
second item in the tuple is the KMS master key ID to use for
encrypting/decrypting the value
* debug - if not False this will cause the raw_response to be left
in the response dictionary
"""
self.table_name = kwargs['table_name']
profile_name = kwargs.get('profile_name')
region_name = kwargs.get('region_name')
placebo = kwargs.get('placebo')
placebo_dir = kwargs.get('placebo_dir')
placebo_mode = kwargs.get('placebo_mode', 'record')
self.prototype = kwargs.get('prototype', dict())
self._prototype_handler = PrototypeHandler(self.prototype)
self.supported_ops = kwargs.get('supported_ops', self.SupportedOps)
self.supported_ops.append('describe')
self.encrypted_attributes = kwargs.get('encrypted_attributes', list())
session = boto3.Session(profile_name=profile_name,
region_name=region_name)
if placebo and placebo_dir:
self.pill = placebo.attach(session, placebo_dir, debug=True)
if placebo_mode == 'record':
self.pill.record()
else:
self.pill.playback()
else:
self.pill = None
ddb_resource = session.resource('dynamodb')
self.table = ddb_resource.Table(self.table_name)
self._indexes = {}
self._analyze_table()
self._debug = kwargs.get('debug', False)
if self.encrypted_attributes:
self._kms_client = session.client('kms')
else:
self._kms_client = None
def _analyze_table(self):
# First check the Key Schema
if len(self.table.key_schema) != 1:
LOG.info('cruddy does not support RANGE keys')
else:
self._indexes[self.table.key_schema[0]['AttributeName']] = None
# Now process any GSI's
if self.table.global_secondary_indexes:
for gsi in self.table.global_secondary_indexes:
# find HASH of GSI, that's all we support for now
# if the GSI has a RANGE, we ignore it for now
if len(gsi['KeySchema']) == 1:
gsi_hash = gsi['KeySchema'][0]['AttributeName']
self._indexes[gsi_hash] = gsi['IndexName']
# Because the Boto3 DynamoDB client turns all numeric types into Decimals
# (which is actually the right thing to do) we need to convert those
# Decimal values back into integers or floats before serializing to JSON.
def _replace_decimals(self, obj):
if isinstance(obj, list):
for i in xrange(len(obj)):
obj[i] = self._replace_decimals(obj[i])
return obj
elif isinstance(obj, dict):
for k in obj.iterkeys():
obj[k] = self._replace_decimals(obj[k])
return obj
elif isinstance(obj, decimal.Decimal):
if obj % 1 == 0:
return int(obj)
else:
return float(obj)
else:
return obj
def _encrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.encrypt(
KeyId=master_key_id,
Plaintext=item[encrypted_attr])
blob = response['CiphertextBlob']
item[encrypted_attr] = base64.b64encode(blob)
def _decrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.decrypt(
CiphertextBlob=base64.b64decode(item[encrypted_attr]))
item[encrypted_attr] = response['Plaintext']
def _check_supported_op(self, op_name, response):
if op_name not in self.supported_ops:
response.status = 'error'
response.error_type = 'UnsupportedOperation'
response.error_message = 'Unsupported operation: {}'.format(
op_name)
return False
return True
def _call_ddb_method(self, method, kwargs, response):
try:
response.raw_response = method(**kwargs)
except ClientError as e:
LOG.debug(e)
response.status = 'error'
response.error_message = e.response['Error'].get('Message')
response.error_code = e.response['Error'].get('Code')
response.error_type = e.response['Error'].get('Type')
except Exception as e:
response.status = 'error'
response.error_type = e.__class__.__name__
response.error_code = None
response.error_message = str(e)
def _new_response(self):
return CRUDResponse(self._debug)
def ping(self, **kwargs):
"""
A no-op method that simply returns a successful response.
"""
response = self._new_response()
return response
def describe(self, **kwargs):
"""
Returns descriptive information about this cruddy handler and the
methods supported by it.
"""
response = self._new_response()
description = {
'cruddy_version': __version__,
'table_name': self.table_name,
'supported_operations': copy.copy(self.supported_ops),
'prototype': copy.deepcopy(self.prototype),
'operations': {}
}
for name, method in inspect.getmembers(self, inspect.ismethod):
if not name.startswith('_'):
argspec = inspect.getargspec(method)
if argspec.defaults is None:
defaults = None
else:
defaults = list(argspec.defaults)
method_info = {
'docs': inspect.getdoc(method),
'argspec': {
'args': argspec.args,
'varargs': argspec.varargs,
'keywords': argspec.keywords,
'defaults': defaults
}
}
description['operations'][name] = method_info
response.data = description
return response
def search(self, query, **kwargs):
"""
Cruddy provides a limited but useful interface to search GSI indexes in
DynamoDB with the following limitations (hopefully some of these will
be expanded or eliminated in the future.
* The GSI must be configured with a only HASH and not a RANGE.
* The only operation supported in the query is equality
To use the ``search`` operation you must pass in a query string of this
form:
<attribute_name>=<value>
As stated above, the only operation currently supported is equality (=)
but other operations will be added over time. Also, the
``attribute_name`` must be an attribute which is configured as the
``HASH`` of a GSI in the DynamoDB table. If all of the above
conditions are met, the ``query`` operation will return a list
(possibly empty) of all items matching the query and the ``status`` of
the response will be ``success``. Otherwise, the ``status`` will be
``error`` and the ``error_type`` and ``error_message`` will provide
further information about the error.
"""
response = self._new_response()
if self._check_supported_op('search', response):
if '=' not in query:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Only the = operation is supported'
response.error_message = msg
else:
key, value = query.split('=')
if key not in self._indexes:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Attribute {} is not indexed'.format(key)
response.error_message = msg
else:
params = {'KeyConditionExpression': Key(key).eq(value)}
index_name = self._indexes[key]
if index_name:
params['IndexName'] = index_name
pe = kwargs.get('projection_expression')
if pe:
params['ProjectionExpression'] = pe
self._call_ddb_method(self.table.query,
params, response)
if response.status == 'success':
response.data = self._replace_decimals(
response.raw_response['Items'])
response.prepare()
return response
def list(self, **kwargs):
"""
Returns a list of items in the database. Encrypted attributes are not
decrypted when listing items.
"""
response = self._new_response()
if self._check_supported_op('list', response):
self._call_ddb_method(self.table.scan, {}, response)
if response.status == 'success':
response.data = self._replace_decimals(
response.raw_response['Items'])
response.prepare()
return response
def create(self, item, **kwargs):
"""
Creates a new item. You pass in an item containing initial values.
Any attribute names defined in ``prototype`` that are missing from the
item will be added using the default value defined in ``prototype``.
"""
response = self._new_response()
if self._prototype_handler.check(item, 'create', response):
self._encrypt(item)
params = {'Item': item}
self._call_ddb_method(self.table.put_item,
params, response)
if response.status == 'success':
response.data = item
response.prepare()
return response
def update(self, item, encrypt=True, **kwargs):
"""
Updates the item based on the current values of the dictionary passed
in.
"""
response = self._new_response()
if self._check_supported_op('update', response):
if self._prototype_handler.check(item, 'update', response):
if encrypt:
self._encrypt(item)
params = {'Item': item}
self._call_ddb_method(self.table.put_item,
params, response)
if response.status == 'success':
response.data = item
response.prepare()
return response
def increment_counter(self, id, counter_name, increment=1,
id_name='id', **kwargs):
"""
Atomically increments a counter attribute in the item identified by
``id``. You must specify the name of the attribute as ``counter_name``
and, optionally, the ``increment`` which defaults to ``1``.
"""
response = self._new_response()
if self._check_supported_op('increment_counter', response):
params = {
'Key': {id_name: id},
'UpdateExpression': 'set #ctr = #ctr + :val',
'ExpressionAttributeNames': {"#ctr": counter_name},
'ExpressionAttributeValues': {
':val': decimal.Decimal(increment)},
'ReturnValues': 'UPDATED_NEW'
}
self._call_ddb_method(self.table.update_item, params, response)
if response.status == 'success':
if 'Attributes' in response.raw_response:
self._replace_decimals(response.raw_response)
attr = response.raw_response['Attributes'][counter_name]
response.data = attr
response.prepare()
return response
def delete(self, id, id_name='id', **kwargs):
"""
Deletes the item corresponding to ``id``.
"""
response = self._new_response()
if self._check_supported_op('delete', response):
params = {'Key': {id_name: id}}
self._call_ddb_method(self.table.delete_item, params, response)
response.data = 'true'
response.prepare()
return response
def bulk_delete(self, query, **kwargs):
"""
Perform a search and delete all items that match.
"""
response = self._new_response()
if self._check_supported_op('search', response):
n = 0
pe = 'id'
response = self.search(query, projection_expression=pe, **kwargs)
while response.status == 'success' and response.data:
for item in response.data:
delete_response = self.delete(item['id'])
if response.status != 'success':
response = delete_response
break
n += 1
response = self.search(
query, projection_expression=pe, **kwargs)
if response.status == 'success':
response.data = {'deleted': n}
return response
def handler(self, operation=None, **kwargs):
"""
In addition to the methods described above, cruddy also provides a
generic handler interface. This is mainly useful when you want to wrap
a cruddy handler in a Lambda function and then call that Lambda
function to access the CRUD capabilities.
To call the handler, you simply put all necessary parameters into a
Python dictionary and then call the handler with that dict.
```
params = {
'operation': 'create',
'item': {'foo': 'bar', 'fie': 'baz'}
}
response = crud.handler(**params)
```
"""
response = self._new_response()
if operation is None:
response.status = 'error'
response.error_type = 'MissingOperation'
response.error_message = 'You must pass an operation'
return response
operation = operation.lower()
self._check_supported_op(operation, response)
if response.status == 'success':
method = getattr(self, operation, None)
if callable(method):
response = method(**kwargs)
else:
response.status == 'error'
response.error_type = 'NotImplemented'
msg = 'Operation: {} is not implemented'.format(operation)
response.error_message = msg
return response
|
Min-ops/cruddy
|
cruddy/__init__.py
|
CRUD.create
|
python
|
def create(self, item, **kwargs):
response = self._new_response()
if self._prototype_handler.check(item, 'create', response):
self._encrypt(item)
params = {'Item': item}
self._call_ddb_method(self.table.put_item,
params, response)
if response.status == 'success':
response.data = item
response.prepare()
return response
|
Creates a new item. You pass in an item containing initial values.
Any attribute names defined in ``prototype`` that are missing from the
item will be added using the default value defined in ``prototype``.
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/__init__.py#L315-L330
|
[
"def _new_response(self):\n return CRUDResponse(self._debug)\n"
] |
class CRUD(object):
SupportedOps = ["create", "update", "get", "delete", "bulk_delete",
"list", "search", "increment_counter",
"describe", "ping"]
def __init__(self, **kwargs):
"""
Create a new CRUD handler. The CRUD handler accepts the following
parameters:
* table_name - name of the backing DynamoDB table (required)
* profile_name - name of the AWS credential profile to use when
creating the boto3 Session
* region_name - name of the AWS region to use when creating the
boto3 Session
* prototype - a dictionary of name/value pairs that will be used to
initialize newly created items
* supported_ops - a list of operations supported by the CRUD handler
(choices are list, get, create, update, delete, search,
increment_counter, describe, help, ping)
* encrypted_attributes - a list of tuples where the first item in the
tuple is the name of the attribute that should be encrypted and the
second item in the tuple is the KMS master key ID to use for
encrypting/decrypting the value
* debug - if not False this will cause the raw_response to be left
in the response dictionary
"""
self.table_name = kwargs['table_name']
profile_name = kwargs.get('profile_name')
region_name = kwargs.get('region_name')
placebo = kwargs.get('placebo')
placebo_dir = kwargs.get('placebo_dir')
placebo_mode = kwargs.get('placebo_mode', 'record')
self.prototype = kwargs.get('prototype', dict())
self._prototype_handler = PrototypeHandler(self.prototype)
self.supported_ops = kwargs.get('supported_ops', self.SupportedOps)
self.supported_ops.append('describe')
self.encrypted_attributes = kwargs.get('encrypted_attributes', list())
session = boto3.Session(profile_name=profile_name,
region_name=region_name)
if placebo and placebo_dir:
self.pill = placebo.attach(session, placebo_dir, debug=True)
if placebo_mode == 'record':
self.pill.record()
else:
self.pill.playback()
else:
self.pill = None
ddb_resource = session.resource('dynamodb')
self.table = ddb_resource.Table(self.table_name)
self._indexes = {}
self._analyze_table()
self._debug = kwargs.get('debug', False)
if self.encrypted_attributes:
self._kms_client = session.client('kms')
else:
self._kms_client = None
def _analyze_table(self):
# First check the Key Schema
if len(self.table.key_schema) != 1:
LOG.info('cruddy does not support RANGE keys')
else:
self._indexes[self.table.key_schema[0]['AttributeName']] = None
# Now process any GSI's
if self.table.global_secondary_indexes:
for gsi in self.table.global_secondary_indexes:
# find HASH of GSI, that's all we support for now
# if the GSI has a RANGE, we ignore it for now
if len(gsi['KeySchema']) == 1:
gsi_hash = gsi['KeySchema'][0]['AttributeName']
self._indexes[gsi_hash] = gsi['IndexName']
# Because the Boto3 DynamoDB client turns all numeric types into Decimals
# (which is actually the right thing to do) we need to convert those
# Decimal values back into integers or floats before serializing to JSON.
def _replace_decimals(self, obj):
if isinstance(obj, list):
for i in xrange(len(obj)):
obj[i] = self._replace_decimals(obj[i])
return obj
elif isinstance(obj, dict):
for k in obj.iterkeys():
obj[k] = self._replace_decimals(obj[k])
return obj
elif isinstance(obj, decimal.Decimal):
if obj % 1 == 0:
return int(obj)
else:
return float(obj)
else:
return obj
def _encrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.encrypt(
KeyId=master_key_id,
Plaintext=item[encrypted_attr])
blob = response['CiphertextBlob']
item[encrypted_attr] = base64.b64encode(blob)
def _decrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.decrypt(
CiphertextBlob=base64.b64decode(item[encrypted_attr]))
item[encrypted_attr] = response['Plaintext']
def _check_supported_op(self, op_name, response):
if op_name not in self.supported_ops:
response.status = 'error'
response.error_type = 'UnsupportedOperation'
response.error_message = 'Unsupported operation: {}'.format(
op_name)
return False
return True
def _call_ddb_method(self, method, kwargs, response):
try:
response.raw_response = method(**kwargs)
except ClientError as e:
LOG.debug(e)
response.status = 'error'
response.error_message = e.response['Error'].get('Message')
response.error_code = e.response['Error'].get('Code')
response.error_type = e.response['Error'].get('Type')
except Exception as e:
response.status = 'error'
response.error_type = e.__class__.__name__
response.error_code = None
response.error_message = str(e)
def _new_response(self):
return CRUDResponse(self._debug)
def ping(self, **kwargs):
"""
A no-op method that simply returns a successful response.
"""
response = self._new_response()
return response
def describe(self, **kwargs):
"""
Returns descriptive information about this cruddy handler and the
methods supported by it.
"""
response = self._new_response()
description = {
'cruddy_version': __version__,
'table_name': self.table_name,
'supported_operations': copy.copy(self.supported_ops),
'prototype': copy.deepcopy(self.prototype),
'operations': {}
}
for name, method in inspect.getmembers(self, inspect.ismethod):
if not name.startswith('_'):
argspec = inspect.getargspec(method)
if argspec.defaults is None:
defaults = None
else:
defaults = list(argspec.defaults)
method_info = {
'docs': inspect.getdoc(method),
'argspec': {
'args': argspec.args,
'varargs': argspec.varargs,
'keywords': argspec.keywords,
'defaults': defaults
}
}
description['operations'][name] = method_info
response.data = description
return response
def search(self, query, **kwargs):
"""
Cruddy provides a limited but useful interface to search GSI indexes in
DynamoDB with the following limitations (hopefully some of these will
be expanded or eliminated in the future.
* The GSI must be configured with a only HASH and not a RANGE.
* The only operation supported in the query is equality
To use the ``search`` operation you must pass in a query string of this
form:
<attribute_name>=<value>
As stated above, the only operation currently supported is equality (=)
but other operations will be added over time. Also, the
``attribute_name`` must be an attribute which is configured as the
``HASH`` of a GSI in the DynamoDB table. If all of the above
conditions are met, the ``query`` operation will return a list
(possibly empty) of all items matching the query and the ``status`` of
the response will be ``success``. Otherwise, the ``status`` will be
``error`` and the ``error_type`` and ``error_message`` will provide
further information about the error.
"""
response = self._new_response()
if self._check_supported_op('search', response):
if '=' not in query:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Only the = operation is supported'
response.error_message = msg
else:
key, value = query.split('=')
if key not in self._indexes:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Attribute {} is not indexed'.format(key)
response.error_message = msg
else:
params = {'KeyConditionExpression': Key(key).eq(value)}
index_name = self._indexes[key]
if index_name:
params['IndexName'] = index_name
pe = kwargs.get('projection_expression')
if pe:
params['ProjectionExpression'] = pe
self._call_ddb_method(self.table.query,
params, response)
if response.status == 'success':
response.data = self._replace_decimals(
response.raw_response['Items'])
response.prepare()
return response
def list(self, **kwargs):
"""
Returns a list of items in the database. Encrypted attributes are not
decrypted when listing items.
"""
response = self._new_response()
if self._check_supported_op('list', response):
self._call_ddb_method(self.table.scan, {}, response)
if response.status == 'success':
response.data = self._replace_decimals(
response.raw_response['Items'])
response.prepare()
return response
def get(self, id, decrypt=False, id_name='id', **kwargs):
"""
Returns the item corresponding to ``id``. If the ``decrypt`` param is
not False (the default) any encrypted attributes in the item will be
decrypted before the item is returned. If not, the encrypted
attributes will contain the encrypted value.
"""
response = self._new_response()
if self._check_supported_op('get', response):
if id is None:
response.status = 'error'
response.error_type = 'IDRequired'
response.error_message = 'Get requires an id'
else:
params = {'Key': {id_name: id},
'ConsistentRead': True}
self._call_ddb_method(self.table.get_item,
params, response)
if response.status == 'success':
if 'Item' in response.raw_response:
item = response.raw_response['Item']
if decrypt:
self._decrypt(item)
response.data = self._replace_decimals(item)
else:
response.status = 'error'
response.error_type = 'NotFound'
msg = 'item ({}) not found'.format(id)
response.error_message = msg
response.prepare()
return response
def update(self, item, encrypt=True, **kwargs):
"""
Updates the item based on the current values of the dictionary passed
in.
"""
response = self._new_response()
if self._check_supported_op('update', response):
if self._prototype_handler.check(item, 'update', response):
if encrypt:
self._encrypt(item)
params = {'Item': item}
self._call_ddb_method(self.table.put_item,
params, response)
if response.status == 'success':
response.data = item
response.prepare()
return response
def increment_counter(self, id, counter_name, increment=1,
id_name='id', **kwargs):
"""
Atomically increments a counter attribute in the item identified by
``id``. You must specify the name of the attribute as ``counter_name``
and, optionally, the ``increment`` which defaults to ``1``.
"""
response = self._new_response()
if self._check_supported_op('increment_counter', response):
params = {
'Key': {id_name: id},
'UpdateExpression': 'set #ctr = #ctr + :val',
'ExpressionAttributeNames': {"#ctr": counter_name},
'ExpressionAttributeValues': {
':val': decimal.Decimal(increment)},
'ReturnValues': 'UPDATED_NEW'
}
self._call_ddb_method(self.table.update_item, params, response)
if response.status == 'success':
if 'Attributes' in response.raw_response:
self._replace_decimals(response.raw_response)
attr = response.raw_response['Attributes'][counter_name]
response.data = attr
response.prepare()
return response
def delete(self, id, id_name='id', **kwargs):
"""
Deletes the item corresponding to ``id``.
"""
response = self._new_response()
if self._check_supported_op('delete', response):
params = {'Key': {id_name: id}}
self._call_ddb_method(self.table.delete_item, params, response)
response.data = 'true'
response.prepare()
return response
def bulk_delete(self, query, **kwargs):
"""
Perform a search and delete all items that match.
"""
response = self._new_response()
if self._check_supported_op('search', response):
n = 0
pe = 'id'
response = self.search(query, projection_expression=pe, **kwargs)
while response.status == 'success' and response.data:
for item in response.data:
delete_response = self.delete(item['id'])
if response.status != 'success':
response = delete_response
break
n += 1
response = self.search(
query, projection_expression=pe, **kwargs)
if response.status == 'success':
response.data = {'deleted': n}
return response
def handler(self, operation=None, **kwargs):
"""
In addition to the methods described above, cruddy also provides a
generic handler interface. This is mainly useful when you want to wrap
a cruddy handler in a Lambda function and then call that Lambda
function to access the CRUD capabilities.
To call the handler, you simply put all necessary parameters into a
Python dictionary and then call the handler with that dict.
```
params = {
'operation': 'create',
'item': {'foo': 'bar', 'fie': 'baz'}
}
response = crud.handler(**params)
```
"""
response = self._new_response()
if operation is None:
response.status = 'error'
response.error_type = 'MissingOperation'
response.error_message = 'You must pass an operation'
return response
operation = operation.lower()
self._check_supported_op(operation, response)
if response.status == 'success':
method = getattr(self, operation, None)
if callable(method):
response = method(**kwargs)
else:
response.status == 'error'
response.error_type = 'NotImplemented'
msg = 'Operation: {} is not implemented'.format(operation)
response.error_message = msg
return response
|
Min-ops/cruddy
|
cruddy/__init__.py
|
CRUD.update
|
python
|
def update(self, item, encrypt=True, **kwargs):
response = self._new_response()
if self._check_supported_op('update', response):
if self._prototype_handler.check(item, 'update', response):
if encrypt:
self._encrypt(item)
params = {'Item': item}
self._call_ddb_method(self.table.put_item,
params, response)
if response.status == 'success':
response.data = item
response.prepare()
return response
|
Updates the item based on the current values of the dictionary passed
in.
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/__init__.py#L332-L348
|
[
"def _check_supported_op(self, op_name, response):\n if op_name not in self.supported_ops:\n response.status = 'error'\n response.error_type = 'UnsupportedOperation'\n response.error_message = 'Unsupported operation: {}'.format(\n op_name)\n return False\n return True\n",
"def _new_response(self):\n return CRUDResponse(self._debug)\n"
] |
class CRUD(object):
SupportedOps = ["create", "update", "get", "delete", "bulk_delete",
"list", "search", "increment_counter",
"describe", "ping"]
def __init__(self, **kwargs):
"""
Create a new CRUD handler. The CRUD handler accepts the following
parameters:
* table_name - name of the backing DynamoDB table (required)
* profile_name - name of the AWS credential profile to use when
creating the boto3 Session
* region_name - name of the AWS region to use when creating the
boto3 Session
* prototype - a dictionary of name/value pairs that will be used to
initialize newly created items
* supported_ops - a list of operations supported by the CRUD handler
(choices are list, get, create, update, delete, search,
increment_counter, describe, help, ping)
* encrypted_attributes - a list of tuples where the first item in the
tuple is the name of the attribute that should be encrypted and the
second item in the tuple is the KMS master key ID to use for
encrypting/decrypting the value
* debug - if not False this will cause the raw_response to be left
in the response dictionary
"""
self.table_name = kwargs['table_name']
profile_name = kwargs.get('profile_name')
region_name = kwargs.get('region_name')
placebo = kwargs.get('placebo')
placebo_dir = kwargs.get('placebo_dir')
placebo_mode = kwargs.get('placebo_mode', 'record')
self.prototype = kwargs.get('prototype', dict())
self._prototype_handler = PrototypeHandler(self.prototype)
self.supported_ops = kwargs.get('supported_ops', self.SupportedOps)
self.supported_ops.append('describe')
self.encrypted_attributes = kwargs.get('encrypted_attributes', list())
session = boto3.Session(profile_name=profile_name,
region_name=region_name)
if placebo and placebo_dir:
self.pill = placebo.attach(session, placebo_dir, debug=True)
if placebo_mode == 'record':
self.pill.record()
else:
self.pill.playback()
else:
self.pill = None
ddb_resource = session.resource('dynamodb')
self.table = ddb_resource.Table(self.table_name)
self._indexes = {}
self._analyze_table()
self._debug = kwargs.get('debug', False)
if self.encrypted_attributes:
self._kms_client = session.client('kms')
else:
self._kms_client = None
def _analyze_table(self):
# First check the Key Schema
if len(self.table.key_schema) != 1:
LOG.info('cruddy does not support RANGE keys')
else:
self._indexes[self.table.key_schema[0]['AttributeName']] = None
# Now process any GSI's
if self.table.global_secondary_indexes:
for gsi in self.table.global_secondary_indexes:
# find HASH of GSI, that's all we support for now
# if the GSI has a RANGE, we ignore it for now
if len(gsi['KeySchema']) == 1:
gsi_hash = gsi['KeySchema'][0]['AttributeName']
self._indexes[gsi_hash] = gsi['IndexName']
# Because the Boto3 DynamoDB client turns all numeric types into Decimals
# (which is actually the right thing to do) we need to convert those
# Decimal values back into integers or floats before serializing to JSON.
def _replace_decimals(self, obj):
if isinstance(obj, list):
for i in xrange(len(obj)):
obj[i] = self._replace_decimals(obj[i])
return obj
elif isinstance(obj, dict):
for k in obj.iterkeys():
obj[k] = self._replace_decimals(obj[k])
return obj
elif isinstance(obj, decimal.Decimal):
if obj % 1 == 0:
return int(obj)
else:
return float(obj)
else:
return obj
def _encrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.encrypt(
KeyId=master_key_id,
Plaintext=item[encrypted_attr])
blob = response['CiphertextBlob']
item[encrypted_attr] = base64.b64encode(blob)
def _decrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.decrypt(
CiphertextBlob=base64.b64decode(item[encrypted_attr]))
item[encrypted_attr] = response['Plaintext']
def _check_supported_op(self, op_name, response):
if op_name not in self.supported_ops:
response.status = 'error'
response.error_type = 'UnsupportedOperation'
response.error_message = 'Unsupported operation: {}'.format(
op_name)
return False
return True
def _call_ddb_method(self, method, kwargs, response):
try:
response.raw_response = method(**kwargs)
except ClientError as e:
LOG.debug(e)
response.status = 'error'
response.error_message = e.response['Error'].get('Message')
response.error_code = e.response['Error'].get('Code')
response.error_type = e.response['Error'].get('Type')
except Exception as e:
response.status = 'error'
response.error_type = e.__class__.__name__
response.error_code = None
response.error_message = str(e)
def _new_response(self):
return CRUDResponse(self._debug)
def ping(self, **kwargs):
"""
A no-op method that simply returns a successful response.
"""
response = self._new_response()
return response
def describe(self, **kwargs):
"""
Returns descriptive information about this cruddy handler and the
methods supported by it.
"""
response = self._new_response()
description = {
'cruddy_version': __version__,
'table_name': self.table_name,
'supported_operations': copy.copy(self.supported_ops),
'prototype': copy.deepcopy(self.prototype),
'operations': {}
}
for name, method in inspect.getmembers(self, inspect.ismethod):
if not name.startswith('_'):
argspec = inspect.getargspec(method)
if argspec.defaults is None:
defaults = None
else:
defaults = list(argspec.defaults)
method_info = {
'docs': inspect.getdoc(method),
'argspec': {
'args': argspec.args,
'varargs': argspec.varargs,
'keywords': argspec.keywords,
'defaults': defaults
}
}
description['operations'][name] = method_info
response.data = description
return response
def search(self, query, **kwargs):
"""
Cruddy provides a limited but useful interface to search GSI indexes in
DynamoDB with the following limitations (hopefully some of these will
be expanded or eliminated in the future.
* The GSI must be configured with a only HASH and not a RANGE.
* The only operation supported in the query is equality
To use the ``search`` operation you must pass in a query string of this
form:
<attribute_name>=<value>
As stated above, the only operation currently supported is equality (=)
but other operations will be added over time. Also, the
``attribute_name`` must be an attribute which is configured as the
``HASH`` of a GSI in the DynamoDB table. If all of the above
conditions are met, the ``query`` operation will return a list
(possibly empty) of all items matching the query and the ``status`` of
the response will be ``success``. Otherwise, the ``status`` will be
``error`` and the ``error_type`` and ``error_message`` will provide
further information about the error.
"""
response = self._new_response()
if self._check_supported_op('search', response):
if '=' not in query:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Only the = operation is supported'
response.error_message = msg
else:
key, value = query.split('=')
if key not in self._indexes:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Attribute {} is not indexed'.format(key)
response.error_message = msg
else:
params = {'KeyConditionExpression': Key(key).eq(value)}
index_name = self._indexes[key]
if index_name:
params['IndexName'] = index_name
pe = kwargs.get('projection_expression')
if pe:
params['ProjectionExpression'] = pe
self._call_ddb_method(self.table.query,
params, response)
if response.status == 'success':
response.data = self._replace_decimals(
response.raw_response['Items'])
response.prepare()
return response
def list(self, **kwargs):
"""
Returns a list of items in the database. Encrypted attributes are not
decrypted when listing items.
"""
response = self._new_response()
if self._check_supported_op('list', response):
self._call_ddb_method(self.table.scan, {}, response)
if response.status == 'success':
response.data = self._replace_decimals(
response.raw_response['Items'])
response.prepare()
return response
def get(self, id, decrypt=False, id_name='id', **kwargs):
"""
Returns the item corresponding to ``id``. If the ``decrypt`` param is
not False (the default) any encrypted attributes in the item will be
decrypted before the item is returned. If not, the encrypted
attributes will contain the encrypted value.
"""
response = self._new_response()
if self._check_supported_op('get', response):
if id is None:
response.status = 'error'
response.error_type = 'IDRequired'
response.error_message = 'Get requires an id'
else:
params = {'Key': {id_name: id},
'ConsistentRead': True}
self._call_ddb_method(self.table.get_item,
params, response)
if response.status == 'success':
if 'Item' in response.raw_response:
item = response.raw_response['Item']
if decrypt:
self._decrypt(item)
response.data = self._replace_decimals(item)
else:
response.status = 'error'
response.error_type = 'NotFound'
msg = 'item ({}) not found'.format(id)
response.error_message = msg
response.prepare()
return response
def create(self, item, **kwargs):
"""
Creates a new item. You pass in an item containing initial values.
Any attribute names defined in ``prototype`` that are missing from the
item will be added using the default value defined in ``prototype``.
"""
response = self._new_response()
if self._prototype_handler.check(item, 'create', response):
self._encrypt(item)
params = {'Item': item}
self._call_ddb_method(self.table.put_item,
params, response)
if response.status == 'success':
response.data = item
response.prepare()
return response
def increment_counter(self, id, counter_name, increment=1,
id_name='id', **kwargs):
"""
Atomically increments a counter attribute in the item identified by
``id``. You must specify the name of the attribute as ``counter_name``
and, optionally, the ``increment`` which defaults to ``1``.
"""
response = self._new_response()
if self._check_supported_op('increment_counter', response):
params = {
'Key': {id_name: id},
'UpdateExpression': 'set #ctr = #ctr + :val',
'ExpressionAttributeNames': {"#ctr": counter_name},
'ExpressionAttributeValues': {
':val': decimal.Decimal(increment)},
'ReturnValues': 'UPDATED_NEW'
}
self._call_ddb_method(self.table.update_item, params, response)
if response.status == 'success':
if 'Attributes' in response.raw_response:
self._replace_decimals(response.raw_response)
attr = response.raw_response['Attributes'][counter_name]
response.data = attr
response.prepare()
return response
def delete(self, id, id_name='id', **kwargs):
"""
Deletes the item corresponding to ``id``.
"""
response = self._new_response()
if self._check_supported_op('delete', response):
params = {'Key': {id_name: id}}
self._call_ddb_method(self.table.delete_item, params, response)
response.data = 'true'
response.prepare()
return response
def bulk_delete(self, query, **kwargs):
"""
Perform a search and delete all items that match.
"""
response = self._new_response()
if self._check_supported_op('search', response):
n = 0
pe = 'id'
response = self.search(query, projection_expression=pe, **kwargs)
while response.status == 'success' and response.data:
for item in response.data:
delete_response = self.delete(item['id'])
if response.status != 'success':
response = delete_response
break
n += 1
response = self.search(
query, projection_expression=pe, **kwargs)
if response.status == 'success':
response.data = {'deleted': n}
return response
def handler(self, operation=None, **kwargs):
"""
In addition to the methods described above, cruddy also provides a
generic handler interface. This is mainly useful when you want to wrap
a cruddy handler in a Lambda function and then call that Lambda
function to access the CRUD capabilities.
To call the handler, you simply put all necessary parameters into a
Python dictionary and then call the handler with that dict.
```
params = {
'operation': 'create',
'item': {'foo': 'bar', 'fie': 'baz'}
}
response = crud.handler(**params)
```
"""
response = self._new_response()
if operation is None:
response.status = 'error'
response.error_type = 'MissingOperation'
response.error_message = 'You must pass an operation'
return response
operation = operation.lower()
self._check_supported_op(operation, response)
if response.status == 'success':
method = getattr(self, operation, None)
if callable(method):
response = method(**kwargs)
else:
response.status == 'error'
response.error_type = 'NotImplemented'
msg = 'Operation: {} is not implemented'.format(operation)
response.error_message = msg
return response
|
Min-ops/cruddy
|
cruddy/__init__.py
|
CRUD.increment_counter
|
python
|
def increment_counter(self, id, counter_name, increment=1,
id_name='id', **kwargs):
response = self._new_response()
if self._check_supported_op('increment_counter', response):
params = {
'Key': {id_name: id},
'UpdateExpression': 'set #ctr = #ctr + :val',
'ExpressionAttributeNames': {"#ctr": counter_name},
'ExpressionAttributeValues': {
':val': decimal.Decimal(increment)},
'ReturnValues': 'UPDATED_NEW'
}
self._call_ddb_method(self.table.update_item, params, response)
if response.status == 'success':
if 'Attributes' in response.raw_response:
self._replace_decimals(response.raw_response)
attr = response.raw_response['Attributes'][counter_name]
response.data = attr
response.prepare()
return response
|
Atomically increments a counter attribute in the item identified by
``id``. You must specify the name of the attribute as ``counter_name``
and, optionally, the ``increment`` which defaults to ``1``.
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/__init__.py#L350-L374
|
[
"def _check_supported_op(self, op_name, response):\n if op_name not in self.supported_ops:\n response.status = 'error'\n response.error_type = 'UnsupportedOperation'\n response.error_message = 'Unsupported operation: {}'.format(\n op_name)\n return False\n return True\n",
"def _new_response(self):\n return CRUDResponse(self._debug)\n"
] |
class CRUD(object):
SupportedOps = ["create", "update", "get", "delete", "bulk_delete",
"list", "search", "increment_counter",
"describe", "ping"]
def __init__(self, **kwargs):
"""
Create a new CRUD handler. The CRUD handler accepts the following
parameters:
* table_name - name of the backing DynamoDB table (required)
* profile_name - name of the AWS credential profile to use when
creating the boto3 Session
* region_name - name of the AWS region to use when creating the
boto3 Session
* prototype - a dictionary of name/value pairs that will be used to
initialize newly created items
* supported_ops - a list of operations supported by the CRUD handler
(choices are list, get, create, update, delete, search,
increment_counter, describe, help, ping)
* encrypted_attributes - a list of tuples where the first item in the
tuple is the name of the attribute that should be encrypted and the
second item in the tuple is the KMS master key ID to use for
encrypting/decrypting the value
* debug - if not False this will cause the raw_response to be left
in the response dictionary
"""
self.table_name = kwargs['table_name']
profile_name = kwargs.get('profile_name')
region_name = kwargs.get('region_name')
placebo = kwargs.get('placebo')
placebo_dir = kwargs.get('placebo_dir')
placebo_mode = kwargs.get('placebo_mode', 'record')
self.prototype = kwargs.get('prototype', dict())
self._prototype_handler = PrototypeHandler(self.prototype)
self.supported_ops = kwargs.get('supported_ops', self.SupportedOps)
self.supported_ops.append('describe')
self.encrypted_attributes = kwargs.get('encrypted_attributes', list())
session = boto3.Session(profile_name=profile_name,
region_name=region_name)
if placebo and placebo_dir:
self.pill = placebo.attach(session, placebo_dir, debug=True)
if placebo_mode == 'record':
self.pill.record()
else:
self.pill.playback()
else:
self.pill = None
ddb_resource = session.resource('dynamodb')
self.table = ddb_resource.Table(self.table_name)
self._indexes = {}
self._analyze_table()
self._debug = kwargs.get('debug', False)
if self.encrypted_attributes:
self._kms_client = session.client('kms')
else:
self._kms_client = None
def _analyze_table(self):
# First check the Key Schema
if len(self.table.key_schema) != 1:
LOG.info('cruddy does not support RANGE keys')
else:
self._indexes[self.table.key_schema[0]['AttributeName']] = None
# Now process any GSI's
if self.table.global_secondary_indexes:
for gsi in self.table.global_secondary_indexes:
# find HASH of GSI, that's all we support for now
# if the GSI has a RANGE, we ignore it for now
if len(gsi['KeySchema']) == 1:
gsi_hash = gsi['KeySchema'][0]['AttributeName']
self._indexes[gsi_hash] = gsi['IndexName']
# Because the Boto3 DynamoDB client turns all numeric types into Decimals
# (which is actually the right thing to do) we need to convert those
# Decimal values back into integers or floats before serializing to JSON.
def _replace_decimals(self, obj):
if isinstance(obj, list):
for i in xrange(len(obj)):
obj[i] = self._replace_decimals(obj[i])
return obj
elif isinstance(obj, dict):
for k in obj.iterkeys():
obj[k] = self._replace_decimals(obj[k])
return obj
elif isinstance(obj, decimal.Decimal):
if obj % 1 == 0:
return int(obj)
else:
return float(obj)
else:
return obj
def _encrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.encrypt(
KeyId=master_key_id,
Plaintext=item[encrypted_attr])
blob = response['CiphertextBlob']
item[encrypted_attr] = base64.b64encode(blob)
def _decrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.decrypt(
CiphertextBlob=base64.b64decode(item[encrypted_attr]))
item[encrypted_attr] = response['Plaintext']
def _check_supported_op(self, op_name, response):
if op_name not in self.supported_ops:
response.status = 'error'
response.error_type = 'UnsupportedOperation'
response.error_message = 'Unsupported operation: {}'.format(
op_name)
return False
return True
    def _call_ddb_method(self, method, kwargs, response):
        """Invoke a boto3 table method, recording the outcome on *response*.

        On success the raw boto3 result is stored in
        ``response.raw_response``.  Botocore ``ClientError`` exceptions are
        unpacked into the response's error fields; any other exception is
        captured with its class name as the error type.  Nothing is raised
        to the caller.
        """
        try:
            response.raw_response = method(**kwargs)
        except ClientError as e:
            LOG.debug(e)
            response.status = 'error'
            response.error_message = e.response['Error'].get('Message')
            response.error_code = e.response['Error'].get('Code')
            response.error_type = e.response['Error'].get('Type')
        except Exception as e:
            # Non-AWS failure: no service error code is available.
            response.status = 'error'
            response.error_type = e.__class__.__name__
            response.error_code = None
            response.error_message = str(e)
    def _new_response(self):
        """Create a fresh CRUDResponse honoring this handler's debug flag."""
        return CRUDResponse(self._debug)
def ping(self, **kwargs):
"""
A no-op method that simply returns a successful response.
"""
response = self._new_response()
return response
    def describe(self, **kwargs):
        """
        Returns descriptive information about this cruddy handler and the
        methods supported by it.
        """
        response = self._new_response()
        description = {
            'cruddy_version': __version__,
            'table_name': self.table_name,
            'supported_operations': copy.copy(self.supported_ops),
            'prototype': copy.deepcopy(self.prototype),
            'operations': {}
        }
        # Reflect over all public methods to document their signatures.
        for name, method in inspect.getmembers(self, inspect.ismethod):
            if not name.startswith('_'):
                # NOTE(review): inspect.getargspec was removed in Python
                # 3.11 -- confirm the supported interpreter versions.
                argspec = inspect.getargspec(method)
                if argspec.defaults is None:
                    defaults = None
                else:
                    # Tuples are not JSON-serializable; use a list.
                    defaults = list(argspec.defaults)
                method_info = {
                    'docs': inspect.getdoc(method),
                    'argspec': {
                        'args': argspec.args,
                        'varargs': argspec.varargs,
                        'keywords': argspec.keywords,
                        'defaults': defaults
                    }
                }
                description['operations'][name] = method_info
        response.data = description
        return response
def search(self, query, **kwargs):
"""
Cruddy provides a limited but useful interface to search GSI indexes in
DynamoDB with the following limitations (hopefully some of these will
be expanded or eliminated in the future.
* The GSI must be configured with a only HASH and not a RANGE.
* The only operation supported in the query is equality
To use the ``search`` operation you must pass in a query string of this
form:
<attribute_name>=<value>
As stated above, the only operation currently supported is equality (=)
but other operations will be added over time. Also, the
``attribute_name`` must be an attribute which is configured as the
``HASH`` of a GSI in the DynamoDB table. If all of the above
conditions are met, the ``query`` operation will return a list
(possibly empty) of all items matching the query and the ``status`` of
the response will be ``success``. Otherwise, the ``status`` will be
``error`` and the ``error_type`` and ``error_message`` will provide
further information about the error.
"""
response = self._new_response()
if self._check_supported_op('search', response):
if '=' not in query:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Only the = operation is supported'
response.error_message = msg
else:
key, value = query.split('=')
if key not in self._indexes:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Attribute {} is not indexed'.format(key)
response.error_message = msg
else:
params = {'KeyConditionExpression': Key(key).eq(value)}
index_name = self._indexes[key]
if index_name:
params['IndexName'] = index_name
pe = kwargs.get('projection_expression')
if pe:
params['ProjectionExpression'] = pe
self._call_ddb_method(self.table.query,
params, response)
if response.status == 'success':
response.data = self._replace_decimals(
response.raw_response['Items'])
response.prepare()
return response
    def list(self, **kwargs):
        """
        Returns a list of items in the database. Encrypted attributes are not
        decrypted when listing items.
        """
        response = self._new_response()
        if self._check_supported_op('list', response):
            # Single full-table Scan; no pagination is performed here, so
            # only the first page of results is returned.
            self._call_ddb_method(self.table.scan, {}, response)
            if response.status == 'success':
                response.data = self._replace_decimals(
                    response.raw_response['Items'])
        response.prepare()
        return response
    def get(self, id, decrypt=False, id_name='id', **kwargs):
        """
        Returns the item corresponding to ``id``.  If the ``decrypt`` param is
        not False (the default) any encrypted attributes in the item will be
        decrypted before the item is returned.  If not, the encrypted
        attributes will contain the encrypted value.
        """
        response = self._new_response()
        if self._check_supported_op('get', response):
            if id is None:
                response.status = 'error'
                response.error_type = 'IDRequired'
                response.error_message = 'Get requires an id'
            else:
                # Strongly consistent read so a get immediately following a
                # create/update observes the written data.
                params = {'Key': {id_name: id},
                          'ConsistentRead': True}
                self._call_ddb_method(self.table.get_item,
                                      params, response)
                if response.status == 'success':
                    if 'Item' in response.raw_response:
                        item = response.raw_response['Item']
                        if decrypt:
                            self._decrypt(item)
                        response.data = self._replace_decimals(item)
                    else:
                        # get_item succeeds with no 'Item' key when the id
                        # does not exist; surface that as NotFound.
                        response.status = 'error'
                        response.error_type = 'NotFound'
                        msg = 'item ({}) not found'.format(id)
                        response.error_message = msg
        response.prepare()
        return response
def create(self, item, **kwargs):
"""
Creates a new item. You pass in an item containing initial values.
Any attribute names defined in ``prototype`` that are missing from the
item will be added using the default value defined in ``prototype``.
"""
response = self._new_response()
if self._prototype_handler.check(item, 'create', response):
self._encrypt(item)
params = {'Item': item}
self._call_ddb_method(self.table.put_item,
params, response)
if response.status == 'success':
response.data = item
response.prepare()
return response
    def update(self, item, encrypt=True, **kwargs):
        """
        Updates the item based on the current values of the dictionary passed
        in.
        """
        response = self._new_response()
        if self._check_supported_op('update', response):
            if self._prototype_handler.check(item, 'update', response):
                if encrypt:
                    self._encrypt(item)
                # put_item fully replaces the stored item with *item*.
                params = {'Item': item}
                self._call_ddb_method(self.table.put_item,
                                      params, response)
                if response.status == 'success':
                    response.data = item
        response.prepare()
        return response
def delete(self, id, id_name='id', **kwargs):
"""
Deletes the item corresponding to ``id``.
"""
response = self._new_response()
if self._check_supported_op('delete', response):
params = {'Key': {id_name: id}}
self._call_ddb_method(self.table.delete_item, params, response)
response.data = 'true'
response.prepare()
return response
def bulk_delete(self, query, **kwargs):
"""
Perform a search and delete all items that match.
"""
response = self._new_response()
if self._check_supported_op('search', response):
n = 0
pe = 'id'
response = self.search(query, projection_expression=pe, **kwargs)
while response.status == 'success' and response.data:
for item in response.data:
delete_response = self.delete(item['id'])
if response.status != 'success':
response = delete_response
break
n += 1
response = self.search(
query, projection_expression=pe, **kwargs)
if response.status == 'success':
response.data = {'deleted': n}
return response
def handler(self, operation=None, **kwargs):
"""
In addition to the methods described above, cruddy also provides a
generic handler interface. This is mainly useful when you want to wrap
a cruddy handler in a Lambda function and then call that Lambda
function to access the CRUD capabilities.
To call the handler, you simply put all necessary parameters into a
Python dictionary and then call the handler with that dict.
```
params = {
'operation': 'create',
'item': {'foo': 'bar', 'fie': 'baz'}
}
response = crud.handler(**params)
```
"""
response = self._new_response()
if operation is None:
response.status = 'error'
response.error_type = 'MissingOperation'
response.error_message = 'You must pass an operation'
return response
operation = operation.lower()
self._check_supported_op(operation, response)
if response.status == 'success':
method = getattr(self, operation, None)
if callable(method):
response = method(**kwargs)
else:
response.status == 'error'
response.error_type = 'NotImplemented'
msg = 'Operation: {} is not implemented'.format(operation)
response.error_message = msg
return response
|
Min-ops/cruddy
|
cruddy/__init__.py
|
CRUD.delete
|
python
|
def delete(self, id, id_name='id', **kwargs):
response = self._new_response()
if self._check_supported_op('delete', response):
params = {'Key': {id_name: id}}
self._call_ddb_method(self.table.delete_item, params, response)
response.data = 'true'
response.prepare()
return response
|
Deletes the item corresponding to ``id``.
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/__init__.py#L376-L386
|
[
"def _check_supported_op(self, op_name, response):\n if op_name not in self.supported_ops:\n response.status = 'error'\n response.error_type = 'UnsupportedOperation'\n response.error_message = 'Unsupported operation: {}'.format(\n op_name)\n return False\n return True\n",
"def _new_response(self):\n return CRUDResponse(self._debug)\n"
] |
class CRUD(object):
    """CRUD handler over a single DynamoDB table.

    Provides create/read/update/delete plus search, counters, and a generic
    ``handler`` dispatch entry point.  All operations return a CRUDResponse.
    """

    # Default set of enabled operations; a per-instance copy is taken in
    # __init__ so this class-level list is never mutated.
    SupportedOps = ["create", "update", "get", "delete", "bulk_delete",
                    "list", "search", "increment_counter",
                    "describe", "ping"]

    def __init__(self, **kwargs):
        """
        Create a new CRUD handler.  The CRUD handler accepts the following
        parameters:

        * table_name - name of the backing DynamoDB table (required)
        * profile_name - name of the AWS credential profile to use when
          creating the boto3 Session
        * region_name - name of the AWS region to use when creating the
          boto3 Session
        * prototype - a dictionary of name/value pairs that will be used to
          initialize newly created items
        * supported_ops - a list of operations supported by the CRUD handler
          (choices are list, get, create, update, delete, search,
          increment_counter, describe, help, ping)
        * encrypted_attributes - a list of tuples where the first item in the
          tuple is the name of the attribute that should be encrypted and the
          second item in the tuple is the KMS master key ID to use for
          encrypting/decrypting the value
        * debug - if not False this will cause the raw_response to be left
          in the response dictionary
        """
        self.table_name = kwargs['table_name']
        profile_name = kwargs.get('profile_name')
        region_name = kwargs.get('region_name')
        placebo = kwargs.get('placebo')
        placebo_dir = kwargs.get('placebo_dir')
        placebo_mode = kwargs.get('placebo_mode', 'record')
        self.prototype = kwargs.get('prototype', dict())
        self._prototype_handler = PrototypeHandler(self.prototype)
        # BUG FIX: copy the ops list so we never mutate the caller's list
        # or the shared class-level default (the original appended
        # 'describe' to SupportedOps itself, growing it on every
        # instantiation), and avoid duplicate 'describe' entries.
        self.supported_ops = list(
            kwargs.get('supported_ops', self.SupportedOps))
        if 'describe' not in self.supported_ops:
            self.supported_ops.append('describe')
        self.encrypted_attributes = kwargs.get('encrypted_attributes', list())
        session = boto3.Session(profile_name=profile_name,
                                region_name=region_name)
        if placebo and placebo_dir:
            # Optional placebo-based record/playback of AWS calls (testing).
            self.pill = placebo.attach(session, placebo_dir, debug=True)
            if placebo_mode == 'record':
                self.pill.record()
            else:
                self.pill.playback()
        else:
            self.pill = None
        ddb_resource = session.resource('dynamodb')
        self.table = ddb_resource.Table(self.table_name)
        self._indexes = {}
        self._analyze_table()
        self._debug = kwargs.get('debug', False)
        if self.encrypted_attributes:
            self._kms_client = session.client('kms')
        else:
            self._kms_client = None

    def _analyze_table(self):
        """Build ``self._indexes`` from the key schema and GSIs.

        Maps an indexed attribute name to its GSI name, or ``None`` for the
        table's primary HASH key.
        """
        # First check the Key Schema
        if len(self.table.key_schema) != 1:
            # Composite (HASH + RANGE) primary keys are not supported.
            LOG.info('cruddy does not support RANGE keys')
        else:
            self._indexes[self.table.key_schema[0]['AttributeName']] = None
        # Now process any GSI's
        if self.table.global_secondary_indexes:
            for gsi in self.table.global_secondary_indexes:
                # find HASH of GSI, that's all we support for now
                # if the GSI has a RANGE, we ignore it for now
                if len(gsi['KeySchema']) == 1:
                    gsi_hash = gsi['KeySchema'][0]['AttributeName']
                    self._indexes[gsi_hash] = gsi['IndexName']

    # Because the Boto3 DynamoDB client turns all numeric types into Decimals
    # (which is actually the right thing to do) we need to convert those
    # Decimal values back into integers or floats before serializing to JSON.
    def _replace_decimals(self, obj):
        """Recursively convert ``decimal.Decimal`` values to int/float.

        Lists and dicts are converted in place (and also returned); any
        other value is returned unchanged.
        """
        if isinstance(obj, list):
            # BUG FIX: ``range``/plain dict iteration replace the
            # Python-2-only ``xrange``/``iterkeys`` so this runs on Python 3.
            for i in range(len(obj)):
                obj[i] = self._replace_decimals(obj[i])
            return obj
        elif isinstance(obj, dict):
            for k in obj:
                obj[k] = self._replace_decimals(obj[k])
            return obj
        elif isinstance(obj, decimal.Decimal):
            if obj % 1 == 0:
                return int(obj)
            else:
                return float(obj)
        else:
            return obj

    def _encrypt(self, item):
        """Encrypt configured attributes of *item* in place via AWS KMS,
        storing each as a base64-encoded ciphertext blob."""
        for encrypted_attr, master_key_id in self.encrypted_attributes:
            if encrypted_attr in item:
                response = self._kms_client.encrypt(
                    KeyId=master_key_id,
                    Plaintext=item[encrypted_attr])
                blob = response['CiphertextBlob']
                item[encrypted_attr] = base64.b64encode(blob)

    def _decrypt(self, item):
        """Decrypt configured attributes of *item* in place (reverses
        ``_encrypt``; KMS identifies the key from the ciphertext)."""
        for encrypted_attr, master_key_id in self.encrypted_attributes:
            if encrypted_attr in item:
                response = self._kms_client.decrypt(
                    CiphertextBlob=base64.b64decode(item[encrypted_attr]))
                item[encrypted_attr] = response['Plaintext']

    def _check_supported_op(self, op_name, response):
        """Return True if *op_name* is enabled; otherwise flag *response*
        as an UnsupportedOperation error and return False."""
        if op_name not in self.supported_ops:
            response.status = 'error'
            response.error_type = 'UnsupportedOperation'
            response.error_message = 'Unsupported operation: {}'.format(
                op_name)
            return False
        return True

    def _call_ddb_method(self, method, kwargs, response):
        """Invoke a boto3 table method, recording the outcome on *response*.

        ClientErrors are unpacked into the response's error fields; other
        exceptions are captured with their class name as the error type.
        Nothing is raised to the caller.
        """
        try:
            response.raw_response = method(**kwargs)
        except ClientError as e:
            LOG.debug(e)
            response.status = 'error'
            response.error_message = e.response['Error'].get('Message')
            response.error_code = e.response['Error'].get('Code')
            response.error_type = e.response['Error'].get('Type')
        except Exception as e:
            response.status = 'error'
            response.error_type = e.__class__.__name__
            response.error_code = None
            response.error_message = str(e)

    def _new_response(self):
        """Create a fresh CRUDResponse honoring the handler's debug flag."""
        return CRUDResponse(self._debug)

    def ping(self, **kwargs):
        """
        A no-op method that simply returns a successful response.
        """
        response = self._new_response()
        return response

    def describe(self, **kwargs):
        """
        Returns descriptive information about this cruddy handler and the
        methods supported by it.
        """
        response = self._new_response()
        description = {
            'cruddy_version': __version__,
            'table_name': self.table_name,
            'supported_operations': copy.copy(self.supported_ops),
            'prototype': copy.deepcopy(self.prototype),
            'operations': {}
        }
        # Reflect over all public methods to document their signatures.
        for name, method in inspect.getmembers(self, inspect.ismethod):
            if not name.startswith('_'):
                # COMPAT FIX: inspect.getargspec was removed in Python
                # 3.11; fall back to getfullargspec, whose ``varkw`` field
                # replaces ``keywords``.
                try:
                    argspec = inspect.getargspec(method)
                    keywords = argspec.keywords
                except AttributeError:
                    argspec = inspect.getfullargspec(method)
                    keywords = argspec.varkw
                if argspec.defaults is None:
                    defaults = None
                else:
                    defaults = list(argspec.defaults)
                method_info = {
                    'docs': inspect.getdoc(method),
                    'argspec': {
                        'args': argspec.args,
                        'varargs': argspec.varargs,
                        'keywords': keywords,
                        'defaults': defaults
                    }
                }
                description['operations'][name] = method_info
        response.data = description
        return response

    def search(self, query, **kwargs):
        """
        Cruddy provides a limited but useful interface to search GSI indexes in
        DynamoDB with the following limitations (hopefully some of these will
        be expanded or eliminated in the future).
        * The GSI must be configured with a only HASH and not a RANGE.
        * The only operation supported in the query is equality
        To use the ``search`` operation you must pass in a query string of this
        form:
        <attribute_name>=<value>
        As stated above, the only operation currently supported is equality (=)
        but other operations will be added over time.  Also, the
        ``attribute_name`` must be an attribute which is configured as the
        ``HASH`` of a GSI in the DynamoDB table.  If all of the above
        conditions are met, the ``query`` operation will return a list
        (possibly empty) of all items matching the query and the ``status`` of
        the response will be ``success``.  Otherwise, the ``status`` will be
        ``error`` and the ``error_type`` and ``error_message`` will provide
        further information about the error.
        """
        response = self._new_response()
        if self._check_supported_op('search', response):
            if '=' not in query:
                response.status = 'error'
                response.error_type = 'InvalidQuery'
                msg = 'Only the = operation is supported'
                response.error_message = msg
            else:
                # BUG FIX: split only on the first '=' so values containing
                # '=' don't raise ValueError from tuple unpacking.
                key, value = query.split('=', 1)
                if key not in self._indexes:
                    response.status = 'error'
                    response.error_type = 'InvalidQuery'
                    msg = 'Attribute {} is not indexed'.format(key)
                    response.error_message = msg
                else:
                    params = {'KeyConditionExpression': Key(key).eq(value)}
                    # None means the attribute is the table's own HASH key,
                    # so no IndexName is needed.
                    index_name = self._indexes[key]
                    if index_name:
                        params['IndexName'] = index_name
                    pe = kwargs.get('projection_expression')
                    if pe:
                        params['ProjectionExpression'] = pe
                    self._call_ddb_method(self.table.query,
                                          params, response)
                    if response.status == 'success':
                        response.data = self._replace_decimals(
                            response.raw_response['Items'])
        response.prepare()
        return response

    def list(self, **kwargs):
        """
        Returns a list of items in the database. Encrypted attributes are not
        decrypted when listing items.
        """
        response = self._new_response()
        if self._check_supported_op('list', response):
            # Single full-table Scan; no pagination is performed here.
            self._call_ddb_method(self.table.scan, {}, response)
            if response.status == 'success':
                response.data = self._replace_decimals(
                    response.raw_response['Items'])
        response.prepare()
        return response

    def get(self, id, decrypt=False, id_name='id', **kwargs):
        """
        Returns the item corresponding to ``id``.  If the ``decrypt`` param is
        not False (the default) any encrypted attributes in the item will be
        decrypted before the item is returned.  If not, the encrypted
        attributes will contain the encrypted value.
        """
        response = self._new_response()
        if self._check_supported_op('get', response):
            if id is None:
                response.status = 'error'
                response.error_type = 'IDRequired'
                response.error_message = 'Get requires an id'
            else:
                # Strongly consistent read so a get right after a write
                # observes the written data.
                params = {'Key': {id_name: id},
                          'ConsistentRead': True}
                self._call_ddb_method(self.table.get_item,
                                      params, response)
                if response.status == 'success':
                    if 'Item' in response.raw_response:
                        item = response.raw_response['Item']
                        if decrypt:
                            self._decrypt(item)
                        response.data = self._replace_decimals(item)
                    else:
                        # get_item succeeds with no 'Item' for a missing id.
                        response.status = 'error'
                        response.error_type = 'NotFound'
                        msg = 'item ({}) not found'.format(id)
                        response.error_message = msg
        response.prepare()
        return response

    def create(self, item, **kwargs):
        """
        Creates a new item.  You pass in an item containing initial values.
        Any attribute names defined in ``prototype`` that are missing from the
        item will be added using the default value defined in ``prototype``.
        """
        response = self._new_response()
        # CONSISTENCY FIX: gate on _check_supported_op like every other
        # operation (create previously skipped the check).
        if self._check_supported_op('create', response):
            if self._prototype_handler.check(item, 'create', response):
                self._encrypt(item)
                params = {'Item': item}
                self._call_ddb_method(self.table.put_item,
                                      params, response)
                if response.status == 'success':
                    response.data = item
        response.prepare()
        return response

    def update(self, item, encrypt=True, **kwargs):
        """
        Updates the item based on the current values of the dictionary passed
        in.
        """
        response = self._new_response()
        if self._check_supported_op('update', response):
            if self._prototype_handler.check(item, 'update', response):
                if encrypt:
                    self._encrypt(item)
                # put_item fully replaces the stored item with *item*.
                params = {'Item': item}
                self._call_ddb_method(self.table.put_item,
                                      params, response)
                if response.status == 'success':
                    response.data = item
        response.prepare()
        return response

    def increment_counter(self, id, counter_name, increment=1,
                          id_name='id', **kwargs):
        """
        Atomically increments a counter attribute in the item identified by
        ``id``.  You must specify the name of the attribute as ``counter_name``
        and, optionally, the ``increment`` which defaults to ``1``.
        """
        response = self._new_response()
        if self._check_supported_op('increment_counter', response):
            params = {
                'Key': {id_name: id},
                'UpdateExpression': 'set #ctr = #ctr + :val',
                'ExpressionAttributeNames': {"#ctr": counter_name},
                'ExpressionAttributeValues': {
                    ':val': decimal.Decimal(increment)},
                'ReturnValues': 'UPDATED_NEW'
            }
            self._call_ddb_method(self.table.update_item, params, response)
            if response.status == 'success':
                if 'Attributes' in response.raw_response:
                    self._replace_decimals(response.raw_response)
                    attr = response.raw_response['Attributes'][counter_name]
                    response.data = attr
        response.prepare()
        return response

    def bulk_delete(self, query, **kwargs):
        """
        Perform a search and delete all items that match.
        """
        response = self._new_response()
        if self._check_supported_op('search', response):
            n = 0
            pe = 'id'
            response = self.search(query, projection_expression=pe, **kwargs)
            while response.status == 'success' and response.data:
                failed = False
                for item in response.data:
                    delete_response = self.delete(item['id'])
                    # BUG FIX: the original tested the (successful) search
                    # response's status here, so delete failures were
                    # ignored and an unsupported delete op looped forever.
                    if delete_response.status != 'success':
                        response = delete_response
                        failed = True
                        break
                    n += 1
                if failed:
                    break
                response = self.search(
                    query, projection_expression=pe, **kwargs)
            if response.status == 'success':
                response.data = {'deleted': n}
        return response

    def delete(self, id, id_name='id', **kwargs):
        """
        Deletes the item corresponding to ``id``.
        """
        response = self._new_response()
        if self._check_supported_op('delete', response):
            params = {'Key': {id_name: id}}
            self._call_ddb_method(self.table.delete_item, params, response)
            response.data = 'true'
        response.prepare()
        return response

    def handler(self, operation=None, **kwargs):
        """
        In addition to the methods described above, cruddy also provides a
        generic handler interface.  This is mainly useful when you want to wrap
        a cruddy handler in a Lambda function and then call that Lambda
        function to access the CRUD capabilities.
        To call the handler, you simply put all necessary parameters into a
        Python dictionary and then call the handler with that dict.
        ```
        params = {
            'operation': 'create',
            'item': {'foo': 'bar', 'fie': 'baz'}
        }
        response = crud.handler(**params)
        ```
        """
        response = self._new_response()
        if operation is None:
            response.status = 'error'
            response.error_type = 'MissingOperation'
            response.error_message = 'You must pass an operation'
            return response
        operation = operation.lower()
        self._check_supported_op(operation, response)
        if response.status == 'success':
            method = getattr(self, operation, None)
            if callable(method):
                response = method(**kwargs)
            else:
                # BUG FIX: the original used ``==`` here -- a comparison
                # with no effect -- so the status was never set to 'error'.
                response.status = 'error'
                response.error_type = 'NotImplemented'
                msg = 'Operation: {} is not implemented'.format(operation)
                response.error_message = msg
        return response
|
Min-ops/cruddy
|
cruddy/__init__.py
|
CRUD.bulk_delete
|
python
|
def bulk_delete(self, query, **kwargs):
response = self._new_response()
if self._check_supported_op('search', response):
n = 0
pe = 'id'
response = self.search(query, projection_expression=pe, **kwargs)
while response.status == 'success' and response.data:
for item in response.data:
delete_response = self.delete(item['id'])
if response.status != 'success':
response = delete_response
break
n += 1
response = self.search(
query, projection_expression=pe, **kwargs)
if response.status == 'success':
response.data = {'deleted': n}
return response
|
Perform a search and delete all items that match.
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/__init__.py#L388-L408
|
[
"def _check_supported_op(self, op_name, response):\n if op_name not in self.supported_ops:\n response.status = 'error'\n response.error_type = 'UnsupportedOperation'\n response.error_message = 'Unsupported operation: {}'.format(\n op_name)\n return False\n return True\n",
"def _new_response(self):\n return CRUDResponse(self._debug)\n"
] |
class CRUD(object):
SupportedOps = ["create", "update", "get", "delete", "bulk_delete",
"list", "search", "increment_counter",
"describe", "ping"]
def __init__(self, **kwargs):
"""
Create a new CRUD handler. The CRUD handler accepts the following
parameters:
* table_name - name of the backing DynamoDB table (required)
* profile_name - name of the AWS credential profile to use when
creating the boto3 Session
* region_name - name of the AWS region to use when creating the
boto3 Session
* prototype - a dictionary of name/value pairs that will be used to
initialize newly created items
* supported_ops - a list of operations supported by the CRUD handler
(choices are list, get, create, update, delete, search,
increment_counter, describe, help, ping)
* encrypted_attributes - a list of tuples where the first item in the
tuple is the name of the attribute that should be encrypted and the
second item in the tuple is the KMS master key ID to use for
encrypting/decrypting the value
* debug - if not False this will cause the raw_response to be left
in the response dictionary
"""
self.table_name = kwargs['table_name']
profile_name = kwargs.get('profile_name')
region_name = kwargs.get('region_name')
placebo = kwargs.get('placebo')
placebo_dir = kwargs.get('placebo_dir')
placebo_mode = kwargs.get('placebo_mode', 'record')
self.prototype = kwargs.get('prototype', dict())
self._prototype_handler = PrototypeHandler(self.prototype)
self.supported_ops = kwargs.get('supported_ops', self.SupportedOps)
self.supported_ops.append('describe')
self.encrypted_attributes = kwargs.get('encrypted_attributes', list())
session = boto3.Session(profile_name=profile_name,
region_name=region_name)
if placebo and placebo_dir:
self.pill = placebo.attach(session, placebo_dir, debug=True)
if placebo_mode == 'record':
self.pill.record()
else:
self.pill.playback()
else:
self.pill = None
ddb_resource = session.resource('dynamodb')
self.table = ddb_resource.Table(self.table_name)
self._indexes = {}
self._analyze_table()
self._debug = kwargs.get('debug', False)
if self.encrypted_attributes:
self._kms_client = session.client('kms')
else:
self._kms_client = None
def _analyze_table(self):
# First check the Key Schema
if len(self.table.key_schema) != 1:
LOG.info('cruddy does not support RANGE keys')
else:
self._indexes[self.table.key_schema[0]['AttributeName']] = None
# Now process any GSI's
if self.table.global_secondary_indexes:
for gsi in self.table.global_secondary_indexes:
# find HASH of GSI, that's all we support for now
# if the GSI has a RANGE, we ignore it for now
if len(gsi['KeySchema']) == 1:
gsi_hash = gsi['KeySchema'][0]['AttributeName']
self._indexes[gsi_hash] = gsi['IndexName']
# Because the Boto3 DynamoDB client turns all numeric types into Decimals
# (which is actually the right thing to do) we need to convert those
# Decimal values back into integers or floats before serializing to JSON.
def _replace_decimals(self, obj):
if isinstance(obj, list):
for i in xrange(len(obj)):
obj[i] = self._replace_decimals(obj[i])
return obj
elif isinstance(obj, dict):
for k in obj.iterkeys():
obj[k] = self._replace_decimals(obj[k])
return obj
elif isinstance(obj, decimal.Decimal):
if obj % 1 == 0:
return int(obj)
else:
return float(obj)
else:
return obj
def _encrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.encrypt(
KeyId=master_key_id,
Plaintext=item[encrypted_attr])
blob = response['CiphertextBlob']
item[encrypted_attr] = base64.b64encode(blob)
def _decrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.decrypt(
CiphertextBlob=base64.b64decode(item[encrypted_attr]))
item[encrypted_attr] = response['Plaintext']
def _check_supported_op(self, op_name, response):
if op_name not in self.supported_ops:
response.status = 'error'
response.error_type = 'UnsupportedOperation'
response.error_message = 'Unsupported operation: {}'.format(
op_name)
return False
return True
def _call_ddb_method(self, method, kwargs, response):
try:
response.raw_response = method(**kwargs)
except ClientError as e:
LOG.debug(e)
response.status = 'error'
response.error_message = e.response['Error'].get('Message')
response.error_code = e.response['Error'].get('Code')
response.error_type = e.response['Error'].get('Type')
except Exception as e:
response.status = 'error'
response.error_type = e.__class__.__name__
response.error_code = None
response.error_message = str(e)
def _new_response(self):
return CRUDResponse(self._debug)
def ping(self, **kwargs):
"""
A no-op method that simply returns a successful response.
"""
response = self._new_response()
return response
def describe(self, **kwargs):
"""
Returns descriptive information about this cruddy handler and the
methods supported by it.
"""
response = self._new_response()
description = {
'cruddy_version': __version__,
'table_name': self.table_name,
'supported_operations': copy.copy(self.supported_ops),
'prototype': copy.deepcopy(self.prototype),
'operations': {}
}
for name, method in inspect.getmembers(self, inspect.ismethod):
if not name.startswith('_'):
argspec = inspect.getargspec(method)
if argspec.defaults is None:
defaults = None
else:
defaults = list(argspec.defaults)
method_info = {
'docs': inspect.getdoc(method),
'argspec': {
'args': argspec.args,
'varargs': argspec.varargs,
'keywords': argspec.keywords,
'defaults': defaults
}
}
description['operations'][name] = method_info
response.data = description
return response
def search(self, query, **kwargs):
"""
Cruddy provides a limited but useful interface to search GSI indexes in
DynamoDB with the following limitations (hopefully some of these will
be expanded or eliminated in the future.
* The GSI must be configured with a only HASH and not a RANGE.
* The only operation supported in the query is equality
To use the ``search`` operation you must pass in a query string of this
form:
<attribute_name>=<value>
As stated above, the only operation currently supported is equality (=)
but other operations will be added over time. Also, the
``attribute_name`` must be an attribute which is configured as the
``HASH`` of a GSI in the DynamoDB table. If all of the above
conditions are met, the ``query`` operation will return a list
(possibly empty) of all items matching the query and the ``status`` of
the response will be ``success``. Otherwise, the ``status`` will be
``error`` and the ``error_type`` and ``error_message`` will provide
further information about the error.
"""
response = self._new_response()
if self._check_supported_op('search', response):
if '=' not in query:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Only the = operation is supported'
response.error_message = msg
else:
key, value = query.split('=')
if key not in self._indexes:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Attribute {} is not indexed'.format(key)
response.error_message = msg
else:
params = {'KeyConditionExpression': Key(key).eq(value)}
index_name = self._indexes[key]
if index_name:
params['IndexName'] = index_name
pe = kwargs.get('projection_expression')
if pe:
params['ProjectionExpression'] = pe
self._call_ddb_method(self.table.query,
params, response)
if response.status == 'success':
response.data = self._replace_decimals(
response.raw_response['Items'])
response.prepare()
return response
def list(self, **kwargs):
"""
Returns a list of items in the database. Encrypted attributes are not
decrypted when listing items.
"""
response = self._new_response()
if self._check_supported_op('list', response):
self._call_ddb_method(self.table.scan, {}, response)
if response.status == 'success':
response.data = self._replace_decimals(
response.raw_response['Items'])
response.prepare()
return response
def get(self, id, decrypt=False, id_name='id', **kwargs):
"""
Returns the item corresponding to ``id``. If the ``decrypt`` param is
not False (the default) any encrypted attributes in the item will be
decrypted before the item is returned. If not, the encrypted
attributes will contain the encrypted value.
"""
response = self._new_response()
if self._check_supported_op('get', response):
if id is None:
response.status = 'error'
response.error_type = 'IDRequired'
response.error_message = 'Get requires an id'
else:
params = {'Key': {id_name: id},
'ConsistentRead': True}
self._call_ddb_method(self.table.get_item,
params, response)
if response.status == 'success':
if 'Item' in response.raw_response:
item = response.raw_response['Item']
if decrypt:
self._decrypt(item)
response.data = self._replace_decimals(item)
else:
response.status = 'error'
response.error_type = 'NotFound'
msg = 'item ({}) not found'.format(id)
response.error_message = msg
response.prepare()
return response
def create(self, item, **kwargs):
"""
Creates a new item. You pass in an item containing initial values.
Any attribute names defined in ``prototype`` that are missing from the
item will be added using the default value defined in ``prototype``.
"""
response = self._new_response()
if self._prototype_handler.check(item, 'create', response):
self._encrypt(item)
params = {'Item': item}
self._call_ddb_method(self.table.put_item,
params, response)
if response.status == 'success':
response.data = item
response.prepare()
return response
def update(self, item, encrypt=True, **kwargs):
"""
Updates the item based on the current values of the dictionary passed
in.
"""
response = self._new_response()
if self._check_supported_op('update', response):
if self._prototype_handler.check(item, 'update', response):
if encrypt:
self._encrypt(item)
params = {'Item': item}
self._call_ddb_method(self.table.put_item,
params, response)
if response.status == 'success':
response.data = item
response.prepare()
return response
def increment_counter(self, id, counter_name, increment=1,
                      id_name='id', **kwargs):
    """
    Atomically increments a counter attribute in the item identified by
    ``id``. You must specify the name of the attribute as ``counter_name``
    and, optionally, the ``increment`` which defaults to ``1``.
    """
    response = self._new_response()
    if self._check_supported_op('increment_counter', response):
        # The UpdateExpression performs the addition server-side, so
        # concurrent increments cannot lose updates.
        params = {
            'Key': {id_name: id},
            'UpdateExpression': 'set #ctr = #ctr + :val',
            # Name placeholder avoids clashes with DynamoDB reserved
            # words in the attribute name.
            'ExpressionAttributeNames': {"#ctr": counter_name},
            'ExpressionAttributeValues': {
                ':val': decimal.Decimal(increment)},
            'ReturnValues': 'UPDATED_NEW'
        }
        self._call_ddb_method(self.table.update_item, params, response)
        if response.status == 'success':
            if 'Attributes' in response.raw_response:
                # Convert the returned Decimal counter to int/float.
                self._replace_decimals(response.raw_response)
                attr = response.raw_response['Attributes'][counter_name]
                response.data = attr
    response.prepare()
    return response
def delete(self, id, id_name='id', **kwargs):
    """
    Delete the item whose key attribute ``id_name`` equals ``id``.
    """
    response = self._new_response()
    if self._check_supported_op('delete', response):
        self._call_ddb_method(
            self.table.delete_item, {'Key': {id_name: id}}, response)
        # delete_item returns no useful payload; report a simple
        # truthy marker instead.
        response.data = 'true'
    response.prepare()
    return response
def handler(self, operation=None, **kwargs):
    """
    In addition to the methods described above, cruddy also provides a
    generic handler interface. This is mainly useful when you want to wrap
    a cruddy handler in a Lambda function and then call that Lambda
    function to access the CRUD capabilities.
    To call the handler, you simply put all necessary parameters into a
    Python dictionary and then call the handler with that dict.
    ```
    params = {
        'operation': 'create',
        'item': {'foo': 'bar', 'fie': 'baz'}
    }
    response = crud.handler(**params)
    ```
    """
    response = self._new_response()
    if operation is None:
        response.status = 'error'
        response.error_type = 'MissingOperation'
        response.error_message = 'You must pass an operation'
        return response
    operation = operation.lower()
    self._check_supported_op(operation, response)
    if response.status == 'success':
        method = getattr(self, operation, None)
        if callable(method):
            response = method(**kwargs)
        else:
            # BUG FIX: this was ``response.status == 'error'`` (a
            # comparison), so the error status was never recorded.
            response.status = 'error'
            response.error_type = 'NotImplemented'
            msg = 'Operation: {} is not implemented'.format(operation)
            response.error_message = msg
    return response
|
Min-ops/cruddy
|
cruddy/__init__.py
|
CRUD.handler
|
python
|
def handler(self, operation=None, **kwargs):
    """Generic dispatch entry point: route ``operation`` to the matching
    CRUD method, passing along the remaining keyword arguments."""
    response = self._new_response()
    if operation is None:
        response.status = 'error'
        response.error_type = 'MissingOperation'
        response.error_message = 'You must pass an operation'
        return response
    operation = operation.lower()
    self._check_supported_op(operation, response)
    if response.status == 'success':
        method = getattr(self, operation, None)
        if callable(method):
            response = method(**kwargs)
        else:
            # BUG FIX: was ``response.status == 'error'`` (comparison,
            # not assignment), silently dropping the error status.
            response.status = 'error'
            response.error_type = 'NotImplemented'
            msg = 'Operation: {} is not implemented'.format(operation)
            response.error_message = msg
    return response
|
In addition to the methods described above, cruddy also provides a
generic handler interface. This is mainly useful when you want to wrap
a cruddy handler in a Lambda function and then call that Lambda
function to access the CRUD capabilities.
To call the handler, you simply put all necessary parameters into a
Python dictionary and then call the handler with that dict.
```
params = {
'operation': 'create',
'item': {'foo': 'bar', 'fie': 'baz'}
}
response = crud.handler(**params)
```
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/__init__.py#L410-L445
|
[
"def _check_supported_op(self, op_name, response):\n if op_name not in self.supported_ops:\n response.status = 'error'\n response.error_type = 'UnsupportedOperation'\n response.error_message = 'Unsupported operation: {}'.format(\n op_name)\n return False\n return True\n",
"def _new_response(self):\n return CRUDResponse(self._debug)\n"
] |
class CRUD(object):
SupportedOps = ["create", "update", "get", "delete", "bulk_delete",
"list", "search", "increment_counter",
"describe", "ping"]
def __init__(self, **kwargs):
"""
Create a new CRUD handler. The CRUD handler accepts the following
parameters:
* table_name - name of the backing DynamoDB table (required)
* profile_name - name of the AWS credential profile to use when
creating the boto3 Session
* region_name - name of the AWS region to use when creating the
boto3 Session
* prototype - a dictionary of name/value pairs that will be used to
initialize newly created items
* supported_ops - a list of operations supported by the CRUD handler
(choices are list, get, create, update, delete, search,
increment_counter, describe, help, ping)
* encrypted_attributes - a list of tuples where the first item in the
tuple is the name of the attribute that should be encrypted and the
second item in the tuple is the KMS master key ID to use for
encrypting/decrypting the value
* debug - if not False this will cause the raw_response to be left
in the response dictionary
"""
self.table_name = kwargs['table_name']
profile_name = kwargs.get('profile_name')
region_name = kwargs.get('region_name')
placebo = kwargs.get('placebo')
placebo_dir = kwargs.get('placebo_dir')
placebo_mode = kwargs.get('placebo_mode', 'record')
self.prototype = kwargs.get('prototype', dict())
self._prototype_handler = PrototypeHandler(self.prototype)
self.supported_ops = kwargs.get('supported_ops', self.SupportedOps)
self.supported_ops.append('describe')
self.encrypted_attributes = kwargs.get('encrypted_attributes', list())
session = boto3.Session(profile_name=profile_name,
region_name=region_name)
if placebo and placebo_dir:
self.pill = placebo.attach(session, placebo_dir, debug=True)
if placebo_mode == 'record':
self.pill.record()
else:
self.pill.playback()
else:
self.pill = None
ddb_resource = session.resource('dynamodb')
self.table = ddb_resource.Table(self.table_name)
self._indexes = {}
self._analyze_table()
self._debug = kwargs.get('debug', False)
if self.encrypted_attributes:
self._kms_client = session.client('kms')
else:
self._kms_client = None
def _analyze_table(self):
# First check the Key Schema
if len(self.table.key_schema) != 1:
LOG.info('cruddy does not support RANGE keys')
else:
self._indexes[self.table.key_schema[0]['AttributeName']] = None
# Now process any GSI's
if self.table.global_secondary_indexes:
for gsi in self.table.global_secondary_indexes:
# find HASH of GSI, that's all we support for now
# if the GSI has a RANGE, we ignore it for now
if len(gsi['KeySchema']) == 1:
gsi_hash = gsi['KeySchema'][0]['AttributeName']
self._indexes[gsi_hash] = gsi['IndexName']
# Because the Boto3 DynamoDB client turns all numeric types into Decimals
# (which is actually the right thing to do) we need to convert those
# Decimal values back into integers or floats before serializing to JSON.
def _replace_decimals(self, obj):
if isinstance(obj, list):
for i in xrange(len(obj)):
obj[i] = self._replace_decimals(obj[i])
return obj
elif isinstance(obj, dict):
for k in obj.iterkeys():
obj[k] = self._replace_decimals(obj[k])
return obj
elif isinstance(obj, decimal.Decimal):
if obj % 1 == 0:
return int(obj)
else:
return float(obj)
else:
return obj
def _encrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.encrypt(
KeyId=master_key_id,
Plaintext=item[encrypted_attr])
blob = response['CiphertextBlob']
item[encrypted_attr] = base64.b64encode(blob)
def _decrypt(self, item):
for encrypted_attr, master_key_id in self.encrypted_attributes:
if encrypted_attr in item:
response = self._kms_client.decrypt(
CiphertextBlob=base64.b64decode(item[encrypted_attr]))
item[encrypted_attr] = response['Plaintext']
def _check_supported_op(self, op_name, response):
if op_name not in self.supported_ops:
response.status = 'error'
response.error_type = 'UnsupportedOperation'
response.error_message = 'Unsupported operation: {}'.format(
op_name)
return False
return True
def _call_ddb_method(self, method, kwargs, response):
try:
response.raw_response = method(**kwargs)
except ClientError as e:
LOG.debug(e)
response.status = 'error'
response.error_message = e.response['Error'].get('Message')
response.error_code = e.response['Error'].get('Code')
response.error_type = e.response['Error'].get('Type')
except Exception as e:
response.status = 'error'
response.error_type = e.__class__.__name__
response.error_code = None
response.error_message = str(e)
def _new_response(self):
return CRUDResponse(self._debug)
def ping(self, **kwargs):
"""
A no-op method that simply returns a successful response.
"""
response = self._new_response()
return response
def describe(self, **kwargs):
"""
Returns descriptive information about this cruddy handler and the
methods supported by it.
"""
response = self._new_response()
description = {
'cruddy_version': __version__,
'table_name': self.table_name,
'supported_operations': copy.copy(self.supported_ops),
'prototype': copy.deepcopy(self.prototype),
'operations': {}
}
for name, method in inspect.getmembers(self, inspect.ismethod):
if not name.startswith('_'):
argspec = inspect.getargspec(method)
if argspec.defaults is None:
defaults = None
else:
defaults = list(argspec.defaults)
method_info = {
'docs': inspect.getdoc(method),
'argspec': {
'args': argspec.args,
'varargs': argspec.varargs,
'keywords': argspec.keywords,
'defaults': defaults
}
}
description['operations'][name] = method_info
response.data = description
return response
def search(self, query, **kwargs):
    """
    Cruddy provides a limited but useful interface to search GSI indexes in
    DynamoDB with the following limitations (hopefully some of these will
    be expanded or eliminated in the future.
    * The GSI must be configured with a only HASH and not a RANGE.
    * The only operation supported in the query is equality
    To use the ``search`` operation you must pass in a query string of this
    form:
    <attribute_name>=<value>
    As stated above, the only operation currently supported is equality (=)
    but other operations will be added over time. Also, the
    ``attribute_name`` must be an attribute which is configured as the
    ``HASH`` of a GSI in the DynamoDB table. If all of the above
    conditions are met, the ``query`` operation will return a list
    (possibly empty) of all items matching the query and the ``status`` of
    the response will be ``success``. Otherwise, the ``status`` will be
    ``error`` and the ``error_type`` and ``error_message`` will provide
    further information about the error.
    """
    response = self._new_response()
    if self._check_supported_op('search', response):
        if '=' not in query:
            # Only <attr>=<value> queries are understood.
            response.status = 'error'
            response.error_type = 'InvalidQuery'
            msg = 'Only the = operation is supported'
            response.error_message = msg
        else:
            key, value = query.split('=')
            # _indexes maps hash-key attribute -> GSI name (None for
            # the table's own hash key), built by _analyze_table.
            if key not in self._indexes:
                response.status = 'error'
                response.error_type = 'InvalidQuery'
                msg = 'Attribute {} is not indexed'.format(key)
                response.error_message = msg
            else:
                params = {'KeyConditionExpression': Key(key).eq(value)}
                index_name = self._indexes[key]
                if index_name:
                    # Query the GSI; omitted for the primary key.
                    params['IndexName'] = index_name
                pe = kwargs.get('projection_expression')
                if pe:
                    params['ProjectionExpression'] = pe
                self._call_ddb_method(self.table.query,
                                      params, response)
                if response.status == 'success':
                    response.data = self._replace_decimals(
                        response.raw_response['Items'])
    response.prepare()
    return response
def list(self, **kwargs):
    """
    Return all items in the table via a full scan.

    Encrypted attributes are returned as stored (still encrypted).
    """
    response = self._new_response()
    if self._check_supported_op('list', response):
        self._call_ddb_method(self.table.scan, {}, response)
        if response.status == 'success':
            response.data = self._replace_decimals(
                response.raw_response['Items'])
    response.prepare()
    return response
def get(self, id, decrypt=False, id_name='id', **kwargs):
"""
Returns the item corresponding to ``id``. If the ``decrypt`` param is
not False (the default) any encrypted attributes in the item will be
decrypted before the item is returned. If not, the encrypted
attributes will contain the encrypted value.
"""
response = self._new_response()
if self._check_supported_op('get', response):
if id is None:
response.status = 'error'
response.error_type = 'IDRequired'
response.error_message = 'Get requires an id'
else:
params = {'Key': {id_name: id},
'ConsistentRead': True}
self._call_ddb_method(self.table.get_item,
params, response)
if response.status == 'success':
if 'Item' in response.raw_response:
item = response.raw_response['Item']
if decrypt:
self._decrypt(item)
response.data = self._replace_decimals(item)
else:
response.status = 'error'
response.error_type = 'NotFound'
msg = 'item ({}) not found'.format(id)
response.error_message = msg
response.prepare()
return response
def create(self, item, **kwargs):
"""
Creates a new item. You pass in an item containing initial values.
Any attribute names defined in ``prototype`` that are missing from the
item will be added using the default value defined in ``prototype``.
"""
response = self._new_response()
if self._prototype_handler.check(item, 'create', response):
self._encrypt(item)
params = {'Item': item}
self._call_ddb_method(self.table.put_item,
params, response)
if response.status == 'success':
response.data = item
response.prepare()
return response
def update(self, item, encrypt=True, **kwargs):
"""
Updates the item based on the current values of the dictionary passed
in.
"""
response = self._new_response()
if self._check_supported_op('update', response):
if self._prototype_handler.check(item, 'update', response):
if encrypt:
self._encrypt(item)
params = {'Item': item}
self._call_ddb_method(self.table.put_item,
params, response)
if response.status == 'success':
response.data = item
response.prepare()
return response
def increment_counter(self, id, counter_name, increment=1,
id_name='id', **kwargs):
"""
Atomically increments a counter attribute in the item identified by
``id``. You must specify the name of the attribute as ``counter_name``
and, optionally, the ``increment`` which defaults to ``1``.
"""
response = self._new_response()
if self._check_supported_op('increment_counter', response):
params = {
'Key': {id_name: id},
'UpdateExpression': 'set #ctr = #ctr + :val',
'ExpressionAttributeNames': {"#ctr": counter_name},
'ExpressionAttributeValues': {
':val': decimal.Decimal(increment)},
'ReturnValues': 'UPDATED_NEW'
}
self._call_ddb_method(self.table.update_item, params, response)
if response.status == 'success':
if 'Attributes' in response.raw_response:
self._replace_decimals(response.raw_response)
attr = response.raw_response['Attributes'][counter_name]
response.data = attr
response.prepare()
return response
def delete(self, id, id_name='id', **kwargs):
"""
Deletes the item corresponding to ``id``.
"""
response = self._new_response()
if self._check_supported_op('delete', response):
params = {'Key': {id_name: id}}
self._call_ddb_method(self.table.delete_item, params, response)
response.data = 'true'
response.prepare()
return response
def bulk_delete(self, query, **kwargs):
    """
    Perform a search and delete all items that match.

    On success ``response.data`` is ``{'deleted': n}``; if any single
    delete fails, that delete's error response is returned instead.
    """
    response = self._new_response()
    # bulk_delete is implemented on top of search, so search support is
    # what is actually required.
    if self._check_supported_op('search', response):
        n = 0
        # Only project the id attribute; that is all delete() needs.
        pe = 'id'
        response = self.search(query, projection_expression=pe, **kwargs)
        while response.status == 'success' and response.data:
            failed = False
            for item in response.data:
                delete_response = self.delete(item['id'])
                # BUG FIX: this previously tested ``response.status``
                # (always 'success' here) instead of the delete result,
                # so failed deletes were silently ignored and the error
                # was then overwritten by the re-search below.
                if delete_response.status != 'success':
                    response = delete_response
                    failed = True
                    break
                n += 1
            if failed:
                break
            # Re-run the search to pick up any remaining matches.
            response = self.search(
                query, projection_expression=pe, **kwargs)
        if response.status == 'success':
            response.data = {'deleted': n}
    return response
|
Min-ops/cruddy
|
cruddy/scripts/cli.py
|
cli
|
python
|
def cli(ctx, profile, region, lambda_fn, config, debug):
ctx.obj = CLIHandler(profile, region, lambda_fn, config, debug)
|
cruddy is a CLI interface to the cruddy handler. It can be used in one
of two ways.
First, you can pass in a ``--config`` option which is a JSON file
containing all of your cruddy parameters and the CLI will create a cruddy
handler to manipulate the DynamoDB table directly.
Alternatively, you can pass in a ``--lambda-fn`` option which is the
name of an AWS Lambda function which contains a cruddy handler. In this
case the CLI will call the Lambda function to make the changes in the
underlying DynamoDB table.
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/scripts/cli.py#L90-L104
| null |
# Copyright (c) 2016 CloudNative, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import click
from cruddy import CRUD
from cruddy.lambdaclient import LambdaClient
class CLIHandler(object):
    """Dispatch CLI payloads either to an AWS Lambda function wrapping a
    cruddy handler or to a locally-constructed CRUD handler.

    Exactly one of ``lambda_fn`` or ``config_file`` should be supplied;
    ``invoke`` reports an error when neither backend is configured.
    """

    def __init__(self, profile_name, region_name,
                 lambda_fn, config_file, debug=False):
        # Name of the Lambda function to call, or falsy for local mode.
        self.lambda_fn = lambda_fn
        self.lambda_client = None
        if lambda_fn:
            self.lambda_client = LambdaClient(
                profile_name=profile_name, region_name=region_name,
                func_name=lambda_fn, debug=debug)
        if config_file:
            # config_file is an open file object (click.File) holding
            # the JSON kwargs for the CRUD constructor.
            config = json.load(config_file)
            self.crud = CRUD(**config)
        self.debug = debug

    def _handle_response(self, response):
        # Pretty-print success data; colorize errors on stderr-style
        # output.
        if response.status == 'success':
            click.echo(json.dumps(response.data, indent=4))
        else:
            click.echo(click.style(response.status, fg='red'))
            click.echo(click.style(response.error_type, fg='red'))
            click.echo(click.style(response.error_message, fg='red'))

    def _invoke_lambda(self, payload, raw):
        response = self.lambda_client.invoke(payload)
        if raw:
            return response
        self._handle_response(response)

    def _invoke_cruddy(self, payload, raw):
        response = self.crud.handler(**payload)
        if raw:
            return response
        self._handle_response(response)

    def invoke(self, payload, raw=False):
        """Send ``payload`` to the configured backend.

        With ``raw=True`` the response object is returned instead of
        being printed.
        """
        if self.lambda_fn:
            return self._invoke_lambda(payload, raw)
        elif self.crud:
            return self._invoke_cruddy(payload, raw)
        else:
            msg = 'You must specify either --lambda-fn or --config'
            click.echo(click.style(msg, fg='red'))
pass_handler = click.make_pass_decorator(CLIHandler)
@click.group()
@click.option(
'--profile',
default=None,
help='AWS credential profile')
@click.option(
'--region',
default=None,
help='AWS region')
@click.option(
'--lambda-fn',
help='AWS Lambda controller name')
@click.option(
'--config',
help='cruddy config file', type=click.File('rb'))
@click.option(
'--debug/--no-debug',
default=False,
help='Turn on debugging output'
)
@click.version_option('0.11.1')
@click.pass_context
@cli.command()
@pass_handler
def describe(handler):
"""Describe the cruddy handler"""
data = {'operation': 'describe'}
handler.invoke(data)
@cli.command()
@pass_handler
def list(handler):
"""List the items"""
data = {'operation': 'list'}
handler.invoke(data)
@cli.command()
@click.option(
'--decrypt/--no-decrypt',
default=False,
help='Decrypt any encrypted attributes')
@click.argument('item_id', nargs=1)
@pass_handler
def get(handler, item_id, decrypt):
"""Get an item"""
data = {'operation': 'get',
'decrypt': decrypt,
'id': item_id}
handler.invoke(data)
@cli.command()
@click.option('--id-name', default='id', help='Name of id attribute')
@click.argument('item_id', nargs=1)
@pass_handler
def delete(handler, item_id, id_name):
"""Delete an item"""
data = {'operation': 'delete',
'id': item_id,
'id_name': id_name}
handler.invoke(data)
@cli.command()
@click.argument('query', nargs=1)
@pass_handler
def bulk_delete(handler, query):
"""Perform a search and delete all items that match"""
data = {'operation': 'bulk_delete',
'query': query}
handler.invoke(data)
@cli.command()
@click.argument('query', nargs=1)
@pass_handler
def search(handler, query):
"""Perform a search"""
data = {'operation': 'search',
'query': query}
handler.invoke(data)
@cli.command()
@click.option('--increment', default=1, help='increment by this much')
@click.argument('item_id', nargs=1)
@click.argument('counter_name', nargs=1)
@pass_handler
def increment(handler, increment, item_id, counter_name):
"""Increment a counter attribute atomically"""
data = {'operation': 'increment_counter',
'id': item_id,
'counter_name': counter_name,
'increment': increment}
handler.invoke(data)
@cli.command()
@click.argument('item_document', type=click.File('rb'))
@pass_handler
def create(handler, item_document):
"""Create a new item from a JSON document"""
data = {'operation': 'create',
'item': json.load(item_document)}
handler.invoke(data)
@cli.command()
@click.option(
    '--encrypt/--no-encrypt',
    default=True,
    help='Encrypt any encrypted attributes')
@click.argument('item_document', type=click.File('rb'))
@pass_handler
def update(handler, item_document, encrypt):
    """Update an item from a JSON document"""
    # BUG FIX: ``encrypt`` was referenced below but missing from the
    # callback signature, so the --encrypt option raised a NameError.
    data = {'operation': 'update',
            'encrypt': encrypt,
            'item': json.load(item_document)}
    handler.invoke(data)
def _build_signature_line(method_name, argspec):
arg_len = len(argspec['args'])
if argspec['defaults']:
defaults_offset = arg_len - len(argspec['defaults'])
else:
defaults_offset = 0
signature = '**{}**('.format(method_name)
params = []
for i in range(0, arg_len):
param_string = argspec['args'][i]
if argspec['defaults'] is not None:
if i >= defaults_offset:
param_string += '={}'.format(
argspec['defaults'][i - defaults_offset])
params.append(param_string)
signature += ', '.join(params)
signature += ')'
return signature
@cli.command()
@pass_handler
def help(handler):
"""
Returns a Markdown document that describes this handler and
it's operations.
"""
data = {'operation': 'describe'}
response = handler.invoke(data, raw=True)
description = response.data
lines = []
lines.append('# {}'.format(handler.lambda_fn))
lines.append('## Handler Info')
lines.append('**Cruddy version**: {}'.format(
description['cruddy_version']))
lines.append('')
lines.append('**Table name**: {}'.format(description['table_name']))
lines.append('')
lines.append('**Supported operations**:')
lines.append('')
for op in description['supported_operations']:
lines.append('* {}'.format(op))
lines.append('')
lines.append('**Prototype**:')
lines.append('')
lines.append('```')
lines.append(str(description['prototype']))
lines.append('```')
lines.append('')
lines.append('## Operations')
for op_name in description['operations']:
op = description['operations'][op_name]
lines.append('### {}'.format(op_name))
lines.append('')
lines.append(_build_signature_line(
op_name, description['operations'][op_name]['argspec']))
lines.append('')
if op['docs'] is None:
lines.append('')
else:
lines.append(op['docs'])
lines.append('')
click.echo('\n'.join(lines))
if __name__ == '__main__':
    # BUG FIX: this previously called ``list()``, which is the ``list``
    # subcommand (shadowing the builtin), bypassing the group's options
    # and context setup. The click group is the intended entry point.
    cli()
|
Min-ops/cruddy
|
cruddy/scripts/cli.py
|
get
|
python
|
def get(handler, item_id, decrypt):
data = {'operation': 'get',
'decrypt': decrypt,
'id': item_id}
handler.invoke(data)
|
Get an item
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/scripts/cli.py#L130-L135
| null |
# Copyright (c) 2016 CloudNative, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import click
from cruddy import CRUD
from cruddy.lambdaclient import LambdaClient
class CLIHandler(object):
def __init__(self, profile_name, region_name,
lambda_fn, config_file, debug=False):
self.lambda_fn = lambda_fn
self.lambda_client = None
if lambda_fn:
self.lambda_client = LambdaClient(
profile_name=profile_name, region_name=region_name,
func_name=lambda_fn, debug=debug)
if config_file:
config = json.load(config_file)
self.crud = CRUD(**config)
self.debug = debug
def _handle_response(self, response):
if response.status == 'success':
click.echo(json.dumps(response.data, indent=4))
else:
click.echo(click.style(response.status, fg='red'))
click.echo(click.style(response.error_type, fg='red'))
click.echo(click.style(response.error_message, fg='red'))
def _invoke_lambda(self, payload, raw):
response = self.lambda_client.invoke(payload)
if raw:
return response
self._handle_response(response)
def _invoke_cruddy(self, payload, raw):
response = self.crud.handler(**payload)
if raw:
return response
self._handle_response(response)
def invoke(self, payload, raw=False):
if self.lambda_fn:
return self._invoke_lambda(payload, raw)
elif self.crud:
return self._invoke_cruddy(payload, raw)
else:
msg = 'You must specify either --lambda-fn or --config'
click.echo(click.style(msg, fg='red'))
pass_handler = click.make_pass_decorator(CLIHandler)
@click.group()
@click.option(
'--profile',
default=None,
help='AWS credential profile')
@click.option(
'--region',
default=None,
help='AWS region')
@click.option(
'--lambda-fn',
help='AWS Lambda controller name')
@click.option(
'--config',
help='cruddy config file', type=click.File('rb'))
@click.option(
'--debug/--no-debug',
default=False,
help='Turn on debugging output'
)
@click.version_option('0.11.1')
@click.pass_context
def cli(ctx, profile, region, lambda_fn, config, debug):
"""
cruddy is a CLI interface to the cruddy handler. It can be used in one
of two ways.
First, you can pass in a ``--config`` option which is a JSON file
containing all of your cruddy parameters and the CLI will create a cruddy
handler to manipulate the DynamoDB table directly.
Alternatively, you can pass in a ``--lambda-fn`` option which is the
name of an AWS Lambda function which contains a cruddy handler. In this
case the CLI will call the Lambda function to make the changes in the
underlying DynamoDB table.
"""
ctx.obj = CLIHandler(profile, region, lambda_fn, config, debug)
@cli.command()
@pass_handler
def describe(handler):
"""Describe the cruddy handler"""
data = {'operation': 'describe'}
handler.invoke(data)
@cli.command()
@pass_handler
def list(handler):
"""List the items"""
data = {'operation': 'list'}
handler.invoke(data)
@cli.command()
@click.option(
'--decrypt/--no-decrypt',
default=False,
help='Decrypt any encrypted attributes')
@click.argument('item_id', nargs=1)
@pass_handler
@cli.command()
@click.option('--id-name', default='id', help='Name of id attribute')
@click.argument('item_id', nargs=1)
@pass_handler
def delete(handler, item_id, id_name):
"""Delete an item"""
data = {'operation': 'delete',
'id': item_id,
'id_name': id_name}
handler.invoke(data)
@cli.command()
@click.argument('query', nargs=1)
@pass_handler
def bulk_delete(handler, query):
"""Perform a search and delete all items that match"""
data = {'operation': 'bulk_delete',
'query': query}
handler.invoke(data)
@cli.command()
@click.argument('query', nargs=1)
@pass_handler
def search(handler, query):
"""Perform a search"""
data = {'operation': 'search',
'query': query}
handler.invoke(data)
@cli.command()
@click.option('--increment', default=1, help='increment by this much')
@click.argument('item_id', nargs=1)
@click.argument('counter_name', nargs=1)
@pass_handler
def increment(handler, increment, item_id, counter_name):
"""Increment a counter attribute atomically"""
data = {'operation': 'increment_counter',
'id': item_id,
'counter_name': counter_name,
'increment': increment}
handler.invoke(data)
@cli.command()
@click.argument('item_document', type=click.File('rb'))
@pass_handler
def create(handler, item_document):
"""Create a new item from a JSON document"""
data = {'operation': 'create',
'item': json.load(item_document)}
handler.invoke(data)
@cli.command()
@click.option(
    '--encrypt/--no-encrypt',
    default=True,
    help='Encrypt any encrypted attributes')
@click.argument('item_document', type=click.File('rb'))
@pass_handler
def update(handler, item_document, encrypt):
    """Update an item from a JSON document"""
    # BUG FIX: ``encrypt`` was referenced below but missing from the
    # callback signature, so the --encrypt option raised a NameError.
    data = {'operation': 'update',
            'encrypt': encrypt,
            'item': json.load(item_document)}
    handler.invoke(data)
def _build_signature_line(method_name, argspec):
arg_len = len(argspec['args'])
if argspec['defaults']:
defaults_offset = arg_len - len(argspec['defaults'])
else:
defaults_offset = 0
signature = '**{}**('.format(method_name)
params = []
for i in range(0, arg_len):
param_string = argspec['args'][i]
if argspec['defaults'] is not None:
if i >= defaults_offset:
param_string += '={}'.format(
argspec['defaults'][i - defaults_offset])
params.append(param_string)
signature += ', '.join(params)
signature += ')'
return signature
@cli.command()
@pass_handler
def help(handler):
"""
Returns a Markdown document that describes this handler and
it's operations.
"""
data = {'operation': 'describe'}
response = handler.invoke(data, raw=True)
description = response.data
lines = []
lines.append('# {}'.format(handler.lambda_fn))
lines.append('## Handler Info')
lines.append('**Cruddy version**: {}'.format(
description['cruddy_version']))
lines.append('')
lines.append('**Table name**: {}'.format(description['table_name']))
lines.append('')
lines.append('**Supported operations**:')
lines.append('')
for op in description['supported_operations']:
lines.append('* {}'.format(op))
lines.append('')
lines.append('**Prototype**:')
lines.append('')
lines.append('```')
lines.append(str(description['prototype']))
lines.append('```')
lines.append('')
lines.append('## Operations')
for op_name in description['operations']:
op = description['operations'][op_name]
lines.append('### {}'.format(op_name))
lines.append('')
lines.append(_build_signature_line(
op_name, description['operations'][op_name]['argspec']))
lines.append('')
if op['docs'] is None:
lines.append('')
else:
lines.append(op['docs'])
lines.append('')
click.echo('\n'.join(lines))
if __name__ == '__main__':
    # BUG FIX: this previously called ``list()``, which is the ``list``
    # subcommand (shadowing the builtin), bypassing the group's options
    # and context setup. The click group is the intended entry point.
    cli()
|
Min-ops/cruddy
|
cruddy/scripts/cli.py
|
delete
|
python
|
def delete(handler, item_id, id_name):
data = {'operation': 'delete',
'id': item_id,
'id_name': id_name}
handler.invoke(data)
|
Delete an item
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/scripts/cli.py#L142-L147
| null |
# Copyright (c) 2016 CloudNative, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import click
from cruddy import CRUD
from cruddy.lambdaclient import LambdaClient
class CLIHandler(object):
def __init__(self, profile_name, region_name,
lambda_fn, config_file, debug=False):
self.lambda_fn = lambda_fn
self.lambda_client = None
if lambda_fn:
self.lambda_client = LambdaClient(
profile_name=profile_name, region_name=region_name,
func_name=lambda_fn, debug=debug)
if config_file:
config = json.load(config_file)
self.crud = CRUD(**config)
self.debug = debug
def _handle_response(self, response):
if response.status == 'success':
click.echo(json.dumps(response.data, indent=4))
else:
click.echo(click.style(response.status, fg='red'))
click.echo(click.style(response.error_type, fg='red'))
click.echo(click.style(response.error_message, fg='red'))
def _invoke_lambda(self, payload, raw):
response = self.lambda_client.invoke(payload)
if raw:
return response
self._handle_response(response)
def _invoke_cruddy(self, payload, raw):
response = self.crud.handler(**payload)
if raw:
return response
self._handle_response(response)
def invoke(self, payload, raw=False):
if self.lambda_fn:
return self._invoke_lambda(payload, raw)
elif self.crud:
return self._invoke_cruddy(payload, raw)
else:
msg = 'You must specify either --lambda-fn or --config'
click.echo(click.style(msg, fg='red'))
pass_handler = click.make_pass_decorator(CLIHandler)
@click.group()
@click.option(
'--profile',
default=None,
help='AWS credential profile')
@click.option(
'--region',
default=None,
help='AWS region')
@click.option(
'--lambda-fn',
help='AWS Lambda controller name')
@click.option(
'--config',
help='cruddy config file', type=click.File('rb'))
@click.option(
'--debug/--no-debug',
default=False,
help='Turn on debugging output'
)
@click.version_option('0.11.1')
@click.pass_context
def cli(ctx, profile, region, lambda_fn, config, debug):
"""
cruddy is a CLI interface to the cruddy handler. It can be used in one
of two ways.
First, you can pass in a ``--config`` option which is a JSON file
containing all of your cruddy parameters and the CLI will create a cruddy
handler to manipulate the DynamoDB table directly.
Alternatively, you can pass in a ``--lambda-fn`` option which is the
name of an AWS Lambda function which contains a cruddy handler. In this
case the CLI will call the Lambda function to make the changes in the
underlying DynamoDB table.
"""
ctx.obj = CLIHandler(profile, region, lambda_fn, config, debug)
@cli.command()
@pass_handler
def describe(handler):
"""Describe the cruddy handler"""
data = {'operation': 'describe'}
handler.invoke(data)
@cli.command()
@pass_handler
def list(handler):
"""List the items"""
data = {'operation': 'list'}
handler.invoke(data)
@cli.command()
@click.option(
'--decrypt/--no-decrypt',
default=False,
help='Decrypt any encrypted attributes')
@click.argument('item_id', nargs=1)
@pass_handler
def get(handler, item_id, decrypt):
"""Get an item"""
data = {'operation': 'get',
'decrypt': decrypt,
'id': item_id}
handler.invoke(data)
@cli.command()
@click.option('--id-name', default='id', help='Name of id attribute')
@click.argument('item_id', nargs=1)
@pass_handler
@cli.command()
@click.argument('query', nargs=1)
@pass_handler
def bulk_delete(handler, query):
"""Perform a search and delete all items that match"""
data = {'operation': 'bulk_delete',
'query': query}
handler.invoke(data)
@cli.command()
@click.argument('query', nargs=1)
@pass_handler
def search(handler, query):
"""Perform a search"""
data = {'operation': 'search',
'query': query}
handler.invoke(data)
@cli.command()
@click.option('--increment', default=1, help='increment by this much')
@click.argument('item_id', nargs=1)
@click.argument('counter_name', nargs=1)
@pass_handler
def increment(handler, increment, item_id, counter_name):
"""Increment a counter attribute atomically"""
data = {'operation': 'increment_counter',
'id': item_id,
'counter_name': counter_name,
'increment': increment}
handler.invoke(data)
@cli.command()
@click.argument('item_document', type=click.File('rb'))
@pass_handler
def create(handler, item_document):
"""Create a new item from a JSON document"""
data = {'operation': 'create',
'item': json.load(item_document)}
handler.invoke(data)
@cli.command()
@click.option(
'--encrypt/--no-encrypt',
default=True,
help='Encrypt any encrypted attributes')
@click.argument('item_document', type=click.File('rb'))
@pass_handler
def update(handler, item_document):
"""Update an item from a JSON document"""
data = {'operation': 'update',
'encrypt': encrypt,
'item': json.load(item_document)}
handler.invoke(data)
def _build_signature_line(method_name, argspec):
arg_len = len(argspec['args'])
if argspec['defaults']:
defaults_offset = arg_len - len(argspec['defaults'])
else:
defaults_offset = 0
signature = '**{}**('.format(method_name)
params = []
for i in range(0, arg_len):
param_string = argspec['args'][i]
if argspec['defaults'] is not None:
if i >= defaults_offset:
param_string += '={}'.format(
argspec['defaults'][i - defaults_offset])
params.append(param_string)
signature += ', '.join(params)
signature += ')'
return signature
@cli.command()
@pass_handler
def help(handler):
"""
Returns a Markdown document that describes this handler and
it's operations.
"""
data = {'operation': 'describe'}
response = handler.invoke(data, raw=True)
description = response.data
lines = []
lines.append('# {}'.format(handler.lambda_fn))
lines.append('## Handler Info')
lines.append('**Cruddy version**: {}'.format(
description['cruddy_version']))
lines.append('')
lines.append('**Table name**: {}'.format(description['table_name']))
lines.append('')
lines.append('**Supported operations**:')
lines.append('')
for op in description['supported_operations']:
lines.append('* {}'.format(op))
lines.append('')
lines.append('**Prototype**:')
lines.append('')
lines.append('```')
lines.append(str(description['prototype']))
lines.append('```')
lines.append('')
lines.append('## Operations')
for op_name in description['operations']:
op = description['operations'][op_name]
lines.append('### {}'.format(op_name))
lines.append('')
lines.append(_build_signature_line(
op_name, description['operations'][op_name]['argspec']))
lines.append('')
if op['docs'] is None:
lines.append('')
else:
lines.append(op['docs'])
lines.append('')
click.echo('\n'.join(lines))
if __name__ == '__main__':
list()
|
Min-ops/cruddy
|
cruddy/scripts/cli.py
|
increment
|
python
|
def increment(handler, increment, item_id, counter_name):
data = {'operation': 'increment_counter',
'id': item_id,
'counter_name': counter_name,
'increment': increment}
handler.invoke(data)
|
Increment a counter attribute atomically
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/scripts/cli.py#L175-L181
| null |
# Copyright (c) 2016 CloudNative, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import click
from cruddy import CRUD
from cruddy.lambdaclient import LambdaClient
class CLIHandler(object):
def __init__(self, profile_name, region_name,
lambda_fn, config_file, debug=False):
self.lambda_fn = lambda_fn
self.lambda_client = None
if lambda_fn:
self.lambda_client = LambdaClient(
profile_name=profile_name, region_name=region_name,
func_name=lambda_fn, debug=debug)
if config_file:
config = json.load(config_file)
self.crud = CRUD(**config)
self.debug = debug
def _handle_response(self, response):
if response.status == 'success':
click.echo(json.dumps(response.data, indent=4))
else:
click.echo(click.style(response.status, fg='red'))
click.echo(click.style(response.error_type, fg='red'))
click.echo(click.style(response.error_message, fg='red'))
def _invoke_lambda(self, payload, raw):
response = self.lambda_client.invoke(payload)
if raw:
return response
self._handle_response(response)
def _invoke_cruddy(self, payload, raw):
response = self.crud.handler(**payload)
if raw:
return response
self._handle_response(response)
def invoke(self, payload, raw=False):
if self.lambda_fn:
return self._invoke_lambda(payload, raw)
elif self.crud:
return self._invoke_cruddy(payload, raw)
else:
msg = 'You must specify either --lambda-fn or --config'
click.echo(click.style(msg, fg='red'))
pass_handler = click.make_pass_decorator(CLIHandler)
@click.group()
@click.option(
'--profile',
default=None,
help='AWS credential profile')
@click.option(
'--region',
default=None,
help='AWS region')
@click.option(
'--lambda-fn',
help='AWS Lambda controller name')
@click.option(
'--config',
help='cruddy config file', type=click.File('rb'))
@click.option(
'--debug/--no-debug',
default=False,
help='Turn on debugging output'
)
@click.version_option('0.11.1')
@click.pass_context
def cli(ctx, profile, region, lambda_fn, config, debug):
"""
cruddy is a CLI interface to the cruddy handler. It can be used in one
of two ways.
First, you can pass in a ``--config`` option which is a JSON file
containing all of your cruddy parameters and the CLI will create a cruddy
handler to manipulate the DynamoDB table directly.
Alternatively, you can pass in a ``--lambda-fn`` option which is the
name of an AWS Lambda function which contains a cruddy handler. In this
case the CLI will call the Lambda function to make the changes in the
underlying DynamoDB table.
"""
ctx.obj = CLIHandler(profile, region, lambda_fn, config, debug)
@cli.command()
@pass_handler
def describe(handler):
"""Describe the cruddy handler"""
data = {'operation': 'describe'}
handler.invoke(data)
@cli.command()
@pass_handler
def list(handler):
"""List the items"""
data = {'operation': 'list'}
handler.invoke(data)
@cli.command()
@click.option(
'--decrypt/--no-decrypt',
default=False,
help='Decrypt any encrypted attributes')
@click.argument('item_id', nargs=1)
@pass_handler
def get(handler, item_id, decrypt):
"""Get an item"""
data = {'operation': 'get',
'decrypt': decrypt,
'id': item_id}
handler.invoke(data)
@cli.command()
@click.option('--id-name', default='id', help='Name of id attribute')
@click.argument('item_id', nargs=1)
@pass_handler
def delete(handler, item_id, id_name):
"""Delete an item"""
data = {'operation': 'delete',
'id': item_id,
'id_name': id_name}
handler.invoke(data)
@cli.command()
@click.argument('query', nargs=1)
@pass_handler
def bulk_delete(handler, query):
"""Perform a search and delete all items that match"""
data = {'operation': 'bulk_delete',
'query': query}
handler.invoke(data)
@cli.command()
@click.argument('query', nargs=1)
@pass_handler
def search(handler, query):
"""Perform a search"""
data = {'operation': 'search',
'query': query}
handler.invoke(data)
@cli.command()
@click.option('--increment', default=1, help='increment by this much')
@click.argument('item_id', nargs=1)
@click.argument('counter_name', nargs=1)
@pass_handler
@cli.command()
@click.argument('item_document', type=click.File('rb'))
@pass_handler
def create(handler, item_document):
"""Create a new item from a JSON document"""
data = {'operation': 'create',
'item': json.load(item_document)}
handler.invoke(data)
@cli.command()
@click.option(
'--encrypt/--no-encrypt',
default=True,
help='Encrypt any encrypted attributes')
@click.argument('item_document', type=click.File('rb'))
@pass_handler
def update(handler, item_document):
"""Update an item from a JSON document"""
data = {'operation': 'update',
'encrypt': encrypt,
'item': json.load(item_document)}
handler.invoke(data)
def _build_signature_line(method_name, argspec):
arg_len = len(argspec['args'])
if argspec['defaults']:
defaults_offset = arg_len - len(argspec['defaults'])
else:
defaults_offset = 0
signature = '**{}**('.format(method_name)
params = []
for i in range(0, arg_len):
param_string = argspec['args'][i]
if argspec['defaults'] is not None:
if i >= defaults_offset:
param_string += '={}'.format(
argspec['defaults'][i - defaults_offset])
params.append(param_string)
signature += ', '.join(params)
signature += ')'
return signature
@cli.command()
@pass_handler
def help(handler):
"""
Returns a Markdown document that describes this handler and
it's operations.
"""
data = {'operation': 'describe'}
response = handler.invoke(data, raw=True)
description = response.data
lines = []
lines.append('# {}'.format(handler.lambda_fn))
lines.append('## Handler Info')
lines.append('**Cruddy version**: {}'.format(
description['cruddy_version']))
lines.append('')
lines.append('**Table name**: {}'.format(description['table_name']))
lines.append('')
lines.append('**Supported operations**:')
lines.append('')
for op in description['supported_operations']:
lines.append('* {}'.format(op))
lines.append('')
lines.append('**Prototype**:')
lines.append('')
lines.append('```')
lines.append(str(description['prototype']))
lines.append('```')
lines.append('')
lines.append('## Operations')
for op_name in description['operations']:
op = description['operations'][op_name]
lines.append('### {}'.format(op_name))
lines.append('')
lines.append(_build_signature_line(
op_name, description['operations'][op_name]['argspec']))
lines.append('')
if op['docs'] is None:
lines.append('')
else:
lines.append(op['docs'])
lines.append('')
click.echo('\n'.join(lines))
if __name__ == '__main__':
list()
|
Min-ops/cruddy
|
cruddy/scripts/cli.py
|
create
|
python
|
def create(handler, item_document):
data = {'operation': 'create',
'item': json.load(item_document)}
handler.invoke(data)
|
Create a new item from a JSON document
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/scripts/cli.py#L187-L191
| null |
# Copyright (c) 2016 CloudNative, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import click
from cruddy import CRUD
from cruddy.lambdaclient import LambdaClient
class CLIHandler(object):
def __init__(self, profile_name, region_name,
lambda_fn, config_file, debug=False):
self.lambda_fn = lambda_fn
self.lambda_client = None
if lambda_fn:
self.lambda_client = LambdaClient(
profile_name=profile_name, region_name=region_name,
func_name=lambda_fn, debug=debug)
if config_file:
config = json.load(config_file)
self.crud = CRUD(**config)
self.debug = debug
def _handle_response(self, response):
if response.status == 'success':
click.echo(json.dumps(response.data, indent=4))
else:
click.echo(click.style(response.status, fg='red'))
click.echo(click.style(response.error_type, fg='red'))
click.echo(click.style(response.error_message, fg='red'))
def _invoke_lambda(self, payload, raw):
response = self.lambda_client.invoke(payload)
if raw:
return response
self._handle_response(response)
def _invoke_cruddy(self, payload, raw):
response = self.crud.handler(**payload)
if raw:
return response
self._handle_response(response)
def invoke(self, payload, raw=False):
if self.lambda_fn:
return self._invoke_lambda(payload, raw)
elif self.crud:
return self._invoke_cruddy(payload, raw)
else:
msg = 'You must specify either --lambda-fn or --config'
click.echo(click.style(msg, fg='red'))
pass_handler = click.make_pass_decorator(CLIHandler)
@click.group()
@click.option(
'--profile',
default=None,
help='AWS credential profile')
@click.option(
'--region',
default=None,
help='AWS region')
@click.option(
'--lambda-fn',
help='AWS Lambda controller name')
@click.option(
'--config',
help='cruddy config file', type=click.File('rb'))
@click.option(
'--debug/--no-debug',
default=False,
help='Turn on debugging output'
)
@click.version_option('0.11.1')
@click.pass_context
def cli(ctx, profile, region, lambda_fn, config, debug):
"""
cruddy is a CLI interface to the cruddy handler. It can be used in one
of two ways.
First, you can pass in a ``--config`` option which is a JSON file
containing all of your cruddy parameters and the CLI will create a cruddy
handler to manipulate the DynamoDB table directly.
Alternatively, you can pass in a ``--lambda-fn`` option which is the
name of an AWS Lambda function which contains a cruddy handler. In this
case the CLI will call the Lambda function to make the changes in the
underlying DynamoDB table.
"""
ctx.obj = CLIHandler(profile, region, lambda_fn, config, debug)
@cli.command()
@pass_handler
def describe(handler):
"""Describe the cruddy handler"""
data = {'operation': 'describe'}
handler.invoke(data)
@cli.command()
@pass_handler
def list(handler):
"""List the items"""
data = {'operation': 'list'}
handler.invoke(data)
@cli.command()
@click.option(
'--decrypt/--no-decrypt',
default=False,
help='Decrypt any encrypted attributes')
@click.argument('item_id', nargs=1)
@pass_handler
def get(handler, item_id, decrypt):
"""Get an item"""
data = {'operation': 'get',
'decrypt': decrypt,
'id': item_id}
handler.invoke(data)
@cli.command()
@click.option('--id-name', default='id', help='Name of id attribute')
@click.argument('item_id', nargs=1)
@pass_handler
def delete(handler, item_id, id_name):
"""Delete an item"""
data = {'operation': 'delete',
'id': item_id,
'id_name': id_name}
handler.invoke(data)
@cli.command()
@click.argument('query', nargs=1)
@pass_handler
def bulk_delete(handler, query):
"""Perform a search and delete all items that match"""
data = {'operation': 'bulk_delete',
'query': query}
handler.invoke(data)
@cli.command()
@click.argument('query', nargs=1)
@pass_handler
def search(handler, query):
"""Perform a search"""
data = {'operation': 'search',
'query': query}
handler.invoke(data)
@cli.command()
@click.option('--increment', default=1, help='increment by this much')
@click.argument('item_id', nargs=1)
@click.argument('counter_name', nargs=1)
@pass_handler
def increment(handler, increment, item_id, counter_name):
"""Increment a counter attribute atomically"""
data = {'operation': 'increment_counter',
'id': item_id,
'counter_name': counter_name,
'increment': increment}
handler.invoke(data)
@cli.command()
@click.argument('item_document', type=click.File('rb'))
@pass_handler
@cli.command()
@click.option(
'--encrypt/--no-encrypt',
default=True,
help='Encrypt any encrypted attributes')
@click.argument('item_document', type=click.File('rb'))
@pass_handler
def update(handler, item_document):
"""Update an item from a JSON document"""
data = {'operation': 'update',
'encrypt': encrypt,
'item': json.load(item_document)}
handler.invoke(data)
def _build_signature_line(method_name, argspec):
arg_len = len(argspec['args'])
if argspec['defaults']:
defaults_offset = arg_len - len(argspec['defaults'])
else:
defaults_offset = 0
signature = '**{}**('.format(method_name)
params = []
for i in range(0, arg_len):
param_string = argspec['args'][i]
if argspec['defaults'] is not None:
if i >= defaults_offset:
param_string += '={}'.format(
argspec['defaults'][i - defaults_offset])
params.append(param_string)
signature += ', '.join(params)
signature += ')'
return signature
@cli.command()
@pass_handler
def help(handler):
"""
Returns a Markdown document that describes this handler and
it's operations.
"""
data = {'operation': 'describe'}
response = handler.invoke(data, raw=True)
description = response.data
lines = []
lines.append('# {}'.format(handler.lambda_fn))
lines.append('## Handler Info')
lines.append('**Cruddy version**: {}'.format(
description['cruddy_version']))
lines.append('')
lines.append('**Table name**: {}'.format(description['table_name']))
lines.append('')
lines.append('**Supported operations**:')
lines.append('')
for op in description['supported_operations']:
lines.append('* {}'.format(op))
lines.append('')
lines.append('**Prototype**:')
lines.append('')
lines.append('```')
lines.append(str(description['prototype']))
lines.append('```')
lines.append('')
lines.append('## Operations')
for op_name in description['operations']:
op = description['operations'][op_name]
lines.append('### {}'.format(op_name))
lines.append('')
lines.append(_build_signature_line(
op_name, description['operations'][op_name]['argspec']))
lines.append('')
if op['docs'] is None:
lines.append('')
else:
lines.append(op['docs'])
lines.append('')
click.echo('\n'.join(lines))
if __name__ == '__main__':
list()
|
Min-ops/cruddy
|
cruddy/scripts/cli.py
|
update
|
python
|
def update(handler, item_document):
data = {'operation': 'update',
'encrypt': encrypt,
'item': json.load(item_document)}
handler.invoke(data)
|
Update an item from a JSON document
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/scripts/cli.py#L201-L206
| null |
# Copyright (c) 2016 CloudNative, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import click
from cruddy import CRUD
from cruddy.lambdaclient import LambdaClient
class CLIHandler(object):
def __init__(self, profile_name, region_name,
lambda_fn, config_file, debug=False):
self.lambda_fn = lambda_fn
self.lambda_client = None
if lambda_fn:
self.lambda_client = LambdaClient(
profile_name=profile_name, region_name=region_name,
func_name=lambda_fn, debug=debug)
if config_file:
config = json.load(config_file)
self.crud = CRUD(**config)
self.debug = debug
def _handle_response(self, response):
if response.status == 'success':
click.echo(json.dumps(response.data, indent=4))
else:
click.echo(click.style(response.status, fg='red'))
click.echo(click.style(response.error_type, fg='red'))
click.echo(click.style(response.error_message, fg='red'))
def _invoke_lambda(self, payload, raw):
response = self.lambda_client.invoke(payload)
if raw:
return response
self._handle_response(response)
def _invoke_cruddy(self, payload, raw):
response = self.crud.handler(**payload)
if raw:
return response
self._handle_response(response)
def invoke(self, payload, raw=False):
if self.lambda_fn:
return self._invoke_lambda(payload, raw)
elif self.crud:
return self._invoke_cruddy(payload, raw)
else:
msg = 'You must specify either --lambda-fn or --config'
click.echo(click.style(msg, fg='red'))
pass_handler = click.make_pass_decorator(CLIHandler)
@click.group()
@click.option(
'--profile',
default=None,
help='AWS credential profile')
@click.option(
'--region',
default=None,
help='AWS region')
@click.option(
'--lambda-fn',
help='AWS Lambda controller name')
@click.option(
'--config',
help='cruddy config file', type=click.File('rb'))
@click.option(
'--debug/--no-debug',
default=False,
help='Turn on debugging output'
)
@click.version_option('0.11.1')
@click.pass_context
def cli(ctx, profile, region, lambda_fn, config, debug):
"""
cruddy is a CLI interface to the cruddy handler. It can be used in one
of two ways.
First, you can pass in a ``--config`` option which is a JSON file
containing all of your cruddy parameters and the CLI will create a cruddy
handler to manipulate the DynamoDB table directly.
Alternatively, you can pass in a ``--lambda-fn`` option which is the
name of an AWS Lambda function which contains a cruddy handler. In this
case the CLI will call the Lambda function to make the changes in the
underlying DynamoDB table.
"""
ctx.obj = CLIHandler(profile, region, lambda_fn, config, debug)
@cli.command()
@pass_handler
def describe(handler):
"""Describe the cruddy handler"""
data = {'operation': 'describe'}
handler.invoke(data)
@cli.command()
@pass_handler
def list(handler):
"""List the items"""
data = {'operation': 'list'}
handler.invoke(data)
@cli.command()
@click.option(
'--decrypt/--no-decrypt',
default=False,
help='Decrypt any encrypted attributes')
@click.argument('item_id', nargs=1)
@pass_handler
def get(handler, item_id, decrypt):
"""Get an item"""
data = {'operation': 'get',
'decrypt': decrypt,
'id': item_id}
handler.invoke(data)
@cli.command()
@click.option('--id-name', default='id', help='Name of id attribute')
@click.argument('item_id', nargs=1)
@pass_handler
def delete(handler, item_id, id_name):
"""Delete an item"""
data = {'operation': 'delete',
'id': item_id,
'id_name': id_name}
handler.invoke(data)
@cli.command()
@click.argument('query', nargs=1)
@pass_handler
def bulk_delete(handler, query):
"""Perform a search and delete all items that match"""
data = {'operation': 'bulk_delete',
'query': query}
handler.invoke(data)
@cli.command()
@click.argument('query', nargs=1)
@pass_handler
def search(handler, query):
"""Perform a search"""
data = {'operation': 'search',
'query': query}
handler.invoke(data)
@cli.command()
@click.option('--increment', default=1, help='increment by this much')
@click.argument('item_id', nargs=1)
@click.argument('counter_name', nargs=1)
@pass_handler
def increment(handler, increment, item_id, counter_name):
"""Increment a counter attribute atomically"""
data = {'operation': 'increment_counter',
'id': item_id,
'counter_name': counter_name,
'increment': increment}
handler.invoke(data)
@cli.command()
@click.argument('item_document', type=click.File('rb'))
@pass_handler
def create(handler, item_document):
"""Create a new item from a JSON document"""
data = {'operation': 'create',
'item': json.load(item_document)}
handler.invoke(data)
@cli.command()
@click.option(
'--encrypt/--no-encrypt',
default=True,
help='Encrypt any encrypted attributes')
@click.argument('item_document', type=click.File('rb'))
@pass_handler
def _build_signature_line(method_name, argspec):
arg_len = len(argspec['args'])
if argspec['defaults']:
defaults_offset = arg_len - len(argspec['defaults'])
else:
defaults_offset = 0
signature = '**{}**('.format(method_name)
params = []
for i in range(0, arg_len):
param_string = argspec['args'][i]
if argspec['defaults'] is not None:
if i >= defaults_offset:
param_string += '={}'.format(
argspec['defaults'][i - defaults_offset])
params.append(param_string)
signature += ', '.join(params)
signature += ')'
return signature
@cli.command()
@pass_handler
def help(handler):
"""
Returns a Markdown document that describes this handler and
it's operations.
"""
data = {'operation': 'describe'}
response = handler.invoke(data, raw=True)
description = response.data
lines = []
lines.append('# {}'.format(handler.lambda_fn))
lines.append('## Handler Info')
lines.append('**Cruddy version**: {}'.format(
description['cruddy_version']))
lines.append('')
lines.append('**Table name**: {}'.format(description['table_name']))
lines.append('')
lines.append('**Supported operations**:')
lines.append('')
for op in description['supported_operations']:
lines.append('* {}'.format(op))
lines.append('')
lines.append('**Prototype**:')
lines.append('')
lines.append('```')
lines.append(str(description['prototype']))
lines.append('```')
lines.append('')
lines.append('## Operations')
for op_name in description['operations']:
op = description['operations'][op_name]
lines.append('### {}'.format(op_name))
lines.append('')
lines.append(_build_signature_line(
op_name, description['operations'][op_name]['argspec']))
lines.append('')
if op['docs'] is None:
lines.append('')
else:
lines.append(op['docs'])
lines.append('')
click.echo('\n'.join(lines))
if __name__ == '__main__':
list()
|
Min-ops/cruddy
|
cruddy/scripts/cli.py
|
help
|
python
|
def help(handler):
data = {'operation': 'describe'}
response = handler.invoke(data, raw=True)
description = response.data
lines = []
lines.append('# {}'.format(handler.lambda_fn))
lines.append('## Handler Info')
lines.append('**Cruddy version**: {}'.format(
description['cruddy_version']))
lines.append('')
lines.append('**Table name**: {}'.format(description['table_name']))
lines.append('')
lines.append('**Supported operations**:')
lines.append('')
for op in description['supported_operations']:
lines.append('* {}'.format(op))
lines.append('')
lines.append('**Prototype**:')
lines.append('')
lines.append('```')
lines.append(str(description['prototype']))
lines.append('```')
lines.append('')
lines.append('## Operations')
for op_name in description['operations']:
op = description['operations'][op_name]
lines.append('### {}'.format(op_name))
lines.append('')
lines.append(_build_signature_line(
op_name, description['operations'][op_name]['argspec']))
lines.append('')
if op['docs'] is None:
lines.append('')
else:
lines.append(op['docs'])
lines.append('')
click.echo('\n'.join(lines))
|
Returns a Markdown document that describes this handler and
it's operations.
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/scripts/cli.py#L231-L271
|
[
"def _build_signature_line(method_name, argspec):\n arg_len = len(argspec['args'])\n if argspec['defaults']:\n defaults_offset = arg_len - len(argspec['defaults'])\n else:\n defaults_offset = 0\n signature = '**{}**('.format(method_name)\n params = []\n for i in range(0, arg_len):\n param_string = argspec['args'][i]\n if argspec['defaults'] is not None:\n if i >= defaults_offset:\n param_string += '={}'.format(\n argspec['defaults'][i - defaults_offset])\n params.append(param_string)\n signature += ', '.join(params)\n signature += ')'\n return signature\n"
] |
# Copyright (c) 2016 CloudNative, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import click
from cruddy import CRUD
from cruddy.lambdaclient import LambdaClient
class CLIHandler(object):
def __init__(self, profile_name, region_name,
lambda_fn, config_file, debug=False):
self.lambda_fn = lambda_fn
self.lambda_client = None
if lambda_fn:
self.lambda_client = LambdaClient(
profile_name=profile_name, region_name=region_name,
func_name=lambda_fn, debug=debug)
if config_file:
config = json.load(config_file)
self.crud = CRUD(**config)
self.debug = debug
def _handle_response(self, response):
if response.status == 'success':
click.echo(json.dumps(response.data, indent=4))
else:
click.echo(click.style(response.status, fg='red'))
click.echo(click.style(response.error_type, fg='red'))
click.echo(click.style(response.error_message, fg='red'))
def _invoke_lambda(self, payload, raw):
response = self.lambda_client.invoke(payload)
if raw:
return response
self._handle_response(response)
def _invoke_cruddy(self, payload, raw):
response = self.crud.handler(**payload)
if raw:
return response
self._handle_response(response)
def invoke(self, payload, raw=False):
if self.lambda_fn:
return self._invoke_lambda(payload, raw)
elif self.crud:
return self._invoke_cruddy(payload, raw)
else:
msg = 'You must specify either --lambda-fn or --config'
click.echo(click.style(msg, fg='red'))
pass_handler = click.make_pass_decorator(CLIHandler)
@click.group()
@click.option(
'--profile',
default=None,
help='AWS credential profile')
@click.option(
'--region',
default=None,
help='AWS region')
@click.option(
'--lambda-fn',
help='AWS Lambda controller name')
@click.option(
'--config',
help='cruddy config file', type=click.File('rb'))
@click.option(
'--debug/--no-debug',
default=False,
help='Turn on debugging output'
)
@click.version_option('0.11.1')
@click.pass_context
def cli(ctx, profile, region, lambda_fn, config, debug):
"""
cruddy is a CLI interface to the cruddy handler. It can be used in one
of two ways.
First, you can pass in a ``--config`` option which is a JSON file
containing all of your cruddy parameters and the CLI will create a cruddy
handler to manipulate the DynamoDB table directly.
Alternatively, you can pass in a ``--lambda-fn`` option which is the
name of an AWS Lambda function which contains a cruddy handler. In this
case the CLI will call the Lambda function to make the changes in the
underlying DynamoDB table.
"""
ctx.obj = CLIHandler(profile, region, lambda_fn, config, debug)
@cli.command()
@pass_handler
def describe(handler):
"""Describe the cruddy handler"""
data = {'operation': 'describe'}
handler.invoke(data)
@cli.command()
@pass_handler
def list(handler):
"""List the items"""
data = {'operation': 'list'}
handler.invoke(data)
@cli.command()
@click.option(
    '--decrypt/--no-decrypt',
    default=False,
    help='Decrypt any encrypted attributes')
@click.argument('item_id', nargs=1)
@pass_handler
def get(handler, item_id, decrypt):
    """Get an item"""
    handler.invoke(
        {'operation': 'get', 'decrypt': decrypt, 'id': item_id})
@cli.command()
@click.option('--id-name', default='id', help='Name of id attribute')
@click.argument('item_id', nargs=1)
@pass_handler
def delete(handler, item_id, id_name):
    """Delete an item"""
    handler.invoke(
        {'operation': 'delete', 'id': item_id, 'id_name': id_name})
@cli.command()
@click.argument('query', nargs=1)
@pass_handler
def bulk_delete(handler, query):
    """Perform a search and delete all items that match"""
    handler.invoke({'operation': 'bulk_delete', 'query': query})
@cli.command()
@click.argument('query', nargs=1)
@pass_handler
def search(handler, query):
    """Perform a search"""
    handler.invoke({'operation': 'search', 'query': query})
@cli.command()
@click.option('--increment', default=1, help='increment by this much')
@click.argument('item_id', nargs=1)
@click.argument('counter_name', nargs=1)
@pass_handler
def increment(handler, increment, item_id, counter_name):
    """Increment a counter attribute atomically"""
    handler.invoke({
        'operation': 'increment_counter',
        'id': item_id,
        'counter_name': counter_name,
        'increment': increment,
    })
@cli.command()
@click.argument('item_document', type=click.File('rb'))
@pass_handler
def create(handler, item_document):
    """Create a new item from a JSON document"""
    handler.invoke(
        {'operation': 'create', 'item': json.load(item_document)})
@cli.command()
@click.option(
    '--encrypt/--no-encrypt',
    default=True,
    help='Encrypt any encrypted attributes')
@click.argument('item_document', type=click.File('rb'))
@pass_handler
def update(handler, item_document, encrypt):
    """Update an item from a JSON document"""
    # BUG FIX: ``encrypt`` was referenced in the payload but the function
    # never accepted it as a parameter, even though the
    # --encrypt/--no-encrypt option is declared above, so invoking the
    # command failed at runtime.
    data = {'operation': 'update',
            'encrypt': encrypt,
            'item': json.load(item_document)}
    handler.invoke(data)
def _build_signature_line(method_name, argspec):
arg_len = len(argspec['args'])
if argspec['defaults']:
defaults_offset = arg_len - len(argspec['defaults'])
else:
defaults_offset = 0
signature = '**{}**('.format(method_name)
params = []
for i in range(0, arg_len):
param_string = argspec['args'][i]
if argspec['defaults'] is not None:
if i >= defaults_offset:
param_string += '={}'.format(
argspec['defaults'][i - defaults_offset])
params.append(param_string)
signature += ', '.join(params)
signature += ')'
return signature
# NOTE(review): the original carried two dangling decorators
# (@cli.command() / @pass_handler) directly above this guard — a
# SyntaxError; the decorated command body was apparently lost.  The
# decorators are removed and the script entry point dispatches to the
# ``cli`` click group, which routes to all subcommands.
if __name__ == '__main__':
    cli()
|
Min-ops/cruddy
|
cruddy/lambdaclient.py
|
LambdaClient.call_operation
|
python
|
def call_operation(self, operation, **kwargs):
data = {'operation': operation}
data.update(kwargs)
return self.invoke(data)
|
A generic method to call any operation supported by the Lambda handler
|
train
|
https://github.com/Min-ops/cruddy/blob/b9ba3dda1757e1075bc1c62a6f43473eea27de41/cruddy/lambdaclient.py#L62-L68
|
[
"def invoke(self, payload):\n try:\n response = self._lambda_client.invoke(\n FunctionName=self.func_name,\n InvocationType='RequestResponse',\n Payload=json.dumps(payload)\n )\n LOG.debug('response: %s', response)\n if response.get('StatusCode') == 200:\n payload = response['Payload'].read()\n LOG.debug('response.payload: %s', payload)\n try:\n response = json.loads(payload)\n except ValueError:\n # Probably a plain text response, or an error...\n response = payload\n print(response)\n return CRUDResponse(response_data=response)\n else:\n LOG.error('Call to lambda function %s failed', self.func_name)\n LOG.error(response.get('FunctionError'))\n LOG.error(response.get('ResponseMetadata'))\n return False\n except botocore.exceptions.ClientError:\n LOG.exception('Could not call Lambda function %s', self.func_name)\n raise\n"
] |
class LambdaClient(object):
    """Client that forwards cruddy operations to an AWS Lambda function
    and wraps each result in a CRUDResponse."""

    def __init__(self, func_name, profile_name=None,
                 region_name=None, **kwargs):
        self.func_name = func_name
        self._lambda_client = None
        aws_session = boto3.Session(
            profile_name=profile_name, region_name=region_name)
        self._lambda_client = aws_session.client('lambda')

    def invoke(self, payload):
        """Synchronously invoke the Lambda function with *payload*."""
        try:
            raw = self._lambda_client.invoke(
                FunctionName=self.func_name,
                InvocationType='RequestResponse',
                Payload=json.dumps(payload)
            )
            LOG.debug('response: %s', raw)
            if raw.get('StatusCode') != 200:
                LOG.error('Call to lambda function %s failed', self.func_name)
                LOG.error(raw.get('FunctionError'))
                LOG.error(raw.get('ResponseMetadata'))
                return False
            body = raw['Payload'].read()
            LOG.debug('response.payload: %s', body)
            try:
                parsed = json.loads(body)
            except ValueError:
                # Probably a plain text response, or an error...
                parsed = body
            print(parsed)
            return CRUDResponse(response_data=parsed)
        except botocore.exceptions.ClientError:
            LOG.exception('Could not call Lambda function %s', self.func_name)
            raise

    def _call(self, operation, base, kwargs):
        # Assemble the payload: operation first, then the fixed keys for
        # this call, then caller-supplied overrides (kwargs win).
        payload = {'operation': operation}
        payload.update(base)
        payload.update(kwargs)
        return self.invoke(payload)

    def ping(self, **kwargs):
        return self._call('describe', {}, kwargs)

    def describe(self, **kwargs):
        return self._call('describe', {}, kwargs)

    def help(self, **kwargs):
        return self._call('help', {}, kwargs)

    def list(self, **kwargs):
        return self._call('list', {}, kwargs)

    def get(self, item_id, **kwargs):
        return self._call('get', {'id': item_id}, kwargs)

    def create(self, item, **kwargs):
        return self._call('create', {'item': item}, kwargs)

    def update(self, item, **kwargs):
        # 'encrypt' defaults to True unless the caller overrides it.
        base = {'item': item, 'encrypt': kwargs.get('encrypt', True)}
        return self._call('update', base, kwargs)

    def delete(self, item_id, **kwargs):
        return self._call('delete', {'id': item_id}, kwargs)

    def bulk_delete(self, query, **kwargs):
        return self._call('bulk_delete', {'query': query}, kwargs)

    def search(self, query, **kwargs):
        return self._call('search', {'query': query}, kwargs)

    def increment(self, item_id, counter_name, **kwargs):
        # The id attribute name is configurable via the 'id_name' kwarg.
        base = {
            kwargs.get('id_name', 'id'): item_id,
            'counter_name': counter_name,
            'increment': kwargs.get('increment', 1),
        }
        return self._call('increment_counter', base, kwargs)
|
aroberge/experimental
|
experimental/core/console.py
|
start_console
|
python
|
def start_console(local_vars=None):
    """Start the experimental console; modified from ``code.interact``.

    Activates console mode for the transform machinery, disables
    transformers not allowed in the console, and runs the REPL with
    *local_vars* as its namespace.
    """
    # BUG FIX: the original used a mutable default argument ({}), which
    # is created once and shared across every call; use None and build a
    # fresh dict per invocation instead.
    if local_vars is None:
        local_vars = {}
    transforms.CONSOLE_ACTIVE = True
    transforms.remove_not_allowed_in_console()
    sys.ps1 = prompt
    console = ExperimentalInteractiveConsole(locals=local_vars)
    console.interact(banner=banner)
|
Starts a console; modified from code.interact
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/core/console.py#L66-L72
|
[
"def remove_not_allowed_in_console():\n '''This function should be called from the console, when it starts.\n\n Some transformers are not allowed in the console and they could have\n been loaded prior to the console being activated. We effectively remove them\n and print an information message specific to that transformer\n as written in the transformer module.\n\n '''\n not_allowed_in_console = []\n if CONSOLE_ACTIVE:\n for name in transformers:\n tr_module = import_transformer(name)\n if hasattr(tr_module, \"NO_CONSOLE\"):\n not_allowed_in_console.append((name, tr_module))\n for name, tr_module in not_allowed_in_console:\n print(tr_module.NO_CONSOLE)\n # Note: we do not remove them, so as to avoid seeing the\n # information message displayed again if an attempt is\n # made to re-import them from a console instruction.\n transformers[name] = NullTransformer()\n"
] |
#pylint: disable=W0102, C0103
import code
import platform
import os
import sys
from . import transforms
from .. import version
# define banner and prompt here so that they can be imported in tests
banner = "experimental console version {}. [Python version: {}]\n".format(
version.__version__, platform.python_version())
prompt = "~~> "
class ExperimentalInteractiveConsole(code.InteractiveConsole):
    '''A Python console that emulates the normal Python interpreter
    except that it support experimental code transformations.'''

    def push(self, line):
        """Transform and push a line to the interpreter.
        The line should not have a trailing newline; it may have
        internal newlines. The line is appended to a buffer and the
        interpreter's runsource() method is called with the
        concatenated contents of the buffer as source. If this
        indicates that the command was executed or invalid, the buffer
        is reset; otherwise, the command is incomplete, and the buffer
        is left as it was after the line was appended. The return
        value is 1 if more input is required, 0 if the line was dealt
        with in some way (this is the same as runsource()).
        """
        if transforms.FROM_EXPERIMENTAL.match(line):
            # A "from __experimental__ import ..." line activates the
            # named transformers; a blank line is buffered in its place
            # so the accumulated source keeps its shape.
            transforms.add_transformers(line)
            self.buffer.append("\n")
        else:
            self.buffer.append(line)
        # A line ending in ':' opens a block; temporarily append "pass"
        # so the (possibly non-standard) source forms a complete unit
        # the transformers can parse.
        add_pass = False
        if line.rstrip(' ').endswith(":"):
            add_pass = True
        source = "\n".join(self.buffer)
        if add_pass:
            source += "pass"
        source = transforms.transform(source)
        if add_pass:
            # Strip the temporary "pass" again, tolerating trailing
            # spaces a transformer may have introduced.
            source = source.rstrip(' ')
            if source.endswith("pass"):
                source = source[:-4]
        # some transformations may strip an empty line meant to end a block
        if not self.buffer[-1]:
            source += "\n"
        try:
            more = self.runsource(source, self.filename)
        except SystemExit:
            # NOTE(review): os._exit skips cleanup handlers; presumably
            # deliberate to leave the console immediately -- confirm.
            os._exit(1)
        if not more:
            self.resetbuffer()
        return more
|
aroberge/experimental
|
experimental/core/console.py
|
ExperimentalInteractiveConsole.push
|
python
|
def push(self, line):
if transforms.FROM_EXPERIMENTAL.match(line):
transforms.add_transformers(line)
self.buffer.append("\n")
else:
self.buffer.append(line)
add_pass = False
if line.rstrip(' ').endswith(":"):
add_pass = True
source = "\n".join(self.buffer)
if add_pass:
source += "pass"
source = transforms.transform(source)
if add_pass:
source = source.rstrip(' ')
if source.endswith("pass"):
source = source[:-4]
# some transformations may strip an empty line meant to end a block
if not self.buffer[-1]:
source += "\n"
try:
more = self.runsource(source, self.filename)
except SystemExit:
os._exit(1)
if not more:
self.resetbuffer()
return more
|
Transform and push a line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is appended to a buffer and the
interpreter's runsource() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is 1 if more input is required, 0 if the line was dealt
with in some way (this is the same as runsource()).
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/core/console.py#L21-L63
| null |
class ExperimentalInteractiveConsole(code.InteractiveConsole):
'''A Python console that emulates the normal Python interpreter
except that it support experimental code transformations.'''
|
aroberge/experimental
|
experimental/core/transforms.py
|
add_transformers
|
python
|
def add_transformers(line):
    '''Extract the transformer names from a line of code of the form
    from __experimental__ import transformer1 [,...]
    and add each one to the globally known dict.
    '''
    assert FROM_EXPERIMENTAL.match(line)

    # Strip the "from __experimental__ import" prefix, then any trailing
    # end-of-line comment, leaving only the comma-separated names.
    remainder = FROM_EXPERIMENTAL.sub(' ', line)
    remainder = remainder.split("#")[0]
    for name in remainder.replace(' ', '').split(','):
        import_transformer(name)
|
Extract the transformers names from a line of code of the form
from __experimental__ import transformer1 [,...]
and adds them to the globally known dict
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/core/transforms.py#L19-L31
|
[
"def import_transformer(name):\n '''If needed, import a transformer, and adds it to the globally known dict\n The code inside a module where a transformer is defined should be\n standard Python code, which does not need any transformation.\n So, we disable the import hook, and let the normal module import\n do its job - which is faster and likely more reliable than our\n custom method.\n '''\n if name in transformers:\n return transformers[name]\n\n # We are adding a transformer built from normal/standard Python code.\n # As we are not performing transformations, we temporarily disable\n # our import hook, both to avoid potential problems AND because we\n # found that this resulted in much faster code.\n hook = sys.meta_path[0]\n sys.meta_path = sys.meta_path[1:]\n try:\n transformers[name] = __import__(name)\n # Some transformers are not allowed in the console.\n # If an attempt is made to activate one of them in the console,\n # we replace it by a transformer that does nothing and print a\n # message specific to that transformer as written in its module.\n if CONSOLE_ACTIVE:\n if hasattr(transformers[name], \"NO_CONSOLE\"):\n print(transformers[name].NO_CONSOLE)\n transformers[name] = NullTransformer()\n except ImportError:\n sys.stderr.write(\"Warning: Import Error in add_transformers: %s not found\\n\" % name)\n transformers[name] = NullTransformer()\n except Exception as e:\n sys.stderr.write(\"Unexpected exception in transforms.import_transformer%s\\n \" %\n e.__class__.__name__)\n finally:\n sys.meta_path.insert(0, hook) # restore import hook\n\n return transformers[name]\n"
] |
#pylint: disable=W1401, C0103, W0703
'''This module takes care of identifying, importing and adding source
code transformers. It also contains a function, `transform`, which
takes care of invoking all known transformers to convert a source code.
'''
import re
import sys
# Raw string for the regex: the original used a plain string with \s
# escapes (pylint W1401, suppressed instead of fixed), which is a
# DeprecationWarning/SyntaxWarning on modern Python.
FROM_EXPERIMENTAL = re.compile(r"(^from\s+__experimental__\s+import\s+)")
CONSOLE_ACTIVE = False  # changed by console.start_console()
class NullTransformer:
    '''NullTransformer is a convenience class which can generate instances
    to be used when a given transformer cannot be imported.'''

    def transform_source(self, source): #pylint: disable=I0011, R0201, C0111
        # Identity transform: hand the source back unchanged.
        return source
transformers = {}
def import_transformer(name):
    '''If needed, import a transformer, and add it to the globally known dict.

    The code inside a module where a transformer is defined should be
    standard Python code, which does not need any transformation.
    So, we disable the import hook, and let the normal module import
    do its job - which is faster and likely more reliable than our
    custom method.
    '''
    if name in transformers:
        return transformers[name]

    # We are adding a transformer built from normal/standard Python code.
    # As we are not performing transformations, we temporarily disable
    # our import hook, both to avoid potential problems AND because we
    # found that this resulted in much faster code.
    hook = sys.meta_path[0]
    sys.meta_path = sys.meta_path[1:]
    try:
        transformers[name] = __import__(name)
        # Some transformers are not allowed in the console.
        # If an attempt is made to activate one of them in the console,
        # we replace it by a transformer that does nothing and print a
        # message specific to that transformer as written in its module.
        if CONSOLE_ACTIVE:
            if hasattr(transformers[name], "NO_CONSOLE"):
                print(transformers[name].NO_CONSOLE)
                transformers[name] = NullTransformer()
    except ImportError:
        sys.stderr.write("Warning: Import Error in add_transformers: %s not found\n" % name)
        transformers[name] = NullTransformer()
    except Exception as e:
        # BUG FIX: the original did not register a fallback here, so the
        # final lookup below raised KeyError, masking the real failure.
        # Also repaired the mangled format string (missing separator).
        sys.stderr.write("Unexpected exception in transforms.import_transformer: %s\n" %
                         e.__class__.__name__)
        transformers[name] = NullTransformer()
    finally:
        sys.meta_path.insert(0, hook)  # restore import hook

    return transformers[name]
def extract_transformers_from_source(source):
    '''Scan a source for lines of the form
    from __experimental__ import transformer1 [,...]
    identifying transformers to be used. Such a line is passed to
    add_transformers, after which it is removed from the code to be
    executed.
    '''
    kept = []
    for line in source.split('\n'):
        if FROM_EXPERIMENTAL.match(line):
            # Activate the named transformers and drop the "fake"
            # import from the executable source.
            add_transformers(line)
        else:
            kept.append(line)
    return '\n'.join(kept)
def remove_not_allowed_in_console():
    '''This function should be called from the console, when it starts.

    Some transformers are not allowed in the console and they could have
    been loaded before the console was activated. We effectively disable
    them and print the information message specific to each transformer
    as written in its module.
    '''
    banned = []
    if CONSOLE_ACTIVE:
        for name in transformers:
            module = import_transformer(name)
            if hasattr(module, "NO_CONSOLE"):
                banned.append((name, module))
    for name, module in banned:
        print(module.NO_CONSOLE)
        # Note: we do not remove them, so as to avoid seeing the
        # information message displayed again if an attempt is
        # made to re-import them from a console instruction.
        transformers[name] = NullTransformer()
def transform(source):
    '''Used to convert the source code, making use of known transformers.

    "transformers" are modules which must contain a function
        transform_source(source)
    which returns a transformed source.
    Some transformers (for example, those found in the standard library
    module lib2to3) cannot cope with non-standard syntax; as a result, they
    may fail during a first attempt. We keep track of all failing
    transformers and keep retrying them until either they all succeeded
    or a fixed set of them fails twice in a row.
    '''
    source = extract_transformers_from_source(source)

    # Some transformers fail when multiple non-Python constructs
    # are present. So, we loop multiple times keeping track of
    # which transformations have been unsuccessfully performed.
    # NOTE(review): not_done aliases the global transformers dict on the
    # first pass; the loop relies on no new transformer being registered
    # while it runs -- confirm.
    not_done = transformers
    while True:
        failed = {}
        for name in not_done:
            tr_module = import_transformer(name)
            try:
                source = tr_module.transform_source(source)
            except Exception as e:
                failed[name] = tr_module
                # from traceback import print_exc
                # print("Unexpected exception in transforms.transform",
                #       e.__class__.__name__)
                # print_exc()
        if not failed:
            break
        # Insanity is doing the same thing over and over again and
        # expecting different results...
        # If the exact same set of transformations fails twice in a row,
        # there is no point in trying a third time.
        if failed == not_done:
            print("Warning: the following transforms could not be done:")
            for key in failed:
                print(key)
            break
        not_done = failed  # attempt another pass
    return source
|
aroberge/experimental
|
experimental/core/transforms.py
|
import_transformer
|
python
|
def import_transformer(name):
'''If needed, import a transformer, and adds it to the globally known dict
The code inside a module where a transformer is defined should be
standard Python code, which does not need any transformation.
So, we disable the import hook, and let the normal module import
do its job - which is faster and likely more reliable than our
custom method.
'''
if name in transformers:
return transformers[name]
# We are adding a transformer built from normal/standard Python code.
# As we are not performing transformations, we temporarily disable
# our import hook, both to avoid potential problems AND because we
# found that this resulted in much faster code.
hook = sys.meta_path[0]
sys.meta_path = sys.meta_path[1:]
try:
transformers[name] = __import__(name)
# Some transformers are not allowed in the console.
# If an attempt is made to activate one of them in the console,
# we replace it by a transformer that does nothing and print a
# message specific to that transformer as written in its module.
if CONSOLE_ACTIVE:
if hasattr(transformers[name], "NO_CONSOLE"):
print(transformers[name].NO_CONSOLE)
transformers[name] = NullTransformer()
except ImportError:
sys.stderr.write("Warning: Import Error in add_transformers: %s not found\n" % name)
transformers[name] = NullTransformer()
except Exception as e:
sys.stderr.write("Unexpected exception in transforms.import_transformer%s\n " %
e.__class__.__name__)
finally:
sys.meta_path.insert(0, hook) # restore import hook
return transformers[name]
|
If needed, import a transformer, and adds it to the globally known dict
The code inside a module where a transformer is defined should be
standard Python code, which does not need any transformation.
So, we disable the import hook, and let the normal module import
do its job - which is faster and likely more reliable than our
custom method.
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/core/transforms.py#L34-L70
| null |
#pylint: disable=W1401, C0103, W0703
'''This module takes care of identifying, importing and adding source
code transformers. It also contains a function, `transform`, which
takes care of invoking all known transformers to convert a source code.
'''
import re
import sys
FROM_EXPERIMENTAL = re.compile("(^from\s+__experimental__\s+import\s+)")
CONSOLE_ACTIVE = False # changed by console.start_console()
class NullTransformer:
'''NullTransformer is a convenience class which can generate instances
to be used when a given transformer cannot be imported.'''
def transform_source(self, source): #pylint: disable=I0011, R0201, C0111
return source
transformers = {}
def add_transformers(line):
'''Extract the transformers names from a line of code of the form
from __experimental__ import transformer1 [,...]
and adds them to the globally known dict
'''
assert FROM_EXPERIMENTAL.match(line)
line = FROM_EXPERIMENTAL.sub(' ', line)
# we now have: " transformer1 [,...]"
line = line.split("#")[0] # remove any end of line comments
# and insert each transformer as an item in a list
for trans in line.replace(' ', '').split(','):
import_transformer(trans)
def extract_transformers_from_source(source):
'''Scan a source for lines of the form
from __experimental__ import transformer1 [,...]
identifying transformers to be used. Such line is passed to the
add_transformer function, after which it is removed from the
code to be executed.
'''
lines = source.split('\n')
linenumbers = []
for number, line in enumerate(lines):
if FROM_EXPERIMENTAL.match(line):
add_transformers(line)
linenumbers.insert(0, number)
# drop the "fake" import from the source code
for number in linenumbers:
del lines[number]
return '\n'.join(lines)
def remove_not_allowed_in_console():
'''This function should be called from the console, when it starts.
Some transformers are not allowed in the console and they could have
been loaded prior to the console being activated. We effectively remove them
and print an information message specific to that transformer
as written in the transformer module.
'''
not_allowed_in_console = []
if CONSOLE_ACTIVE:
for name in transformers:
tr_module = import_transformer(name)
if hasattr(tr_module, "NO_CONSOLE"):
not_allowed_in_console.append((name, tr_module))
for name, tr_module in not_allowed_in_console:
print(tr_module.NO_CONSOLE)
# Note: we do not remove them, so as to avoid seeing the
# information message displayed again if an attempt is
# made to re-import them from a console instruction.
transformers[name] = NullTransformer()
def transform(source):
'''Used to convert the source code, making use of known transformers.
"transformers" are modules which must contain a function
transform_source(source)
which returns a tranformed source.
Some transformers (for example, those found in the standard library
module lib2to3) cannot cope with non-standard syntax; as a result, they
may fail during a first attempt. We keep track of all failing
transformers and keep retrying them until either they all succeeded
or a fixed set of them fails twice in a row.
'''
source = extract_transformers_from_source(source)
# Some transformer fail when multiple non-Python constructs
# are present. So, we loop multiple times keeping track of
# which transformations have been unsuccessfully performed.
not_done = transformers
while True:
failed = {}
for name in not_done:
tr_module = import_transformer(name)
try:
source = tr_module.transform_source(source)
except Exception as e:
failed[name] = tr_module
# from traceback import print_exc
# print("Unexpected exception in transforms.transform",
# e.__class__.__name__)
# print_exc()
if not failed:
break
# Insanity is doing the same Tting over and overaAgain and
# expecting different results ...
# If the exact same set of transformations are not performed
# twice in a row, there is no point in trying out a third time.
if failed == not_done:
print("Warning: the following transforms could not be done:")
for key in failed:
print(key)
break
not_done = failed # attempt another pass
return source
|
aroberge/experimental
|
experimental/core/transforms.py
|
extract_transformers_from_source
|
python
|
def extract_transformers_from_source(source):
'''Scan a source for lines of the form
from __experimental__ import transformer1 [,...]
identifying transformers to be used. Such line is passed to the
add_transformer function, after which it is removed from the
code to be executed.
'''
lines = source.split('\n')
linenumbers = []
for number, line in enumerate(lines):
if FROM_EXPERIMENTAL.match(line):
add_transformers(line)
linenumbers.insert(0, number)
# drop the "fake" import from the source code
for number in linenumbers:
del lines[number]
return '\n'.join(lines)
|
Scan a source for lines of the form
from __experimental__ import transformer1 [,...]
identifying transformers to be used. Such line is passed to the
add_transformer function, after which it is removed from the
code to be executed.
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/core/transforms.py#L72-L89
|
[
"def add_transformers(line):\n '''Extract the transformers names from a line of code of the form\n from __experimental__ import transformer1 [,...]\n and adds them to the globally known dict\n '''\n assert FROM_EXPERIMENTAL.match(line)\n\n line = FROM_EXPERIMENTAL.sub(' ', line)\n # we now have: \" transformer1 [,...]\"\n line = line.split(\"#\")[0] # remove any end of line comments\n # and insert each transformer as an item in a list\n for trans in line.replace(' ', '').split(','):\n import_transformer(trans)\n"
] |
#pylint: disable=W1401, C0103, W0703
'''This module takes care of identifying, importing and adding source
code transformers. It also contains a function, `transform`, which
takes care of invoking all known transformers to convert a source code.
'''
import re
import sys
FROM_EXPERIMENTAL = re.compile("(^from\s+__experimental__\s+import\s+)")
CONSOLE_ACTIVE = False # changed by console.start_console()
class NullTransformer:
'''NullTransformer is a convenience class which can generate instances
to be used when a given transformer cannot be imported.'''
def transform_source(self, source): #pylint: disable=I0011, R0201, C0111
return source
transformers = {}
def add_transformers(line):
'''Extract the transformers names from a line of code of the form
from __experimental__ import transformer1 [,...]
and adds them to the globally known dict
'''
assert FROM_EXPERIMENTAL.match(line)
line = FROM_EXPERIMENTAL.sub(' ', line)
# we now have: " transformer1 [,...]"
line = line.split("#")[0] # remove any end of line comments
# and insert each transformer as an item in a list
for trans in line.replace(' ', '').split(','):
import_transformer(trans)
def import_transformer(name):
'''If needed, import a transformer, and adds it to the globally known dict
The code inside a module where a transformer is defined should be
standard Python code, which does not need any transformation.
So, we disable the import hook, and let the normal module import
do its job - which is faster and likely more reliable than our
custom method.
'''
if name in transformers:
return transformers[name]
# We are adding a transformer built from normal/standard Python code.
# As we are not performing transformations, we temporarily disable
# our import hook, both to avoid potential problems AND because we
# found that this resulted in much faster code.
hook = sys.meta_path[0]
sys.meta_path = sys.meta_path[1:]
try:
transformers[name] = __import__(name)
# Some transformers are not allowed in the console.
# If an attempt is made to activate one of them in the console,
# we replace it by a transformer that does nothing and print a
# message specific to that transformer as written in its module.
if CONSOLE_ACTIVE:
if hasattr(transformers[name], "NO_CONSOLE"):
print(transformers[name].NO_CONSOLE)
transformers[name] = NullTransformer()
except ImportError:
sys.stderr.write("Warning: Import Error in add_transformers: %s not found\n" % name)
transformers[name] = NullTransformer()
except Exception as e:
sys.stderr.write("Unexpected exception in transforms.import_transformer%s\n " %
e.__class__.__name__)
finally:
sys.meta_path.insert(0, hook) # restore import hook
return transformers[name]
def remove_not_allowed_in_console():
'''This function should be called from the console, when it starts.
Some transformers are not allowed in the console and they could have
been loaded prior to the console being activated. We effectively remove them
and print an information message specific to that transformer
as written in the transformer module.
'''
not_allowed_in_console = []
if CONSOLE_ACTIVE:
for name in transformers:
tr_module = import_transformer(name)
if hasattr(tr_module, "NO_CONSOLE"):
not_allowed_in_console.append((name, tr_module))
for name, tr_module in not_allowed_in_console:
print(tr_module.NO_CONSOLE)
# Note: we do not remove them, so as to avoid seeing the
# information message displayed again if an attempt is
# made to re-import them from a console instruction.
transformers[name] = NullTransformer()
def transform(source):
'''Used to convert the source code, making use of known transformers.
"transformers" are modules which must contain a function
transform_source(source)
which returns a tranformed source.
Some transformers (for example, those found in the standard library
module lib2to3) cannot cope with non-standard syntax; as a result, they
may fail during a first attempt. We keep track of all failing
transformers and keep retrying them until either they all succeeded
or a fixed set of them fails twice in a row.
'''
source = extract_transformers_from_source(source)
# Some transformer fail when multiple non-Python constructs
# are present. So, we loop multiple times keeping track of
# which transformations have been unsuccessfully performed.
not_done = transformers
while True:
failed = {}
for name in not_done:
tr_module = import_transformer(name)
try:
source = tr_module.transform_source(source)
except Exception as e:
failed[name] = tr_module
# from traceback import print_exc
# print("Unexpected exception in transforms.transform",
# e.__class__.__name__)
# print_exc()
if not failed:
break
# Insanity is doing the same Tting over and overaAgain and
# expecting different results ...
# If the exact same set of transformations are not performed
# twice in a row, there is no point in trying out a third time.
if failed == not_done:
print("Warning: the following transforms could not be done:")
for key in failed:
print(key)
break
not_done = failed # attempt another pass
return source
|
aroberge/experimental
|
experimental/core/transforms.py
|
remove_not_allowed_in_console
|
python
|
def remove_not_allowed_in_console():
    '''This function should be called from the console, when it starts.

    Some transformers are not allowed in the console and they could have
    been loaded prior to the console being activated.  Each offending
    transformer is neutralized (replaced by a NullTransformer) and the
    information message written in its module is printed once.
    '''
    if not CONSOLE_ACTIVE:
        return
    # Collect the offenders first: we must not replace entries while
    # iterating over the transformers dict.
    flagged = [(name, import_transformer(name)) for name in transformers]
    flagged = [(name, mod) for (name, mod) in flagged
               if hasattr(mod, "NO_CONSOLE")]
    for name, mod in flagged:
        print(mod.NO_CONSOLE)
        # Note: the entry is replaced rather than deleted, so as to avoid
        # seeing the information message displayed again if an attempt is
        # made to re-import the transformer from a console instruction.
        transformers[name] = NullTransformer()
|
This function should be called from the console, when it starts.
Some transformers are not allowed in the console and they could have
been loaded prior to the console being activated. We effectively remove them
and print an information message specific to that transformer
as written in the transformer module.
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/core/transforms.py#L91-L111
| null |
#pylint: disable=W1401, C0103, W0703
'''This module takes care of identifying, importing and adding source
code transformers. It also contains a function, `transform`, which
takes care of invoking all known transformers to convert a source code.
'''
import re
import sys
FROM_EXPERIMENTAL = re.compile("(^from\s+__experimental__\s+import\s+)")
CONSOLE_ACTIVE = False # changed by console.start_console()
class NullTransformer:
    '''Convenience stand-in used whenever a requested transformer cannot
    be imported (or is not allowed); it leaves the source untouched.'''

    def transform_source(self, source):  # pylint: disable=I0011, R0201, C0111
        # Identity transformation: hand the source back unchanged.
        return source
transformers = {}
def add_transformers(line):
    '''Extract the transformers names from a line of code of the form

        from __experimental__ import transformer1 [,...]

    and adds them to the globally known dict.
    '''
    assert FROM_EXPERIMENTAL.match(line)
    # Strip the "from __experimental__ import" prefix, then drop any
    # trailing end-of-line comment; what remains is a comma-separated
    # list of transformer names.
    remainder = FROM_EXPERIMENTAL.sub(' ', line).split("#")[0]
    for name in remainder.replace(' ', '').split(','):
        import_transformer(name)
def import_transformer(name):
    '''If needed, import a transformer, and adds it to the globally known dict

    The code inside a module where a transformer is defined should be
    standard Python code, which does not need any transformation.
    So, we disable the import hook, and let the normal module import
    do its job - which is faster and likely more reliable than our
    custom method.
    '''
    if name in transformers:
        return transformers[name]  # cached: already imported
    # We are adding a transformer built from normal/standard Python code.
    # As we are not performing transformations, we temporarily disable
    # our import hook, both to avoid potential problems AND because we
    # found that this resulted in much faster code.
    hook = sys.meta_path[0]
    sys.meta_path = sys.meta_path[1:]
    try:
        transformers[name] = __import__(name)
        # Some transformers are not allowed in the console.
        # If an attempt is made to activate one of them in the console,
        # we replace it by a transformer that does nothing and print a
        # message specific to that transformer as written in its module.
        if CONSOLE_ACTIVE and hasattr(transformers[name], "NO_CONSOLE"):
            print(transformers[name].NO_CONSOLE)
            transformers[name] = NullTransformer()
    except ImportError:
        sys.stderr.write("Warning: Import Error in add_transformers: %s not found\n" % name)
        transformers[name] = NullTransformer()
    except Exception as e:
        sys.stderr.write("Unexpected exception in transforms.import_transformer %s\n" %
                         e.__class__.__name__)
        # Bug fix: without this assignment, the return below would raise
        # KeyError, masking the original problem reported above.
        transformers[name] = NullTransformer()
    finally:
        sys.meta_path.insert(0, hook)  # restore import hook
    return transformers[name]
def extract_transformers_from_source(source):
    '''Scan a source for lines of the form

        from __experimental__ import transformer1 [,...]

    identifying transformers to be used.  Such a line is passed to the
    add_transformers function, after which it is removed from the
    code to be executed.
    '''
    kept = []
    for line in source.split('\n'):
        if FROM_EXPERIMENTAL.match(line):
            # Register the requested transformers, then drop the "fake"
            # import line so Python never sees it.
            add_transformers(line)
        else:
            kept.append(line)
    return '\n'.join(kept)
def transform(source):
    '''Used to convert the source code, making use of known transformers.
    "transformers" are modules which must contain a function
    transform_source(source)
    which returns a transformed source.
    Some transformers (for example, those found in the standard library
    module lib2to3) cannot cope with non-standard syntax; as a result, they
    may fail during a first attempt. We keep track of all failing
    transformers and keep retrying them until either they all succeeded
    or a fixed set of them fails twice in a row.
    '''
    source = extract_transformers_from_source(source)
    # Some transformers fail when multiple non-Python constructs
    # are present. So, we loop multiple times keeping track of
    # which transformations have been unsuccessfully performed.
    not_done = transformers
    while True:
        failed = {}
        for name in not_done:
            tr_module = import_transformer(name)
            try:
                source = tr_module.transform_source(source)
            except Exception as e:
                # Remember the failure; it may succeed on a later pass once
                # another transformer has removed the offending syntax.
                failed[name] = tr_module
        if not failed:
            break
        # Insanity is doing the same thing over and over again and
        # expecting different results ...
        # If the exact same set of transformations are not performed
        # twice in a row, there is no point in trying out a third time.
        if failed == not_done:
            print("Warning: the following transforms could not be done:")
            for key in failed:
                print(key)
            break
        not_done = failed  # attempt another pass
    return source
|
aroberge/experimental
|
experimental/core/transforms.py
|
transform
|
python
|
def transform(source):
    '''Used to convert the source code, making use of known transformers.
    "transformers" are modules which must contain a function
    transform_source(source)
    which returns a transformed source.
    Some transformers (for example, those found in the standard library
    module lib2to3) cannot cope with non-standard syntax; as a result, they
    may fail during a first attempt. We keep track of all failing
    transformers and keep retrying them until either they all succeeded
    or a fixed set of them fails twice in a row.
    '''
    source = extract_transformers_from_source(source)
    # Some transformers fail when multiple non-Python constructs
    # are present. So, we loop multiple times keeping track of
    # which transformations have been unsuccessfully performed.
    not_done = transformers
    while True:
        failed = {}
        for name in not_done:
            tr_module = import_transformer(name)
            try:
                source = tr_module.transform_source(source)
            except Exception as e:
                # Remember the failure; it may succeed on a later pass once
                # another transformer has removed the offending syntax.
                failed[name] = tr_module
        if not failed:
            break
        # Insanity is doing the same thing over and over again and
        # expecting different results ...
        # If the exact same set of transformations are not performed
        # twice in a row, there is no point in trying out a third time.
        if failed == not_done:
            print("Warning: the following transforms could not be done:")
            for key in failed:
                print(key)
            break
        not_done = failed  # attempt another pass
    return source
|
Used to convert the source code, making use of known transformers.
"transformers" are modules which must contain a function
transform_source(source)
which returns a tranformed source.
Some transformers (for example, those found in the standard library
module lib2to3) cannot cope with non-standard syntax; as a result, they
may fail during a first attempt. We keep track of all failing
transformers and keep retrying them until either they all succeeded
or a fixed set of them fails twice in a row.
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/core/transforms.py#L114-L160
|
[
"def import_transformer(name):\n '''If needed, import a transformer, and adds it to the globally known dict\n The code inside a module where a transformer is defined should be\n standard Python code, which does not need any transformation.\n So, we disable the import hook, and let the normal module import\n do its job - which is faster and likely more reliable than our\n custom method.\n '''\n if name in transformers:\n return transformers[name]\n\n # We are adding a transformer built from normal/standard Python code.\n # As we are not performing transformations, we temporarily disable\n # our import hook, both to avoid potential problems AND because we\n # found that this resulted in much faster code.\n hook = sys.meta_path[0]\n sys.meta_path = sys.meta_path[1:]\n try:\n transformers[name] = __import__(name)\n # Some transformers are not allowed in the console.\n # If an attempt is made to activate one of them in the console,\n # we replace it by a transformer that does nothing and print a\n # message specific to that transformer as written in its module.\n if CONSOLE_ACTIVE:\n if hasattr(transformers[name], \"NO_CONSOLE\"):\n print(transformers[name].NO_CONSOLE)\n transformers[name] = NullTransformer()\n except ImportError:\n sys.stderr.write(\"Warning: Import Error in add_transformers: %s not found\\n\" % name)\n transformers[name] = NullTransformer()\n except Exception as e:\n sys.stderr.write(\"Unexpected exception in transforms.import_transformer%s\\n \" %\n e.__class__.__name__)\n finally:\n sys.meta_path.insert(0, hook) # restore import hook\n\n return transformers[name]\n",
"def extract_transformers_from_source(source):\n '''Scan a source for lines of the form\n from __experimental__ import transformer1 [,...]\n identifying transformers to be used. Such line is passed to the\n add_transformer function, after which it is removed from the\n code to be executed.\n '''\n lines = source.split('\\n')\n linenumbers = []\n for number, line in enumerate(lines):\n if FROM_EXPERIMENTAL.match(line):\n add_transformers(line)\n linenumbers.insert(0, number)\n\n # drop the \"fake\" import from the source code\n for number in linenumbers:\n del lines[number]\n return '\\n'.join(lines)\n"
] |
#pylint: disable=W1401, C0103, W0703
'''This module takes care of identifying, importing and adding source
code transformers. It also contains a function, `transform`, which
takes care of invoking all known transformers to convert a source code.
'''
import re
import sys
FROM_EXPERIMENTAL = re.compile("(^from\s+__experimental__\s+import\s+)")
CONSOLE_ACTIVE = False # changed by console.start_console()
class NullTransformer:
    '''NullTransformer is a convenience class which can generate instances
    to be used when a given transformer cannot be imported.'''
    def transform_source(self, source): #pylint: disable=I0011, R0201, C0111
        # Identity transformation: return the source unchanged.
        return source
transformers = {}
def add_transformers(line):
    '''Extract the transformers names from a line of code of the form
    from __experimental__ import transformer1 [,...]
    and adds them to the globally known dict
    '''
    assert FROM_EXPERIMENTAL.match(line)  # caller guarantees the pattern
    line = FROM_EXPERIMENTAL.sub(' ', line)
    # we now have: " transformer1 [,...]"
    line = line.split("#")[0] # remove any end of line comments
    # import each comma-separated transformer name (whitespace ignored)
    for trans in line.replace(' ', '').split(','):
        import_transformer(trans)
def import_transformer(name):
    '''If needed, import a transformer, and adds it to the globally known dict
    The code inside a module where a transformer is defined should be
    standard Python code, which does not need any transformation.
    So, we disable the import hook, and let the normal module import
    do its job - which is faster and likely more reliable than our
    custom method.
    '''
    if name in transformers:
        return transformers[name]  # cached: already imported
    # We are adding a transformer built from normal/standard Python code.
    # As we are not performing transformations, we temporarily disable
    # our import hook, both to avoid potential problems AND because we
    # found that this resulted in much faster code.
    hook = sys.meta_path[0]
    sys.meta_path = sys.meta_path[1:]
    try:
        transformers[name] = __import__(name)
        # Some transformers are not allowed in the console.
        # If an attempt is made to activate one of them in the console,
        # we replace it by a transformer that does nothing and print a
        # message specific to that transformer as written in its module.
        if CONSOLE_ACTIVE:
            if hasattr(transformers[name], "NO_CONSOLE"):
                print(transformers[name].NO_CONSOLE)
                transformers[name] = NullTransformer()
    except ImportError:
        sys.stderr.write("Warning: Import Error in add_transformers: %s not found\n" % name)
        transformers[name] = NullTransformer()
    except Exception as e:
        # NOTE(review): this branch never assigns transformers[name]; the
        # return below then raises KeyError for such failures -- confirm
        # whether a NullTransformer should be stored here as well.
        sys.stderr.write("Unexpected exception in transforms.import_transformer%s\n " %
                         e.__class__.__name__)
    finally:
        sys.meta_path.insert(0, hook) # restore import hook
    return transformers[name]
def extract_transformers_from_source(source):
    '''Scan a source for lines of the form
    from __experimental__ import transformer1 [,...]
    identifying transformers to be used. Such a line is passed to the
    add_transformers function, after which it is removed from the
    code to be executed.
    '''
    lines = source.split('\n')
    linenumbers = []
    for number, line in enumerate(lines):
        if FROM_EXPERIMENTAL.match(line):
            add_transformers(line)
            # Prepend so the list ends up in reverse order: deleting from
            # the highest index first keeps earlier indices valid.
            linenumbers.insert(0, number)
    # drop the "fake" import from the source code
    for number in linenumbers:
        del lines[number]
    return '\n'.join(lines)
def remove_not_allowed_in_console():
    '''This function should be called from the console, when it starts.
    Some transformers are not allowed in the console and they could have
    been loaded prior to the console being activated. Each such transformer
    is neutralized (replaced by a NullTransformer) and the information
    message specific to that transformer, as written in the transformer
    module, is printed once.
    '''
    not_allowed_in_console = []
    if CONSOLE_ACTIVE:
        for name in transformers:
            tr_module = import_transformer(name)
            if hasattr(tr_module, "NO_CONSOLE"):
                not_allowed_in_console.append((name, tr_module))
        for name, tr_module in not_allowed_in_console:
            print(tr_module.NO_CONSOLE)
            # Note: the entry is replaced rather than deleted, so as to
            # avoid seeing the information message displayed again if an
            # attempt is made to re-import them from a console instruction.
            transformers[name] = NullTransformer()
|
aroberge/experimental
|
experimental/core/import_hook.py
|
MyMetaFinder.find_spec
|
python
|
def find_spec(self, fullname, path, target=None):
    '''finds the appropriate properties (spec) of a module, and sets
    its loader.'''
    if not path:
        # Top-level import: fall back to the current working directory.
        path = [os.getcwd()]
    if "." in fullname:
        # For a dotted module, only the last component names the file.
        name = fullname.split(".")[-1]
    else:
        name = fullname
    for entry in path:
        if os.path.isdir(os.path.join(entry, name)):
            # this module has child modules: treat it as a package
            filename = os.path.join(entry, name, "__init__.py")
            submodule_locations = [os.path.join(entry, name)]
        else:
            filename = os.path.join(entry, name + ".py")
            submodule_locations = None
        if not os.path.exists(filename):
            continue
        # Attach our custom loader so the source is transformed on exec.
        return spec_from_file_location(fullname, filename,
            loader=MyLoader(filename),
            submodule_search_locations=submodule_locations)
    return None
|
finds the appropriate properties (spec) of a module, and sets
its loader.
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/core/import_hook.py#L47-L70
| null |
class MyMetaFinder(MetaPathFinder):
'''A custom finder to locate modules. The main reason for this code
is to ensure that our custom loader, which does the code transformations,
is used.'''
# we don't know how to import this
|
aroberge/experimental
|
experimental/core/import_hook.py
|
MyLoader.exec_module
|
python
|
def exec_module(self, module):
    '''Import the source code, transform it before executing it so that
    it is known to Python.'''
    global MAIN_MODULE_NAME
    # When the main script is imported through this hook, restore the
    # conventional "__main__" name so `if __name__ == "__main__"` idioms
    # in the script keep working.
    if module.__name__ == MAIN_MODULE_NAME:
        module.__name__ = "__main__"
        MAIN_MODULE_NAME = None
    with open(self.filename) as f:
        source = f.read()
    if transforms.transformers:
        source = transforms.transform(source)
    else:
        # No transformer registered yet: only pay the transformation cost
        # if the source actually requests one.
        for line in source.split('\n'):
            if transforms.FROM_EXPERIMENTAL.match(line):
                ## transforms.transform will extract all such relevant
                ## lines and add all relevant transformers
                source = transforms.transform(source)
                break
    # Execute the (possibly transformed) source in the module namespace.
    exec(source, vars(module))
|
import the source code, transforma it before executing it so that
it is known to Python.
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/core/import_hook.py#L83-L103
|
[
"def transform(source):\n '''Used to convert the source code, making use of known transformers.\n\n \"transformers\" are modules which must contain a function\n\n transform_source(source)\n\n which returns a tranformed source.\n Some transformers (for example, those found in the standard library\n module lib2to3) cannot cope with non-standard syntax; as a result, they\n may fail during a first attempt. We keep track of all failing\n transformers and keep retrying them until either they all succeeded\n or a fixed set of them fails twice in a row.\n '''\n source = extract_transformers_from_source(source)\n\n # Some transformer fail when multiple non-Python constructs\n # are present. So, we loop multiple times keeping track of\n # which transformations have been unsuccessfully performed.\n not_done = transformers\n while True:\n failed = {}\n for name in not_done:\n tr_module = import_transformer(name)\n try:\n source = tr_module.transform_source(source)\n except Exception as e:\n failed[name] = tr_module\n # from traceback import print_exc\n # print(\"Unexpected exception in transforms.transform\",\n # e.__class__.__name__)\n # print_exc()\n\n if not failed:\n break\n # Insanity is doing the same Tting over and overaAgain and\n # expecting different results ...\n # If the exact same set of transformations are not performed\n # twice in a row, there is no point in trying out a third time.\n if failed == not_done:\n print(\"Warning: the following transforms could not be done:\")\n for key in failed:\n print(key)\n break\n not_done = failed # attempt another pass\n\n return source\n"
] |
class MyLoader(Loader):
    '''A custom loader which will transform the source prior to its execution'''

    def __init__(self, filename):
        # Remember where the module's source lives; exec_module reads it later.
        self.filename = filename

    def create_module(self, spec):
        # Returning None tells the import machinery to create the module
        # object itself, using the default semantics.
        return None

    def get_code(self, _):
        '''Hack to silence an error when running experimental as main script
        See below for an explanation'''
        return compile("None", "<string>", 'eval')
|
aroberge/experimental
|
experimental/transformers/repeat_keyword.py
|
transform_source
|
python
|
def transform_source(text):
    '''Replaces instances of

        repeat n:

    by

        for __VAR_i in range(n):

    where __VAR_i is a string that does not appear elsewhere
    in the code sample.
    '''
    loop_keyword = 'repeat'
    # Upper bound on the number of replacement variables needed; counting
    # every textual occurrence (even in strings/comments) over-estimates
    # safely.
    nb = text.count(loop_keyword)
    if nb == 0:
        return text
    var_names = get_unique_variable_names(text, nb)
    toks = tokenize.generate_tokens(StringIO(text).readline)
    result = []
    replacing_keyword = False
    for toktype, tokvalue, _, _, _ in toks:
        if toktype == tokenize.NAME and tokvalue == loop_keyword:
            # Emit "for <var> in range(" in place of the "repeat" keyword.
            result.extend([
                (tokenize.NAME, 'for'),
                (tokenize.NAME, var_names.pop()),
                (tokenize.NAME, 'in'),
                (tokenize.NAME, 'range'),
                (tokenize.OP, '(')
            ])
            replacing_keyword = True
        elif replacing_keyword and tokvalue == ':':
            # Close the range( call at the colon ending the repeat header.
            # NOTE(review): a ':' inside the repeated expression (e.g. a
            # slice) would end the replacement early -- confirm intended.
            result.extend([
                (tokenize.OP, ')'),
                (tokenize.OP, ':')
            ])
            replacing_keyword = False
        else:
            result.append((toktype, tokvalue))
    return tokenize.untokenize(result)
|
Replaces instances of
repeat n:
by
for __VAR_i in range(n):
where __VAR_i is a string that does not appear elsewhere
in the code sample.
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/transformers/repeat_keyword.py#L29-L70
|
[
"def get_unique_variable_names(text, nb):\n '''returns a list of possible variables names that\n are not found in the original text.'''\n base_name = '__VAR_'\n var_names = []\n i = 0\n j = 0\n while j < nb:\n tentative_name = base_name + str(i)\n if text.count(tentative_name) == 0 and tentative_name not in ALL_NAMES:\n var_names.append(tentative_name)\n ALL_NAMES.append(tentative_name)\n j += 1\n i += 1\n return var_names\n"
] |
''' from __experimental__ import repeat_keyword
introduces `repeat` as a keyword to write simple loops that repeat
a set number of times. That is:
repeat 3:
a = 2
repeat a*a:
pass
is equivalent to
for __VAR_1 in range(3):
a = 2
for __VAR_2 in range(a*a):
pass
The names of the variables are chosen so as to ensure that they
do not appear in the source code to be translated.
The transformation is done using the tokenize module; it should
only affect code and not content of strings.
'''
from io import StringIO
import tokenize
ALL_NAMES = []
def get_unique_variable_names(text, nb):
    '''Return a list of `nb` fresh variable names of the form __VAR_i
    that occur neither in `text` nor in the module-wide registry of
    names already handed out (ALL_NAMES).'''
    fresh = []
    suffix = 0
    while len(fresh) < nb:
        candidate = '__VAR_' + str(suffix)
        suffix += 1
        if candidate in ALL_NAMES or text.count(candidate) != 0:
            continue
        # Record globally so later calls never reuse the same name.
        ALL_NAMES.append(candidate)
        fresh.append(candidate)
    return fresh
if __name__ == '__main__':
sample = '''# comment with repeat in it
repeat 3: # first loop
print('__VAR_1')
repeat (2*2):
pass'''
comparison = '''# comment with repeat in it
for __VAR_3 in range (3 ):# first loop
print ('__VAR_1')
for __VAR_2 in range ((2 *2 )):
pass '''
transformed = transform_source(sample)
if comparison == transformed:
print("Transformation done correctly")
else:
print("Transformation done incorrectly")
import difflib
d = difflib.Differ()
diff = d.compare(comparison.splitlines(), transformed.splitlines())
print('\n'.join(diff))
|
aroberge/experimental
|
experimental/transformers/repeat_keyword.py
|
get_unique_variable_names
|
python
|
def get_unique_variable_names(text, nb):
    '''returns a list of possible variables names that
    are not found in the original text.'''
    base_name = '__VAR_'
    var_names = []
    i = 0  # candidate suffix counter
    j = 0  # number of accepted names so far
    while j < nb:
        tentative_name = base_name + str(i)
        # Accept only names absent from both the source text and the
        # module-wide registry of already-issued names.
        if text.count(tentative_name) == 0 and tentative_name not in ALL_NAMES:
            var_names.append(tentative_name)
            ALL_NAMES.append(tentative_name)
            j += 1
        i += 1
    return var_names
|
returns a list of possible variables names that
are not found in the original text.
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/transformers/repeat_keyword.py#L75-L89
| null |
''' from __experimental__ import repeat_keyword
introduces `repeat` as a keyword to write simple loops that repeat
a set number of times. That is:
repeat 3:
a = 2
repeat a*a:
pass
is equivalent to
for __VAR_1 in range(3):
a = 2
for __VAR_2 in range(a*a):
pass
The names of the variables are chosen so as to ensure that they
do not appear in the source code to be translated.
The transformation is done using the tokenize module; it should
only affect code and not content of strings.
'''
from io import StringIO
import tokenize
def transform_source(text):
    '''Replaces instances of

        repeat n:

    by

        for __VAR_i in range(n):

    where __VAR_i is a string that does not appear elsewhere
    in the code sample.
    '''
    loop_keyword = 'repeat'
    # Upper bound on the number of replacement variables needed; counting
    # every textual occurrence (even in strings/comments) over-estimates
    # safely.
    nb = text.count(loop_keyword)
    if nb == 0:
        return text
    var_names = get_unique_variable_names(text, nb)
    toks = tokenize.generate_tokens(StringIO(text).readline)
    result = []
    replacing_keyword = False
    for toktype, tokvalue, _, _, _ in toks:
        if toktype == tokenize.NAME and tokvalue == loop_keyword:
            # Emit "for <var> in range(" in place of the "repeat" keyword.
            result.extend([
                (tokenize.NAME, 'for'),
                (tokenize.NAME, var_names.pop()),
                (tokenize.NAME, 'in'),
                (tokenize.NAME, 'range'),
                (tokenize.OP, '(')
            ])
            replacing_keyword = True
        elif replacing_keyword and tokvalue == ':':
            # Close the range( call at the colon ending the repeat header.
            # NOTE(review): a ':' inside the repeated expression (e.g. a
            # slice) would end the replacement early -- confirm intended.
            result.extend([
                (tokenize.OP, ')'),
                (tokenize.OP, ':')
            ])
            replacing_keyword = False
        else:
            result.append((toktype, tokvalue))
    return tokenize.untokenize(result)
ALL_NAMES = []
if __name__ == '__main__':
sample = '''# comment with repeat in it
repeat 3: # first loop
print('__VAR_1')
repeat (2*2):
pass'''
comparison = '''# comment with repeat in it
for __VAR_3 in range (3 ):# first loop
print ('__VAR_1')
for __VAR_2 in range ((2 *2 )):
pass '''
transformed = transform_source(sample)
if comparison == transformed:
print("Transformation done correctly")
else:
print("Transformation done incorrectly")
import difflib
d = difflib.Differ()
diff = d.compare(comparison.splitlines(), transformed.splitlines())
print('\n'.join(diff))
|
aroberge/experimental
|
experimental/transformers/utils/one2one.py
|
translate
|
python
|
def translate(source, dictionary):
    '''A dictionary with a one-to-one translation of keywords is used
    to provide the transformation.
    '''
    tokens = tokenize.generate_tokens(StringIO(source).readline)
    translated = []
    for tok_type, tok_value, _, _, _ in tokens:
        if tok_type == tokenize.NAME:
            # Substitute the name when a translation exists; otherwise
            # pass the token through unchanged.
            translated.append((tok_type, dictionary.get(tok_value, tok_value)))
        else:
            translated.append((tok_type, tok_value))
    return tokenize.untokenize(translated)
|
A dictionary with a one-to-one translation of keywords is used
to provide the transformation.
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/transformers/utils/one2one.py#L12-L23
| null |
'''This module contains a single function: translate.
Using the tokenize module, this function parses some source code
an apply a translation based on a one-to-one translation table
represented by a Python dict.
'''
from io import StringIO
import tokenize
|
aroberge/experimental
|
experimental/transformers/int_seq.py
|
__experimental_range
|
python
|
def __experimental_range(start, stop, var, cond, loc={}):
    '''Utility function made to reproduce range() with unit integer step
    but with the added possibility of specifying a condition
    on the looping variable (e.g. var % 2 == 0)
    '''
    # NOTE(review): `loc={}` is a shared mutable default; it is only read
    # via update() here, but callers should still pass loc explicitly.
    # NOTE(review): writing into locals() so that eval(cond, ...) can see
    # `var` relies on a CPython implementation detail (extra keys placed in
    # the frame's locals dict survive between locals() calls) -- confirm
    # behaviour on other interpreters.
    locals().update(loc)
    if start < stop:
        # Ascending iteration, unit step.
        for __ in range(start, stop):
            locals()[var] = __
            if eval(cond, globals(), locals()):
                yield __
    else:
        # Descending iteration, unit step.
        for __ in range(start, stop, -1):
            locals()[var] = __
            if eval(cond, globals(), locals()):
                yield __
|
Utility function made to reproduce range() with unit integer step
but with the added possibility of specifying a condition
on the looping variable (e.g. var % 2 == 0)
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/transformers/int_seq.py#L51-L66
| null |
''' from __experimental__ import int_seq
makes it possible to use an alternative syntax instead of using `range`
in a for loop. To be more specific, instead of
for i in range(3):
print(i)
we could write
for i in 0 <= i < 3:
print(i)
or
for i in 0 <= i <= 2: # compare upper boundary with previous case
print(i)
By reversing the order of the comparison operators, we iterate in reverse.
Thus, for example
for i in 10 >= i > 0:
print(i)
would be equivalent to
for i in range(10, 0, -1):
print(i)
An additional condition can be added; for example
for i in 1 <= i < 10 if (i % 2 == 0):
print(i)
would print the first 4 even integers.
In addition, `inseq` is possible to use as a keyword instead of `in`.
`inseq` is meant to mean `in sequence`. Also, the "range" can be enclosed
in parentheses for greater clarity. Thus, the following is valid:
for i inseq (1 <= i < 10) if (i % 2 == 0):
print(i)
The transformation is done using a regex search and is only valid
on a single line. **There is no guarantee that all legitimately
valid cases will be recognized as such.**
'''
import builtins
import re
builtins.__experimental_range = __experimental_range
######################################################################
#
# WARNING
#
# In the unlikely case that you know less about regular expressions than I do
# please do not use what I do as any indication of how one should use regular
# expressions (regex).
#
# The regex use below is admitedly awful, very likely sub-optimal,
# and could almost certainly be vastly improved upon, either by someone
# who actually knows how to use regular expressions effectively or,
# even better, by not using regular expressions at all, and either using
# Python's tokenize module, or writing a custom parser.
#
#######################################################################
no_condition = r"""(?P<indented_for>\s*for\s+)
(?P<var>[a-zA-Z_]\w*)
\s+ (in|inseq) \s*
\(?\s* # optional opening (
(?P<start>[-\w]+)
\s* %s \s*
(?P=var)
\s* %s \s*
(?P<stop>[-\w]+)
\s*\)? # optional closing )
\s* : \s*
"""
# A problem with the optional () is that the regex will be
# satisfied if only one of them is present. We'll take care of
# this by ensuring an equal number of opening and closing parentheses.
cases = []
le_lt = re.compile(no_condition % ("<=", "<"), re.VERBOSE)
cases.append((le_lt, "{0} {1} in range({2}, {3}):"))
le_le = re.compile(no_condition % ("<=", "<="), re.VERBOSE)
cases.append((le_le, "{0} {1} in range({2}, {3}+1):"))
lt_lt = re.compile(no_condition % ("<", "<"), re.VERBOSE)
cases.append((lt_lt, "{0} {1} in range({2}+1, {3}):"))
lt_le = re.compile(no_condition % ("<", "<="), re.VERBOSE)
cases.append((lt_le, "{0} {1} in range({2}+1, {3}+1):"))
ge_gt = re.compile(no_condition % (">=", ">"), re.VERBOSE)
cases.append((ge_gt, "{0} {1} in range({2}, {3}, -1):"))
ge_ge = re.compile(no_condition % (">=", ">="), re.VERBOSE)
cases.append((ge_ge, "{0} {1} in range({2}, {3}-1, -1):"))
gt_gt = re.compile(no_condition % (">", ">"), re.VERBOSE)
cases.append((gt_gt, "{0} {1} in range({2}-1, {3}, -1):"))
gt_ge = re.compile(no_condition % (">", ">="), re.VERBOSE)
cases.append((gt_ge, "{0} {1} in range({2}-1, {3}-1, -1):"))
with_condition = r"""(?P<indented_for>\s*for\s+)
(?P<var>[a-zA-Z_]\w*)
\s+ (in|inseq) \s*
\(?\s* # optional opening (
(?P<start>[-\w]+)
\s* %s \s*
(?P=var)
\s* %s \s*
(?P<stop>[-\w]+)
\s*\)? # optional closing )
\s* if \s+
(?P<cond>.+)
\s* : \s*
"""
le_lt_cond = re.compile(with_condition % ("<=", "<"), re.VERBOSE)
cases.append((le_lt_cond, "{0} {1} in __experimental_range({2}, {3}, '{1}', '{4}', loc=locals()):"))
le_le_cond = re.compile(with_condition % ("<=", "<="), re.VERBOSE)
cases.append((le_le_cond, "{0} {1} in __experimental_range({2}, {3}+1, '{1}', '{4}', loc=locals()):"))
lt_lt_cond = re.compile(with_condition % ("<", "<"), re.VERBOSE)
cases.append((lt_lt_cond, "{0} {1} in __experimental_range({2}+1, {3}, '{1}', '{4}', loc=locals()):"))
lt_le_cond = re.compile(with_condition % ("<", "<="), re.VERBOSE)
cases.append((lt_le_cond, "{0} {1} in __experimental_range({2}+1, {3}+1, '{1}', '{4}', loc=locals()):"))
ge_gt_cond = re.compile(with_condition % (">=", ">"), re.VERBOSE)
cases.append((ge_gt_cond, "{0} {1} in __experimental_range({2}, {3}, '{1}', '{4}', loc=locals()):"))
ge_ge_cond = re.compile(with_condition % (">=", ">="), re.VERBOSE)
cases.append((ge_ge_cond, "{0} {1} in __experimental_range({2}, {3}-1, '{1}', '{4}', loc=locals()):"))
gt_gt_cond = re.compile(with_condition % (">", ">"), re.VERBOSE)
cases.append((gt_gt_cond, "{0} {1} in __experimental_range({2}-1, {3}, '{1}', '{4}', loc=locals()):"))
gt_ge_cond = re.compile(with_condition % (">", ">="), re.VERBOSE)
cases.append((gt_ge_cond, "{0} {1} in __experimental_range({2}-1, {3}-1, '{1}', '{4}', loc=locals()):"))
def transform_source(source):
    '''Rewrite every line matching one of the known "integer sequence"
    for-loop patterns into an equivalent line using range() or
    __experimental_range(); all other lines pass through unchanged.
    '''
    transformed = []
    for original_line in source.split("\n"):
        code_part = original_line.split("#")[0]  # ignore trailing comments
        # The parentheses around the "range" expression are optional in the
        # regex, so require them to be balanced to avoid half-matches.
        balanced = code_part.count('(') == code_part.count(')')
        replacement = original_line
        if balanced:
            for pattern, template in cases:
                match = pattern.search(code_part)
                if match is not None:
                    replacement = create_for(template, match)
                    break
        transformed.append(replacement)
    return "\n".join(transformed)
def create_for(line, search_result):
'''Create a new "for loop" line as a replacement for the original code.
'''
try:
return line.format(search_result.group("indented_for"),
search_result.group("var"),
search_result.group("start"),
search_result.group("stop"),
search_result.group("cond"))
except IndexError:
return line.format(search_result.group("indented_for"),
search_result.group("var"),
search_result.group("start"),
search_result.group("stop"))
|
aroberge/experimental
|
experimental/transformers/int_seq.py
|
create_for
|
python
|
def create_for(line, search_result):
'''Create a new "for loop" line as a replacement for the original code.
'''
try:
return line.format(search_result.group("indented_for"),
search_result.group("var"),
search_result.group("start"),
search_result.group("stop"),
search_result.group("cond"))
except IndexError:
return line.format(search_result.group("indented_for"),
search_result.group("var"),
search_result.group("start"),
search_result.group("stop"))
|
Create a new "for loop" line as a replacement for the original code.
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/transformers/int_seq.py#L180-L193
| null |
''' from __experimental__ import int_seq
makes it possible to use an alternative syntax instead of using `range`
in a for loop. To be more specific, instead of
for i in range(3):
print(i)
we could write
for i in 0 <= i < 3:
print(i)
or
for i in 0 <= i <= 2: # compare upper boundary with previous case
print(i)
By reversing the order of the comparison operators, we iterate in reverse.
Thus, for example
for i in 10 >= i > 0:
print(i)
would be equivalent to
for i in range(10, 0, -1):
print(i)
An additional condition can be added; for example
for i in 1 <= i < 10 if (i % 2 == 0):
print(i)
would print the first 4 even integers.
In addition, `inseq` is possible to use as a keyword instead of `in`.
`inseq` is meant to mean `in sequence`. Also, the "range" can be enclosed
in parentheses for greater clarity. Thus, the following is valid:
for i inseq (1 <= i < 10) if (i % 2 == 0):
print(i)
The transformation is done using a regex search and is only valid
on a single line. **There is no guarantee that all legitimately
valid cases will be recognized as such.**
'''
import builtins
import re
def __experimental_range(start, stop, var, cond, loc={}):
'''Utility function made to reproduce range() with unit integer step
but with the added possibility of specifying a condition
on the looping variable (e.g. var % 2 == 0)
'''
locals().update(loc)
if start < stop:
for __ in range(start, stop):
locals()[var] = __
if eval(cond, globals(), locals()):
yield __
else:
for __ in range(start, stop, -1):
locals()[var] = __
if eval(cond, globals(), locals()):
yield __
builtins.__experimental_range = __experimental_range
######################################################################
#
# WARNING
#
# In the unlikely case that you know less about regular expressions than I do
# please do not use what I do as any indication of how one should use regular
# expressions (regex).
#
# The regex use below is admitedly awful, very likely sub-optimal,
# and could almost certainly be vastly improved upon, either by someone
# who actually knows how to use regular expressions effectively or,
# even better, by not using regular expressions at all, and either using
# Python's tokenize module, or writing a custom parser.
#
#######################################################################
no_condition = r"""(?P<indented_for>\s*for\s+)
(?P<var>[a-zA-Z_]\w*)
\s+ (in|inseq) \s*
\(?\s* # optional opening (
(?P<start>[-\w]+)
\s* %s \s*
(?P=var)
\s* %s \s*
(?P<stop>[-\w]+)
\s*\)? # optional closing )
\s* : \s*
"""
# A problem with the optional () is that the regex will be
# satisfied if only one of them is present. We'll take care of
# this by ensuring an equal number of opening and closing parentheses.
cases = []
le_lt = re.compile(no_condition % ("<=", "<"), re.VERBOSE)
cases.append((le_lt, "{0} {1} in range({2}, {3}):"))
le_le = re.compile(no_condition % ("<=", "<="), re.VERBOSE)
cases.append((le_le, "{0} {1} in range({2}, {3}+1):"))
lt_lt = re.compile(no_condition % ("<", "<"), re.VERBOSE)
cases.append((lt_lt, "{0} {1} in range({2}+1, {3}):"))
lt_le = re.compile(no_condition % ("<", "<="), re.VERBOSE)
cases.append((lt_le, "{0} {1} in range({2}+1, {3}+1):"))
ge_gt = re.compile(no_condition % (">=", ">"), re.VERBOSE)
cases.append((ge_gt, "{0} {1} in range({2}, {3}, -1):"))
ge_ge = re.compile(no_condition % (">=", ">="), re.VERBOSE)
cases.append((ge_ge, "{0} {1} in range({2}, {3}-1, -1):"))
gt_gt = re.compile(no_condition % (">", ">"), re.VERBOSE)
cases.append((gt_gt, "{0} {1} in range({2}-1, {3}, -1):"))
gt_ge = re.compile(no_condition % (">", ">="), re.VERBOSE)
cases.append((gt_ge, "{0} {1} in range({2}-1, {3}-1, -1):"))
with_condition = r"""(?P<indented_for>\s*for\s+)
(?P<var>[a-zA-Z_]\w*)
\s+ (in|inseq) \s*
\(?\s* # optional opening (
(?P<start>[-\w]+)
\s* %s \s*
(?P=var)
\s* %s \s*
(?P<stop>[-\w]+)
\s*\)? # optional closing )
\s* if \s+
(?P<cond>.+)
\s* : \s*
"""
le_lt_cond = re.compile(with_condition % ("<=", "<"), re.VERBOSE)
cases.append((le_lt_cond, "{0} {1} in __experimental_range({2}, {3}, '{1}', '{4}', loc=locals()):"))
le_le_cond = re.compile(with_condition % ("<=", "<="), re.VERBOSE)
cases.append((le_le_cond, "{0} {1} in __experimental_range({2}, {3}+1, '{1}', '{4}', loc=locals()):"))
lt_lt_cond = re.compile(with_condition % ("<", "<"), re.VERBOSE)
cases.append((lt_lt_cond, "{0} {1} in __experimental_range({2}+1, {3}, '{1}', '{4}', loc=locals()):"))
lt_le_cond = re.compile(with_condition % ("<", "<="), re.VERBOSE)
cases.append((lt_le_cond, "{0} {1} in __experimental_range({2}+1, {3}+1, '{1}', '{4}', loc=locals()):"))
ge_gt_cond = re.compile(with_condition % (">=", ">"), re.VERBOSE)
cases.append((ge_gt_cond, "{0} {1} in __experimental_range({2}, {3}, '{1}', '{4}', loc=locals()):"))
ge_ge_cond = re.compile(with_condition % (">=", ">="), re.VERBOSE)
cases.append((ge_ge_cond, "{0} {1} in __experimental_range({2}, {3}-1, '{1}', '{4}', loc=locals()):"))
gt_gt_cond = re.compile(with_condition % (">", ">"), re.VERBOSE)
cases.append((gt_gt_cond, "{0} {1} in __experimental_range({2}-1, {3}, '{1}', '{4}', loc=locals()):"))
gt_ge_cond = re.compile(with_condition % (">", ">="), re.VERBOSE)
cases.append((gt_ge_cond, "{0} {1} in __experimental_range({2}-1, {3}-1, '{1}', '{4}', loc=locals()):"))
def transform_source(source):
lines = source.split("\n")
new_lines = []
for line in lines:
begin = line.split("#")[0]
for (pattern, for_str) in cases:
result = pattern.search(begin)
if result is not None and begin.count('(') == begin.count(')'):
line = create_for(for_str, result)
break
new_lines.append(line)
return "\n".join(new_lines)
|
aroberge/experimental
|
experimental/transformers/switch_statement.py
|
transform_source
|
python
|
def transform_source(text):
'''Replaces instances of
switch expression:
by
for __case in _Switch(n):
and replaces
case expression:
by
if __case(expression):
and
default:
by
if __case():
'''
toks = tokenize.generate_tokens(StringIO(text).readline)
result = []
replacing_keyword = False
for toktype, tokvalue, _, _, _ in toks:
if toktype == tokenize.NAME and tokvalue == 'switch':
result.extend([
(tokenize.NAME, 'for'),
(tokenize.NAME, '__case'),
(tokenize.NAME, 'in'),
(tokenize.NAME, '_Switch'),
(tokenize.OP, '(')
])
replacing_keyword = True
elif toktype == tokenize.NAME and (tokvalue == 'case' or tokvalue == 'default'):
result.extend([
(tokenize.NAME, 'if'),
(tokenize.NAME, '__case'),
(tokenize.OP, '(')
])
replacing_keyword = True
elif replacing_keyword and tokvalue == ':':
result.extend([
(tokenize.OP, ')'),
(tokenize.OP, ':')
])
replacing_keyword = False
else:
result.append((toktype, tokvalue))
return tokenize.untokenize(result)
|
Replaces instances of
switch expression:
by
for __case in _Switch(n):
and replaces
case expression:
by
if __case(expression):
and
default:
by
if __case():
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/transformers/switch_statement.py#L73-L125
| null |
'''from __experimental__ import switch_statement
allows the use of a Pythonic switch statement (implemented with if clauses).
A current limitation is that there can only be one level of switch statement
i.e. you cannot have a switch statement inside a case of another switch statement.
Here's an example usage
def example(n):
result = ''
switch n:
case 2:
result += '2 is even and '
case 3, 5, 7:
result += f'{n} is prime'
break
case 0: pass
case 1:
pass
case 4, 6, 8, 9:
result = f'{n} is not prime'
break
default:
result = f'{n} is not a single digit integer'
return result
def test_switch():
assert example(0) == '0 is not prime'
assert example(1) == '1 is not prime'
assert example(2) == '2 is even and 2 is prime'
assert example(3) == '3 is prime'
assert example(4) == '4 is not prime'
assert example(5) == '5 is prime'
assert example(6) == '6 is not prime'
assert example(7) == '7 is prime'
assert example(8) == '8 is not prime'
assert example(9) == '9 is not prime'
assert example(10) == '10 is not a single digit integer'
assert example(42) == '42 is not a single digit integer'
'''
import builtins
import tokenize
from io import StringIO
class Switch:
''' Adapted from http://code.activestate.com/recipes/410692/'''
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
yield self.match
raise StopIteration
def __next__(self):
"""Return the match method once, then stop"""
yield self.match
raise StopIteration
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
builtins._Switch = Switch
|
aroberge/experimental
|
experimental/transformers/utils/simple2to3.py
|
get_lib2to3_fixers
|
python
|
def get_lib2to3_fixers():
'''returns a list of all fixers found in the lib2to3 library'''
fixers = []
fixer_dirname = fixer_dir.__path__[0]
for name in sorted(os.listdir(fixer_dirname)):
if name.startswith("fix_") and name.endswith(".py"):
fixers.append("lib2to3.fixes." + name[:-3])
return fixers
|
returns a list of all fixers found in the lib2to3 library
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/transformers/utils/simple2to3.py#L10-L17
| null |
import os
from lib2to3.refactor import RefactoringTool
import lib2to3.fixes as fixer_dir
# This simple module appears to be incompatible with the import
# hook. For this reason, it is important to wrap calls to it
# with a try/except clause. I have found that a bare except,
# that catches all errors, is definitely the best way to proceed.
def get_single_fixer(fixname):
'''return a single fixer found in the lib2to3 library'''
fixer_dirname = fixer_dir.__path__[0]
for name in sorted(os.listdir(fixer_dirname)):
if (name.startswith("fix_") and name.endswith(".py")
and fixname == name[4:-3]):
return "lib2to3.fixes." + name[:-3]
class MyRefactoringTool(RefactoringTool):
'''This class must be instantiated with a list of all desired fixers'''
_used_fixes = []
def __init__(self, fixer_names):
# avoid duplicating fixers if called multiple times
fixers = [fix for fix in fixer_names if fix not in self._used_fixes]
super().__init__(fixers, options=None, explicit=None)
self._used_fixes.extend(fixers)
def refactor_source(self, source):
source += "\n" # Silence certain parse errors
tree = self.refactor_string(source, "original")
return str(tree)[:-1]
|
aroberge/experimental
|
experimental/transformers/utils/simple2to3.py
|
get_single_fixer
|
python
|
def get_single_fixer(fixname):
'''return a single fixer found in the lib2to3 library'''
fixer_dirname = fixer_dir.__path__[0]
for name in sorted(os.listdir(fixer_dirname)):
if (name.startswith("fix_") and name.endswith(".py")
and fixname == name[4:-3]):
return "lib2to3.fixes." + name[:-3]
|
return a single fixer found in the lib2to3 library
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/transformers/utils/simple2to3.py#L20-L26
| null |
import os
from lib2to3.refactor import RefactoringTool
import lib2to3.fixes as fixer_dir
# This simple module appears to be incompatible with the import
# hook. For this reason, it is important to wrap calls to it
# with a try/except clause. I have found that a bare except,
# that catches all errors, is definitely the best way to proceed.
def get_lib2to3_fixers():
'''returns a list of all fixers found in the lib2to3 library'''
fixers = []
fixer_dirname = fixer_dir.__path__[0]
for name in sorted(os.listdir(fixer_dirname)):
if name.startswith("fix_") and name.endswith(".py"):
fixers.append("lib2to3.fixes." + name[:-3])
return fixers
class MyRefactoringTool(RefactoringTool):
'''This class must be instantiated with a list of all desired fixers'''
_used_fixes = []
def __init__(self, fixer_names):
# avoid duplicating fixers if called multiple times
fixers = [fix for fix in fixer_names if fix not in self._used_fixes]
super().__init__(fixers, options=None, explicit=None)
self._used_fixes.extend(fixers)
def refactor_source(self, source):
source += "\n" # Silence certain parse errors
tree = self.refactor_string(source, "original")
return str(tree)[:-1]
|
aroberge/experimental
|
experimental/transformers/where_clause.py
|
transform_source
|
python
|
def transform_source(text):
'''removes a "where" clause which is identified by the use of "where"
as an identifier and ends at the first DEDENT (i.e. decrease in indentation)'''
toks = tokenize.generate_tokens(StringIO(text).readline)
result = []
where_clause = False
for toktype, tokvalue, _, _, _ in toks:
if toktype == tokenize.NAME and tokvalue == "where":
where_clause = True
elif where_clause and toktype == tokenize.DEDENT:
where_clause = False
continue
if not where_clause:
result.append((toktype, tokvalue))
return tokenize.untokenize(result)
|
removes a "where" clause which is identified by the use of "where"
as an identifier and ends at the first DEDENT (i.e. decrease in indentation)
|
train
|
https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/transformers/where_clause.py#L31-L46
| null |
''' from __experimental__ import where_clause
shows how one could use `where` as a keyword to introduce a code
block that would be ignored by Python. The idea was to use this as
a _pythonic_ notation as an alternative for the optional type hinting described
in PEP484. **This idea has been rejected** as it would not have
been compatible with some older versions of Python, unlike the
approach that has been accepted.
https://www.python.org/dev/peps/pep-0484/#other-forms-of-new-syntax
:warning: This transformation **cannot** be used in the console.
For more details, please see two of my recent blog posts:
https://aroberge.blogspot.ca/2015/12/revisiting-old-friend-yet-again.html
https://aroberge.blogspot.ca/2015/01/type-hinting-in-python-focus-on.html
I first suggested this idea more than 12 years ago! ;-)
https://aroberge.blogspot.ca/2005/01/where-keyword-and-python-as-pseudo.html
'''
from io import StringIO
import tokenize
NO_CONSOLE = '\nWarning: where_clause is not allowed in the console.\n'
|
persandstrom/python-verisure
|
verisure/session.py
|
Session.login
|
python
|
def login(self):
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
|
Login to verisure app api
Login before calling any read or write commands
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L73-L95
| null |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
""" Get arm state """
response = None
try:
response = requests.get(
urls.get_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
"""
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
""" Test ethernet status """
response = None
try:
response = requests.post(
urls.test_ethernet(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
""" Set heatpump mode
Args:
power (str): 'ON', 'OFF'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'power': power}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
""" Set heatpump mode
Args:
fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM, 'MEDIUM_HIGH' or 'HIGH'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'fanSpeed': fan_speed}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
""" Set heatpump mode
Args:
target_temperature (int): required temperature of the heatpump
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'targetTemperature': target_temp}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
""" Set heatpump mode
Args:
airSwingDirection (str): 'AUTO' '0_DEGREES' '30_DEGREES'
'60_DEGREES' '90_DEGREES'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'airSwingDirection':
{"vertical": airswingdirection}}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/session.py
|
Session._get_installations
|
python
|
def _get_installations(self):
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
|
Get information about installations
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L127-L150
| null |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
""" Get arm state """
response = None
try:
response = requests.get(
urls.get_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
"""
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
""" Test ethernet status """
response = None
try:
response = requests.post(
urls.test_ethernet(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
""" Set heatpump mode
Args:
power (str): 'ON', 'OFF'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'power': power}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
""" Set heatpump mode
Args:
fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM, 'MEDIUM_HIGH' or 'HIGH'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'fanSpeed': fan_speed}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
""" Set heatpump mode
Args:
target_temperature (int): required temperature of the heatpump
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'targetTemperature': target_temp}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
""" Set heatpump mode
Args:
airSwingDirection (str): 'AUTO' '0_DEGREES' '30_DEGREES'
'60_DEGREES' '90_DEGREES'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'airSwingDirection':
{"vertical": airswingdirection}}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/session.py
|
Session.get_overview
|
python
|
def get_overview(self):
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
Get overview for installation
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L160-L174
|
[
"def overview(guid):\n return installation(guid) + 'overview'.format(\n installation=installation)\n",
"def _validate_response(response):\n \"\"\" Verify that response is OK \"\"\"\n if response.status_code == 200:\n return\n raise ResponseError(response.status_code, response.text)\n"
] |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
""" Get arm state """
response = None
try:
response = requests.get(
urls.get_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
"""
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
""" Test ethernet status """
response = None
try:
response = requests.post(
urls.test_ethernet(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
""" Set heatpump mode
Args:
power (str): 'ON', 'OFF'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'power': power}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
""" Set heatpump mode
Args:
fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM, 'MEDIUM_HIGH' or 'HIGH'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'fanSpeed': fan_speed}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
""" Set heatpump mode
Args:
target_temperature (int): required temperature of the heatpump
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'targetTemperature': target_temp}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
""" Set heatpump mode
Args:
airSwingDirection (str): 'AUTO' '0_DEGREES' '30_DEGREES'
'60_DEGREES' '90_DEGREES'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'airSwingDirection':
{"vertical": airswingdirection}}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/session.py
|
Session.set_smartplug_state
|
python
|
def set_smartplug_state(self, device_label, state):
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
|
Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L176-L195
|
[
"def smartplug(guid):\n return installation(guid) + 'smartplug/state'\n",
"def _validate_response(response):\n \"\"\" Verify that response is OK \"\"\"\n if response.status_code == 200:\n return\n raise ResponseError(response.status_code, response.text)\n"
] |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
""" Get arm state """
response = None
try:
response = requests.get(
urls.get_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
"""
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
""" Test ethernet status """
response = None
try:
response = requests.post(
urls.test_ethernet(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
""" Set heatpump mode
Args:
power (str): 'ON', 'OFF'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'power': power}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
""" Set heatpump mode
Args:
fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM, 'MEDIUM_HIGH' or 'HIGH'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'fanSpeed': fan_speed}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
""" Set heatpump mode
Args:
target_temperature (int): required temperature of the heatpump
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'targetTemperature': target_temp}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
""" Set heatpump mode
Args:
airSwingDirection (str): 'AUTO' '0_DEGREES' '30_DEGREES'
'60_DEGREES' '90_DEGREES'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'airSwingDirection':
{"vertical": airswingdirection}}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/session.py
|
Session.get_history
|
python
|
def get_history(self, filters=(), pagesize=15, offset=0):
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L250-L274
|
[
"def _validate_response(response):\n \"\"\" Verify that response is OK \"\"\"\n if response.status_code == 200:\n return\n raise ResponseError(response.status_code, response.text)\n",
"def history(guid):\n return ('{base_url}/celapi/customereventlog/installation/{guid}'\n + '/eventlog').format(\n base_url=BASE_URL,\n guid=guid)\n"
] |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
""" Get arm state """
response = None
try:
response = requests.get(
urls.get_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
"""
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
""" Test ethernet status """
response = None
try:
response = requests.post(
urls.test_ethernet(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
""" Set heatpump mode
Args:
power (str): 'ON', 'OFF'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'power': power}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
""" Set heatpump mode
Args:
fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM, 'MEDIUM_HIGH' or 'HIGH'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'fanSpeed': fan_speed}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
""" Set heatpump mode
Args:
target_temperature (int): required temperature of the heatpump
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'targetTemperature': target_temp}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
""" Set heatpump mode
Args:
airSwingDirection (str): 'AUTO' '0_DEGREES' '30_DEGREES'
'60_DEGREES' '90_DEGREES'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'airSwingDirection':
{"vertical": airswingdirection}}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/session.py
|
Session.get_climate
|
python
|
def get_climate(self, device_label):
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
Get climate history
Args:
device_label: device label of climate device
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L276-L293
|
[
"def climate(guid):\n return installation(guid) + 'climate/simple/search'\n",
"def _validate_response(response):\n \"\"\" Verify that response is OK \"\"\"\n if response.status_code == 200:\n return\n raise ResponseError(response.status_code, response.text)\n"
] |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
""" Get arm state """
response = None
try:
response = requests.get(
urls.get_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
"""
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
""" Test ethernet status """
response = None
try:
response = requests.post(
urls.test_ethernet(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
""" Set heatpump mode
Args:
power (str): 'ON', 'OFF'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'power': power}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
""" Set heatpump mode
Args:
fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM, 'MEDIUM_HIGH' or 'HIGH'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'fanSpeed': fan_speed}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
""" Set heatpump mode
Args:
target_temperature (int): required temperature of the heatpump
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'targetTemperature': target_temp}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
""" Set heatpump mode
Args:
airSwingDirection (str): 'AUTO' '0_DEGREES' '30_DEGREES'
'60_DEGREES' '90_DEGREES'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'airSwingDirection':
{"vertical": airswingdirection}}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/session.py
|
Session.set_lock_state
|
python
|
def set_lock_state(self, code, device_label, state):
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L309-L329
|
[
"def _validate_response(response):\n \"\"\" Verify that response is OK \"\"\"\n if response.status_code == 200:\n return\n raise ResponseError(response.status_code, response.text)\n",
"def set_lockstate(guid, device_label, state):\n return installation(guid) + 'device/{device_label}/{state}'.format(\n device_label=device_label, state=state)\n"
] |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
""" Get arm state """
response = None
try:
response = requests.get(
urls.get_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
"""
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
""" Test ethernet status """
response = None
try:
response = requests.post(
urls.test_ethernet(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
""" Set heatpump mode
Args:
power (str): 'ON', 'OFF'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'power': power}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
""" Set heatpump mode
Args:
fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM, 'MEDIUM_HIGH' or 'HIGH'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'fanSpeed': fan_speed}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
""" Set heatpump mode
Args:
target_temperature (int): required temperature of the heatpump
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'targetTemperature': target_temp}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
""" Set heatpump mode
Args:
airSwingDirection (str): 'AUTO' '0_DEGREES' '30_DEGREES'
'60_DEGREES' '90_DEGREES'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'airSwingDirection':
{"vertical": airswingdirection}}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/session.py
|
Session.get_lock_state_transaction
|
python
|
def get_lock_state_transaction(self, transaction_id):
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L331-L347
|
[
"def _validate_response(response):\n \"\"\" Verify that response is OK \"\"\"\n if response.status_code == 200:\n return\n raise ResponseError(response.status_code, response.text)\n",
"def get_lockstate_transaction(guid, transaction_id):\n return (installation(guid)\n + 'doorlockstate/change/result/{transaction_id}'.format(\n transaction_id=transaction_id))\n"
] |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
""" Get arm state """
response = None
try:
response = requests.get(
urls.get_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
"""
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
""" Test ethernet status """
response = None
try:
response = requests.post(
urls.test_ethernet(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
""" Set heatpump mode
Args:
power (str): 'ON', 'OFF'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'power': power}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
""" Set heatpump mode
Args:
fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM, 'MEDIUM_HIGH' or 'HIGH'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'fanSpeed': fan_speed}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
""" Set heatpump mode
Args:
target_temperature (int): required temperature of the heatpump
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'targetTemperature': target_temp}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
""" Set heatpump mode
Args:
airSwingDirection (str): 'AUTO' '0_DEGREES' '30_DEGREES'
'60_DEGREES' '90_DEGREES'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'airSwingDirection':
{"vertical": airswingdirection}}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/session.py
|
Session.get_lock_config
|
python
|
def get_lock_config(self, device_label):
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
Get lock configuration
Args:
device_label (str): device label of lock
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L349-L365
|
[
"def _validate_response(response):\n \"\"\" Verify that response is OK \"\"\"\n if response.status_code == 200:\n return\n raise ResponseError(response.status_code, response.text)\n",
"def lockconfig(guid, device_label):\n return installation(guid) + 'device/{device_label}/doorlockconfig'.format(\n device_label=device_label)\n"
] |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
""" Get arm state """
response = None
try:
response = requests.get(
urls.get_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
"""
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
""" Test ethernet status """
response = None
try:
response = requests.post(
urls.test_ethernet(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
""" Set heatpump mode
Args:
power (str): 'ON', 'OFF'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'power': power}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
""" Set heatpump mode
Args:
fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM, 'MEDIUM_HIGH' or 'HIGH'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'fanSpeed': fan_speed}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
""" Set heatpump mode
Args:
target_temperature (int): required temperature of the heatpump
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'targetTemperature': target_temp}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
""" Set heatpump mode
Args:
airSwingDirection (str): 'AUTO' '0_DEGREES' '30_DEGREES'
'60_DEGREES' '90_DEGREES'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'airSwingDirection':
{"vertical": airswingdirection}}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/session.py
|
Session.set_lock_config
|
python
|
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
|
Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L367-L394
|
[
"def _validate_response(response):\n \"\"\" Verify that response is OK \"\"\"\n if response.status_code == 200:\n return\n raise ResponseError(response.status_code, response.text)\n",
"def lockconfig(guid, device_label):\n return installation(guid) + 'device/{device_label}/doorlockconfig'.format(\n device_label=device_label)\n"
] |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
""" Get arm state """
response = None
try:
response = requests.get(
urls.get_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
""" Test ethernet status """
response = None
try:
response = requests.post(
urls.test_ethernet(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
""" Set heatpump mode
Args:
power (str): 'ON', 'OFF'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'power': power}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
""" Set heatpump mode
Args:
fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM, 'MEDIUM_HIGH' or 'HIGH'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'fanSpeed': fan_speed}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
""" Set heatpump mode
Args:
target_temperature (int): required temperature of the heatpump
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'targetTemperature': target_temp}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
""" Set heatpump mode
Args:
airSwingDirection (str): 'AUTO' '0_DEGREES' '30_DEGREES'
'60_DEGREES' '90_DEGREES'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'airSwingDirection':
{"vertical": airswingdirection}}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/session.py
|
Session.capture_image
|
python
|
def capture_image(self, device_label):
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
|
Capture smartcam image
Args:
device_label (str): device label of camera
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L396-L411
|
[
"def _validate_response(response):\n \"\"\" Verify that response is OK \"\"\"\n if response.status_code == 200:\n return\n raise ResponseError(response.status_code, response.text)\n",
"def imagecapture(guid, device_label):\n return (installation(guid)\n + 'device/{device_label}/customerimagecamera/imagecapture'.format(\n device_label=device_label))\n"
] |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
""" Get arm state """
response = None
try:
response = requests.get(
urls.get_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
"""
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
""" Test ethernet status """
response = None
try:
response = requests.post(
urls.test_ethernet(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
""" Set heatpump mode
Args:
power (str): 'ON', 'OFF'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'power': power}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
""" Set heatpump mode
Args:
fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM, 'MEDIUM_HIGH' or 'HIGH'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'fanSpeed': fan_speed}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
""" Set heatpump mode
Args:
target_temperature (int): required temperature of the heatpump
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'targetTemperature': target_temp}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
""" Set heatpump mode
Args:
airSwingDirection (str): 'AUTO' '0_DEGREES' '30_DEGREES'
'60_DEGREES' '90_DEGREES'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'airSwingDirection':
{"vertical": airswingdirection}}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/session.py
|
Session.get_camera_imageseries
|
python
|
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L413-L437
|
[
"def _validate_response(response):\n \"\"\" Verify that response is OK \"\"\"\n if response.status_code == 200:\n return\n raise ResponseError(response.status_code, response.text)\n",
"def get_imageseries(guid):\n return (installation(guid)\n + 'device/customerimagecamera/imageseries/search')\n"
] |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
""" Get arm state """
response = None
try:
response = requests.get(
urls.get_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
"""
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
""" Test ethernet status """
response = None
try:
response = requests.post(
urls.test_ethernet(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
""" Set heatpump mode
Args:
power (str): 'ON', 'OFF'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'power': power}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
""" Set heatpump mode
Args:
fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM, 'MEDIUM_HIGH' or 'HIGH'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'fanSpeed': fan_speed}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
""" Set heatpump mode
Args:
target_temperature (int): required temperature of the heatpump
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'targetTemperature': target_temp}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
""" Set heatpump mode
Args:
airSwingDirection (str): 'AUTO' '0_DEGREES' '30_DEGREES'
'60_DEGREES' '90_DEGREES'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'airSwingDirection':
{"vertical": airswingdirection}}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/session.py
|
Session.download_image
|
python
|
def download_image(self, device_label, image_id, file_name):
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
|
Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L439-L460
|
[
"def download_image(guid, device_label, image_id):\n return (installation(guid)\n + 'device/{device_label}/customerimagecamera/image/{image_id}/'\n ).format(\n device_label=device_label,\n image_id=image_id)\n",
"def _validate_response(response):\n \"\"\" Verify that response is OK \"\"\"\n if response.status_code == 200:\n return\n raise ResponseError(response.status_code, response.text)\n"
] |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
""" Get arm state """
response = None
try:
response = requests.get(
urls.get_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
"""
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
""" Test ethernet status """
response = None
try:
response = requests.post(
urls.test_ethernet(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
""" Set heatpump mode
Args:
power (str): 'ON', 'OFF'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'power': power}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
""" Set heatpump mode
Args:
fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM, 'MEDIUM_HIGH' or 'HIGH'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'fanSpeed': fan_speed}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
""" Set heatpump mode
Args:
target_temperature (int): required temperature of the heatpump
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'targetTemperature': target_temp}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
""" Set heatpump mode
Args:
airSwingDirection (str): 'AUTO' '0_DEGREES' '30_DEGREES'
'60_DEGREES' '90_DEGREES'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'airSwingDirection':
{"vertical": airswingdirection}}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/session.py
|
Session.logout
|
python
|
def logout(self):
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
|
Logout and remove vid
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L503-L513
|
[
"def login():\n return '{base_url}/xbn/2/cookie'.format(\n base_url=BASE_URL)\n",
"def _validate_response(response):\n \"\"\" Verify that response is OK \"\"\"\n if response.status_code == 200:\n return\n raise ResponseError(response.status_code, response.text)\n"
] |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
""" Get arm state """
response = None
try:
response = requests.get(
urls.get_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
"""
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
""" Test ethernet status """
response = None
try:
response = requests.post(
urls.test_ethernet(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
""" Set heatpump mode
Args:
power (str): 'ON', 'OFF'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'power': power}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
""" Set heatpump mode
Args:
fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM, 'MEDIUM_HIGH' or 'HIGH'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'fanSpeed': fan_speed}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
""" Set heatpump mode
Args:
target_temperature (int): required temperature of the heatpump
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'targetTemperature': target_temp}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
""" Set heatpump mode
Args:
airSwingDirection (str): 'AUTO' '0_DEGREES' '30_DEGREES'
'60_DEGREES' '90_DEGREES'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'airSwingDirection':
{"vertical": airswingdirection}}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/session.py
|
Session.set_heat_pump_mode
|
python
|
def set_heat_pump_mode(self, device_label, mode):
    """ Set the operating mode of a heatpump
    Args:
        device_label (str): device label of the heatpump
        mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
    """
    response = None
    request_headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json',
        'Cookie': 'vid={}'.format(self._vid)}
    try:
        response = requests.put(
            urls.set_heatpump_state(self._giid, device_label),
            headers=request_headers,
            data=json.dumps({'mode': mode}))
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
|
Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L529-L546
|
[
"def _validate_response(response):\n \"\"\" Verify that response is OK \"\"\"\n if response.status_code == 200:\n return\n raise ResponseError(response.status_code, response.text)\n",
"def set_heatpump_state(guid, device_label):\n return (installation(guid)\n + 'device/{device_label}/heatpump/config'\n ).format(\n device_label=device_label)\n"
] |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
""" Get arm state """
response = None
try:
response = requests.get(
urls.get_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
"""
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
""" Test ethernet status """
response = None
try:
response = requests.post(
urls.test_ethernet(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def logout(self):
""" Logout and remove vid """
response = None
try:
response = requests.delete(
urls.login(),
headers={
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
""" Set heatpump mode
Args:
power (str): 'ON', 'OFF'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'power': power}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
""" Set heatpump mode
Args:
fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM, 'MEDIUM_HIGH' or 'HIGH'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'fanSpeed': fan_speed}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
""" Set heatpump mode
Args:
target_temperature (int): required temperature of the heatpump
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'targetTemperature': target_temp}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_feature(self, device_label, feature):
""" Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_feature(self._giid, device_label, feature),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
""" Set heatpump mode
Args:
airSwingDirection (str): 'AUTO' '0_DEGREES' '30_DEGREES'
'60_DEGREES' '90_DEGREES'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'airSwingDirection':
{"vertical": airswingdirection}}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/session.py
|
Session.set_heat_pump_feature
|
python
|
def set_heat_pump_feature(self, device_label, feature):
    """ Activate a feature on a heatpump
    Args:
        device_label (str): device label of the heatpump
        feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
    """
    # Feature travels in the URL path; request body is empty.
    session_headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json',
        'Cookie': 'vid={}'.format(self._vid)}
    response = None
    try:
        response = requests.put(
            urls.set_heatpump_feature(self._giid, device_label, feature),
            headers=session_headers)
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
|
Set heatpump mode
Args:
feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L605-L621
|
[
"def _validate_response(response):\n \"\"\" Verify that response is OK \"\"\"\n if response.status_code == 200:\n return\n raise ResponseError(response.status_code, response.text)\n",
"def set_heatpump_feature(guid, device_label, featurestate):\n return (installation(guid)\n + 'device/{device_label}/heatpump/config/feature/{feature}'\n ).format(\n device_label=device_label, feature=featurestate)\n"
] |
class Session(object):
""" Verisure app session
Args:
username (str): Username used to login to verisure app
password (str): Password used to login to verisure app
"""
def __init__(self, username, password,
cookieFileName='~/.verisure-cookie'):
self._username = username
self._password = password
self._cookieFileName = os.path.expanduser(cookieFileName)
self._vid = None
self._giid = None
self.installations = None
def __enter__(self):
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
""" If of interest, add exception handler
"""
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
"""
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid']
def _create_cookie(self):
auth = 'Basic {}'.format(
base64.b64encode(
'CPE/{username}:{password}'.format(
username=self._username,
password=self._password).encode('utf-8')
).decode('utf-8'))
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.post(
urls.login(),
headers={
'Authorization': auth,
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise LoginError(ex)
_validate_response(response)
self._vid = json.loads(response.text)['cookie']
def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text)
def set_giid(self, giid):
""" Set installation giid
Args:
giid (str): Installation identifier
"""
self._giid = giid
def get_overview(self):
""" Get overview for installation """
response = None
try:
response = requests.get(
urls.overview(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
"""
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def set_arm_state(self, code, state):
""" Set alarm state
Args:
code (str): Personal alarm code (four or six digits)
state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'
"""
response = None
try:
response = requests.put(
urls.set_armstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code), "state": state}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state_transaction(self, transaction_id):
""" Get arm state transaction status
Args:
transaction_id: Transaction ID received from set_arm_state
"""
response = None
try:
response = requests.get(
urls.get_armstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_arm_state(self):
    """ Read the current alarm arm state for the installation. """
    try:
        reply = requests.get(
            urls.get_armstate(self._giid),
            headers={
                'Accept': 'application/json, text/javascript, */*; q=0.01',
                'Cookie': 'vid={}'.format(self._vid)})
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(reply)
    return json.loads(reply.text)
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip pagesize * offset first events
"""
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_climate(self, device_label):
""" Get climate history
Args:
device_label: device label of climate device
"""
response = None
try:
response = requests.get(
urls.climate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"deviceLabel": device_label})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state(self):
""" Get current lock status """
response = None
try:
response = requests.get(
urls.get_lockstate(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
"""
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_state_transaction(self, transaction_id):
""" Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
"""
response = None
try:
response = requests.get(
urls.get_lockstate_transaction(self._giid, transaction_id),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_lock_config(self, device_label, volume=None, voice_level=None,
                    auto_lock_enabled=None):
    """ Set lock configuration

    Only the settings that are explicitly supplied are sent to the API;
    omitted ones keep their current value.

    Args:
        device_label (str): device label of lock
        volume (str): 'SILENCE', 'LOW' or 'HIGH'
        voice_level (str): 'ESSENTIAL' or 'NORMAL'
        auto_lock_enabled (boolean): auto lock enabled
    """
    settings = {}
    if volume:
        settings['volume'] = volume
    if voice_level:
        settings['voiceLevel'] = voice_level
    # Explicit None check so False can disable auto lock.
    if auto_lock_enabled is not None:
        settings['autoLockEnabled'] = auto_lock_enabled
    try:
        reply = requests.put(
            urls.lockconfig(self._giid, device_label),
            headers={
                'Content-Type': 'application/json',
                'Cookie': 'vid={}'.format(self._vid)},
            data=json.dumps(settings))
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(reply)
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
"""
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip offset amount of image series
"""
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
"""
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
def get_vacation_mode(self):
""" Get current vacation mode """
response = None
try:
response = requests.get(
urls.get_vacationmode(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def get_door_window(self):
""" Get door_window states"""
response = None
try:
response = requests.get(
urls.door_window(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def test_ethernet(self):
    """ Ask the installation to refresh its ethernet status. """
    try:
        reply = requests.post(
            urls.test_ethernet(self._giid),
            headers={
                'Content-Type': 'application/json',
                'Cookie': 'vid={}'.format(self._vid)})
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(reply)
def logout(self):
    """ Invalidate the session cookie (vid) on the server side. """
    try:
        reply = requests.delete(
            urls.login(),
            headers={
                'Cookie': 'vid={}'.format(self._vid)})
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(reply)
def get_heat_pump_state(self, device_label):
""" Get heatpump states"""
response = None
try:
response = requests.get(
urls.get_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_mode(self, device_label, mode):
""" Set heatpump mode
Args:
mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
"""
response = None
try:
response = requests.put(
urls.set_heatpump_state(self._giid, device_label),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({'mode': mode}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
def set_heat_pump_power(self, device_label, power):
    """ Turn heatpump on or off

    Args:
        device_label (str): device label of the heat pump
        power (str): 'ON' or 'OFF'
    """
    response = None
    try:
        response = requests.put(
            urls.set_heatpump_state(self._giid, device_label),
            headers={
                'Accept': 'application/json',
                'Content-Type': 'application/json',
                'Cookie': 'vid={}'.format(self._vid)},
            data=json.dumps({'power': power}))
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
def set_heat_pump_fan_speed(self, device_label, fan_speed):
    """ Set heatpump fan speed

    Args:
        device_label (str): device label of the heat pump
        fan_speed (str): 'LOW', 'MEDIUM_LOW', 'MEDIUM', 'MEDIUM_HIGH'
            or 'HIGH'
    """
    response = None
    try:
        response = requests.put(
            urls.set_heatpump_state(self._giid, device_label),
            headers={
                'Accept': 'application/json',
                'Content-Type': 'application/json',
                'Cookie': 'vid={}'.format(self._vid)},
            data=json.dumps({'fanSpeed': fan_speed}))
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
def set_heat_pump_target_temperature(self, device_label, target_temp):
    """ Set heatpump target temperature

    Args:
        device_label (str): device label of the heat pump
        target_temp (int): required temperature of the heatpump
    """
    response = None
    try:
        response = requests.put(
            urls.set_heatpump_state(self._giid, device_label),
            headers={
                'Accept': 'application/json',
                'Content-Type': 'application/json',
                'Cookie': 'vid={}'.format(self._vid)},
            data=json.dumps({'targetTemperature': target_temp}))
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
def set_heat_pump_airswingdirection(self, device_label, airswingdirection):
    """ Set heatpump vertical air swing direction

    Args:
        device_label (str): device label of the heat pump
        airswingdirection (str): 'AUTO', '0_DEGREES', '30_DEGREES',
            '60_DEGREES' or '90_DEGREES'
    """
    response = None
    try:
        response = requests.put(
            urls.set_heatpump_state(self._giid, device_label),
            headers={
                'Accept': 'application/json',
                'Content-Type': 'application/json',
                'Cookie': 'vid={}'.format(self._vid)},
            # API expects the direction nested under the "vertical" axis.
            data=json.dumps({'airSwingDirection':
                             {"vertical": airswingdirection}}))
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
|
persandstrom/python-verisure
|
verisure/__main__.py
|
print_result
|
python
|
def print_result(overview, *names):
    """ Print the result of a verisure request as pretty JSON.

    Args:
        overview: decoded JSON structure returned by the API
        *names: optional '/'-separated key paths; when given, only the
            sub-structures at those paths are printed
    """
    if names:
        for name in names:
            toprint = overview
            # Walk the nested dict one path segment at a time.
            for part in name.split('/'):
                toprint = toprint[part]
            print(json.dumps(toprint, indent=4, separators=(',', ': ')))
    else:
        print(json.dumps(overview, indent=4, separators=(',', ': ')))
|
Print the result of a verisure request
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/__main__.py#L22-L31
| null |
""" Command line interface for Verisure MyPages """
from __future__ import print_function
import argparse
import json
import verisure
COMMAND_OVERVIEW = 'overview'
COMMAND_SET = 'set'
COMMAND_CLIMATE = 'climate'
COMMAND_EVENTLOG = 'eventlog'
COMMAND_INSTALLATIONS = 'installations'
COMMAND_CAPTURE = 'capture'
COMMAND_IMAGESERIES = 'imageseries'
COMMAND_GETIMAGE = 'getimage'
COMMAND_ARMSTATE = 'armstate'
COMMAND_DOOR_WINDOW = 'door_window'
COMMAND_VACATIONMODE = 'vacationmode'
COMMAND_TEST_ETHERNET = 'test_ethernet'
# pylint: disable=too-many-locals,too-many-statements
def main():
""" Start verisure command line """
parser = argparse.ArgumentParser(
description='Read or change status of verisure devices')
parser.add_argument(
'username',
help='MyPages username')
parser.add_argument(
'password',
help='MyPages password')
parser.add_argument(
'-i', '--installation',
help='Installation number',
type=int,
default=1)
parser.add_argument(
'-c', '--cookie',
help='File to store cookie in',
default='~/.verisure-cookie')
commandsparser = parser.add_subparsers(
help='commands',
dest='command')
# installations command
commandsparser.add_parser(
COMMAND_INSTALLATIONS,
help='Get information about installations')
# overview command
overview_parser = commandsparser.add_parser(
COMMAND_OVERVIEW,
help='Read status of one or many device types')
overview_parser.add_argument(
'filter',
nargs='*',
help='Read status for device type')
# armstate command
commandsparser.add_parser(
COMMAND_ARMSTATE,
help='Get arm state')
# Set command
set_parser = commandsparser.add_parser(
COMMAND_SET,
help='Set status of a device')
set_device = set_parser.add_subparsers(
help='device',
dest='device')
# Set smartplug
set_smartplug = set_device.add_parser(
'smartplug',
help='set smartplug value')
set_smartplug.add_argument(
'device_label',
help='device label')
set_smartplug.add_argument(
'new_value',
choices=[
'on',
'off'],
help='new value')
# Set alarm
set_alarm = set_device.add_parser(
'alarm',
help='set alarm status')
set_alarm.add_argument(
'code',
help='alarm code')
set_alarm.add_argument(
'new_status',
choices=[
'ARMED_HOME',
'ARMED_AWAY',
'DISARMED'],
help='new status')
# Set lock
set_lock = set_device.add_parser(
'lock',
help='set lock status')
set_lock.add_argument(
'code',
help='alarm code')
set_lock.add_argument(
'serial_number',
help='serial number')
set_lock.add_argument(
'new_status',
choices=[
'lock',
'unlock'],
help='new status')
# Get climate history
history_climate = commandsparser.add_parser(
COMMAND_CLIMATE,
help='get climate history')
history_climate.add_argument(
'device_label',
help='device label')
# Event log command
eventlog_parser = commandsparser.add_parser(
COMMAND_EVENTLOG,
help='Get event log')
eventlog_parser.add_argument(
'-p', '--pagesize',
type=int,
default=15,
help='Number of elements on one page')
eventlog_parser.add_argument(
'-o', '--offset',
type=int,
default=0,
help='Page offset')
eventlog_parser.add_argument(
'-f', '--filter',
nargs='*',
default=[],
choices=[
'ARM',
'DISARM',
'FIRE',
'INTRUSION',
'TECHNICAL',
'SOS',
'WARNING',
'LOCK',
'UNLOCK'],
help='Filter event log')
# Capture command
capture_parser = commandsparser.add_parser(
COMMAND_CAPTURE,
help='Capture image')
capture_parser.add_argument(
'device_label',
help='Device label')
# Image series command
commandsparser.add_parser(
COMMAND_IMAGESERIES,
help='Get image series')
# Get image command
getimage_parser = commandsparser.add_parser(
COMMAND_GETIMAGE,
help='Download image')
getimage_parser.add_argument(
'device_label',
help='Device label')
getimage_parser.add_argument(
'image_id',
help='image ID')
getimage_parser.add_argument(
'file_name',
help='Output file name')
# Vacation mode command
commandsparser.add_parser(
COMMAND_VACATIONMODE,
help='Get vacation mode info')
# Door window status command
commandsparser.add_parser(
COMMAND_DOOR_WINDOW,
help='Get door/window status')
# Test ethernet command
commandsparser.add_parser(
COMMAND_TEST_ETHERNET,
help='Update ethernet status')
args = parser.parse_args()
session = verisure.Session(args.username, args.password, args.cookie)
session.login()
try:
session.set_giid(session.installations[args.installation - 1]['giid'])
if args.command == COMMAND_INSTALLATIONS:
print_result(session.installations)
if args.command == COMMAND_OVERVIEW:
print_result(session.get_overview(), *args.filter)
if args.command == COMMAND_ARMSTATE:
print_result(session.get_arm_state())
if args.command == COMMAND_SET:
if args.device == 'smartplug':
session.set_smartplug_state(
args.device_label,
args.new_value == 'on')
if args.device == 'alarm':
print_result(session.set_arm_state(
args.code,
args.new_status))
if args.device == 'lock':
print_result(session.set_lock_state(
args.code,
args.serial_number,
args.new_status))
if args.command == COMMAND_CLIMATE:
print_result(session.get_climate(args.device_label))
if args.command == COMMAND_EVENTLOG:
print_result(
session.get_history(
args.filter,
pagesize=args.pagesize,
offset=args.offset))
if args.command == COMMAND_CAPTURE:
session.capture_image(args.device_label)
if args.command == COMMAND_IMAGESERIES:
print_result(session.get_camera_imageseries())
if args.command == COMMAND_GETIMAGE:
session.download_image(
args.device_label,
args.image_id,
args.file_name)
if args.command == COMMAND_VACATIONMODE:
print_result(session.get_vacation_mode())
if args.command == COMMAND_DOOR_WINDOW:
print_result(session.get_door_window())
if args.command == COMMAND_TEST_ETHERNET:
session.test_ethernet()
except verisure.session.ResponseError as ex:
print(ex.text)
# pylint: disable=C0103
if __name__ == "__main__":
main()
|
persandstrom/python-verisure
|
verisure/__main__.py
|
main
|
python
|
def main():
parser = argparse.ArgumentParser(
description='Read or change status of verisure devices')
parser.add_argument(
'username',
help='MyPages username')
parser.add_argument(
'password',
help='MyPages password')
parser.add_argument(
'-i', '--installation',
help='Installation number',
type=int,
default=1)
parser.add_argument(
'-c', '--cookie',
help='File to store cookie in',
default='~/.verisure-cookie')
commandsparser = parser.add_subparsers(
help='commands',
dest='command')
# installations command
commandsparser.add_parser(
COMMAND_INSTALLATIONS,
help='Get information about installations')
# overview command
overview_parser = commandsparser.add_parser(
COMMAND_OVERVIEW,
help='Read status of one or many device types')
overview_parser.add_argument(
'filter',
nargs='*',
help='Read status for device type')
# armstate command
commandsparser.add_parser(
COMMAND_ARMSTATE,
help='Get arm state')
# Set command
set_parser = commandsparser.add_parser(
COMMAND_SET,
help='Set status of a device')
set_device = set_parser.add_subparsers(
help='device',
dest='device')
# Set smartplug
set_smartplug = set_device.add_parser(
'smartplug',
help='set smartplug value')
set_smartplug.add_argument(
'device_label',
help='device label')
set_smartplug.add_argument(
'new_value',
choices=[
'on',
'off'],
help='new value')
# Set alarm
set_alarm = set_device.add_parser(
'alarm',
help='set alarm status')
set_alarm.add_argument(
'code',
help='alarm code')
set_alarm.add_argument(
'new_status',
choices=[
'ARMED_HOME',
'ARMED_AWAY',
'DISARMED'],
help='new status')
# Set lock
set_lock = set_device.add_parser(
'lock',
help='set lock status')
set_lock.add_argument(
'code',
help='alarm code')
set_lock.add_argument(
'serial_number',
help='serial number')
set_lock.add_argument(
'new_status',
choices=[
'lock',
'unlock'],
help='new status')
# Get climate history
history_climate = commandsparser.add_parser(
COMMAND_CLIMATE,
help='get climate history')
history_climate.add_argument(
'device_label',
help='device label')
# Event log command
eventlog_parser = commandsparser.add_parser(
COMMAND_EVENTLOG,
help='Get event log')
eventlog_parser.add_argument(
'-p', '--pagesize',
type=int,
default=15,
help='Number of elements on one page')
eventlog_parser.add_argument(
'-o', '--offset',
type=int,
default=0,
help='Page offset')
eventlog_parser.add_argument(
'-f', '--filter',
nargs='*',
default=[],
choices=[
'ARM',
'DISARM',
'FIRE',
'INTRUSION',
'TECHNICAL',
'SOS',
'WARNING',
'LOCK',
'UNLOCK'],
help='Filter event log')
# Capture command
capture_parser = commandsparser.add_parser(
COMMAND_CAPTURE,
help='Capture image')
capture_parser.add_argument(
'device_label',
help='Device label')
# Image series command
commandsparser.add_parser(
COMMAND_IMAGESERIES,
help='Get image series')
# Get image command
getimage_parser = commandsparser.add_parser(
COMMAND_GETIMAGE,
help='Download image')
getimage_parser.add_argument(
'device_label',
help='Device label')
getimage_parser.add_argument(
'image_id',
help='image ID')
getimage_parser.add_argument(
'file_name',
help='Output file name')
# Vacation mode command
commandsparser.add_parser(
COMMAND_VACATIONMODE,
help='Get vacation mode info')
# Door window status command
commandsparser.add_parser(
COMMAND_DOOR_WINDOW,
help='Get door/window status')
# Test ethernet command
commandsparser.add_parser(
COMMAND_TEST_ETHERNET,
help='Update ethernet status')
args = parser.parse_args()
session = verisure.Session(args.username, args.password, args.cookie)
session.login()
try:
session.set_giid(session.installations[args.installation - 1]['giid'])
if args.command == COMMAND_INSTALLATIONS:
print_result(session.installations)
if args.command == COMMAND_OVERVIEW:
print_result(session.get_overview(), *args.filter)
if args.command == COMMAND_ARMSTATE:
print_result(session.get_arm_state())
if args.command == COMMAND_SET:
if args.device == 'smartplug':
session.set_smartplug_state(
args.device_label,
args.new_value == 'on')
if args.device == 'alarm':
print_result(session.set_arm_state(
args.code,
args.new_status))
if args.device == 'lock':
print_result(session.set_lock_state(
args.code,
args.serial_number,
args.new_status))
if args.command == COMMAND_CLIMATE:
print_result(session.get_climate(args.device_label))
if args.command == COMMAND_EVENTLOG:
print_result(
session.get_history(
args.filter,
pagesize=args.pagesize,
offset=args.offset))
if args.command == COMMAND_CAPTURE:
session.capture_image(args.device_label)
if args.command == COMMAND_IMAGESERIES:
print_result(session.get_camera_imageseries())
if args.command == COMMAND_GETIMAGE:
session.download_image(
args.device_label,
args.image_id,
args.file_name)
if args.command == COMMAND_VACATIONMODE:
print_result(session.get_vacation_mode())
if args.command == COMMAND_DOOR_WINDOW:
print_result(session.get_door_window())
if args.command == COMMAND_TEST_ETHERNET:
session.test_ethernet()
except verisure.session.ResponseError as ex:
print(ex.text)
|
Start verisure command line
|
train
|
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/__main__.py#L35-L261
|
[
"def print_result(overview, *names):\n \"\"\" Print the result of a verisure request \"\"\"\n if names:\n for name in names:\n toprint = overview\n for part in name.split('/'):\n toprint = toprint[part]\n print(json.dumps(toprint, indent=4, separators=(',', ': ')))\n else:\n print(json.dumps(overview, indent=4, separators=(',', ': ')))\n",
"def login(self):\n \"\"\" Login to verisure app api\n\n Login before calling any read or write commands\n\n \"\"\"\n if os.path.exists(self._cookieFileName):\n with open(self._cookieFileName, 'r') as cookieFile:\n self._vid = cookieFile.read().strip()\n\n try:\n self._get_installations()\n except ResponseError:\n self._vid = None\n os.remove(self._cookieFileName)\n\n if self._vid is None:\n self._create_cookie()\n with open(self._cookieFileName, 'w') as cookieFile:\n cookieFile.write(self._vid)\n self._get_installations()\n\n self._giid = self.installations[0]['giid']\n",
"def set_giid(self, giid):\n \"\"\" Set installation giid\n\n Args:\n giid (str): Installation identifier\n \"\"\"\n self._giid = giid\n",
"def get_overview(self):\n \"\"\" Get overview for installation \"\"\"\n response = None\n try:\n response = requests.get(\n urls.overview(self._giid),\n headers={\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Accept-Encoding': 'gzip, deflate',\n 'Content-Type': 'application/json',\n 'Cookie': 'vid={}'.format(self._vid)})\n except requests.exceptions.RequestException as ex:\n raise RequestError(ex)\n _validate_response(response)\n return json.loads(response.text)\n",
"def set_smartplug_state(self, device_label, state):\n \"\"\" Turn on or off smartplug\n\n Args:\n device_label (str): Smartplug device label\n state (boolean): new status, 'True' or 'False'\n \"\"\"\n response = None\n try:\n response = requests.post(\n urls.smartplug(self._giid),\n headers={\n 'Content-Type': 'application/json',\n 'Cookie': 'vid={}'.format(self._vid)},\n data=json.dumps([{\n \"deviceLabel\": device_label,\n \"state\": state}]))\n except requests.exceptions.RequestException as ex:\n raise RequestError(ex)\n _validate_response(response)\n",
"def set_arm_state(self, code, state):\n \"\"\" Set alarm state\n\n Args:\n code (str): Personal alarm code (four or six digits)\n state (str): 'ARMED_HOME', 'ARMED_AWAY' or 'DISARMED'\n \"\"\"\n response = None\n try:\n response = requests.put(\n urls.set_armstate(self._giid),\n headers={\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Content-Type': 'application/json',\n 'Cookie': 'vid={}'.format(self._vid)},\n data=json.dumps({\"code\": str(code), \"state\": state}))\n except requests.exceptions.RequestException as ex:\n raise RequestError(ex)\n _validate_response(response)\n return json.loads(response.text)\n",
"def get_arm_state(self):\n \"\"\" Get arm state \"\"\"\n response = None\n try:\n response = requests.get(\n urls.get_armstate(self._giid),\n headers={\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Cookie': 'vid={}'.format(self._vid)})\n except requests.exceptions.RequestException as ex:\n raise RequestError(ex)\n _validate_response(response)\n return json.loads(response.text)\n",
"def get_history(self, filters=(), pagesize=15, offset=0):\n \"\"\" Get recent events\n\n Args:\n filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',\n 'TECHNICAL', 'SOS', 'WARNING', 'LOCK',\n 'UNLOCK'\n pagesize (int): Number of events to display\n offset (int): Skip pagesize * offset first events\n \"\"\"\n response = None\n try:\n response = requests.get(\n urls.history(self._giid),\n headers={\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Cookie': 'vid={}'.format(self._vid)},\n params={\n \"offset\": int(offset),\n \"pagesize\": int(pagesize),\n \"notificationCategories\": filters})\n except requests.exceptions.RequestException as ex:\n raise RequestError(ex)\n _validate_response(response)\n return json.loads(response.text)\n",
"def get_climate(self, device_label):\n \"\"\" Get climate history\n Args:\n device_label: device label of climate device\n \"\"\"\n response = None\n try:\n response = requests.get(\n urls.climate(self._giid),\n headers={\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Cookie': 'vid={}'.format(self._vid)},\n params={\n \"deviceLabel\": device_label})\n except requests.exceptions.RequestException as ex:\n raise RequestError(ex)\n _validate_response(response)\n return json.loads(response.text)\n",
"def set_lock_state(self, code, device_label, state):\n \"\"\" Lock or unlock\n\n Args:\n code (str): Lock code\n device_label (str): device label of lock\n state (str): 'lock' or 'unlock'\n \"\"\"\n response = None\n try:\n response = requests.put(\n urls.set_lockstate(self._giid, device_label, state),\n headers={\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Content-Type': 'application/json',\n 'Cookie': 'vid={}'.format(self._vid)},\n data=json.dumps({\"code\": str(code)}))\n except requests.exceptions.RequestException as ex:\n raise RequestError(ex)\n _validate_response(response)\n return json.loads(response.text)\n",
"def capture_image(self, device_label):\n \"\"\" Capture smartcam image\n\n Args:\n device_label (str): device label of camera\n \"\"\"\n response = None\n try:\n response = requests.post(\n urls.imagecapture(self._giid, device_label),\n headers={\n 'Content-Type': 'application/json',\n 'Cookie': 'vid={}'.format(self._vid)})\n except requests.exceptions.RequestException as ex:\n raise RequestError(ex)\n _validate_response(response)\n",
"def get_camera_imageseries(self, number_of_imageseries=10, offset=0):\n \"\"\" Get smartcam image series\n\n Args:\n number_of_imageseries (int): number of image series to get\n offset (int): skip offset amount of image series\n \"\"\"\n response = None\n try:\n response = requests.get(\n urls.get_imageseries(self._giid),\n headers={\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Cookie': 'vid={}'.format(self._vid)},\n params={\n \"numberOfImageSeries\": int(number_of_imageseries),\n \"offset\": int(offset),\n \"fromDate\": \"\",\n \"toDate\": \"\",\n \"onlyNotViewed\": \"\",\n \"_\": self._giid})\n except requests.exceptions.RequestException as ex:\n raise RequestError(ex)\n _validate_response(response)\n return json.loads(response.text)\n",
"def download_image(self, device_label, image_id, file_name):\n \"\"\" Download image taken by a smartcam\n\n Args:\n device_label (str): device label of camera\n image_id (str): image id from image series\n file_name (str): path to file\n \"\"\"\n response = None\n try:\n response = requests.get(\n urls.download_image(self._giid, device_label, image_id),\n headers={\n 'Cookie': 'vid={}'.format(self._vid)},\n stream=True)\n except requests.exceptions.RequestException as ex:\n raise RequestError(ex)\n _validate_response(response)\n with open(file_name, 'wb') as image_file:\n for chunk in response.iter_content(chunk_size=1024):\n if chunk:\n image_file.write(chunk)\n",
"def get_vacation_mode(self):\n \"\"\" Get current vacation mode \"\"\"\n response = None\n try:\n response = requests.get(\n urls.get_vacationmode(self._giid),\n headers={\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Cookie': 'vid={}'.format(self._vid)})\n except requests.exceptions.RequestException as ex:\n raise RequestError(ex)\n _validate_response(response)\n return json.loads(response.text)\n",
"def get_door_window(self):\n \"\"\" Get door_window states\"\"\"\n response = None\n try:\n response = requests.get(\n urls.door_window(self._giid),\n headers={\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Cookie': 'vid={}'.format(self._vid)})\n except requests.exceptions.RequestException as ex:\n raise RequestError(ex)\n _validate_response(response)\n return json.loads(response.text)\n",
"def test_ethernet(self):\n \"\"\" Test ethernet status \"\"\"\n response = None\n try:\n response = requests.post(\n urls.test_ethernet(self._giid),\n headers={\n 'Content-Type': 'application/json',\n 'Cookie': 'vid={}'.format(self._vid)})\n except requests.exceptions.RequestException as ex:\n raise RequestError(ex)\n _validate_response(response)\n"
] |
""" Command line interface for Verisure MyPages """
from __future__ import print_function
import argparse
import json
import verisure
COMMAND_OVERVIEW = 'overview'
COMMAND_SET = 'set'
COMMAND_CLIMATE = 'climate'
COMMAND_EVENTLOG = 'eventlog'
COMMAND_INSTALLATIONS = 'installations'
COMMAND_CAPTURE = 'capture'
COMMAND_IMAGESERIES = 'imageseries'
COMMAND_GETIMAGE = 'getimage'
COMMAND_ARMSTATE = 'armstate'
COMMAND_DOOR_WINDOW = 'door_window'
COMMAND_VACATIONMODE = 'vacationmode'
COMMAND_TEST_ETHERNET = 'test_ethernet'
def print_result(overview, *names):
    """Pretty-print a verisure response as indented JSON.

    Each entry in *names* is a '/'-separated path drilled into the
    overview mapping; with no names, the whole overview is printed.
    """
    def _dump(obj):
        # Single place that owns the output formatting.
        print(json.dumps(obj, indent=4, separators=(',', ': ')))
    if not names:
        _dump(overview)
        return
    for name in names:
        node = overview
        for key in name.split('/'):
            node = node[key]
        _dump(node)
# pylint: disable=too-many-locals,too-many-statements
# pylint: disable=C0103
if __name__ == "__main__":
main()
|
common-workflow-language/workflow-service
|
wes_client/util.py
|
get_version
|
python
|
def get_version(extension, workflow_file):
    """Determine the language version of a .py, .wdl, or .cwl workflow file.

    :param str extension: Lower-case file extension ('py', 'wdl', or 'cwl').
    :param str workflow_file: Local path to the workflow file.
    :return: Version string, e.g. '2.7', 'v1.0', or 'draft-2'.
    """
    if extension == 'py' and two_seven_compatible(workflow_file):
        return '2.7'
    elif extension == 'cwl':
        # safe_load avoids arbitrary object construction from untrusted
        # YAML; the context manager guarantees the handle is closed.
        with open(workflow_file) as f:
            return yaml.safe_load(f)['cwlVersion']
    else:  # Must be a wdl file.
        # Borrowed from https://github.com/Sage-Bionetworks/synapse-orchestrator/blob/develop/synorchestrator/util.py#L142
        # BUG FIX: scan the file *contents* for a 'version' declaration —
        # the original split the path string, so the line was never found.
        with open(workflow_file) as f:
            lines = f.read().splitlines()
        try:
            # NOTE: lstrip('version') strips *characters*, not the prefix;
            # kept for compatibility with the original extraction format.
            return [l.lstrip('version') for l in lines if 'version' in l.split(' ')][0]
        except IndexError:
            return 'draft-2'
|
Determines the version of a .py, .wdl, or .cwl file.
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L27-L38
|
[
"def two_seven_compatible(filePath):\n \"\"\"Determines if a python file is 2.7 compatible by seeing if it compiles in a subprocess\"\"\"\n try:\n check_call(['python2', '-m', 'py_compile', filePath], stderr=DEVNULL)\n except CalledProcessError:\n raise RuntimeError('Python files must be 2.7 compatible')\n return True\n"
] |
import os
import json
import schema_salad.ref_resolver
from subprocess32 import check_call, DEVNULL, CalledProcessError
import yaml
import glob
import requests
import logging
from wes_service.util import visit
from future.standard_library import hooks
with hooks():
from urllib.request import urlopen, pathname2url
def two_seven_compatible(filePath):
    """Return True if *filePath* compiles under Python 2.7.

    A ``python2 -m py_compile`` subprocess performs the actual check;
    RuntimeError is raised when the file fails to compile.
    """
    cmd = ['python2', '-m', 'py_compile', filePath]
    try:
        check_call(cmd, stderr=DEVNULL)
    except CalledProcessError:
        raise RuntimeError('Python files must be 2.7 compatible')
    return True
def wf_info(workflow_path):
    """Return (version, extension) for a .py, .wdl, or .cwl workflow.

    Assumes the path points directly at the file, i.e. ends with a valid
    file extension. Supports local paths (with or without a file://
    prefix) as well as http:// and https:// locations; remote files are
    recreated locally so the same version check can run, then removed.

    :param str workflow_path: Local path or URL of the workflow file.
    :return: Tuple of (version string, upper-cased file extension).
    :raises NotImplementedError: for URL schemes other than file/http(s).
    :raises TypeError: for unsupported file extensions.
    """
    supported_formats = ['py', 'wdl', 'cwl']
    file_type = workflow_path.lower().split('.')[-1]  # Grab the file extension
    # Normalise bare local paths to file:// URIs; anything already
    # carrying a scheme (':' present) passes through untouched.
    workflow_path = workflow_path if ':' in workflow_path else 'file://' + workflow_path
    if file_type in supported_formats:
        if workflow_path.startswith('file://'):
            version = get_version(file_type, workflow_path[7:])
        elif workflow_path.startswith('https://') or workflow_path.startswith('http://'):
            # If file not local go fetch it and re-enter this function on
            # the local copy.
            html = urlopen(workflow_path).read()
            local_loc = os.path.join(os.getcwd(), 'fetchedFromRemote.' + file_type)
            with open(local_loc, 'w') as f:
                f.write(html.decode())
            version = wf_info('file://' + local_loc)[0]  # Don't take the file_type here, found it above.
            os.remove(local_loc)  # TODO: Find a way to avoid recreating file before version determination.
        else:
            raise NotImplementedError('Unsupported workflow file location: {}. Must be local or HTTP(S).'.format(workflow_path))
    else:
        raise TypeError('Unsupported workflow type: .{}. Must be {}.'.format(file_type, '.py, .cwl, or .wdl'))
    return version, file_type.upper()
def modify_jsonyaml_paths(jsonyaml_file):
    """
    Changes relative paths in a json/yaml file to be relative
    to where the json/yaml file is located.

    :param jsonyaml_file: Path to a json/yaml file.
    :return: JSON string of the resolved document with every "path"
        entry rewritten into a "location" URI.
    """
    # Declare "location" and "path" values as IRIs so the schema-salad
    # resolver treats them as identifiers while resolving the document.
    loader = schema_salad.ref_resolver.Loader({
        "location": {"@type": "@id"},
        "path": {"@type": "@id"}
    })
    input_dict, _ = loader.resolve_ref(jsonyaml_file, checklinks=False)
    basedir = os.path.dirname(jsonyaml_file)
    def fixpaths(d):
        """Make sure all paths have a URI scheme."""
        if isinstance(d, dict):
            if "path" in d:
                if ":" not in d["path"]:
                    # Bare relative path: resolve against the document's
                    # directory, then convert to a file URL.
                    local_path = os.path.normpath(os.path.join(os.getcwd(), basedir, d["path"]))
                    d["location"] = pathname2url(local_path)
                else:
                    d["location"] = d["path"]
                del d["path"]
    # visit() applies fixpaths to every nested dict/list element.
    visit(input_dict, fixpaths)
    return json.dumps(input_dict)
def build_wes_request(workflow_file, json_path, attachments=None):
    """
    Build the multipart body for a WES run request.

    :param str workflow_file: Path to cwl/wdl file. Can be http/https/file.
    :param json_path: Path to accompanying json file (file:// path, http(s)
        URL, or the parameter document itself as a string).
    :param attachments: Any other files needing to be uploaded to the server.

    :return: A list of tuples formatted to be sent in a post to the wes-server (Swagger API).
    """
    workflow_file = "file://" + workflow_file if ":" not in workflow_file else workflow_file
    wfbase = None  # directory that attachment paths are made relative to
    if json_path.startswith("file://"):
        wfbase = os.path.dirname(json_path[7:])
        json_path = json_path[7:]
        with open(json_path) as f:
            wf_params = json.dumps(json.load(f))
    elif json_path.startswith("http"):
        wf_params = modify_jsonyaml_paths(json_path)
    else:
        # Assume the caller passed the parameter document itself.
        wf_params = json_path
    wf_version, wf_type = wf_info(workflow_file)
    parts = [("workflow_params", wf_params),
             ("workflow_type", wf_type),
             ("workflow_type_version", wf_version)]
    if workflow_file.startswith("file://"):
        if wfbase is None:
            wfbase = os.path.dirname(workflow_file[7:])
        parts.append(("workflow_attachment", (os.path.basename(workflow_file[7:]), open(workflow_file[7:], "rb"))))
        parts.append(("workflow_url", os.path.basename(workflow_file[7:])))
    else:
        parts.append(("workflow_url", workflow_file))
    if wfbase is None:
        wfbase = os.getcwd()
    if attachments:
        for attachment in attachments:
            if attachment.startswith("file://"):
                attachment = attachment[7:]
                attach_f = open(attachment, "rb")
                relpath = os.path.relpath(attachment, wfbase)
            elif attachment.startswith("http"):
                attach_f = urlopen(attachment)
                # BUG FIX: take the basename of the URL string, not of the
                # urlopen response object (which raised TypeError).
                relpath = os.path.basename(attachment)
            else:
                # Previously this fell through to a NameError on attach_f;
                # fail with an explicit message instead.
                raise ValueError('Unsupported attachment: {}'.format(attachment))
            parts.append(("workflow_attachment", (relpath, attach_f)))
    return parts
def expand_globs(attachments):
    """Expand glob patterns in *attachments* into a set of file:// URIs.

    Entries that already carry a non-file scheme (e.g. http://) are kept
    verbatim; local paths (with or without a file:// prefix) are globbed
    and each match becomes an absolute file:// URI.
    """
    expanded = []
    for entry in attachments:
        if 'file://' in entry:
            matches = glob.glob(entry[7:])
        elif ':' not in entry:
            matches = glob.glob(entry)
        else:
            expanded.append(entry)
            continue
        expanded.extend('file://' + os.path.abspath(m) for m in matches)
    return set(expanded)
def wes_reponse(postresult):
    """Decode a WES HTTP response body as JSON.

    (The name keeps the historical 'reponse' typo for API compatibility.)
    On any status other than 200 the decoded body is logged and raised
    as a generic Exception; otherwise it is returned as a dictionary.
    """
    body = json.loads(postresult.text)
    if postresult.status_code == 200:
        return body
    message = str(body)
    logging.error(message)
    raise Exception(message)
class WESClient(object):
    """Thin HTTP client for a GA4GH Workflow Execution Service endpoint."""
    def __init__(self, service):
        # service is a mapping with 'auth' (headers dict sent on every
        # request), 'proto' (scheme, http or https) and 'host'
        # (host[:port] where the WES server listens).
        self.auth = service['auth']
        self.proto = service['proto']
        self.host = service['host']
    def get_service_info(self):
        """
        Get information about Workflow Execution Service. May
        include information related (but not limited to) the
        workflow descriptor formats, versions supported, the
        WES API versions supported, and information about general
        service availability.

        :return: The body of the get result as a dictionary.
        """
        postresult = requests.get("%s://%s/ga4gh/wes/v1/service-info" % (self.proto, self.host),
                                  headers=self.auth)
        return wes_reponse(postresult)
    def list_runs(self):
        """
        List the workflows, this endpoint will list the workflows
        in order of oldest to newest. There is no guarantee of
        live updates as the user traverses the pages, the behavior
        should be decided (and documented) by each implementation.

        :return: The body of the get result as a dictionary.
        """
        postresult = requests.get("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
                                  headers=self.auth)
        return wes_reponse(postresult)
    def run(self, wf, jsonyaml, attachments):
        """
        Compose and send a post request that signals the wes server to run a workflow.

        :param str wf: A local/http/https path to a cwl/wdl/python workflow file.
        :param str jsonyaml: A local path to a json or yaml file.
        :param list attachments: A list of local paths to files that will be uploaded to the server.

        :return: The body of the post result as a dictionary.
        """
        attachments = list(expand_globs(attachments))
        parts = build_wes_request(wf, jsonyaml, attachments)
        postresult = requests.post("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
                                   files=parts,
                                   headers=self.auth)
        return wes_reponse(postresult)
    def cancel(self, run_id):
        """
        Cancel a running workflow.

        :param run_id: String (typically a uuid) identifying the run.

        :return: The body of the delete result as a dictionary.
        """
        postresult = requests.post("%s://%s/ga4gh/wes/v1/runs/%s/cancel" % (self.proto, self.host, run_id),
                                   headers=self.auth)
        return wes_reponse(postresult)
    def get_run_log(self, run_id):
        """
        Get detailed info about a running workflow.

        :param run_id: String (typically a uuid) identifying the run.

        :return: The body of the get result as a dictionary.
        """
        postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s" % (self.proto, self.host, run_id),
                                  headers=self.auth)
        return wes_reponse(postresult)
    def get_run_status(self, run_id):
        """
        Get quick status info about a running workflow.

        :param run_id: String (typically a uuid) identifying the run.

        :return: The body of the get result as a dictionary.
        """
        postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s/status" % (self.proto, self.host, run_id),
                                  headers=self.auth)
        return wes_reponse(postresult)
|
common-workflow-language/workflow-service
|
wes_client/util.py
|
wf_info
|
python
|
def wf_info(workflow_path):
supported_formats = ['py', 'wdl', 'cwl']
file_type = workflow_path.lower().split('.')[-1] # Grab the file extension
workflow_path = workflow_path if ':' in workflow_path else 'file://' + workflow_path
if file_type in supported_formats:
if workflow_path.startswith('file://'):
version = get_version(file_type, workflow_path[7:])
elif workflow_path.startswith('https://') or workflow_path.startswith('http://'):
# If file not local go fetch it.
html = urlopen(workflow_path).read()
local_loc = os.path.join(os.getcwd(), 'fetchedFromRemote.' + file_type)
with open(local_loc, 'w') as f:
f.write(html.decode())
version = wf_info('file://' + local_loc)[0] # Don't take the file_type here, found it above.
os.remove(local_loc) # TODO: Find a way to avoid recreating file before version determination.
else:
raise NotImplementedError('Unsupported workflow file location: {}. Must be local or HTTP(S).'.format(workflow_path))
else:
raise TypeError('Unsupported workflow type: .{}. Must be {}.'.format(file_type, '.py, .cwl, or .wdl'))
return version, file_type.upper()
|
Returns the version of the file and the file extension.
Assumes that the file path is to the file directly ie, ends with a valid file extension.Supports checking local
files as well as files at http:// and https:// locations. Files at these remote locations are recreated locally to
enable our approach to version checking, then removed after version is extracted.
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L41-L69
|
[
"def get_version(extension, workflow_file):\n '''Determines the version of a .py, .wdl, or .cwl file.'''\n if extension == 'py' and two_seven_compatible(workflow_file):\n return '2.7'\n elif extension == 'cwl':\n return yaml.load(open(workflow_file))['cwlVersion']\n else: # Must be a wdl file.\n # Borrowed from https://github.com/Sage-Bionetworks/synapse-orchestrator/blob/develop/synorchestrator/util.py#L142\n try:\n return [l.lstrip('version') for l in workflow_file.splitlines() if 'version' in l.split(' ')][0]\n except IndexError:\n return 'draft-2'\n"
] |
import os
import json
import schema_salad.ref_resolver
from subprocess32 import check_call, DEVNULL, CalledProcessError
import yaml
import glob
import requests
import logging
from wes_service.util import visit
from future.standard_library import hooks
with hooks():
from urllib.request import urlopen, pathname2url
def two_seven_compatible(filePath):
"""Determines if a python file is 2.7 compatible by seeing if it compiles in a subprocess"""
try:
check_call(['python2', '-m', 'py_compile', filePath], stderr=DEVNULL)
except CalledProcessError:
raise RuntimeError('Python files must be 2.7 compatible')
return True
def get_version(extension, workflow_file):
'''Determines the version of a .py, .wdl, or .cwl file.'''
if extension == 'py' and two_seven_compatible(workflow_file):
return '2.7'
elif extension == 'cwl':
return yaml.load(open(workflow_file))['cwlVersion']
else: # Must be a wdl file.
# Borrowed from https://github.com/Sage-Bionetworks/synapse-orchestrator/blob/develop/synorchestrator/util.py#L142
try:
return [l.lstrip('version') for l in workflow_file.splitlines() if 'version' in l.split(' ')][0]
except IndexError:
return 'draft-2'
def modify_jsonyaml_paths(jsonyaml_file):
"""
Changes relative paths in a json/yaml file to be relative
to where the json/yaml file is located.
:param jsonyaml_file: Path to a json/yaml file.
"""
loader = schema_salad.ref_resolver.Loader({
"location": {"@type": "@id"},
"path": {"@type": "@id"}
})
input_dict, _ = loader.resolve_ref(jsonyaml_file, checklinks=False)
basedir = os.path.dirname(jsonyaml_file)
def fixpaths(d):
"""Make sure all paths have a URI scheme."""
if isinstance(d, dict):
if "path" in d:
if ":" not in d["path"]:
local_path = os.path.normpath(os.path.join(os.getcwd(), basedir, d["path"]))
d["location"] = pathname2url(local_path)
else:
d["location"] = d["path"]
del d["path"]
visit(input_dict, fixpaths)
return json.dumps(input_dict)
def build_wes_request(workflow_file, json_path, attachments=None):
"""
:param str workflow_file: Path to cwl/wdl file. Can be http/https/file.
:param json_path: Path to accompanying json file.
:param attachments: Any other files needing to be uploaded to the server.
:return: A list of tuples formatted to be sent in a post to the wes-server (Swagger API).
"""
workflow_file = "file://" + workflow_file if ":" not in workflow_file else workflow_file
wfbase = None
if json_path.startswith("file://"):
wfbase = os.path.dirname(json_path[7:])
json_path = json_path[7:]
with open(json_path) as f:
wf_params = json.dumps(json.load(f))
elif json_path.startswith("http"):
wf_params = modify_jsonyaml_paths(json_path)
else:
wf_params = json_path
wf_version, wf_type = wf_info(workflow_file)
parts = [("workflow_params", wf_params),
("workflow_type", wf_type),
("workflow_type_version", wf_version)]
if workflow_file.startswith("file://"):
if wfbase is None:
wfbase = os.path.dirname(workflow_file[7:])
parts.append(("workflow_attachment", (os.path.basename(workflow_file[7:]), open(workflow_file[7:], "rb"))))
parts.append(("workflow_url", os.path.basename(workflow_file[7:])))
else:
parts.append(("workflow_url", workflow_file))
if wfbase is None:
wfbase = os.getcwd()
if attachments:
for attachment in attachments:
if attachment.startswith("file://"):
attachment = attachment[7:]
attach_f = open(attachment, "rb")
relpath = os.path.relpath(attachment, wfbase)
elif attachment.startswith("http"):
attach_f = urlopen(attachment)
relpath = os.path.basename(attach_f)
parts.append(("workflow_attachment", (relpath, attach_f)))
return parts
def expand_globs(attachments):
expanded_list = []
for filepath in attachments:
if 'file://' in filepath:
for f in glob.glob(filepath[7:]):
expanded_list += ['file://' + os.path.abspath(f)]
elif ':' not in filepath:
for f in glob.glob(filepath):
expanded_list += ['file://' + os.path.abspath(f)]
else:
expanded_list += [filepath]
return set(expanded_list)
def wes_reponse(postresult):
if postresult.status_code != 200:
error = str(json.loads(postresult.text))
logging.error(error)
raise Exception(error)
return json.loads(postresult.text)
class WESClient(object):
def __init__(self, service):
self.auth = service['auth']
self.proto = service['proto']
self.host = service['host']
def get_service_info(self):
"""
Get information about Workflow Execution Service. May
include information related (but not limited to) the
workflow descriptor formats, versions supported, the
WES API versions supported, and information about general
the service availability.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/service-info" % (self.proto, self.host),
headers=self.auth)
return wes_reponse(postresult)
def list_runs(self):
"""
List the workflows, this endpoint will list the workflows
in order of oldest to newest. There is no guarantee of
live updates as the user traverses the pages, the behavior
should be decided (and documented) by each implementation.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
headers=self.auth)
return wes_reponse(postresult)
def run(self, wf, jsonyaml, attachments):
"""
Composes and sends a post request that signals the wes server to run a workflow.
:param str workflow_file: A local/http/https path to a cwl/wdl/python workflow file.
:param str jsonyaml: A local path to a json or yaml file.
:param list attachments: A list of local paths to files that will be uploaded to the server.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the post result as a dictionary.
"""
attachments = list(expand_globs(attachments))
parts = build_wes_request(wf, jsonyaml, attachments)
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
files=parts,
headers=self.auth)
return wes_reponse(postresult)
def cancel(self, run_id):
"""
Cancel a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the delete result as a dictionary.
"""
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs/%s/cancel" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
def get_run_log(self, run_id):
"""
Get detailed info about a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
def get_run_status(self, run_id):
"""
Get quick status info about a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s/status" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
|
common-workflow-language/workflow-service
|
wes_client/util.py
|
modify_jsonyaml_paths
|
python
|
def modify_jsonyaml_paths(jsonyaml_file):
loader = schema_salad.ref_resolver.Loader({
"location": {"@type": "@id"},
"path": {"@type": "@id"}
})
input_dict, _ = loader.resolve_ref(jsonyaml_file, checklinks=False)
basedir = os.path.dirname(jsonyaml_file)
def fixpaths(d):
"""Make sure all paths have a URI scheme."""
if isinstance(d, dict):
if "path" in d:
if ":" not in d["path"]:
local_path = os.path.normpath(os.path.join(os.getcwd(), basedir, d["path"]))
d["location"] = pathname2url(local_path)
else:
d["location"] = d["path"]
del d["path"]
visit(input_dict, fixpaths)
return json.dumps(input_dict)
|
Changes relative paths in a json/yaml file to be relative
to where the json/yaml file is located.
:param jsonyaml_file: Path to a json/yaml file.
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L72-L98
|
[
"def visit(d, op):\n \"\"\"Recursively call op(d) for all list subelements and dictionary 'values' that d may have.\"\"\"\n op(d)\n if isinstance(d, list):\n for i in d:\n visit(i, op)\n elif isinstance(d, dict):\n for i in itervalues(d):\n visit(i, op)\n"
] |
import os
import json
import schema_salad.ref_resolver
from subprocess32 import check_call, DEVNULL, CalledProcessError
import yaml
import glob
import requests
import logging
from wes_service.util import visit
from future.standard_library import hooks
with hooks():
from urllib.request import urlopen, pathname2url
def two_seven_compatible(filePath):
"""Determines if a python file is 2.7 compatible by seeing if it compiles in a subprocess"""
try:
check_call(['python2', '-m', 'py_compile', filePath], stderr=DEVNULL)
except CalledProcessError:
raise RuntimeError('Python files must be 2.7 compatible')
return True
def get_version(extension, workflow_file):
'''Determines the version of a .py, .wdl, or .cwl file.'''
if extension == 'py' and two_seven_compatible(workflow_file):
return '2.7'
elif extension == 'cwl':
return yaml.load(open(workflow_file))['cwlVersion']
else: # Must be a wdl file.
# Borrowed from https://github.com/Sage-Bionetworks/synapse-orchestrator/blob/develop/synorchestrator/util.py#L142
try:
return [l.lstrip('version') for l in workflow_file.splitlines() if 'version' in l.split(' ')][0]
except IndexError:
return 'draft-2'
def wf_info(workflow_path):
"""
Returns the version of the file and the file extension.
Assumes that the file path is to the file directly ie, ends with a valid file extension.Supports checking local
files as well as files at http:// and https:// locations. Files at these remote locations are recreated locally to
enable our approach to version checking, then removed after version is extracted.
"""
supported_formats = ['py', 'wdl', 'cwl']
file_type = workflow_path.lower().split('.')[-1] # Grab the file extension
workflow_path = workflow_path if ':' in workflow_path else 'file://' + workflow_path
if file_type in supported_formats:
if workflow_path.startswith('file://'):
version = get_version(file_type, workflow_path[7:])
elif workflow_path.startswith('https://') or workflow_path.startswith('http://'):
# If file not local go fetch it.
html = urlopen(workflow_path).read()
local_loc = os.path.join(os.getcwd(), 'fetchedFromRemote.' + file_type)
with open(local_loc, 'w') as f:
f.write(html.decode())
version = wf_info('file://' + local_loc)[0] # Don't take the file_type here, found it above.
os.remove(local_loc) # TODO: Find a way to avoid recreating file before version determination.
else:
raise NotImplementedError('Unsupported workflow file location: {}. Must be local or HTTP(S).'.format(workflow_path))
else:
raise TypeError('Unsupported workflow type: .{}. Must be {}.'.format(file_type, '.py, .cwl, or .wdl'))
return version, file_type.upper()
def build_wes_request(workflow_file, json_path, attachments=None):
"""
:param str workflow_file: Path to cwl/wdl file. Can be http/https/file.
:param json_path: Path to accompanying json file.
:param attachments: Any other files needing to be uploaded to the server.
:return: A list of tuples formatted to be sent in a post to the wes-server (Swagger API).
"""
workflow_file = "file://" + workflow_file if ":" not in workflow_file else workflow_file
wfbase = None
if json_path.startswith("file://"):
wfbase = os.path.dirname(json_path[7:])
json_path = json_path[7:]
with open(json_path) as f:
wf_params = json.dumps(json.load(f))
elif json_path.startswith("http"):
wf_params = modify_jsonyaml_paths(json_path)
else:
wf_params = json_path
wf_version, wf_type = wf_info(workflow_file)
parts = [("workflow_params", wf_params),
("workflow_type", wf_type),
("workflow_type_version", wf_version)]
if workflow_file.startswith("file://"):
if wfbase is None:
wfbase = os.path.dirname(workflow_file[7:])
parts.append(("workflow_attachment", (os.path.basename(workflow_file[7:]), open(workflow_file[7:], "rb"))))
parts.append(("workflow_url", os.path.basename(workflow_file[7:])))
else:
parts.append(("workflow_url", workflow_file))
if wfbase is None:
wfbase = os.getcwd()
if attachments:
for attachment in attachments:
if attachment.startswith("file://"):
attachment = attachment[7:]
attach_f = open(attachment, "rb")
relpath = os.path.relpath(attachment, wfbase)
elif attachment.startswith("http"):
attach_f = urlopen(attachment)
relpath = os.path.basename(attach_f)
parts.append(("workflow_attachment", (relpath, attach_f)))
return parts
def expand_globs(attachments):
expanded_list = []
for filepath in attachments:
if 'file://' in filepath:
for f in glob.glob(filepath[7:]):
expanded_list += ['file://' + os.path.abspath(f)]
elif ':' not in filepath:
for f in glob.glob(filepath):
expanded_list += ['file://' + os.path.abspath(f)]
else:
expanded_list += [filepath]
return set(expanded_list)
def wes_reponse(postresult):
if postresult.status_code != 200:
error = str(json.loads(postresult.text))
logging.error(error)
raise Exception(error)
return json.loads(postresult.text)
class WESClient(object):
def __init__(self, service):
self.auth = service['auth']
self.proto = service['proto']
self.host = service['host']
def get_service_info(self):
"""
Get information about Workflow Execution Service. May
include information related (but not limited to) the
workflow descriptor formats, versions supported, the
WES API versions supported, and information about general
the service availability.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/service-info" % (self.proto, self.host),
headers=self.auth)
return wes_reponse(postresult)
def list_runs(self):
"""
List the workflows, this endpoint will list the workflows
in order of oldest to newest. There is no guarantee of
live updates as the user traverses the pages, the behavior
should be decided (and documented) by each implementation.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
headers=self.auth)
return wes_reponse(postresult)
def run(self, wf, jsonyaml, attachments):
"""
Composes and sends a post request that signals the wes server to run a workflow.
:param str workflow_file: A local/http/https path to a cwl/wdl/python workflow file.
:param str jsonyaml: A local path to a json or yaml file.
:param list attachments: A list of local paths to files that will be uploaded to the server.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the post result as a dictionary.
"""
attachments = list(expand_globs(attachments))
parts = build_wes_request(wf, jsonyaml, attachments)
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
files=parts,
headers=self.auth)
return wes_reponse(postresult)
def cancel(self, run_id):
"""
Cancel a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the delete result as a dictionary.
"""
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs/%s/cancel" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
def get_run_log(self, run_id):
"""
Get detailed info about a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
def get_run_status(self, run_id):
"""
Get quick status info about a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s/status" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
|
common-workflow-language/workflow-service
|
wes_client/util.py
|
build_wes_request
|
python
|
def build_wes_request(workflow_file, json_path, attachments=None):
workflow_file = "file://" + workflow_file if ":" not in workflow_file else workflow_file
wfbase = None
if json_path.startswith("file://"):
wfbase = os.path.dirname(json_path[7:])
json_path = json_path[7:]
with open(json_path) as f:
wf_params = json.dumps(json.load(f))
elif json_path.startswith("http"):
wf_params = modify_jsonyaml_paths(json_path)
else:
wf_params = json_path
wf_version, wf_type = wf_info(workflow_file)
parts = [("workflow_params", wf_params),
("workflow_type", wf_type),
("workflow_type_version", wf_version)]
if workflow_file.startswith("file://"):
if wfbase is None:
wfbase = os.path.dirname(workflow_file[7:])
parts.append(("workflow_attachment", (os.path.basename(workflow_file[7:]), open(workflow_file[7:], "rb"))))
parts.append(("workflow_url", os.path.basename(workflow_file[7:])))
else:
parts.append(("workflow_url", workflow_file))
if wfbase is None:
wfbase = os.getcwd()
if attachments:
for attachment in attachments:
if attachment.startswith("file://"):
attachment = attachment[7:]
attach_f = open(attachment, "rb")
relpath = os.path.relpath(attachment, wfbase)
elif attachment.startswith("http"):
attach_f = urlopen(attachment)
relpath = os.path.basename(attach_f)
parts.append(("workflow_attachment", (relpath, attach_f)))
return parts
|
:param str workflow_file: Path to cwl/wdl file. Can be http/https/file.
:param json_path: Path to accompanying json file.
:param attachments: Any other files needing to be uploaded to the server.
:return: A list of tuples formatted to be sent in a post to the wes-server (Swagger API).
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L101-L148
|
[
"def wf_info(workflow_path):\n \"\"\"\n Returns the version of the file and the file extension.\n\n Assumes that the file path is to the file directly ie, ends with a valid file extension.Supports checking local\n files as well as files at http:// and https:// locations. Files at these remote locations are recreated locally to\n enable our approach to version checking, then removed after version is extracted.\n \"\"\"\n\n supported_formats = ['py', 'wdl', 'cwl']\n file_type = workflow_path.lower().split('.')[-1] # Grab the file extension\n workflow_path = workflow_path if ':' in workflow_path else 'file://' + workflow_path\n\n if file_type in supported_formats:\n if workflow_path.startswith('file://'):\n version = get_version(file_type, workflow_path[7:])\n elif workflow_path.startswith('https://') or workflow_path.startswith('http://'):\n # If file not local go fetch it.\n html = urlopen(workflow_path).read()\n local_loc = os.path.join(os.getcwd(), 'fetchedFromRemote.' + file_type)\n with open(local_loc, 'w') as f:\n f.write(html.decode())\n version = wf_info('file://' + local_loc)[0] # Don't take the file_type here, found it above.\n os.remove(local_loc) # TODO: Find a way to avoid recreating file before version determination.\n else:\n raise NotImplementedError('Unsupported workflow file location: {}. Must be local or HTTP(S).'.format(workflow_path))\n else:\n raise TypeError('Unsupported workflow type: .{}. Must be {}.'.format(file_type, '.py, .cwl, or .wdl'))\n return version, file_type.upper()\n"
] |
import os
import json
import schema_salad.ref_resolver
from subprocess32 import check_call, DEVNULL, CalledProcessError
import yaml
import glob
import requests
import logging
from wes_service.util import visit
from future.standard_library import hooks
with hooks():
from urllib.request import urlopen, pathname2url
def two_seven_compatible(filePath):
"""Determines if a python file is 2.7 compatible by seeing if it compiles in a subprocess"""
try:
check_call(['python2', '-m', 'py_compile', filePath], stderr=DEVNULL)
except CalledProcessError:
raise RuntimeError('Python files must be 2.7 compatible')
return True
def get_version(extension, workflow_file):
'''Determines the version of a .py, .wdl, or .cwl file.'''
if extension == 'py' and two_seven_compatible(workflow_file):
return '2.7'
elif extension == 'cwl':
return yaml.load(open(workflow_file))['cwlVersion']
else: # Must be a wdl file.
# Borrowed from https://github.com/Sage-Bionetworks/synapse-orchestrator/blob/develop/synorchestrator/util.py#L142
try:
return [l.lstrip('version') for l in workflow_file.splitlines() if 'version' in l.split(' ')][0]
except IndexError:
return 'draft-2'
def wf_info(workflow_path):
"""
Returns the version of the file and the file extension.
Assumes that the file path is to the file directly ie, ends with a valid file extension.Supports checking local
files as well as files at http:// and https:// locations. Files at these remote locations are recreated locally to
enable our approach to version checking, then removed after version is extracted.
"""
supported_formats = ['py', 'wdl', 'cwl']
file_type = workflow_path.lower().split('.')[-1] # Grab the file extension
workflow_path = workflow_path if ':' in workflow_path else 'file://' + workflow_path
if file_type in supported_formats:
if workflow_path.startswith('file://'):
version = get_version(file_type, workflow_path[7:])
elif workflow_path.startswith('https://') or workflow_path.startswith('http://'):
# If file not local go fetch it.
html = urlopen(workflow_path).read()
local_loc = os.path.join(os.getcwd(), 'fetchedFromRemote.' + file_type)
with open(local_loc, 'w') as f:
f.write(html.decode())
version = wf_info('file://' + local_loc)[0] # Don't take the file_type here, found it above.
os.remove(local_loc) # TODO: Find a way to avoid recreating file before version determination.
else:
raise NotImplementedError('Unsupported workflow file location: {}. Must be local or HTTP(S).'.format(workflow_path))
else:
raise TypeError('Unsupported workflow type: .{}. Must be {}.'.format(file_type, '.py, .cwl, or .wdl'))
return version, file_type.upper()
def modify_jsonyaml_paths(jsonyaml_file):
"""
Changes relative paths in a json/yaml file to be relative
to where the json/yaml file is located.
:param jsonyaml_file: Path to a json/yaml file.
"""
loader = schema_salad.ref_resolver.Loader({
"location": {"@type": "@id"},
"path": {"@type": "@id"}
})
input_dict, _ = loader.resolve_ref(jsonyaml_file, checklinks=False)
basedir = os.path.dirname(jsonyaml_file)
def fixpaths(d):
"""Make sure all paths have a URI scheme."""
if isinstance(d, dict):
if "path" in d:
if ":" not in d["path"]:
local_path = os.path.normpath(os.path.join(os.getcwd(), basedir, d["path"]))
d["location"] = pathname2url(local_path)
else:
d["location"] = d["path"]
del d["path"]
visit(input_dict, fixpaths)
return json.dumps(input_dict)
def expand_globs(attachments):
expanded_list = []
for filepath in attachments:
if 'file://' in filepath:
for f in glob.glob(filepath[7:]):
expanded_list += ['file://' + os.path.abspath(f)]
elif ':' not in filepath:
for f in glob.glob(filepath):
expanded_list += ['file://' + os.path.abspath(f)]
else:
expanded_list += [filepath]
return set(expanded_list)
def wes_reponse(postresult):
if postresult.status_code != 200:
error = str(json.loads(postresult.text))
logging.error(error)
raise Exception(error)
return json.loads(postresult.text)
class WESClient(object):
def __init__(self, service):
self.auth = service['auth']
self.proto = service['proto']
self.host = service['host']
def get_service_info(self):
"""
Get information about Workflow Execution Service. May
include information related (but not limited to) the
workflow descriptor formats, versions supported, the
WES API versions supported, and information about general
the service availability.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/service-info" % (self.proto, self.host),
headers=self.auth)
return wes_reponse(postresult)
def list_runs(self):
"""
List the workflows, this endpoint will list the workflows
in order of oldest to newest. There is no guarantee of
live updates as the user traverses the pages, the behavior
should be decided (and documented) by each implementation.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
headers=self.auth)
return wes_reponse(postresult)
def run(self, wf, jsonyaml, attachments):
"""
Composes and sends a post request that signals the wes server to run a workflow.
:param str workflow_file: A local/http/https path to a cwl/wdl/python workflow file.
:param str jsonyaml: A local path to a json or yaml file.
:param list attachments: A list of local paths to files that will be uploaded to the server.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the post result as a dictionary.
"""
attachments = list(expand_globs(attachments))
parts = build_wes_request(wf, jsonyaml, attachments)
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
files=parts,
headers=self.auth)
return wes_reponse(postresult)
def cancel(self, run_id):
"""
Cancel a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the delete result as a dictionary.
"""
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs/%s/cancel" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
def get_run_log(self, run_id):
"""
Get detailed info about a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
def get_run_status(self, run_id):
"""
Get quick status info about a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s/status" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
|
common-workflow-language/workflow-service
|
wes_client/util.py
|
WESClient.get_service_info
|
python
|
def get_service_info(self):
postresult = requests.get("%s://%s/ga4gh/wes/v1/service-info" % (self.proto, self.host),
headers=self.auth)
return wes_reponse(postresult)
|
Get information about Workflow Execution Service. May
include information related (but not limited to) the
workflow descriptor formats, versions supported, the
WES API versions supported, and information about general
the service availability.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L180-L195
|
[
"def wes_reponse(postresult):\n if postresult.status_code != 200:\n error = str(json.loads(postresult.text))\n logging.error(error)\n raise Exception(error)\n\n return json.loads(postresult.text)\n"
] |
class WESClient(object):
def __init__(self, service):
self.auth = service['auth']
self.proto = service['proto']
self.host = service['host']
def list_runs(self):
"""
List the workflows, this endpoint will list the workflows
in order of oldest to newest. There is no guarantee of
live updates as the user traverses the pages, the behavior
should be decided (and documented) by each implementation.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
headers=self.auth)
return wes_reponse(postresult)
def run(self, wf, jsonyaml, attachments):
"""
Composes and sends a post request that signals the wes server to run a workflow.
:param str workflow_file: A local/http/https path to a cwl/wdl/python workflow file.
:param str jsonyaml: A local path to a json or yaml file.
:param list attachments: A list of local paths to files that will be uploaded to the server.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the post result as a dictionary.
"""
attachments = list(expand_globs(attachments))
parts = build_wes_request(wf, jsonyaml, attachments)
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
files=parts,
headers=self.auth)
return wes_reponse(postresult)
def cancel(self, run_id):
"""
Cancel a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the delete result as a dictionary.
"""
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs/%s/cancel" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
def get_run_log(self, run_id):
"""
Get detailed info about a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
def get_run_status(self, run_id):
"""
Get quick status info about a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s/status" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
|
common-workflow-language/workflow-service
|
wes_client/util.py
|
WESClient.run
|
python
|
def run(self, wf, jsonyaml, attachments):
attachments = list(expand_globs(attachments))
parts = build_wes_request(wf, jsonyaml, attachments)
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
files=parts,
headers=self.auth)
return wes_reponse(postresult)
|
Composes and sends a post request that signals the wes server to run a workflow.
:param str workflow_file: A local/http/https path to a cwl/wdl/python workflow file.
:param str jsonyaml: A local path to a json or yaml file.
:param list attachments: A list of local paths to files that will be uploaded to the server.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the post result as a dictionary.
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L213-L231
|
[
"def expand_globs(attachments):\n expanded_list = []\n for filepath in attachments:\n if 'file://' in filepath:\n for f in glob.glob(filepath[7:]):\n expanded_list += ['file://' + os.path.abspath(f)]\n elif ':' not in filepath:\n for f in glob.glob(filepath):\n expanded_list += ['file://' + os.path.abspath(f)]\n else:\n expanded_list += [filepath]\n return set(expanded_list)\n",
"def build_wes_request(workflow_file, json_path, attachments=None):\n \"\"\"\n :param str workflow_file: Path to cwl/wdl file. Can be http/https/file.\n :param json_path: Path to accompanying json file.\n :param attachments: Any other files needing to be uploaded to the server.\n\n :return: A list of tuples formatted to be sent in a post to the wes-server (Swagger API).\n \"\"\"\n workflow_file = \"file://\" + workflow_file if \":\" not in workflow_file else workflow_file\n wfbase = None\n if json_path.startswith(\"file://\"):\n wfbase = os.path.dirname(json_path[7:])\n json_path = json_path[7:]\n with open(json_path) as f:\n wf_params = json.dumps(json.load(f))\n elif json_path.startswith(\"http\"):\n wf_params = modify_jsonyaml_paths(json_path)\n else:\n wf_params = json_path\n wf_version, wf_type = wf_info(workflow_file)\n\n parts = [(\"workflow_params\", wf_params),\n (\"workflow_type\", wf_type),\n (\"workflow_type_version\", wf_version)]\n\n if workflow_file.startswith(\"file://\"):\n if wfbase is None:\n wfbase = os.path.dirname(workflow_file[7:])\n parts.append((\"workflow_attachment\", (os.path.basename(workflow_file[7:]), open(workflow_file[7:], \"rb\"))))\n parts.append((\"workflow_url\", os.path.basename(workflow_file[7:])))\n else:\n parts.append((\"workflow_url\", workflow_file))\n\n if wfbase is None:\n wfbase = os.getcwd()\n if attachments:\n for attachment in attachments:\n if attachment.startswith(\"file://\"):\n attachment = attachment[7:]\n attach_f = open(attachment, \"rb\")\n relpath = os.path.relpath(attachment, wfbase)\n elif attachment.startswith(\"http\"):\n attach_f = urlopen(attachment)\n relpath = os.path.basename(attach_f)\n\n parts.append((\"workflow_attachment\", (relpath, attach_f)))\n\n return parts\n",
"def wes_reponse(postresult):\n if postresult.status_code != 200:\n error = str(json.loads(postresult.text))\n logging.error(error)\n raise Exception(error)\n\n return json.loads(postresult.text)\n"
] |
class WESClient(object):
def __init__(self, service):
self.auth = service['auth']
self.proto = service['proto']
self.host = service['host']
def get_service_info(self):
"""
Get information about Workflow Execution Service. May
include information related (but not limited to) the
workflow descriptor formats, versions supported, the
WES API versions supported, and information about general
the service availability.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/service-info" % (self.proto, self.host),
headers=self.auth)
return wes_reponse(postresult)
def list_runs(self):
"""
List the workflows, this endpoint will list the workflows
in order of oldest to newest. There is no guarantee of
live updates as the user traverses the pages, the behavior
should be decided (and documented) by each implementation.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
headers=self.auth)
return wes_reponse(postresult)
def cancel(self, run_id):
"""
Cancel a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the delete result as a dictionary.
"""
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs/%s/cancel" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
def get_run_log(self, run_id):
"""
Get detailed info about a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
def get_run_status(self, run_id):
"""
Get quick status info about a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s/status" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
|
common-workflow-language/workflow-service
|
wes_client/util.py
|
WESClient.cancel
|
python
|
def cancel(self, run_id):
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs/%s/cancel" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
|
Cancel a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the delete result as a dictionary.
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L233-L245
|
[
"def wes_reponse(postresult):\n if postresult.status_code != 200:\n error = str(json.loads(postresult.text))\n logging.error(error)\n raise Exception(error)\n\n return json.loads(postresult.text)\n"
] |
class WESClient(object):
def __init__(self, service):
self.auth = service['auth']
self.proto = service['proto']
self.host = service['host']
def get_service_info(self):
"""
Get information about Workflow Execution Service. May
include information related (but not limited to) the
workflow descriptor formats, versions supported, the
WES API versions supported, and information about general
the service availability.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/service-info" % (self.proto, self.host),
headers=self.auth)
return wes_reponse(postresult)
def list_runs(self):
"""
List the workflows, this endpoint will list the workflows
in order of oldest to newest. There is no guarantee of
live updates as the user traverses the pages, the behavior
should be decided (and documented) by each implementation.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
headers=self.auth)
return wes_reponse(postresult)
def run(self, wf, jsonyaml, attachments):
"""
Composes and sends a post request that signals the wes server to run a workflow.
:param str workflow_file: A local/http/https path to a cwl/wdl/python workflow file.
:param str jsonyaml: A local path to a json or yaml file.
:param list attachments: A list of local paths to files that will be uploaded to the server.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the post result as a dictionary.
"""
attachments = list(expand_globs(attachments))
parts = build_wes_request(wf, jsonyaml, attachments)
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
files=parts,
headers=self.auth)
return wes_reponse(postresult)
def get_run_log(self, run_id):
"""
Get detailed info about a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
def get_run_status(self, run_id):
"""
Get quick status info about a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s/status" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
|
common-workflow-language/workflow-service
|
wes_client/util.py
|
WESClient.get_run_log
|
python
|
def get_run_log(self, run_id):
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
|
Get detailed info about a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L247-L259
|
[
"def wes_reponse(postresult):\n if postresult.status_code != 200:\n error = str(json.loads(postresult.text))\n logging.error(error)\n raise Exception(error)\n\n return json.loads(postresult.text)\n"
] |
class WESClient(object):
def __init__(self, service):
self.auth = service['auth']
self.proto = service['proto']
self.host = service['host']
def get_service_info(self):
"""
Get information about Workflow Execution Service. May
include information related (but not limited to) the
workflow descriptor formats, versions supported, the
WES API versions supported, and information about general
the service availability.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/service-info" % (self.proto, self.host),
headers=self.auth)
return wes_reponse(postresult)
def list_runs(self):
"""
List the workflows, this endpoint will list the workflows
in order of oldest to newest. There is no guarantee of
live updates as the user traverses the pages, the behavior
should be decided (and documented) by each implementation.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
headers=self.auth)
return wes_reponse(postresult)
def run(self, wf, jsonyaml, attachments):
"""
Composes and sends a post request that signals the wes server to run a workflow.
:param str workflow_file: A local/http/https path to a cwl/wdl/python workflow file.
:param str jsonyaml: A local path to a json or yaml file.
:param list attachments: A list of local paths to files that will be uploaded to the server.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the post result as a dictionary.
"""
attachments = list(expand_globs(attachments))
parts = build_wes_request(wf, jsonyaml, attachments)
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host),
files=parts,
headers=self.auth)
return wes_reponse(postresult)
def cancel(self, run_id):
"""
Cancel a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the delete result as a dictionary.
"""
postresult = requests.post("%s://%s/ga4gh/wes/v1/runs/%s/cancel" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
def get_run_status(self, run_id):
"""
Get quick status info about a running workflow.
:param run_id: String (typically a uuid) identifying the run.
:param str auth: String to send in the auth header.
:param proto: Schema where the server resides (http, https)
:param host: Port where the post request will be sent and the wes server listens at (default 8080)
:return: The body of the get result as a dictionary.
"""
postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s/status" % (self.proto, self.host, run_id),
headers=self.auth)
return wes_reponse(postresult)
|
common-workflow-language/workflow-service
|
wes_service/cwl_runner.py
|
Workflow.run
|
python
|
def run(self, request, tempdir, opts):
with open(os.path.join(self.workdir, "request.json"), "w") as f:
json.dump(request, f)
with open(os.path.join(self.workdir, "cwl.input.json"), "w") as inputtemp:
json.dump(request["workflow_params"], inputtemp)
workflow_url = request.get("workflow_url") # Will always be local path to descriptor cwl, or url.
output = open(os.path.join(self.workdir, "cwl.output.json"), "w")
stderr = open(os.path.join(self.workdir, "stderr"), "w")
runner = opts.getopt("runner", default="cwl-runner")
extra = opts.getoptlist("extra")
# replace any locally specified outdir with the default
for e in extra:
if e.startswith('--outdir='):
extra.remove(e)
extra.append('--outdir=' + self.outdir)
# link the cwl and json into the tempdir/cwd
if workflow_url.startswith('file://'):
os.symlink(workflow_url[7:], os.path.join(tempdir, "wes_workflow.cwl"))
workflow_url = os.path.join(tempdir, "wes_workflow.cwl")
os.symlink(inputtemp.name, os.path.join(tempdir, "cwl.input.json"))
jsonpath = os.path.join(tempdir, "cwl.input.json")
# build args and run
command_args = [runner] + extra + [workflow_url, jsonpath]
proc = subprocess.Popen(command_args,
stdout=output,
stderr=stderr,
close_fds=True,
cwd=tempdir)
output.close()
stderr.close()
with open(os.path.join(self.workdir, "pid"), "w") as pid:
pid.write(str(proc.pid))
return self.getstatus()
|
Constructs a command to run a cwl/json from requests and opts,
runs it, and deposits the outputs in outdir.
Runner:
opts.getopt("runner", default="cwl-runner")
CWL (url):
request["workflow_url"] == a url to a cwl file
or
request["workflow_attachment"] == input cwl text (written to a file and a url constructed for that file)
JSON File:
request["workflow_params"] == input json text (to be written to a file)
:param dict request: A dictionary containing the cwl/json information.
:param wes_service.util.WESBackend opts: contains the user's arguments;
specifically the runner and runner options
:return: {"run_id": self.run_id, "state": state}
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_service/cwl_runner.py#L19-L79
|
[
"def getstatus(self):\n state, exit_code = self.getstate()\n\n return {\n \"run_id\": self.run_id,\n \"state\": state\n }\n",
"def getopt(self, p, default=None):\n \"\"\"Returns the first option value stored that matches p or default.\"\"\"\n for k, v in self.pairs:\n if k == p:\n return v\n return default\n",
"def getoptlist(self, p):\n \"\"\"Returns all option values stored that match p as a list.\"\"\"\n optlist = []\n for k, v in self.pairs:\n if k == p:\n optlist.append(v)\n return optlist\n"
] |
class Workflow(object):
def __init__(self, run_id):
super(Workflow, self).__init__()
self.run_id = run_id
self.workdir = os.path.join(os.getcwd(), "workflows", self.run_id)
self.outdir = os.path.join(self.workdir, 'outdir')
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
def getstate(self):
"""
Returns RUNNING, -1
COMPLETE, 0
or
EXECUTOR_ERROR, 255
"""
state = "RUNNING"
exit_code = -1
exitcode_file = os.path.join(self.workdir, "exit_code")
pid_file = os.path.join(self.workdir, "pid")
if os.path.exists(exitcode_file):
with open(exitcode_file) as f:
exit_code = int(f.read())
elif os.path.exists(pid_file):
with open(pid_file, "r") as pid:
pid = int(pid.read())
try:
(_pid, exit_status) = os.waitpid(pid, os.WNOHANG)
if _pid != 0:
exit_code = exit_status >> 8
with open(exitcode_file, "w") as f:
f.write(str(exit_code))
os.unlink(pid_file)
except OSError:
os.unlink(pid_file)
exit_code = 255
if exit_code == 0:
state = "COMPLETE"
elif exit_code != -1:
state = "EXECUTOR_ERROR"
return state, exit_code
def getstatus(self):
state, exit_code = self.getstate()
return {
"run_id": self.run_id,
"state": state
}
def getlog(self):
state, exit_code = self.getstate()
with open(os.path.join(self.workdir, "request.json"), "r") as f:
request = json.load(f)
with open(os.path.join(self.workdir, "stderr"), "r") as f:
stderr = f.read()
outputobj = {}
if state == "COMPLETE":
output_path = os.path.join(self.workdir, "cwl.output.json")
with open(output_path, "r") as outputtemp:
outputobj = json.load(outputtemp)
return {
"run_id": self.run_id,
"request": request,
"state": state,
"run_log": {
"cmd": [""],
"start_time": "",
"end_time": "",
"stdout": "",
"stderr": stderr,
"exit_code": exit_code
},
"task_logs": [],
"outputs": outputobj
}
def cancel(self):
pass
|
common-workflow-language/workflow-service
|
wes_service/cwl_runner.py
|
Workflow.getstate
|
python
|
def getstate(self):
state = "RUNNING"
exit_code = -1
exitcode_file = os.path.join(self.workdir, "exit_code")
pid_file = os.path.join(self.workdir, "pid")
if os.path.exists(exitcode_file):
with open(exitcode_file) as f:
exit_code = int(f.read())
elif os.path.exists(pid_file):
with open(pid_file, "r") as pid:
pid = int(pid.read())
try:
(_pid, exit_status) = os.waitpid(pid, os.WNOHANG)
if _pid != 0:
exit_code = exit_status >> 8
with open(exitcode_file, "w") as f:
f.write(str(exit_code))
os.unlink(pid_file)
except OSError:
os.unlink(pid_file)
exit_code = 255
if exit_code == 0:
state = "COMPLETE"
elif exit_code != -1:
state = "EXECUTOR_ERROR"
return state, exit_code
|
Returns RUNNING, -1
COMPLETE, 0
or
EXECUTOR_ERROR, 255
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_service/cwl_runner.py#L81-L116
| null |
class Workflow(object):
def __init__(self, run_id):
super(Workflow, self).__init__()
self.run_id = run_id
self.workdir = os.path.join(os.getcwd(), "workflows", self.run_id)
self.outdir = os.path.join(self.workdir, 'outdir')
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
def run(self, request, tempdir, opts):
"""
Constructs a command to run a cwl/json from requests and opts,
runs it, and deposits the outputs in outdir.
Runner:
opts.getopt("runner", default="cwl-runner")
CWL (url):
request["workflow_url"] == a url to a cwl file
or
request["workflow_attachment"] == input cwl text (written to a file and a url constructed for that file)
JSON File:
request["workflow_params"] == input json text (to be written to a file)
:param dict request: A dictionary containing the cwl/json information.
:param wes_service.util.WESBackend opts: contains the user's arguments;
specifically the runner and runner options
:return: {"run_id": self.run_id, "state": state}
"""
with open(os.path.join(self.workdir, "request.json"), "w") as f:
json.dump(request, f)
with open(os.path.join(self.workdir, "cwl.input.json"), "w") as inputtemp:
json.dump(request["workflow_params"], inputtemp)
workflow_url = request.get("workflow_url") # Will always be local path to descriptor cwl, or url.
output = open(os.path.join(self.workdir, "cwl.output.json"), "w")
stderr = open(os.path.join(self.workdir, "stderr"), "w")
runner = opts.getopt("runner", default="cwl-runner")
extra = opts.getoptlist("extra")
# replace any locally specified outdir with the default
for e in extra:
if e.startswith('--outdir='):
extra.remove(e)
extra.append('--outdir=' + self.outdir)
# link the cwl and json into the tempdir/cwd
if workflow_url.startswith('file://'):
os.symlink(workflow_url[7:], os.path.join(tempdir, "wes_workflow.cwl"))
workflow_url = os.path.join(tempdir, "wes_workflow.cwl")
os.symlink(inputtemp.name, os.path.join(tempdir, "cwl.input.json"))
jsonpath = os.path.join(tempdir, "cwl.input.json")
# build args and run
command_args = [runner] + extra + [workflow_url, jsonpath]
proc = subprocess.Popen(command_args,
stdout=output,
stderr=stderr,
close_fds=True,
cwd=tempdir)
output.close()
stderr.close()
with open(os.path.join(self.workdir, "pid"), "w") as pid:
pid.write(str(proc.pid))
return self.getstatus()
def getstatus(self):
state, exit_code = self.getstate()
return {
"run_id": self.run_id,
"state": state
}
def getlog(self):
state, exit_code = self.getstate()
with open(os.path.join(self.workdir, "request.json"), "r") as f:
request = json.load(f)
with open(os.path.join(self.workdir, "stderr"), "r") as f:
stderr = f.read()
outputobj = {}
if state == "COMPLETE":
output_path = os.path.join(self.workdir, "cwl.output.json")
with open(output_path, "r") as outputtemp:
outputobj = json.load(outputtemp)
return {
"run_id": self.run_id,
"request": request,
"state": state,
"run_log": {
"cmd": [""],
"start_time": "",
"end_time": "",
"stdout": "",
"stderr": stderr,
"exit_code": exit_code
},
"task_logs": [],
"outputs": outputobj
}
def cancel(self):
pass
|
common-workflow-language/workflow-service
|
wes_service/toil_wes.py
|
ToilWorkflow.write_workflow
|
python
|
def write_workflow(self, request, opts, cwd, wftype='cwl'):
workflow_url = request.get("workflow_url")
# link the cwl and json into the cwd
if workflow_url.startswith('file://'):
os.link(workflow_url[7:], os.path.join(cwd, "wes_workflow." + wftype))
workflow_url = os.path.join(cwd, "wes_workflow." + wftype)
os.link(self.input_json, os.path.join(cwd, "wes_input.json"))
self.input_json = os.path.join(cwd, "wes_input.json")
extra_options = self.sort_toil_options(opts.getoptlist("extra"))
if wftype == 'cwl':
command_args = ['toil-cwl-runner'] + extra_options + [workflow_url, self.input_json]
elif wftype == 'wdl':
command_args = ['toil-wdl-runner'] + extra_options + [workflow_url, self.input_json]
elif wftype == 'py':
command_args = ['python'] + extra_options + [workflow_url]
else:
raise RuntimeError('workflow_type is not "cwl", "wdl", or "py": ' + str(wftype))
return command_args
|
Writes a cwl, wdl, or python file as appropriate from the request dictionary.
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_service/toil_wes.py#L68-L90
|
[
"def sort_toil_options(self, extra):\n # determine jobstore and set a new default if the user did not set one\n cloud = False\n for e in extra:\n if e.startswith('--jobStore='):\n self.jobstore = e[11:]\n if self.jobstore.startswith(('aws', 'google', 'azure')):\n cloud = True\n if e.startswith(('--outdir=', '-o=')):\n extra.remove(e)\n if not cloud:\n extra.append('--outdir=' + self.outdir)\n if not self.jobstore:\n extra.append('--jobStore=' + self.jobstore_default)\n self.jobstore = self.jobstore_default\n\n # store the jobstore location\n with open(self.jobstorefile, 'w') as f:\n f.write(self.jobstore)\n\n return extra\n"
] |
class ToilWorkflow(object):
def __init__(self, run_id):
"""
Represents a toil workflow.
:param str run_id: A uuid string. Used to name the folder that contains
all of the files containing this particular workflow instance's information.
"""
super(ToilWorkflow, self).__init__()
self.run_id = run_id
self.workdir = os.path.join(os.getcwd(), 'workflows', self.run_id)
self.outdir = os.path.join(self.workdir, 'outdir')
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
self.outfile = os.path.join(self.workdir, 'stdout')
self.errfile = os.path.join(self.workdir, 'stderr')
self.starttime = os.path.join(self.workdir, 'starttime')
self.endtime = os.path.join(self.workdir, 'endtime')
self.pidfile = os.path.join(self.workdir, 'pid')
self.statcompletefile = os.path.join(self.workdir, 'status_completed')
self.staterrorfile = os.path.join(self.workdir, 'status_error')
self.cmdfile = os.path.join(self.workdir, 'cmd')
self.jobstorefile = os.path.join(self.workdir, 'jobstore')
self.request_json = os.path.join(self.workdir, 'request.json')
self.input_json = os.path.join(self.workdir, "wes_input.json")
self.jobstore_default = 'file:' + os.path.join(self.workdir, 'toiljobstore')
self.jobstore = None
def sort_toil_options(self, extra):
# determine jobstore and set a new default if the user did not set one
cloud = False
for e in extra:
if e.startswith('--jobStore='):
self.jobstore = e[11:]
if self.jobstore.startswith(('aws', 'google', 'azure')):
cloud = True
if e.startswith(('--outdir=', '-o=')):
extra.remove(e)
if not cloud:
extra.append('--outdir=' + self.outdir)
if not self.jobstore:
extra.append('--jobStore=' + self.jobstore_default)
self.jobstore = self.jobstore_default
# store the jobstore location
with open(self.jobstorefile, 'w') as f:
f.write(self.jobstore)
return extra
def write_json(self, request_dict):
input_json = os.path.join(self.workdir, 'input.json')
with open(input_json, 'w') as f:
json.dump(request_dict['workflow_params'], f)
return input_json
def call_cmd(self, cmd, cwd):
"""
Calls a command with Popen.
Writes stdout, stderr, and the command to separate files.
:param cmd: A string or array of strings.
:param tempdir:
:return: The pid of the command.
"""
with open(self.cmdfile, 'w') as f:
f.write(str(cmd))
stdout = open(self.outfile, 'w')
stderr = open(self.errfile, 'w')
logging.info('Calling: ' + ' '.join(cmd))
process = subprocess.Popen(cmd,
stdout=stdout,
stderr=stderr,
close_fds=True,
cwd=cwd)
stdout.close()
stderr.close()
return process.pid
def cancel(self):
pass
def fetch(self, filename):
if os.path.exists(filename):
with open(filename, 'r') as f:
return f.read()
return ''
def getlog(self):
state, exit_code = self.getstate()
with open(self.request_json, 'r') as f:
request = json.load(f)
with open(self.jobstorefile, 'r') as f:
self.jobstore = f.read()
stderr = self.fetch(self.errfile)
starttime = self.fetch(self.starttime)
endtime = self.fetch(self.endtime)
cmd = [self.fetch(self.cmdfile)]
outputobj = {}
if state == "COMPLETE":
# only tested locally
if self.jobstore.startswith('file:'):
for f in os.listdir(self.outdir):
if f.startswith('out_tmpdir'):
shutil.rmtree(os.path.join(self.outdir, f))
for f in os.listdir(self.outdir):
outputobj[f] = {'location': os.path.join(self.outdir, f),
'size': os.stat(os.path.join(self.outdir, f)).st_size,
'class': 'File'}
return {
"run_id": self.run_id,
"request": request,
"state": state,
"run_log": {
"cmd": cmd,
"start_time": starttime,
"end_time": endtime,
"stdout": "",
"stderr": stderr,
"exit_code": exit_code
},
"task_logs": [],
"outputs": outputobj
}
def run(self, request, tempdir, opts):
"""
Constructs a command to run a cwl/json from requests and opts,
runs it, and deposits the outputs in outdir.
Runner:
opts.getopt("runner", default="cwl-runner")
CWL (url):
request["workflow_url"] == a url to a cwl file
or
request["workflow_attachment"] == input cwl text (written to a file and a url constructed for that file)
JSON File:
request["workflow_params"] == input json text (to be written to a file)
:param dict request: A dictionary containing the cwl/json information.
:param str tempdir: Folder where input files have been staged and the cwd to run at.
:param wes_service.util.WESBackend opts: contains the user's arguments;
specifically the runner and runner options
:return: {"run_id": self.run_id, "state": state}
"""
wftype = request['workflow_type'].lower().strip()
version = request['workflow_type_version']
if version != 'v1.0' and wftype == 'cwl':
raise RuntimeError('workflow_type "cwl" requires '
'"workflow_type_version" to be "v1.0": ' + str(version))
if version != '2.7' and wftype == 'py':
raise RuntimeError('workflow_type "py" requires '
'"workflow_type_version" to be "2.7": ' + str(version))
logging.info('Beginning Toil Workflow ID: ' + str(self.run_id))
with open(self.starttime, 'w') as f:
f.write(str(time.time()))
with open(self.request_json, 'w') as f:
json.dump(request, f)
with open(self.input_json, "w") as inputtemp:
json.dump(request["workflow_params"], inputtemp)
command_args = self.write_workflow(request, opts, tempdir, wftype=wftype)
pid = self.call_cmd(command_args, tempdir)
with open(self.endtime, 'w') as f:
f.write(str(time.time()))
with open(self.pidfile, 'w') as f:
f.write(str(pid))
return self.getstatus()
def getstate(self):
"""
Returns QUEUED, -1
INITIALIZING, -1
RUNNING, -1
COMPLETE, 0
or
EXECUTOR_ERROR, 255
"""
# the jobstore never existed
if not os.path.exists(self.jobstorefile):
logging.info('Workflow ' + self.run_id + ': QUEUED')
return "QUEUED", -1
# completed earlier
if os.path.exists(self.statcompletefile):
logging.info('Workflow ' + self.run_id + ': COMPLETE')
return "COMPLETE", 0
# errored earlier
if os.path.exists(self.staterrorfile):
logging.info('Workflow ' + self.run_id + ': EXECUTOR_ERROR')
return "EXECUTOR_ERROR", 255
# the workflow is staged but has not run yet
if not os.path.exists(self.errfile):
logging.info('Workflow ' + self.run_id + ': INITIALIZING')
return "INITIALIZING", -1
# TODO: Query with "toil status"
completed = False
with open(self.errfile, 'r') as f:
for line in f:
if 'Traceback (most recent call last)' in line:
logging.info('Workflow ' + self.run_id + ': EXECUTOR_ERROR')
open(self.staterrorfile, 'a').close()
return "EXECUTOR_ERROR", 255
# run can complete successfully but fail to upload outputs to cloud buckets
# so save the completed status and make sure there was no error elsewhere
if 'Finished toil run successfully.' in line:
completed = True
if completed:
logging.info('Workflow ' + self.run_id + ': COMPLETE')
open(self.statcompletefile, 'a').close()
return "COMPLETE", 0
logging.info('Workflow ' + self.run_id + ': RUNNING')
return "RUNNING", -1
def getstatus(self):
state, exit_code = self.getstate()
return {
"run_id": self.run_id,
"state": state
}
|
common-workflow-language/workflow-service
|
wes_service/toil_wes.py
|
ToilWorkflow.call_cmd
|
python
|
def call_cmd(self, cmd, cwd):
with open(self.cmdfile, 'w') as f:
f.write(str(cmd))
stdout = open(self.outfile, 'w')
stderr = open(self.errfile, 'w')
logging.info('Calling: ' + ' '.join(cmd))
process = subprocess.Popen(cmd,
stdout=stdout,
stderr=stderr,
close_fds=True,
cwd=cwd)
stdout.close()
stderr.close()
return process.pid
|
Calls a command with Popen.
Writes stdout, stderr, and the command to separate files.
:param cmd: A string or array of strings.
:param tempdir:
:return: The pid of the command.
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_service/toil_wes.py#L98-L120
| null |
class ToilWorkflow(object):
def __init__(self, run_id):
"""
Represents a toil workflow.
:param str run_id: A uuid string. Used to name the folder that contains
all of the files containing this particular workflow instance's information.
"""
super(ToilWorkflow, self).__init__()
self.run_id = run_id
self.workdir = os.path.join(os.getcwd(), 'workflows', self.run_id)
self.outdir = os.path.join(self.workdir, 'outdir')
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
self.outfile = os.path.join(self.workdir, 'stdout')
self.errfile = os.path.join(self.workdir, 'stderr')
self.starttime = os.path.join(self.workdir, 'starttime')
self.endtime = os.path.join(self.workdir, 'endtime')
self.pidfile = os.path.join(self.workdir, 'pid')
self.statcompletefile = os.path.join(self.workdir, 'status_completed')
self.staterrorfile = os.path.join(self.workdir, 'status_error')
self.cmdfile = os.path.join(self.workdir, 'cmd')
self.jobstorefile = os.path.join(self.workdir, 'jobstore')
self.request_json = os.path.join(self.workdir, 'request.json')
self.input_json = os.path.join(self.workdir, "wes_input.json")
self.jobstore_default = 'file:' + os.path.join(self.workdir, 'toiljobstore')
self.jobstore = None
def sort_toil_options(self, extra):
# determine jobstore and set a new default if the user did not set one
cloud = False
for e in extra:
if e.startswith('--jobStore='):
self.jobstore = e[11:]
if self.jobstore.startswith(('aws', 'google', 'azure')):
cloud = True
if e.startswith(('--outdir=', '-o=')):
extra.remove(e)
if not cloud:
extra.append('--outdir=' + self.outdir)
if not self.jobstore:
extra.append('--jobStore=' + self.jobstore_default)
self.jobstore = self.jobstore_default
# store the jobstore location
with open(self.jobstorefile, 'w') as f:
f.write(self.jobstore)
return extra
def write_workflow(self, request, opts, cwd, wftype='cwl'):
"""Writes a cwl, wdl, or python file as appropriate from the request dictionary."""
workflow_url = request.get("workflow_url")
# link the cwl and json into the cwd
if workflow_url.startswith('file://'):
os.link(workflow_url[7:], os.path.join(cwd, "wes_workflow." + wftype))
workflow_url = os.path.join(cwd, "wes_workflow." + wftype)
os.link(self.input_json, os.path.join(cwd, "wes_input.json"))
self.input_json = os.path.join(cwd, "wes_input.json")
extra_options = self.sort_toil_options(opts.getoptlist("extra"))
if wftype == 'cwl':
command_args = ['toil-cwl-runner'] + extra_options + [workflow_url, self.input_json]
elif wftype == 'wdl':
command_args = ['toil-wdl-runner'] + extra_options + [workflow_url, self.input_json]
elif wftype == 'py':
command_args = ['python'] + extra_options + [workflow_url]
else:
raise RuntimeError('workflow_type is not "cwl", "wdl", or "py": ' + str(wftype))
return command_args
def write_json(self, request_dict):
input_json = os.path.join(self.workdir, 'input.json')
with open(input_json, 'w') as f:
json.dump(request_dict['workflow_params'], f)
return input_json
def cancel(self):
pass
def fetch(self, filename):
if os.path.exists(filename):
with open(filename, 'r') as f:
return f.read()
return ''
def getlog(self):
state, exit_code = self.getstate()
with open(self.request_json, 'r') as f:
request = json.load(f)
with open(self.jobstorefile, 'r') as f:
self.jobstore = f.read()
stderr = self.fetch(self.errfile)
starttime = self.fetch(self.starttime)
endtime = self.fetch(self.endtime)
cmd = [self.fetch(self.cmdfile)]
outputobj = {}
if state == "COMPLETE":
# only tested locally
if self.jobstore.startswith('file:'):
for f in os.listdir(self.outdir):
if f.startswith('out_tmpdir'):
shutil.rmtree(os.path.join(self.outdir, f))
for f in os.listdir(self.outdir):
outputobj[f] = {'location': os.path.join(self.outdir, f),
'size': os.stat(os.path.join(self.outdir, f)).st_size,
'class': 'File'}
return {
"run_id": self.run_id,
"request": request,
"state": state,
"run_log": {
"cmd": cmd,
"start_time": starttime,
"end_time": endtime,
"stdout": "",
"stderr": stderr,
"exit_code": exit_code
},
"task_logs": [],
"outputs": outputobj
}
def run(self, request, tempdir, opts):
"""
Constructs a command to run a cwl/json from requests and opts,
runs it, and deposits the outputs in outdir.
Runner:
opts.getopt("runner", default="cwl-runner")
CWL (url):
request["workflow_url"] == a url to a cwl file
or
request["workflow_attachment"] == input cwl text (written to a file and a url constructed for that file)
JSON File:
request["workflow_params"] == input json text (to be written to a file)
:param dict request: A dictionary containing the cwl/json information.
:param str tempdir: Folder where input files have been staged and the cwd to run at.
:param wes_service.util.WESBackend opts: contains the user's arguments;
specifically the runner and runner options
:return: {"run_id": self.run_id, "state": state}
"""
wftype = request['workflow_type'].lower().strip()
version = request['workflow_type_version']
if version != 'v1.0' and wftype == 'cwl':
raise RuntimeError('workflow_type "cwl" requires '
'"workflow_type_version" to be "v1.0": ' + str(version))
if version != '2.7' and wftype == 'py':
raise RuntimeError('workflow_type "py" requires '
'"workflow_type_version" to be "2.7": ' + str(version))
logging.info('Beginning Toil Workflow ID: ' + str(self.run_id))
with open(self.starttime, 'w') as f:
f.write(str(time.time()))
with open(self.request_json, 'w') as f:
json.dump(request, f)
with open(self.input_json, "w") as inputtemp:
json.dump(request["workflow_params"], inputtemp)
command_args = self.write_workflow(request, opts, tempdir, wftype=wftype)
pid = self.call_cmd(command_args, tempdir)
with open(self.endtime, 'w') as f:
f.write(str(time.time()))
with open(self.pidfile, 'w') as f:
f.write(str(pid))
return self.getstatus()
def getstate(self):
"""
Returns QUEUED, -1
INITIALIZING, -1
RUNNING, -1
COMPLETE, 0
or
EXECUTOR_ERROR, 255
"""
# the jobstore never existed
if not os.path.exists(self.jobstorefile):
logging.info('Workflow ' + self.run_id + ': QUEUED')
return "QUEUED", -1
# completed earlier
if os.path.exists(self.statcompletefile):
logging.info('Workflow ' + self.run_id + ': COMPLETE')
return "COMPLETE", 0
# errored earlier
if os.path.exists(self.staterrorfile):
logging.info('Workflow ' + self.run_id + ': EXECUTOR_ERROR')
return "EXECUTOR_ERROR", 255
# the workflow is staged but has not run yet
if not os.path.exists(self.errfile):
logging.info('Workflow ' + self.run_id + ': INITIALIZING')
return "INITIALIZING", -1
# TODO: Query with "toil status"
completed = False
with open(self.errfile, 'r') as f:
for line in f:
if 'Traceback (most recent call last)' in line:
logging.info('Workflow ' + self.run_id + ': EXECUTOR_ERROR')
open(self.staterrorfile, 'a').close()
return "EXECUTOR_ERROR", 255
# run can complete successfully but fail to upload outputs to cloud buckets
# so save the completed status and make sure there was no error elsewhere
if 'Finished toil run successfully.' in line:
completed = True
if completed:
logging.info('Workflow ' + self.run_id + ': COMPLETE')
open(self.statcompletefile, 'a').close()
return "COMPLETE", 0
logging.info('Workflow ' + self.run_id + ': RUNNING')
return "RUNNING", -1
def getstatus(self):
state, exit_code = self.getstate()
return {
"run_id": self.run_id,
"state": state
}
|
common-workflow-language/workflow-service
|
wes_service/toil_wes.py
|
ToilWorkflow.run
|
python
|
def run(self, request, tempdir, opts):
wftype = request['workflow_type'].lower().strip()
version = request['workflow_type_version']
if version != 'v1.0' and wftype == 'cwl':
raise RuntimeError('workflow_type "cwl" requires '
'"workflow_type_version" to be "v1.0": ' + str(version))
if version != '2.7' and wftype == 'py':
raise RuntimeError('workflow_type "py" requires '
'"workflow_type_version" to be "2.7": ' + str(version))
logging.info('Beginning Toil Workflow ID: ' + str(self.run_id))
with open(self.starttime, 'w') as f:
f.write(str(time.time()))
with open(self.request_json, 'w') as f:
json.dump(request, f)
with open(self.input_json, "w") as inputtemp:
json.dump(request["workflow_params"], inputtemp)
command_args = self.write_workflow(request, opts, tempdir, wftype=wftype)
pid = self.call_cmd(command_args, tempdir)
with open(self.endtime, 'w') as f:
f.write(str(time.time()))
with open(self.pidfile, 'w') as f:
f.write(str(pid))
return self.getstatus()
|
Constructs a command to run a cwl/json from requests and opts,
runs it, and deposits the outputs in outdir.
Runner:
opts.getopt("runner", default="cwl-runner")
CWL (url):
request["workflow_url"] == a url to a cwl file
or
request["workflow_attachment"] == input cwl text (written to a file and a url constructed for that file)
JSON File:
request["workflow_params"] == input json text (to be written to a file)
:param dict request: A dictionary containing the cwl/json information.
:param str tempdir: Folder where input files have been staged and the cwd to run at.
:param wes_service.util.WESBackend opts: contains the user's arguments;
specifically the runner and runner options
:return: {"run_id": self.run_id, "state": state}
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_service/toil_wes.py#L173-L222
|
[
"def write_workflow(self, request, opts, cwd, wftype='cwl'):\n \"\"\"Writes a cwl, wdl, or python file as appropriate from the request dictionary.\"\"\"\n\n workflow_url = request.get(\"workflow_url\")\n\n # link the cwl and json into the cwd\n if workflow_url.startswith('file://'):\n os.link(workflow_url[7:], os.path.join(cwd, \"wes_workflow.\" + wftype))\n workflow_url = os.path.join(cwd, \"wes_workflow.\" + wftype)\n os.link(self.input_json, os.path.join(cwd, \"wes_input.json\"))\n self.input_json = os.path.join(cwd, \"wes_input.json\")\n\n extra_options = self.sort_toil_options(opts.getoptlist(\"extra\"))\n if wftype == 'cwl':\n command_args = ['toil-cwl-runner'] + extra_options + [workflow_url, self.input_json]\n elif wftype == 'wdl':\n command_args = ['toil-wdl-runner'] + extra_options + [workflow_url, self.input_json]\n elif wftype == 'py':\n command_args = ['python'] + extra_options + [workflow_url]\n else:\n raise RuntimeError('workflow_type is not \"cwl\", \"wdl\", or \"py\": ' + str(wftype))\n\n return command_args\n",
"def call_cmd(self, cmd, cwd):\n \"\"\"\n Calls a command with Popen.\n Writes stdout, stderr, and the command to separate files.\n\n :param cmd: A string or array of strings.\n :param tempdir:\n :return: The pid of the command.\n \"\"\"\n with open(self.cmdfile, 'w') as f:\n f.write(str(cmd))\n stdout = open(self.outfile, 'w')\n stderr = open(self.errfile, 'w')\n logging.info('Calling: ' + ' '.join(cmd))\n process = subprocess.Popen(cmd,\n stdout=stdout,\n stderr=stderr,\n close_fds=True,\n cwd=cwd)\n stdout.close()\n stderr.close()\n\n return process.pid\n",
"def getstatus(self):\n state, exit_code = self.getstate()\n\n return {\n \"run_id\": self.run_id,\n \"state\": state\n }\n"
] |
class ToilWorkflow(object):
def __init__(self, run_id):
"""
Represents a toil workflow.
:param str run_id: A uuid string. Used to name the folder that contains
all of the files containing this particular workflow instance's information.
"""
super(ToilWorkflow, self).__init__()
self.run_id = run_id
self.workdir = os.path.join(os.getcwd(), 'workflows', self.run_id)
self.outdir = os.path.join(self.workdir, 'outdir')
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
self.outfile = os.path.join(self.workdir, 'stdout')
self.errfile = os.path.join(self.workdir, 'stderr')
self.starttime = os.path.join(self.workdir, 'starttime')
self.endtime = os.path.join(self.workdir, 'endtime')
self.pidfile = os.path.join(self.workdir, 'pid')
self.statcompletefile = os.path.join(self.workdir, 'status_completed')
self.staterrorfile = os.path.join(self.workdir, 'status_error')
self.cmdfile = os.path.join(self.workdir, 'cmd')
self.jobstorefile = os.path.join(self.workdir, 'jobstore')
self.request_json = os.path.join(self.workdir, 'request.json')
self.input_json = os.path.join(self.workdir, "wes_input.json")
self.jobstore_default = 'file:' + os.path.join(self.workdir, 'toiljobstore')
self.jobstore = None
def sort_toil_options(self, extra):
# determine jobstore and set a new default if the user did not set one
cloud = False
for e in extra:
if e.startswith('--jobStore='):
self.jobstore = e[11:]
if self.jobstore.startswith(('aws', 'google', 'azure')):
cloud = True
if e.startswith(('--outdir=', '-o=')):
extra.remove(e)
if not cloud:
extra.append('--outdir=' + self.outdir)
if not self.jobstore:
extra.append('--jobStore=' + self.jobstore_default)
self.jobstore = self.jobstore_default
# store the jobstore location
with open(self.jobstorefile, 'w') as f:
f.write(self.jobstore)
return extra
def write_workflow(self, request, opts, cwd, wftype='cwl'):
"""Writes a cwl, wdl, or python file as appropriate from the request dictionary."""
workflow_url = request.get("workflow_url")
# link the cwl and json into the cwd
if workflow_url.startswith('file://'):
os.link(workflow_url[7:], os.path.join(cwd, "wes_workflow." + wftype))
workflow_url = os.path.join(cwd, "wes_workflow." + wftype)
os.link(self.input_json, os.path.join(cwd, "wes_input.json"))
self.input_json = os.path.join(cwd, "wes_input.json")
extra_options = self.sort_toil_options(opts.getoptlist("extra"))
if wftype == 'cwl':
command_args = ['toil-cwl-runner'] + extra_options + [workflow_url, self.input_json]
elif wftype == 'wdl':
command_args = ['toil-wdl-runner'] + extra_options + [workflow_url, self.input_json]
elif wftype == 'py':
command_args = ['python'] + extra_options + [workflow_url]
else:
raise RuntimeError('workflow_type is not "cwl", "wdl", or "py": ' + str(wftype))
return command_args
def write_json(self, request_dict):
input_json = os.path.join(self.workdir, 'input.json')
with open(input_json, 'w') as f:
json.dump(request_dict['workflow_params'], f)
return input_json
def call_cmd(self, cmd, cwd):
"""
Calls a command with Popen.
Writes stdout, stderr, and the command to separate files.
:param cmd: A string or array of strings.
:param tempdir:
:return: The pid of the command.
"""
with open(self.cmdfile, 'w') as f:
f.write(str(cmd))
stdout = open(self.outfile, 'w')
stderr = open(self.errfile, 'w')
logging.info('Calling: ' + ' '.join(cmd))
process = subprocess.Popen(cmd,
stdout=stdout,
stderr=stderr,
close_fds=True,
cwd=cwd)
stdout.close()
stderr.close()
return process.pid
def cancel(self):
pass
def fetch(self, filename):
if os.path.exists(filename):
with open(filename, 'r') as f:
return f.read()
return ''
def getlog(self):
state, exit_code = self.getstate()
with open(self.request_json, 'r') as f:
request = json.load(f)
with open(self.jobstorefile, 'r') as f:
self.jobstore = f.read()
stderr = self.fetch(self.errfile)
starttime = self.fetch(self.starttime)
endtime = self.fetch(self.endtime)
cmd = [self.fetch(self.cmdfile)]
outputobj = {}
if state == "COMPLETE":
# only tested locally
if self.jobstore.startswith('file:'):
for f in os.listdir(self.outdir):
if f.startswith('out_tmpdir'):
shutil.rmtree(os.path.join(self.outdir, f))
for f in os.listdir(self.outdir):
outputobj[f] = {'location': os.path.join(self.outdir, f),
'size': os.stat(os.path.join(self.outdir, f)).st_size,
'class': 'File'}
return {
"run_id": self.run_id,
"request": request,
"state": state,
"run_log": {
"cmd": cmd,
"start_time": starttime,
"end_time": endtime,
"stdout": "",
"stderr": stderr,
"exit_code": exit_code
},
"task_logs": [],
"outputs": outputobj
}
def getstate(self):
"""
Returns QUEUED, -1
INITIALIZING, -1
RUNNING, -1
COMPLETE, 0
or
EXECUTOR_ERROR, 255
"""
# the jobstore never existed
if not os.path.exists(self.jobstorefile):
logging.info('Workflow ' + self.run_id + ': QUEUED')
return "QUEUED", -1
# completed earlier
if os.path.exists(self.statcompletefile):
logging.info('Workflow ' + self.run_id + ': COMPLETE')
return "COMPLETE", 0
# errored earlier
if os.path.exists(self.staterrorfile):
logging.info('Workflow ' + self.run_id + ': EXECUTOR_ERROR')
return "EXECUTOR_ERROR", 255
# the workflow is staged but has not run yet
if not os.path.exists(self.errfile):
logging.info('Workflow ' + self.run_id + ': INITIALIZING')
return "INITIALIZING", -1
# TODO: Query with "toil status"
completed = False
with open(self.errfile, 'r') as f:
for line in f:
if 'Traceback (most recent call last)' in line:
logging.info('Workflow ' + self.run_id + ': EXECUTOR_ERROR')
open(self.staterrorfile, 'a').close()
return "EXECUTOR_ERROR", 255
# run can complete successfully but fail to upload outputs to cloud buckets
# so save the completed status and make sure there was no error elsewhere
if 'Finished toil run successfully.' in line:
completed = True
if completed:
logging.info('Workflow ' + self.run_id + ': COMPLETE')
open(self.statcompletefile, 'a').close()
return "COMPLETE", 0
logging.info('Workflow ' + self.run_id + ': RUNNING')
return "RUNNING", -1
def getstatus(self):
state, exit_code = self.getstate()
return {
"run_id": self.run_id,
"state": state
}
|
common-workflow-language/workflow-service
|
wes_service/toil_wes.py
|
ToilWorkflow.getstate
|
python
|
def getstate(self):
# the jobstore never existed
if not os.path.exists(self.jobstorefile):
logging.info('Workflow ' + self.run_id + ': QUEUED')
return "QUEUED", -1
# completed earlier
if os.path.exists(self.statcompletefile):
logging.info('Workflow ' + self.run_id + ': COMPLETE')
return "COMPLETE", 0
# errored earlier
if os.path.exists(self.staterrorfile):
logging.info('Workflow ' + self.run_id + ': EXECUTOR_ERROR')
return "EXECUTOR_ERROR", 255
# the workflow is staged but has not run yet
if not os.path.exists(self.errfile):
logging.info('Workflow ' + self.run_id + ': INITIALIZING')
return "INITIALIZING", -1
# TODO: Query with "toil status"
completed = False
with open(self.errfile, 'r') as f:
for line in f:
if 'Traceback (most recent call last)' in line:
logging.info('Workflow ' + self.run_id + ': EXECUTOR_ERROR')
open(self.staterrorfile, 'a').close()
return "EXECUTOR_ERROR", 255
# run can complete successfully but fail to upload outputs to cloud buckets
# so save the completed status and make sure there was no error elsewhere
if 'Finished toil run successfully.' in line:
completed = True
if completed:
logging.info('Workflow ' + self.run_id + ': COMPLETE')
open(self.statcompletefile, 'a').close()
return "COMPLETE", 0
logging.info('Workflow ' + self.run_id + ': RUNNING')
return "RUNNING", -1
|
Returns QUEUED, -1
INITIALIZING, -1
RUNNING, -1
COMPLETE, 0
or
EXECUTOR_ERROR, 255
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_service/toil_wes.py#L224-L271
| null |
class ToilWorkflow(object):
def __init__(self, run_id):
"""
Represents a toil workflow.
:param str run_id: A uuid string. Used to name the folder that contains
all of the files containing this particular workflow instance's information.
"""
super(ToilWorkflow, self).__init__()
self.run_id = run_id
self.workdir = os.path.join(os.getcwd(), 'workflows', self.run_id)
self.outdir = os.path.join(self.workdir, 'outdir')
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
self.outfile = os.path.join(self.workdir, 'stdout')
self.errfile = os.path.join(self.workdir, 'stderr')
self.starttime = os.path.join(self.workdir, 'starttime')
self.endtime = os.path.join(self.workdir, 'endtime')
self.pidfile = os.path.join(self.workdir, 'pid')
self.statcompletefile = os.path.join(self.workdir, 'status_completed')
self.staterrorfile = os.path.join(self.workdir, 'status_error')
self.cmdfile = os.path.join(self.workdir, 'cmd')
self.jobstorefile = os.path.join(self.workdir, 'jobstore')
self.request_json = os.path.join(self.workdir, 'request.json')
self.input_json = os.path.join(self.workdir, "wes_input.json")
self.jobstore_default = 'file:' + os.path.join(self.workdir, 'toiljobstore')
self.jobstore = None
def sort_toil_options(self, extra):
# determine jobstore and set a new default if the user did not set one
cloud = False
for e in extra:
if e.startswith('--jobStore='):
self.jobstore = e[11:]
if self.jobstore.startswith(('aws', 'google', 'azure')):
cloud = True
if e.startswith(('--outdir=', '-o=')):
extra.remove(e)
if not cloud:
extra.append('--outdir=' + self.outdir)
if not self.jobstore:
extra.append('--jobStore=' + self.jobstore_default)
self.jobstore = self.jobstore_default
# store the jobstore location
with open(self.jobstorefile, 'w') as f:
f.write(self.jobstore)
return extra
def write_workflow(self, request, opts, cwd, wftype='cwl'):
"""Writes a cwl, wdl, or python file as appropriate from the request dictionary."""
workflow_url = request.get("workflow_url")
# link the cwl and json into the cwd
if workflow_url.startswith('file://'):
os.link(workflow_url[7:], os.path.join(cwd, "wes_workflow." + wftype))
workflow_url = os.path.join(cwd, "wes_workflow." + wftype)
os.link(self.input_json, os.path.join(cwd, "wes_input.json"))
self.input_json = os.path.join(cwd, "wes_input.json")
extra_options = self.sort_toil_options(opts.getoptlist("extra"))
if wftype == 'cwl':
command_args = ['toil-cwl-runner'] + extra_options + [workflow_url, self.input_json]
elif wftype == 'wdl':
command_args = ['toil-wdl-runner'] + extra_options + [workflow_url, self.input_json]
elif wftype == 'py':
command_args = ['python'] + extra_options + [workflow_url]
else:
raise RuntimeError('workflow_type is not "cwl", "wdl", or "py": ' + str(wftype))
return command_args
def write_json(self, request_dict):
input_json = os.path.join(self.workdir, 'input.json')
with open(input_json, 'w') as f:
json.dump(request_dict['workflow_params'], f)
return input_json
def call_cmd(self, cmd, cwd):
"""
Calls a command with Popen.
Writes stdout, stderr, and the command to separate files.
:param cmd: A string or array of strings.
:param tempdir:
:return: The pid of the command.
"""
with open(self.cmdfile, 'w') as f:
f.write(str(cmd))
stdout = open(self.outfile, 'w')
stderr = open(self.errfile, 'w')
logging.info('Calling: ' + ' '.join(cmd))
process = subprocess.Popen(cmd,
stdout=stdout,
stderr=stderr,
close_fds=True,
cwd=cwd)
stdout.close()
stderr.close()
return process.pid
def cancel(self):
pass
def fetch(self, filename):
if os.path.exists(filename):
with open(filename, 'r') as f:
return f.read()
return ''
def getlog(self):
state, exit_code = self.getstate()
with open(self.request_json, 'r') as f:
request = json.load(f)
with open(self.jobstorefile, 'r') as f:
self.jobstore = f.read()
stderr = self.fetch(self.errfile)
starttime = self.fetch(self.starttime)
endtime = self.fetch(self.endtime)
cmd = [self.fetch(self.cmdfile)]
outputobj = {}
if state == "COMPLETE":
# only tested locally
if self.jobstore.startswith('file:'):
for f in os.listdir(self.outdir):
if f.startswith('out_tmpdir'):
shutil.rmtree(os.path.join(self.outdir, f))
for f in os.listdir(self.outdir):
outputobj[f] = {'location': os.path.join(self.outdir, f),
'size': os.stat(os.path.join(self.outdir, f)).st_size,
'class': 'File'}
return {
"run_id": self.run_id,
"request": request,
"state": state,
"run_log": {
"cmd": cmd,
"start_time": starttime,
"end_time": endtime,
"stdout": "",
"stderr": stderr,
"exit_code": exit_code
},
"task_logs": [],
"outputs": outputobj
}
def run(self, request, tempdir, opts):
"""
Constructs a command to run a cwl/json from requests and opts,
runs it, and deposits the outputs in outdir.
Runner:
opts.getopt("runner", default="cwl-runner")
CWL (url):
request["workflow_url"] == a url to a cwl file
or
request["workflow_attachment"] == input cwl text (written to a file and a url constructed for that file)
JSON File:
request["workflow_params"] == input json text (to be written to a file)
:param dict request: A dictionary containing the cwl/json information.
:param str tempdir: Folder where input files have been staged and the cwd to run at.
:param wes_service.util.WESBackend opts: contains the user's arguments;
specifically the runner and runner options
:return: {"run_id": self.run_id, "state": state}
"""
wftype = request['workflow_type'].lower().strip()
version = request['workflow_type_version']
if version != 'v1.0' and wftype == 'cwl':
raise RuntimeError('workflow_type "cwl" requires '
'"workflow_type_version" to be "v1.0": ' + str(version))
if version != '2.7' and wftype == 'py':
raise RuntimeError('workflow_type "py" requires '
'"workflow_type_version" to be "2.7": ' + str(version))
logging.info('Beginning Toil Workflow ID: ' + str(self.run_id))
with open(self.starttime, 'w') as f:
f.write(str(time.time()))
with open(self.request_json, 'w') as f:
json.dump(request, f)
with open(self.input_json, "w") as inputtemp:
json.dump(request["workflow_params"], inputtemp)
command_args = self.write_workflow(request, opts, tempdir, wftype=wftype)
pid = self.call_cmd(command_args, tempdir)
with open(self.endtime, 'w') as f:
f.write(str(time.time()))
with open(self.pidfile, 'w') as f:
f.write(str(pid))
return self.getstatus()
def getstatus(self):
state, exit_code = self.getstate()
return {
"run_id": self.run_id,
"state": state
}
|
common-workflow-language/workflow-service
|
wes_service/util.py
|
visit
|
python
|
def visit(d, op):
op(d)
if isinstance(d, list):
for i in d:
visit(i, op)
elif isinstance(d, dict):
for i in itervalues(d):
visit(i, op)
|
Recursively call op(d) for all list subelements and dictionary 'values' that d may have.
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_service/util.py#L11-L19
|
[
"def visit(d, op):\n \"\"\"Recursively call op(d) for all list subelements and dictionary 'values' that d may have.\"\"\"\n op(d)\n if isinstance(d, list):\n for i in d:\n visit(i, op)\n elif isinstance(d, dict):\n for i in itervalues(d):\n visit(i, op)\n",
"def fixpaths(d):\n \"\"\"Make sure all paths have a URI scheme.\"\"\"\n if isinstance(d, dict):\n if \"path\" in d:\n if \":\" not in d[\"path\"]:\n local_path = os.path.normpath(os.path.join(os.getcwd(), basedir, d[\"path\"]))\n d[\"location\"] = pathname2url(local_path)\n else:\n d[\"location\"] = d[\"path\"]\n del d[\"path\"]\n"
] |
import tempfile
import json
import os
import logging
from six import itervalues, iterlists
import connexion
from werkzeug.utils import secure_filename
class WESBackend(object):
"""Stores and retrieves options. Intended to be inherited."""
def __init__(self, opts):
"""Parse and store options as a list of tuples."""
self.pairs = []
for o in opts if opts else []:
k, v = o.split("=", 1)
self.pairs.append((k, v))
def getopt(self, p, default=None):
"""Returns the first option value stored that matches p or default."""
for k, v in self.pairs:
if k == p:
return v
return default
def getoptlist(self, p):
"""Returns all option values stored that match p as a list."""
optlist = []
for k, v in self.pairs:
if k == p:
optlist.append(v)
return optlist
def log_for_run(self, run_id, message):
logging.info("Workflow %s: %s", run_id, message)
def collect_attachments(self, run_id=None):
tempdir = tempfile.mkdtemp()
body = {}
has_attachments = False
for k, ls in iterlists(connexion.request.files):
try:
for v in ls:
if k == "workflow_attachment":
sp = v.filename.split("/")
fn = []
for p in sp:
if p not in ("", ".", ".."):
fn.append(secure_filename(p))
dest = os.path.join(tempdir, *fn)
if not os.path.isdir(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
self.log_for_run(run_id, "Staging attachment '%s' to '%s'" % (v.filename, dest))
v.save(dest)
has_attachments = True
body[k] = "file://%s" % tempdir # Reference to temp working dir.
elif k in ("workflow_params", "tags", "workflow_engine_parameters"):
content = v.read()
body[k] = json.loads(content.decode("utf-8"))
else:
body[k] = v.read().decode()
except Exception as e:
raise ValueError("Error reading parameter '%s': %s" % (k, e))
for k, ls in iterlists(connexion.request.form):
try:
for v in ls:
if not v:
continue
if k in ("workflow_params", "tags", "workflow_engine_parameters"):
body[k] = json.loads(v)
else:
body[k] = v
except Exception as e:
raise ValueError("Error reading parameter '%s': %s" % (k, e))
if "workflow_url" in body:
if ":" not in body["workflow_url"]:
if not has_attachments:
raise ValueError("Relative 'workflow_url' but missing 'workflow_attachment'")
body["workflow_url"] = "file://%s" % os.path.join(tempdir, secure_filename(body["workflow_url"]))
self.log_for_run(run_id, "Using workflow_url '%s'" % body.get("workflow_url"))
else:
raise ValueError("Missing 'workflow_url' in submission")
if "workflow_params" not in body:
raise ValueError("Missing 'workflow_params' in submission")
return tempdir, body
|
common-workflow-language/workflow-service
|
wes_service/util.py
|
WESBackend.getopt
|
python
|
def getopt(self, p, default=None):
for k, v in self.pairs:
if k == p:
return v
return default
|
Returns the first option value stored that matches p or default.
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_service/util.py#L31-L36
| null |
class WESBackend(object):
"""Stores and retrieves options. Intended to be inherited."""
def __init__(self, opts):
"""Parse and store options as a list of tuples."""
self.pairs = []
for o in opts if opts else []:
k, v = o.split("=", 1)
self.pairs.append((k, v))
def getoptlist(self, p):
"""Returns all option values stored that match p as a list."""
optlist = []
for k, v in self.pairs:
if k == p:
optlist.append(v)
return optlist
def log_for_run(self, run_id, message):
logging.info("Workflow %s: %s", run_id, message)
def collect_attachments(self, run_id=None):
tempdir = tempfile.mkdtemp()
body = {}
has_attachments = False
for k, ls in iterlists(connexion.request.files):
try:
for v in ls:
if k == "workflow_attachment":
sp = v.filename.split("/")
fn = []
for p in sp:
if p not in ("", ".", ".."):
fn.append(secure_filename(p))
dest = os.path.join(tempdir, *fn)
if not os.path.isdir(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
self.log_for_run(run_id, "Staging attachment '%s' to '%s'" % (v.filename, dest))
v.save(dest)
has_attachments = True
body[k] = "file://%s" % tempdir # Reference to temp working dir.
elif k in ("workflow_params", "tags", "workflow_engine_parameters"):
content = v.read()
body[k] = json.loads(content.decode("utf-8"))
else:
body[k] = v.read().decode()
except Exception as e:
raise ValueError("Error reading parameter '%s': %s" % (k, e))
for k, ls in iterlists(connexion.request.form):
try:
for v in ls:
if not v:
continue
if k in ("workflow_params", "tags", "workflow_engine_parameters"):
body[k] = json.loads(v)
else:
body[k] = v
except Exception as e:
raise ValueError("Error reading parameter '%s': %s" % (k, e))
if "workflow_url" in body:
if ":" not in body["workflow_url"]:
if not has_attachments:
raise ValueError("Relative 'workflow_url' but missing 'workflow_attachment'")
body["workflow_url"] = "file://%s" % os.path.join(tempdir, secure_filename(body["workflow_url"]))
self.log_for_run(run_id, "Using workflow_url '%s'" % body.get("workflow_url"))
else:
raise ValueError("Missing 'workflow_url' in submission")
if "workflow_params" not in body:
raise ValueError("Missing 'workflow_params' in submission")
return tempdir, body
|
common-workflow-language/workflow-service
|
wes_service/util.py
|
WESBackend.getoptlist
|
python
|
def getoptlist(self, p):
optlist = []
for k, v in self.pairs:
if k == p:
optlist.append(v)
return optlist
|
Returns all option values stored that match p as a list.
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_service/util.py#L38-L44
| null |
class WESBackend(object):
"""Stores and retrieves options. Intended to be inherited."""
def __init__(self, opts):
"""Parse and store options as a list of tuples."""
self.pairs = []
for o in opts if opts else []:
k, v = o.split("=", 1)
self.pairs.append((k, v))
def getopt(self, p, default=None):
"""Returns the first option value stored that matches p or default."""
for k, v in self.pairs:
if k == p:
return v
return default
def log_for_run(self, run_id, message):
logging.info("Workflow %s: %s", run_id, message)
def collect_attachments(self, run_id=None):
tempdir = tempfile.mkdtemp()
body = {}
has_attachments = False
for k, ls in iterlists(connexion.request.files):
try:
for v in ls:
if k == "workflow_attachment":
sp = v.filename.split("/")
fn = []
for p in sp:
if p not in ("", ".", ".."):
fn.append(secure_filename(p))
dest = os.path.join(tempdir, *fn)
if not os.path.isdir(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
self.log_for_run(run_id, "Staging attachment '%s' to '%s'" % (v.filename, dest))
v.save(dest)
has_attachments = True
body[k] = "file://%s" % tempdir # Reference to temp working dir.
elif k in ("workflow_params", "tags", "workflow_engine_parameters"):
content = v.read()
body[k] = json.loads(content.decode("utf-8"))
else:
body[k] = v.read().decode()
except Exception as e:
raise ValueError("Error reading parameter '%s': %s" % (k, e))
for k, ls in iterlists(connexion.request.form):
try:
for v in ls:
if not v:
continue
if k in ("workflow_params", "tags", "workflow_engine_parameters"):
body[k] = json.loads(v)
else:
body[k] = v
except Exception as e:
raise ValueError("Error reading parameter '%s': %s" % (k, e))
if "workflow_url" in body:
if ":" not in body["workflow_url"]:
if not has_attachments:
raise ValueError("Relative 'workflow_url' but missing 'workflow_attachment'")
body["workflow_url"] = "file://%s" % os.path.join(tempdir, secure_filename(body["workflow_url"]))
self.log_for_run(run_id, "Using workflow_url '%s'" % body.get("workflow_url"))
else:
raise ValueError("Missing 'workflow_url' in submission")
if "workflow_params" not in body:
raise ValueError("Missing 'workflow_params' in submission")
return tempdir, body
|
common-workflow-language/workflow-service
|
wes_service/arvados_wes.py
|
catch_exceptions
|
python
|
def catch_exceptions(orig_func):
@functools.wraps(orig_func)
def catch_exceptions_wrapper(self, *args, **kwargs):
try:
return orig_func(self, *args, **kwargs)
except arvados.errors.ApiError as e:
logging.exception("Failure")
return {"msg": e._get_reason(), "status_code": e.resp.status}, int(e.resp.status)
except subprocess.CalledProcessError as e:
return {"msg": str(e), "status_code": 500}, 500
except MissingAuthorization:
return {"msg": "'Authorization' header is missing or empty, expecting Arvados API token", "status_code": 401}, 401
except ValueError as e:
return {"msg": str(e), "status_code": 400}, 400
except Exception as e:
return {"msg": str(e), "status_code": 500}, 500
return catch_exceptions_wrapper
|
Catch uncaught exceptions and turn them into http errors
|
train
|
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_service/arvados_wes.py#L46-L65
| null |
import arvados
import arvados.util
import arvados.collection
import arvados.errors
import os
import connexion
import json
import subprocess
import tempfile
import functools
import threading
import logging
import shutil
from wes_service.util import visit, WESBackend
class MissingAuthorization(Exception):
pass
def get_api(authtoken=None):
if authtoken is None:
if not connexion.request.headers.get('Authorization'):
raise MissingAuthorization()
authtoken = connexion.request.headers['Authorization']
if not authtoken.startswith("Bearer ") or authtoken.startswith("OAuth2 "):
raise ValueError("Authorization token must start with 'Bearer '")
authtoken = authtoken[7:]
return arvados.api_from_config(version="v1", apiconfig={
"ARVADOS_API_HOST": os.environ["ARVADOS_API_HOST"],
"ARVADOS_API_TOKEN": authtoken,
"ARVADOS_API_HOST_INSECURE": os.environ.get("ARVADOS_API_HOST_INSECURE", "false"), # NOQA
})
statemap = {
"Queued": "QUEUED",
"Locked": "INITIALIZING",
"Running": "RUNNING",
"Complete": "COMPLETE",
"Cancelled": "CANCELED"
}
class ArvadosBackend(WESBackend):
def GetServiceInfo(self):
stdout, stderr = subprocess.Popen(["arvados-cwl-runner", "--version"], stderr=subprocess.PIPE).communicate()
return {
"workflow_type_versions": {
"CWL": {"workflow_type_version": ["v1.0"]}
},
"supported_wes_versions": ["0.3.0", "1.0.0"],
"supported_filesystem_protocols": ["http", "https", "keep"],
"workflow_engine_versions": {
"arvados-cwl-runner": str(stderr)
},
"default_workflow_engine_parameters": [],
"system_state_counts": {},
"auth_instructions_url": "http://doc.arvados.org/user/reference/api-tokens.html",
"tags": {
"ARVADOS_API_HOST": os.environ["ARVADOS_API_HOST"]
}
}
@catch_exceptions
def ListRuns(self, page_size=None, page_token=None, state_search=None):
api = get_api()
paging = []
if page_token:
paging = [["uuid", ">", page_token]]
requests = api.container_requests().list(
filters=[["requesting_container_uuid", "=", None],
["container_uuid", "!=", None]] + paging,
select=["uuid", "command", "container_uuid"],
order=["uuid"],
limit=page_size).execute()["items"]
containers = api.containers().list(
filters=[["uuid", "in", [w["container_uuid"] for w in requests]]],
select=["uuid", "state"]).execute()["items"]
uuidmap = {c["uuid"]: statemap[c["state"]] for c in containers}
workflow_list = [{"run_id": cr["uuid"],
"state": uuidmap.get(cr["container_uuid"])}
for cr in requests
if cr["command"] and cr["command"][0] == "arvados-cwl-runner"]
return {
"workflows": workflow_list,
"next_page_token": workflow_list[-1]["run_id"] if workflow_list else ""
}
def log_for_run(self, run_id, message, authtoken=None):
get_api(authtoken).logs().create(body={"log": {"object_uuid": run_id,
"event_type": "stderr",
"properties": {"text": message+"\n"}}}).execute()
def invoke_cwl_runner(self, cr_uuid, workflow_url, workflow_params,
env, project_uuid,
tempdir):
api = arvados.api_from_config(version="v1", apiconfig={
"ARVADOS_API_HOST": env["ARVADOS_API_HOST"],
"ARVADOS_API_TOKEN": env['ARVADOS_API_TOKEN'],
"ARVADOS_API_HOST_INSECURE": env["ARVADOS_API_HOST_INSECURE"] # NOQA
})
try:
with tempfile.NamedTemporaryFile("wt", dir=tempdir, suffix=".json") as inputtemp:
json.dump(workflow_params, inputtemp)
inputtemp.flush()
msg = ""
for dirpath, dirs, files in os.walk(tempdir):
for f in files:
msg += " " + dirpath + "/" + f + "\n"
self.log_for_run(cr_uuid, "Contents of %s:\n%s" % (tempdir, msg),
env['ARVADOS_API_TOKEN'])
# TODO: run submission process in a container to prevent
# a-c-r submission processes from seeing each other.
cmd = ["arvados-cwl-runner", "--submit-request-uuid="+cr_uuid,
"--submit", "--no-wait", "--api=containers", "--debug"]
if project_uuid:
cmd.append("--project-uuid="+project_uuid)
cmd.append(workflow_url)
cmd.append(inputtemp.name)
self.log_for_run(cr_uuid, "Executing %s" % cmd, env['ARVADOS_API_TOKEN'])
proc = subprocess.Popen(cmd, env=env,
cwd=tempdir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutdata, stderrdata) = proc.communicate()
if proc.returncode != 0:
api.container_requests().update(uuid=cr_uuid, body={"priority": 0}).execute()
self.log_for_run(cr_uuid, stderrdata.decode("utf-8"), env['ARVADOS_API_TOKEN'])
if tempdir:
shutil.rmtree(tempdir)
except subprocess.CalledProcessError as e:
api.container_requests().update(uuid=cr_uuid, body={"priority": 0,
"name": "Cancelled container request",
"properties": {"arvados-cwl-runner-log": str(e)}}).execute()
@catch_exceptions
def RunWorkflow(self, **args):
if not connexion.request.headers.get('Authorization'):
raise MissingAuthorization()
authtoken = connexion.request.headers['Authorization']
if authtoken.startswith("Bearer ") or authtoken.startswith("OAuth2 "):
authtoken = authtoken[7:]
env = {
"PATH": os.environ["PATH"],
"ARVADOS_API_HOST": os.environ["ARVADOS_API_HOST"],
"ARVADOS_API_TOKEN": authtoken,
"ARVADOS_API_HOST_INSECURE": os.environ.get("ARVADOS_API_HOST_INSECURE", "false") # NOQA
}
api = get_api()
cr = api.container_requests().create(body={"container_request":
{"command": [""],
"container_image": "n/a",
"state": "Uncommitted",
"output_path": "n/a",
"priority": 500}}).execute()
success = False
try:
tempdir, body = self.collect_attachments(cr["uuid"])
workflow_engine_parameters = body.get("workflow_engine_parameters", {})
project_uuid = None
if workflow_engine_parameters:
project_uuid = workflow_engine_parameters.get("project_uuid")
threading.Thread(target=self.invoke_cwl_runner, args=(cr["uuid"],
body["workflow_url"],
body["workflow_params"],
env,
project_uuid,
tempdir)).start()
success = True
except ValueError as e:
self.log_for_run(cr["uuid"], "Bad request: " + str(e))
cr = api.container_requests().update(uuid=cr["uuid"],
body={"container_request": {
"name": "Cancelled container request",
"priority": 0}}).execute()
return {"msg": str(e), "status_code": 400}, 400
except Exception as e:
logging.exception("Error")
self.log_for_run(cr["uuid"], "An exception ocurred while handling your request: " + str(e))
cr = api.container_requests().update(uuid=cr["uuid"],
body={"container_request": {
"name": "Cancelled container request",
"priority": 0}}).execute()
return {"msg": str(e), "status_code": 500}, 500
else:
return {"run_id": cr["uuid"]}
@catch_exceptions
def GetRunLog(self, run_id):
api = get_api()
request = api.container_requests().get(uuid=run_id).execute()
if request["container_uuid"]:
container = api.containers().get(uuid=request["container_uuid"]).execute() # NOQA
task_reqs = arvados.util.list_all(api.container_requests().list, filters=[["requesting_container_uuid", "=", container["uuid"]]])
tasks = arvados.util.list_all(api.containers().list, filters=[["uuid", "in", [tr["container_uuid"] for tr in task_reqs]]])
containers_map = {c["uuid"]: c for c in tasks}
containers_map[container["uuid"]] = container
else:
container = {
"state": "Queued" if request["priority"] > 0 else "Cancelled",
"exit_code": None,
"log": None
}
tasks = []
containers_map = {}
task_reqs = []
outputobj = {}
if request["output_uuid"]:
c = arvados.collection.CollectionReader(request["output_uuid"], api_client=api)
with c.open("cwl.output.json") as f:
try:
outputobj = json.load(f)
except ValueError:
pass
def keepref(d):
if isinstance(d, dict) and "location" in d:
d["location"] = "%sc=%s/_/%s" % (api._resourceDesc["keepWebServiceUrl"], c.portable_data_hash(), d["location"]) # NOQA
visit(outputobj, keepref)
def log_object(cr):
if cr["container_uuid"]:
containerlog = containers_map[cr["container_uuid"]]
else:
containerlog = {"started_at": "",
"finished_at": "",
"exit_code": None,
"log": ""}
r = {
"name": cr["name"] or "",
"cmd": cr["command"],
"start_time": containerlog["started_at"] or "",
"end_time": containerlog["finished_at"] or "",
"stdout": "",
"stderr": "",
"exit_code": containerlog["exit_code"] or 0
}
if containerlog["log"]:
r["stdout_keep"] = "%sc=%s/_/%s" % (api._resourceDesc["keepWebServiceUrl"], containerlog["log"], "stdout.txt") # NOQA
r["stderr_keep"] = "%sc=%s/_/%s" % (api._resourceDesc["keepWebServiceUrl"], containerlog["log"], "stderr.txt") # NOQA
r["stdout"] = "%s/x-dynamic-logs/stdout" % (connexion.request.url)
r["stderr"] = "%s/x-dynamic-logs/stderr" % (connexion.request.url)
return r
r = {
"run_id": request["uuid"],
"request": {
"workflow_url": "",
"workflow_params": request["mounts"].get("/var/lib/cwl/cwl.input.json", {}).get("content", {})
},
"state": statemap[container["state"]],
"run_log": log_object(request),
"task_logs": [log_object(t) for t in task_reqs],
"outputs": outputobj
}
return r
@catch_exceptions
def CancelRun(self, run_id): # NOQA
api = get_api()
request = api.container_requests().update(uuid=run_id, body={"priority": 0}).execute() # NOQA
return {"run_id": request["uuid"]}
@catch_exceptions
def GetRunStatus(self, run_id):
api = get_api()
request = api.container_requests().get(uuid=run_id).execute()
if request["container_uuid"]:
container = api.containers().get(uuid=request["container_uuid"]).execute() # NOQA
elif request["priority"] == 0:
container = {"state": "Cancelled"}
else:
container = {"state": "Queued"}
return {"run_id": request["uuid"],
"state": statemap[container["state"]]}
def dynamic_logs(run_id, logstream):
api = get_api()
cr = api.container_requests().get(uuid=run_id).execute()
l1 = [t["properties"]["text"]
for t in api.logs().list(filters=[["object_uuid", "=", run_id],
["event_type", "=", logstream]],
order="created_at desc",
limit=100).execute()["items"]]
if cr["container_uuid"]:
l2 = [t["properties"]["text"]
for t in api.logs().list(filters=[["object_uuid", "=", cr["container_uuid"]],
["event_type", "=", logstream]],
order="created_at desc",
limit=100).execute()["items"]]
else:
l2 = []
return "".join(reversed(l1)) + "".join(reversed(l2))
def create_backend(app, opts):
ab = ArvadosBackend(opts)
app.app.route('/ga4gh/wes/v1/runs/<run_id>/x-dynamic-logs/<logstream>')(dynamic_logs)
return ab
|
box/genty
|
genty/genty_repeat.py
|
genty_repeat
|
python
|
def genty_repeat(count):
if count < 0:
raise ValueError(
"Really? Can't have {0} iterations. Please pick a value >= 0."
.format(count)
)
def wrap(test_method):
test_method.genty_repeat_count = count
return test_method
return wrap
|
To use in conjunction with a TestClass wrapped with @genty.
Runs the wrapped test 'count' times:
@genty_repeat(count)
def test_some_function(self)
...
Can also wrap a test already decorated with @genty_dataset
@genty_repeat(3)
@genty_dataset(True, False)
def test_some__other_function(self, bool_value):
...
This will run 6 tests in total, 3 each of the True and False cases.
:param count:
The number of times to run the test.
:type count:
`int`
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty_repeat.py#L6-L36
| null |
# coding: utf-8
from __future__ import unicode_literals
|
box/genty
|
genty/genty.py
|
genty
|
python
|
def genty(target_cls):
tests = _expand_tests(target_cls)
tests_with_datasets = _expand_datasets(tests)
tests_with_datasets_and_repeats = _expand_repeats(tests_with_datasets)
_add_new_test_methods(target_cls, tests_with_datasets_and_repeats)
return target_cls
|
This decorator takes the information provided by @genty_dataset,
@genty_dataprovider, and @genty_repeat and generates the corresponding
test methods.
:param target_cls:
Test class whose test methods have been decorated.
:type target_cls:
`class`
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty.py#L21-L38
|
[
"def _expand_tests(target_cls):\n \"\"\"\n Generator of all the test unbound functions in the given class.\n\n :param target_cls:\n Target test class.\n :type target_cls:\n `class`\n :return:\n Generator of all the test_methods in the given class yielding\n tuples of method name and unbound function.\n :rtype:\n `generator` of `tuple` of (`unicode`, `function`)\n \"\"\"\n entries = dict(six.iteritems(target_cls.__dict__))\n for key, value in six.iteritems(entries):\n if key.startswith('test') and isinstance(value, types.FunctionType):\n if not hasattr(value, 'genty_generated_test'):\n yield key, value\n",
"def _expand_datasets(test_functions):\n \"\"\"\n Generator producing test_methods, with an optional dataset.\n\n :param test_functions:\n Iterator over tuples of test name and test unbound function.\n :type test_functions:\n `iterator` of `tuple` of (`unicode`, `function`)\n :return:\n Generator yielding a tuple of\n - method_name : Name of the test method\n - unbound function : Unbound function that will be the test method.\n - dataset name : String representation of the given dataset\n - dataset : Tuple representing the args for a test\n - param factory : Function that returns params for the test method\n :rtype:\n `generator` of `tuple` of (\n `unicode`,\n `function`,\n `unicode` or None,\n `tuple` or None,\n `function` or None,\n )\n \"\"\"\n for name, func in test_functions:\n\n dataset_tuples = chain(\n [(None, getattr(func, 'genty_datasets', {}))],\n getattr(func, 'genty_dataproviders', []),\n )\n\n no_datasets = True\n for dataprovider, datasets in dataset_tuples:\n for dataset_name, dataset in six.iteritems(datasets):\n no_datasets = False\n yield name, func, dataset_name, dataset, dataprovider\n\n if no_datasets:\n # yield the original test method, unaltered\n yield name, func, None, None, None\n",
"def _expand_repeats(test_functions):\n \"\"\"\n Generator producing test_methods, with any repeat count unrolled.\n\n :param test_functions:\n Sequence of tuples of\n - method_name : Name of the test method\n - unbound function : Unbound function that will be the test method.\n - dataset name : String representation of the given dataset\n - dataset : Tuple representing the args for a test\n - param factory : Function that returns params for the test method\n :type test_functions:\n `iterator` of `tuple` of\n (`unicode`, `function`, `unicode` or None, `tuple` or None, `function`)\n :return:\n Generator yielding a tuple of\n (method_name, unbound function, dataset, name dataset, repeat_suffix)\n :rtype:\n `generator` of `tuple` of (`unicode`, `function`,\n `unicode` or None, `tuple` or None, `function`, `unicode`)\n \"\"\"\n for name, func, dataset_name, dataset, dataprovider in test_functions:\n repeat_count = getattr(func, 'genty_repeat_count', 0)\n if repeat_count:\n for i in range(1, repeat_count + 1):\n repeat_suffix = _build_repeat_suffix(i, repeat_count)\n yield (\n name,\n func,\n dataset_name,\n dataset,\n dataprovider,\n repeat_suffix,\n )\n else:\n yield name, func, dataset_name, dataset, dataprovider, None\n",
"def _add_new_test_methods(target_cls, tests_with_datasets_and_repeats):\n \"\"\"Define the given tests in the given class.\n\n :param target_cls:\n Test class where to define the given test methods.\n :type target_cls:\n `class`\n :param tests_with_datasets_and_repeats:\n Sequence of tuples describing the new test to add to the class.\n (method_name, unbound function, dataset name, dataset,\n dataprovider, repeat_suffix)\n :type tests_with_datasets_and_repeats:\n Sequence of `tuple` of (`unicode`, `function`,\n `unicode` or None, `tuple` or None, `function`, `unicode`)\n \"\"\"\n for test_info in tests_with_datasets_and_repeats:\n (\n method_name,\n func,\n dataset_name,\n dataset,\n dataprovider,\n repeat_suffix,\n ) = test_info\n\n # Remove the original test_method as it's superseded by this\n # generated method.\n is_first_reference = _delete_original_test_method(\n target_cls,\n method_name,\n )\n\n # However, if that test_method is referenced by name in sys.argv\n # Then take 1 of the generated methods (we take the first) and\n # give that generated method the original name... so that the reference\n # can find an actual test method.\n if is_first_reference and _is_referenced_in_argv(method_name):\n dataset_name = None\n repeat_suffix = None\n\n _add_method_to_class(\n target_cls,\n method_name,\n func,\n dataset_name,\n dataset,\n dataprovider,\n repeat_suffix,\n )\n"
] |
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import functools
from itertools import chain
import math
import re
import sys
import types
import six
from .genty_args import GentyArgs
from .private import encode_non_ascii_string
REPLACE_FOR_PERIOD_CHAR = '\xb7'
def _expand_tests(target_cls):
"""
Generator of all the test unbound functions in the given class.
:param target_cls:
Target test class.
:type target_cls:
`class`
:return:
Generator of all the test_methods in the given class yielding
tuples of method name and unbound function.
:rtype:
`generator` of `tuple` of (`unicode`, `function`)
"""
entries = dict(six.iteritems(target_cls.__dict__))
for key, value in six.iteritems(entries):
if key.startswith('test') and isinstance(value, types.FunctionType):
if not hasattr(value, 'genty_generated_test'):
yield key, value
def _expand_datasets(test_functions):
"""
Generator producing test_methods, with an optional dataset.
:param test_functions:
Iterator over tuples of test name and test unbound function.
:type test_functions:
`iterator` of `tuple` of (`unicode`, `function`)
:return:
Generator yielding a tuple of
- method_name : Name of the test method
- unbound function : Unbound function that will be the test method.
- dataset name : String representation of the given dataset
- dataset : Tuple representing the args for a test
- param factory : Function that returns params for the test method
:rtype:
`generator` of `tuple` of (
`unicode`,
`function`,
`unicode` or None,
`tuple` or None,
`function` or None,
)
"""
for name, func in test_functions:
dataset_tuples = chain(
[(None, getattr(func, 'genty_datasets', {}))],
getattr(func, 'genty_dataproviders', []),
)
no_datasets = True
for dataprovider, datasets in dataset_tuples:
for dataset_name, dataset in six.iteritems(datasets):
no_datasets = False
yield name, func, dataset_name, dataset, dataprovider
if no_datasets:
# yield the original test method, unaltered
yield name, func, None, None, None
def _expand_repeats(test_functions):
"""
Generator producing test_methods, with any repeat count unrolled.
:param test_functions:
Sequence of tuples of
- method_name : Name of the test method
- unbound function : Unbound function that will be the test method.
- dataset name : String representation of the given dataset
- dataset : Tuple representing the args for a test
- param factory : Function that returns params for the test method
:type test_functions:
`iterator` of `tuple` of
(`unicode`, `function`, `unicode` or None, `tuple` or None, `function`)
:return:
Generator yielding a tuple of
(method_name, unbound function, dataset, name dataset, repeat_suffix)
:rtype:
`generator` of `tuple` of (`unicode`, `function`,
`unicode` or None, `tuple` or None, `function`, `unicode`)
"""
for name, func, dataset_name, dataset, dataprovider in test_functions:
repeat_count = getattr(func, 'genty_repeat_count', 0)
if repeat_count:
for i in range(1, repeat_count + 1):
repeat_suffix = _build_repeat_suffix(i, repeat_count)
yield (
name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
)
else:
yield name, func, dataset_name, dataset, dataprovider, None
def _add_new_test_methods(target_cls, tests_with_datasets_and_repeats):
"""Define the given tests in the given class.
:param target_cls:
Test class where to define the given test methods.
:type target_cls:
`class`
:param tests_with_datasets_and_repeats:
Sequence of tuples describing the new test to add to the class.
(method_name, unbound function, dataset name, dataset,
dataprovider, repeat_suffix)
:type tests_with_datasets_and_repeats:
Sequence of `tuple` of (`unicode`, `function`,
`unicode` or None, `tuple` or None, `function`, `unicode`)
"""
for test_info in tests_with_datasets_and_repeats:
(
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
) = test_info
# Remove the original test_method as it's superseded by this
# generated method.
is_first_reference = _delete_original_test_method(
target_cls,
method_name,
)
# However, if that test_method is referenced by name in sys.argv
# Then take 1 of the generated methods (we take the first) and
# give that generated method the original name... so that the reference
# can find an actual test method.
if is_first_reference and _is_referenced_in_argv(method_name):
dataset_name = None
repeat_suffix = None
_add_method_to_class(
target_cls,
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
)
def _is_referenced_in_argv(method_name):
"""
Various test runners allow one to run a specific test like so:
python -m unittest -v <test_module>.<test_name>
Return True is the given method name is so referenced.
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:return:
Is the given method referenced by the command line.
:rtype:
`bool`
"""
expr = '.*[:.]{0}$'.format(method_name)
regex = re.compile(expr)
return any(regex.match(arg) for arg in sys.argv)
def _build_repeat_suffix(iteration, count):
"""
Return the suffix string to identify iteration X out of Y.
For example, with a count of 100, this will build strings like
"iteration_053" or "iteration_008".
:param iteration:
Current iteration.
:type iteration:
`int`
:param count:
Total number of iterations.
:type count:
`int`
:return:
Repeat suffix.
:rtype:
`unicode`
"""
format_width = int(math.ceil(math.log(count + 1, 10)))
new_suffix = 'iteration_{0:0{width}d}'.format(
iteration,
width=format_width
)
return new_suffix
def _delete_original_test_method(target_cls, name):
"""
Delete an original test method with the given name.
:param target_cls:
Target class.
:type target_cls:
`class`
:param name:
Name of the method to remove.
:type name:
`unicode`
:return:
True if the original method existed
:rtype:
`bool`
"""
attribute = getattr(target_cls, name, None)
if attribute and not getattr(attribute, 'genty_generated_test', None):
try:
delattr(target_cls, name)
except AttributeError:
pass
return True
else:
return False
def _build_final_method_name(
method_name,
dataset_name,
dataprovider_name,
repeat_suffix,
):
"""
Return a nice human friendly name, that almost looks like code.
Example: a test called 'test_something' with a dataset of (5, 'hello')
Return: "test_something(5, 'hello')"
Example: a test called 'test_other_stuff' with dataset of (9) and repeats
Return: "test_other_stuff(9) iteration_<X>"
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param dataprovider_name:
If there's a dataprovider involved, then this is its name.
:type dataprovider_name:
`unicode` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
:return:
The fully composed name of the generated test method.
:rtype:
`unicode`
"""
# For tests using a dataprovider, append "_<dataprovider_name>" to
# the test method name
suffix = ''
if dataprovider_name:
suffix = '_{0}'.format(dataprovider_name)
if not dataset_name and not repeat_suffix:
return '{0}{1}'.format(method_name, suffix)
if dataset_name:
# Nosetest multi-processing code parses the full test name
# to discern package/module names. Thus any periods in the test-name
# causes that code to fail. So replace any periods with the unicode
# middle-dot character. Yes, this change is applied independent
# of the test runner being used... and that's fine since there is
# no real contract as to how the fabricated tests are named.
dataset_name = dataset_name.replace('.', REPLACE_FOR_PERIOD_CHAR)
# Place data_set info inside parens, as if it were a function call
suffix = '{0}({1})'.format(suffix, dataset_name or "")
if repeat_suffix:
suffix = '{0} {1}'.format(suffix, repeat_suffix)
test_method_name_for_dataset = "{0}{1}".format(
method_name,
suffix,
)
return test_method_name_for_dataset
def _build_dataset_method(method, dataset):
"""
Return a fabricated method that marshals the dataset into parameters
for given 'method'
:param method:
The underlying test method.
:type method:
`callable`
:param dataset:
Tuple or GentyArgs instance containing the args of the dataset.
:type dataset:
`tuple` or :class:`GentyArgs`
:return:
Return an unbound function that will become a test method
:rtype:
`function`
"""
if isinstance(dataset, GentyArgs):
test_method = lambda my_self: method(
my_self,
*dataset.args,
**dataset.kwargs
)
else:
test_method = lambda my_self: method(
my_self,
*dataset
)
return test_method
def _build_dataprovider_method(method, dataset, dataprovider):
"""
Return a fabricated method that calls the dataprovider with the given
dataset, and marshals the return value from that into params to the
underlying test 'method'.
:param method:
The underlying test method.
:type method:
`callable`
:param dataset:
Tuple or GentyArgs instance containing the args of the dataset.
:type dataset:
`tuple` or :class:`GentyArgs`
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function.
:type dataprovider:
`callable`
:return:
Return an unbound function that will become a test method
:rtype:
`function`
"""
if isinstance(dataset, GentyArgs):
final_args = dataset.args
final_kwargs = dataset.kwargs
else:
final_args = dataset
final_kwargs = {}
def test_method_wrapper(my_self):
args = dataprovider(
my_self,
*final_args,
**final_kwargs
)
kwargs = {}
if isinstance(args, GentyArgs):
kwargs = args.kwargs
args = args.args
elif not isinstance(args, (tuple, list)):
args = (args, )
return method(my_self, *args, **kwargs)
return test_method_wrapper
def _build_test_method(method, dataset, dataprovider=None):
"""
Return a fabricated method that marshals the dataset into parameters
for given 'method'
:param method:
The underlying test method.
:type method:
`callable`
:param dataset:
Tuple or GentyArgs instance containing the args of the dataset.
:type dataset:
`tuple` or :class:`GentyArgs` or None
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function. Can be None
:type dataprovider:
`callable` or None
:return:
Return an unbound function that will become a test method
:rtype:
`function`
"""
if dataprovider:
test_method = _build_dataprovider_method(method, dataset, dataprovider)
elif dataset:
test_method = _build_dataset_method(method, dataset)
else:
test_method = method
return test_method
def _add_method_to_class(
target_cls,
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
):
"""
Add the described method to the given class.
:param target_cls:
Test class to which to add a method.
:type target_cls:
`class`
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param func:
The underlying test function to call.
:type func:
`callable`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param dataset:
Tuple containing the args of the dataset.
:type dataset:
`tuple` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function. Can be None.
:type dataprovider:
`callable`
"""
# pylint: disable=too-many-arguments
test_method_name_for_dataset = _build_final_method_name(
method_name,
dataset_name,
dataprovider.__name__ if dataprovider else None,
repeat_suffix,
)
test_method_for_dataset = _build_test_method(func, dataset, dataprovider)
test_method_for_dataset = functools.update_wrapper(
test_method_for_dataset,
func,
)
test_method_name_for_dataset = encode_non_ascii_string(
test_method_name_for_dataset,
)
test_method_for_dataset.__name__ = test_method_name_for_dataset
test_method_for_dataset.genty_generated_test = True
# Add the method to the class under the proper name
setattr(target_cls, test_method_name_for_dataset, test_method_for_dataset)
|
box/genty
|
genty/genty.py
|
_expand_datasets
|
python
|
def _expand_datasets(test_functions):
for name, func in test_functions:
dataset_tuples = chain(
[(None, getattr(func, 'genty_datasets', {}))],
getattr(func, 'genty_dataproviders', []),
)
no_datasets = True
for dataprovider, datasets in dataset_tuples:
for dataset_name, dataset in six.iteritems(datasets):
no_datasets = False
yield name, func, dataset_name, dataset, dataprovider
if no_datasets:
# yield the original test method, unaltered
yield name, func, None, None, None
|
Generator producing test_methods, with an optional dataset.
:param test_functions:
Iterator over tuples of test name and test unbound function.
:type test_functions:
`iterator` of `tuple` of (`unicode`, `function`)
:return:
Generator yielding a tuple of
- method_name : Name of the test method
- unbound function : Unbound function that will be the test method.
- dataset name : String representation of the given dataset
- dataset : Tuple representing the args for a test
- param factory : Function that returns params for the test method
:rtype:
`generator` of `tuple` of (
`unicode`,
`function`,
`unicode` or None,
`tuple` or None,
`function` or None,
)
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty.py#L62-L101
| null |
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import functools
from itertools import chain
import math
import re
import sys
import types
import six
from .genty_args import GentyArgs
from .private import encode_non_ascii_string
REPLACE_FOR_PERIOD_CHAR = '\xb7'
def genty(target_cls):
"""
This decorator takes the information provided by @genty_dataset,
@genty_dataprovider, and @genty_repeat and generates the corresponding
test methods.
:param target_cls:
Test class whose test methods have been decorated.
:type target_cls:
`class`
"""
tests = _expand_tests(target_cls)
tests_with_datasets = _expand_datasets(tests)
tests_with_datasets_and_repeats = _expand_repeats(tests_with_datasets)
_add_new_test_methods(target_cls, tests_with_datasets_and_repeats)
return target_cls
def _expand_tests(target_cls):
"""
Generator of all the test unbound functions in the given class.
:param target_cls:
Target test class.
:type target_cls:
`class`
:return:
Generator of all the test_methods in the given class yielding
tuples of method name and unbound function.
:rtype:
`generator` of `tuple` of (`unicode`, `function`)
"""
entries = dict(six.iteritems(target_cls.__dict__))
for key, value in six.iteritems(entries):
if key.startswith('test') and isinstance(value, types.FunctionType):
if not hasattr(value, 'genty_generated_test'):
yield key, value
def _expand_repeats(test_functions):
"""
Generator producing test_methods, with any repeat count unrolled.
:param test_functions:
Sequence of tuples of
- method_name : Name of the test method
- unbound function : Unbound function that will be the test method.
- dataset name : String representation of the given dataset
- dataset : Tuple representing the args for a test
- param factory : Function that returns params for the test method
:type test_functions:
`iterator` of `tuple` of
(`unicode`, `function`, `unicode` or None, `tuple` or None, `function`)
:return:
Generator yielding a tuple of
(method_name, unbound function, dataset, name dataset, repeat_suffix)
:rtype:
`generator` of `tuple` of (`unicode`, `function`,
`unicode` or None, `tuple` or None, `function`, `unicode`)
"""
for name, func, dataset_name, dataset, dataprovider in test_functions:
repeat_count = getattr(func, 'genty_repeat_count', 0)
if repeat_count:
for i in range(1, repeat_count + 1):
repeat_suffix = _build_repeat_suffix(i, repeat_count)
yield (
name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
)
else:
yield name, func, dataset_name, dataset, dataprovider, None
def _add_new_test_methods(target_cls, tests_with_datasets_and_repeats):
"""Define the given tests in the given class.
:param target_cls:
Test class where to define the given test methods.
:type target_cls:
`class`
:param tests_with_datasets_and_repeats:
Sequence of tuples describing the new test to add to the class.
(method_name, unbound function, dataset name, dataset,
dataprovider, repeat_suffix)
:type tests_with_datasets_and_repeats:
Sequence of `tuple` of (`unicode`, `function`,
`unicode` or None, `tuple` or None, `function`, `unicode`)
"""
for test_info in tests_with_datasets_and_repeats:
(
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
) = test_info
# Remove the original test_method as it's superseded by this
# generated method.
is_first_reference = _delete_original_test_method(
target_cls,
method_name,
)
# However, if that test_method is referenced by name in sys.argv
# Then take 1 of the generated methods (we take the first) and
# give that generated method the original name... so that the reference
# can find an actual test method.
if is_first_reference and _is_referenced_in_argv(method_name):
dataset_name = None
repeat_suffix = None
_add_method_to_class(
target_cls,
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
)
def _is_referenced_in_argv(method_name):
"""
Various test runners allow one to run a specific test like so:
python -m unittest -v <test_module>.<test_name>
Return True is the given method name is so referenced.
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:return:
Is the given method referenced by the command line.
:rtype:
`bool`
"""
expr = '.*[:.]{0}$'.format(method_name)
regex = re.compile(expr)
return any(regex.match(arg) for arg in sys.argv)
def _build_repeat_suffix(iteration, count):
"""
Return the suffix string to identify iteration X out of Y.
For example, with a count of 100, this will build strings like
"iteration_053" or "iteration_008".
:param iteration:
Current iteration.
:type iteration:
`int`
:param count:
Total number of iterations.
:type count:
`int`
:return:
Repeat suffix.
:rtype:
`unicode`
"""
format_width = int(math.ceil(math.log(count + 1, 10)))
new_suffix = 'iteration_{0:0{width}d}'.format(
iteration,
width=format_width
)
return new_suffix
def _delete_original_test_method(target_cls, name):
"""
Delete an original test method with the given name.
:param target_cls:
Target class.
:type target_cls:
`class`
:param name:
Name of the method to remove.
:type name:
`unicode`
:return:
True if the original method existed
:rtype:
`bool`
"""
attribute = getattr(target_cls, name, None)
if attribute and not getattr(attribute, 'genty_generated_test', None):
try:
delattr(target_cls, name)
except AttributeError:
pass
return True
else:
return False
def _build_final_method_name(
method_name,
dataset_name,
dataprovider_name,
repeat_suffix,
):
"""
Return a nice human friendly name, that almost looks like code.
Example: a test called 'test_something' with a dataset of (5, 'hello')
Return: "test_something(5, 'hello')"
Example: a test called 'test_other_stuff' with dataset of (9) and repeats
Return: "test_other_stuff(9) iteration_<X>"
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param dataprovider_name:
If there's a dataprovider involved, then this is its name.
:type dataprovider_name:
`unicode` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
:return:
The fully composed name of the generated test method.
:rtype:
`unicode`
"""
# For tests using a dataprovider, append "_<dataprovider_name>" to
# the test method name
suffix = ''
if dataprovider_name:
suffix = '_{0}'.format(dataprovider_name)
if not dataset_name and not repeat_suffix:
return '{0}{1}'.format(method_name, suffix)
if dataset_name:
# Nosetest multi-processing code parses the full test name
# to discern package/module names. Thus any periods in the test-name
# causes that code to fail. So replace any periods with the unicode
# middle-dot character. Yes, this change is applied independent
# of the test runner being used... and that's fine since there is
# no real contract as to how the fabricated tests are named.
dataset_name = dataset_name.replace('.', REPLACE_FOR_PERIOD_CHAR)
# Place data_set info inside parens, as if it were a function call
suffix = '{0}({1})'.format(suffix, dataset_name or "")
if repeat_suffix:
suffix = '{0} {1}'.format(suffix, repeat_suffix)
test_method_name_for_dataset = "{0}{1}".format(
method_name,
suffix,
)
return test_method_name_for_dataset
def _build_dataset_method(method, dataset):
"""
Return a fabricated method that marshals the dataset into parameters
for given 'method'
:param method:
The underlying test method.
:type method:
`callable`
:param dataset:
Tuple or GentyArgs instance containing the args of the dataset.
:type dataset:
`tuple` or :class:`GentyArgs`
:return:
Return an unbound function that will become a test method
:rtype:
`function`
"""
if isinstance(dataset, GentyArgs):
test_method = lambda my_self: method(
my_self,
*dataset.args,
**dataset.kwargs
)
else:
test_method = lambda my_self: method(
my_self,
*dataset
)
return test_method
def _build_dataprovider_method(method, dataset, dataprovider):
"""
Return a fabricated method that calls the dataprovider with the given
dataset, and marshals the return value from that into params to the
underlying test 'method'.
:param method:
The underlying test method.
:type method:
`callable`
:param dataset:
Tuple or GentyArgs instance containing the args of the dataset.
:type dataset:
`tuple` or :class:`GentyArgs`
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function.
:type dataprovider:
`callable`
:return:
Return an unbound function that will become a test method
:rtype:
`function`
"""
if isinstance(dataset, GentyArgs):
final_args = dataset.args
final_kwargs = dataset.kwargs
else:
final_args = dataset
final_kwargs = {}
def test_method_wrapper(my_self):
args = dataprovider(
my_self,
*final_args,
**final_kwargs
)
kwargs = {}
if isinstance(args, GentyArgs):
kwargs = args.kwargs
args = args.args
elif not isinstance(args, (tuple, list)):
args = (args, )
return method(my_self, *args, **kwargs)
return test_method_wrapper
def _build_test_method(method, dataset, dataprovider=None):
"""
Return a fabricated method that marshals the dataset into parameters
for given 'method'
:param method:
The underlying test method.
:type method:
`callable`
:param dataset:
Tuple or GentyArgs instance containing the args of the dataset.
:type dataset:
`tuple` or :class:`GentyArgs` or None
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function. Can be None
:type dataprovider:
`callable` or None
:return:
Return an unbound function that will become a test method
:rtype:
`function`
"""
if dataprovider:
test_method = _build_dataprovider_method(method, dataset, dataprovider)
elif dataset:
test_method = _build_dataset_method(method, dataset)
else:
test_method = method
return test_method
def _add_method_to_class(
target_cls,
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
):
"""
Add the described method to the given class.
:param target_cls:
Test class to which to add a method.
:type target_cls:
`class`
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param func:
The underlying test function to call.
:type func:
`callable`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param dataset:
Tuple containing the args of the dataset.
:type dataset:
`tuple` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function. Can be None.
:type dataprovider:
`callable`
"""
# pylint: disable=too-many-arguments
test_method_name_for_dataset = _build_final_method_name(
method_name,
dataset_name,
dataprovider.__name__ if dataprovider else None,
repeat_suffix,
)
test_method_for_dataset = _build_test_method(func, dataset, dataprovider)
test_method_for_dataset = functools.update_wrapper(
test_method_for_dataset,
func,
)
test_method_name_for_dataset = encode_non_ascii_string(
test_method_name_for_dataset,
)
test_method_for_dataset.__name__ = test_method_name_for_dataset
test_method_for_dataset.genty_generated_test = True
# Add the method to the class under the proper name
setattr(target_cls, test_method_name_for_dataset, test_method_for_dataset)
|
box/genty
|
genty/genty.py
|
_expand_repeats
|
python
|
def _expand_repeats(test_functions):
for name, func, dataset_name, dataset, dataprovider in test_functions:
repeat_count = getattr(func, 'genty_repeat_count', 0)
if repeat_count:
for i in range(1, repeat_count + 1):
repeat_suffix = _build_repeat_suffix(i, repeat_count)
yield (
name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
)
else:
yield name, func, dataset_name, dataset, dataprovider, None
|
Generator producing test_methods, with any repeat count unrolled.
:param test_functions:
Sequence of tuples of
- method_name : Name of the test method
- unbound function : Unbound function that will be the test method.
- dataset name : String representation of the given dataset
- dataset : Tuple representing the args for a test
- param factory : Function that returns params for the test method
:type test_functions:
`iterator` of `tuple` of
(`unicode`, `function`, `unicode` or None, `tuple` or None, `function`)
:return:
Generator yielding a tuple of
(method_name, unbound function, dataset, name dataset, repeat_suffix)
:rtype:
`generator` of `tuple` of (`unicode`, `function`,
`unicode` or None, `tuple` or None, `function`, `unicode`)
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty.py#L104-L139
|
[
"def _build_repeat_suffix(iteration, count):\n \"\"\"\n Return the suffix string to identify iteration X out of Y.\n\n For example, with a count of 100, this will build strings like\n \"iteration_053\" or \"iteration_008\".\n\n :param iteration:\n Current iteration.\n :type iteration:\n `int`\n :param count:\n Total number of iterations.\n :type count:\n `int`\n :return:\n Repeat suffix.\n :rtype:\n `unicode`\n \"\"\"\n format_width = int(math.ceil(math.log(count + 1, 10)))\n new_suffix = 'iteration_{0:0{width}d}'.format(\n iteration,\n width=format_width\n )\n return new_suffix\n"
] |
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import functools
from itertools import chain
import math
import re
import sys
import types
import six
from .genty_args import GentyArgs
from .private import encode_non_ascii_string
REPLACE_FOR_PERIOD_CHAR = '\xb7'
def genty(target_cls):
"""
This decorator takes the information provided by @genty_dataset,
@genty_dataprovider, and @genty_repeat and generates the corresponding
test methods.
:param target_cls:
Test class whose test methods have been decorated.
:type target_cls:
`class`
"""
tests = _expand_tests(target_cls)
tests_with_datasets = _expand_datasets(tests)
tests_with_datasets_and_repeats = _expand_repeats(tests_with_datasets)
_add_new_test_methods(target_cls, tests_with_datasets_and_repeats)
return target_cls
def _expand_tests(target_cls):
"""
Generator of all the test unbound functions in the given class.
:param target_cls:
Target test class.
:type target_cls:
`class`
:return:
Generator of all the test_methods in the given class yielding
tuples of method name and unbound function.
:rtype:
`generator` of `tuple` of (`unicode`, `function`)
"""
entries = dict(six.iteritems(target_cls.__dict__))
for key, value in six.iteritems(entries):
if key.startswith('test') and isinstance(value, types.FunctionType):
if not hasattr(value, 'genty_generated_test'):
yield key, value
def _expand_datasets(test_functions):
"""
Generator producing test_methods, with an optional dataset.
:param test_functions:
Iterator over tuples of test name and test unbound function.
:type test_functions:
`iterator` of `tuple` of (`unicode`, `function`)
:return:
Generator yielding a tuple of
- method_name : Name of the test method
- unbound function : Unbound function that will be the test method.
- dataset name : String representation of the given dataset
- dataset : Tuple representing the args for a test
- param factory : Function that returns params for the test method
:rtype:
`generator` of `tuple` of (
`unicode`,
`function`,
`unicode` or None,
`tuple` or None,
`function` or None,
)
"""
for name, func in test_functions:
dataset_tuples = chain(
[(None, getattr(func, 'genty_datasets', {}))],
getattr(func, 'genty_dataproviders', []),
)
no_datasets = True
for dataprovider, datasets in dataset_tuples:
for dataset_name, dataset in six.iteritems(datasets):
no_datasets = False
yield name, func, dataset_name, dataset, dataprovider
if no_datasets:
# yield the original test method, unaltered
yield name, func, None, None, None
def _add_new_test_methods(target_cls, tests_with_datasets_and_repeats):
"""Define the given tests in the given class.
:param target_cls:
Test class where to define the given test methods.
:type target_cls:
`class`
:param tests_with_datasets_and_repeats:
Sequence of tuples describing the new test to add to the class.
(method_name, unbound function, dataset name, dataset,
dataprovider, repeat_suffix)
:type tests_with_datasets_and_repeats:
Sequence of `tuple` of (`unicode`, `function`,
`unicode` or None, `tuple` or None, `function`, `unicode`)
"""
for test_info in tests_with_datasets_and_repeats:
(
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
) = test_info
# Remove the original test_method as it's superseded by this
# generated method.
is_first_reference = _delete_original_test_method(
target_cls,
method_name,
)
# However, if that test_method is referenced by name in sys.argv
# Then take 1 of the generated methods (we take the first) and
# give that generated method the original name... so that the reference
# can find an actual test method.
if is_first_reference and _is_referenced_in_argv(method_name):
dataset_name = None
repeat_suffix = None
_add_method_to_class(
target_cls,
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
)
def _is_referenced_in_argv(method_name):
"""
Various test runners allow one to run a specific test like so:
python -m unittest -v <test_module>.<test_name>
Return True is the given method name is so referenced.
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:return:
Is the given method referenced by the command line.
:rtype:
`bool`
"""
expr = '.*[:.]{0}$'.format(method_name)
regex = re.compile(expr)
return any(regex.match(arg) for arg in sys.argv)
def _build_repeat_suffix(iteration, count):
"""
Return the suffix string to identify iteration X out of Y.
For example, with a count of 100, this will build strings like
"iteration_053" or "iteration_008".
:param iteration:
Current iteration.
:type iteration:
`int`
:param count:
Total number of iterations.
:type count:
`int`
:return:
Repeat suffix.
:rtype:
`unicode`
"""
format_width = int(math.ceil(math.log(count + 1, 10)))
new_suffix = 'iteration_{0:0{width}d}'.format(
iteration,
width=format_width
)
return new_suffix
def _delete_original_test_method(target_cls, name):
"""
Delete an original test method with the given name.
:param target_cls:
Target class.
:type target_cls:
`class`
:param name:
Name of the method to remove.
:type name:
`unicode`
:return:
True if the original method existed
:rtype:
`bool`
"""
attribute = getattr(target_cls, name, None)
if attribute and not getattr(attribute, 'genty_generated_test', None):
try:
delattr(target_cls, name)
except AttributeError:
pass
return True
else:
return False
def _build_final_method_name(
method_name,
dataset_name,
dataprovider_name,
repeat_suffix,
):
"""
Return a nice human friendly name, that almost looks like code.
Example: a test called 'test_something' with a dataset of (5, 'hello')
Return: "test_something(5, 'hello')"
Example: a test called 'test_other_stuff' with dataset of (9) and repeats
Return: "test_other_stuff(9) iteration_<X>"
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param dataprovider_name:
If there's a dataprovider involved, then this is its name.
:type dataprovider_name:
`unicode` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
:return:
The fully composed name of the generated test method.
:rtype:
`unicode`
"""
# For tests using a dataprovider, append "_<dataprovider_name>" to
# the test method name
suffix = ''
if dataprovider_name:
suffix = '_{0}'.format(dataprovider_name)
if not dataset_name and not repeat_suffix:
return '{0}{1}'.format(method_name, suffix)
if dataset_name:
# Nosetest multi-processing code parses the full test name
# to discern package/module names. Thus any periods in the test-name
# causes that code to fail. So replace any periods with the unicode
# middle-dot character. Yes, this change is applied independent
# of the test runner being used... and that's fine since there is
# no real contract as to how the fabricated tests are named.
dataset_name = dataset_name.replace('.', REPLACE_FOR_PERIOD_CHAR)
# Place data_set info inside parens, as if it were a function call
suffix = '{0}({1})'.format(suffix, dataset_name or "")
if repeat_suffix:
suffix = '{0} {1}'.format(suffix, repeat_suffix)
test_method_name_for_dataset = "{0}{1}".format(
method_name,
suffix,
)
return test_method_name_for_dataset
def _build_dataset_method(method, dataset):
"""
Return a fabricated method that marshals the dataset into parameters
for given 'method'
:param method:
The underlying test method.
:type method:
`callable`
:param dataset:
Tuple or GentyArgs instance containing the args of the dataset.
:type dataset:
`tuple` or :class:`GentyArgs`
:return:
Return an unbound function that will become a test method
:rtype:
`function`
"""
if isinstance(dataset, GentyArgs):
test_method = lambda my_self: method(
my_self,
*dataset.args,
**dataset.kwargs
)
else:
test_method = lambda my_self: method(
my_self,
*dataset
)
return test_method
def _build_dataprovider_method(method, dataset, dataprovider):
"""
Return a fabricated method that calls the dataprovider with the given
dataset, and marshals the return value from that into params to the
underlying test 'method'.
:param method:
The underlying test method.
:type method:
`callable`
:param dataset:
Tuple or GentyArgs instance containing the args of the dataset.
:type dataset:
`tuple` or :class:`GentyArgs`
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function.
:type dataprovider:
`callable`
:return:
Return an unbound function that will become a test method
:rtype:
`function`
"""
if isinstance(dataset, GentyArgs):
final_args = dataset.args
final_kwargs = dataset.kwargs
else:
final_args = dataset
final_kwargs = {}
def test_method_wrapper(my_self):
args = dataprovider(
my_self,
*final_args,
**final_kwargs
)
kwargs = {}
if isinstance(args, GentyArgs):
kwargs = args.kwargs
args = args.args
elif not isinstance(args, (tuple, list)):
args = (args, )
return method(my_self, *args, **kwargs)
return test_method_wrapper
def _build_test_method(method, dataset, dataprovider=None):
"""
Return a fabricated method that marshals the dataset into parameters
for given 'method'
:param method:
The underlying test method.
:type method:
`callable`
:param dataset:
Tuple or GentyArgs instance containing the args of the dataset.
:type dataset:
`tuple` or :class:`GentyArgs` or None
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function. Can be None
:type dataprovider:
`callable` or None
:return:
Return an unbound function that will become a test method
:rtype:
`function`
"""
if dataprovider:
test_method = _build_dataprovider_method(method, dataset, dataprovider)
elif dataset:
test_method = _build_dataset_method(method, dataset)
else:
test_method = method
return test_method
def _add_method_to_class(
target_cls,
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
):
"""
Add the described method to the given class.
:param target_cls:
Test class to which to add a method.
:type target_cls:
`class`
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param func:
The underlying test function to call.
:type func:
`callable`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param dataset:
Tuple containing the args of the dataset.
:type dataset:
`tuple` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function. Can be None.
:type dataprovider:
`callable`
"""
# pylint: disable=too-many-arguments
test_method_name_for_dataset = _build_final_method_name(
method_name,
dataset_name,
dataprovider.__name__ if dataprovider else None,
repeat_suffix,
)
test_method_for_dataset = _build_test_method(func, dataset, dataprovider)
test_method_for_dataset = functools.update_wrapper(
test_method_for_dataset,
func,
)
test_method_name_for_dataset = encode_non_ascii_string(
test_method_name_for_dataset,
)
test_method_for_dataset.__name__ = test_method_name_for_dataset
test_method_for_dataset.genty_generated_test = True
# Add the method to the class under the proper name
setattr(target_cls, test_method_name_for_dataset, test_method_for_dataset)
|
box/genty
|
genty/genty.py
|
_is_referenced_in_argv
|
python
|
def _is_referenced_in_argv(method_name):
expr = '.*[:.]{0}$'.format(method_name)
regex = re.compile(expr)
return any(regex.match(arg) for arg in sys.argv)
|
Various test runners allow one to run a specific test like so:
python -m unittest -v <test_module>.<test_name>
Return True if the given method name is so referenced.
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:return:
Is the given method referenced by the command line.
:rtype:
`bool`
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty.py#L193-L211
| null |
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import functools
from itertools import chain
import math
import re
import sys
import types
import six
from .genty_args import GentyArgs
from .private import encode_non_ascii_string
REPLACE_FOR_PERIOD_CHAR = '\xb7'
def genty(target_cls):
    """Generate the test methods described by the genty decorators.

    Consumes the metadata attached by @genty_dataset, @genty_dataprovider,
    and @genty_repeat to the methods of ``target_cls`` and fabricates the
    corresponding concrete test methods on the class.

    :param target_cls:
        Test class whose test methods have been decorated.
    :type target_cls:
        `class`
    """
    # Pipeline: enumerate tests -> expand datasets -> unroll repeats.
    expanded = _expand_repeats(_expand_datasets(_expand_tests(target_cls)))
    _add_new_test_methods(target_cls, expanded)
    return target_cls
def _expand_tests(target_cls):
    """Yield every candidate test entry of the given class.

    Only attributes whose name starts with 'test', that are plain
    functions, and that were not themselves fabricated by genty are
    produced.

    :param target_cls:
        Target test class.
    :type target_cls:
        `class`
    :return:
        Generator of all the test_methods in the given class yielding
        tuples of method name and unbound function.
    :rtype:
        `generator` of `tuple` of (`unicode`, `function`)
    """
    # Snapshot the class dict so mutation during generation is safe.
    members = dict(six.iteritems(target_cls.__dict__))
    for attr_name, attr_value in six.iteritems(members):
        if not attr_name.startswith('test'):
            continue
        if not isinstance(attr_value, types.FunctionType):
            continue
        # Skip methods that genty itself generated on a previous pass.
        if hasattr(attr_value, 'genty_generated_test'):
            continue
        yield attr_name, attr_value
def _expand_datasets(test_functions):
    """Yield test methods paired with each of their optional datasets.

    :param test_functions:
        Iterator over tuples of test name and test unbound function.
    :type test_functions:
        `iterator` of `tuple` of (`unicode`, `function`)
    :return:
        Generator yielding a tuple of
        - method_name : Name of the test method
        - unbound function : Unbound function that will be the test method.
        - dataset name : String representation of the given dataset
        - dataset : Tuple representing the args for a test
        - param factory : Function that returns params for the test method
    :rtype:
        `generator` of `tuple` of (
            `unicode`,
            `function`,
            `unicode` or None,
            `tuple` or None,
            `function` or None,
        )
    """
    for name, func in test_functions:
        # Plain datasets carry no param factory (None); dataprovider
        # entries pair each dataset dict with its factory function.
        sources = chain(
            [(None, getattr(func, 'genty_datasets', {}))],
            getattr(func, 'genty_dataproviders', []),
        )
        found_any = False
        for dataprovider, datasets in sources:
            for dataset_name, dataset in six.iteritems(datasets):
                found_any = True
                yield name, func, dataset_name, dataset, dataprovider
        if not found_any:
            # No datasets at all: pass the original test through untouched.
            yield name, func, None, None, None
def _expand_repeats(test_functions):
"""
Generator producing test_methods, with any repeat count unrolled.
:param test_functions:
Sequence of tuples of
- method_name : Name of the test method
- unbound function : Unbound function that will be the test method.
- dataset name : String representation of the given dataset
- dataset : Tuple representing the args for a test
- param factory : Function that returns params for the test method
:type test_functions:
`iterator` of `tuple` of
(`unicode`, `function`, `unicode` or None, `tuple` or None, `function`)
:return:
Generator yielding a tuple of
(method_name, unbound function, dataset, name dataset, repeat_suffix)
:rtype:
`generator` of `tuple` of (`unicode`, `function`,
`unicode` or None, `tuple` or None, `function`, `unicode`)
"""
for name, func, dataset_name, dataset, dataprovider in test_functions:
repeat_count = getattr(func, 'genty_repeat_count', 0)
if repeat_count:
for i in range(1, repeat_count + 1):
repeat_suffix = _build_repeat_suffix(i, repeat_count)
yield (
name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
)
else:
yield name, func, dataset_name, dataset, dataprovider, None
def _add_new_test_methods(target_cls, tests_with_datasets_and_repeats):
    """Attach each described generated test method to the given class.

    :param target_cls:
        Test class where to define the given test methods.
    :type target_cls:
        `class`
    :param tests_with_datasets_and_repeats:
        Sequence of tuples describing the new test to add to the class.
        (method_name, unbound function, dataset name, dataset,
        dataprovider, repeat_suffix)
    :type tests_with_datasets_and_repeats:
        Sequence of `tuple` of (`unicode`, `function`,
        `unicode` or None, `tuple` or None, `function`, `unicode`)
    """
    for (method_name, func, dataset_name, dataset,
            dataprovider, repeat_suffix) in tests_with_datasets_and_repeats:
        # The generated method supersedes the original test method,
        # so remove the original from the class first.
        is_first_reference = _delete_original_test_method(
            target_cls,
            method_name,
        )
        # If the original name is explicitly requested on the command
        # line, let the first generated method keep that exact name so
        # the test runner can still resolve the reference.
        if is_first_reference and _is_referenced_in_argv(method_name):
            dataset_name = None
            repeat_suffix = None
        _add_method_to_class(
            target_cls,
            method_name,
            func,
            dataset_name,
            dataset,
            dataprovider,
            repeat_suffix,
        )
def _build_repeat_suffix(iteration, count):
"""
Return the suffix string to identify iteration X out of Y.
For example, with a count of 100, this will build strings like
"iteration_053" or "iteration_008".
:param iteration:
Current iteration.
:type iteration:
`int`
:param count:
Total number of iterations.
:type count:
`int`
:return:
Repeat suffix.
:rtype:
`unicode`
"""
format_width = int(math.ceil(math.log(count + 1, 10)))
new_suffix = 'iteration_{0:0{width}d}'.format(
iteration,
width=format_width
)
return new_suffix
def _delete_original_test_method(target_cls, name):
"""
Delete an original test method with the given name.
:param target_cls:
Target class.
:type target_cls:
`class`
:param name:
Name of the method to remove.
:type name:
`unicode`
:return:
True if the original method existed
:rtype:
`bool`
"""
attribute = getattr(target_cls, name, None)
if attribute and not getattr(attribute, 'genty_generated_test', None):
try:
delattr(target_cls, name)
except AttributeError:
pass
return True
else:
return False
def _build_final_method_name(
method_name,
dataset_name,
dataprovider_name,
repeat_suffix,
):
"""
Return a nice human friendly name, that almost looks like code.
Example: a test called 'test_something' with a dataset of (5, 'hello')
Return: "test_something(5, 'hello')"
Example: a test called 'test_other_stuff' with dataset of (9) and repeats
Return: "test_other_stuff(9) iteration_<X>"
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param dataprovider_name:
If there's a dataprovider involved, then this is its name.
:type dataprovider_name:
`unicode` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
:return:
The fully composed name of the generated test method.
:rtype:
`unicode`
"""
# For tests using a dataprovider, append "_<dataprovider_name>" to
# the test method name
suffix = ''
if dataprovider_name:
suffix = '_{0}'.format(dataprovider_name)
if not dataset_name and not repeat_suffix:
return '{0}{1}'.format(method_name, suffix)
if dataset_name:
# Nosetest multi-processing code parses the full test name
# to discern package/module names. Thus any periods in the test-name
# causes that code to fail. So replace any periods with the unicode
# middle-dot character. Yes, this change is applied independent
# of the test runner being used... and that's fine since there is
# no real contract as to how the fabricated tests are named.
dataset_name = dataset_name.replace('.', REPLACE_FOR_PERIOD_CHAR)
# Place data_set info inside parens, as if it were a function call
suffix = '{0}({1})'.format(suffix, dataset_name or "")
if repeat_suffix:
suffix = '{0} {1}'.format(suffix, repeat_suffix)
test_method_name_for_dataset = "{0}{1}".format(
method_name,
suffix,
)
return test_method_name_for_dataset
def _build_dataset_method(method, dataset):
    """Fabricate a method that applies ``dataset`` as args to ``method``.

    :param method:
        The underlying test method.
    :type method:
        `callable`
    :param dataset:
        Tuple or GentyArgs instance containing the args of the dataset.
    :type dataset:
        `tuple` or :class:`GentyArgs`
    :return:
        Return an unbound function that will become a test method
    :rtype:
        `function`
    """
    if isinstance(dataset, GentyArgs):
        def test_method(my_self):
            # GentyArgs carries both positional and keyword arguments.
            return method(my_self, *dataset.args, **dataset.kwargs)
    else:
        def test_method(my_self):
            return method(my_self, *dataset)
    return test_method
def _build_dataprovider_method(method, dataset, dataprovider):
    """Fabricate a method that routes ``dataset`` through ``dataprovider``.

    The dataprovider is called with the dataset's args; its return value
    is then marshalled into parameters for the underlying test 'method'.

    :param method:
        The underlying test method.
    :type method:
        `callable`
    :param dataset:
        Tuple or GentyArgs instance containing the args of the dataset.
    :type dataset:
        `tuple` or :class:`GentyArgs`
    :param dataprovider:
        The unbound function that's responsible for generating the actual
        params that will be passed to the test function.
    :type dataprovider:
        `callable`
    :return:
        Return an unbound function that will become a test method
    :rtype:
        `function`
    """
    if isinstance(dataset, GentyArgs):
        provider_args, provider_kwargs = dataset.args, dataset.kwargs
    else:
        provider_args, provider_kwargs = dataset, {}

    def test_method_wrapper(my_self):
        produced = dataprovider(
            my_self,
            *provider_args,
            **provider_kwargs
        )
        produced_kwargs = {}
        if isinstance(produced, GentyArgs):
            produced_kwargs = produced.kwargs
            produced = produced.args
        elif not isinstance(produced, (tuple, list)):
            # A bare scalar return is treated as one positional argument.
            produced = (produced, )
        return method(my_self, *produced, **produced_kwargs)
    return test_method_wrapper
def _build_test_method(method, dataset, dataprovider=None):
"""
Return a fabricated method that marshals the dataset into parameters
for given 'method'
:param method:
The underlying test method.
:type method:
`callable`
:param dataset:
Tuple or GentyArgs instance containing the args of the dataset.
:type dataset:
`tuple` or :class:`GentyArgs` or None
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function. Can be None
:type dataprovider:
`callable` or None
:return:
Return an unbound function that will become a test method
:rtype:
`function`
"""
if dataprovider:
test_method = _build_dataprovider_method(method, dataset, dataprovider)
elif dataset:
test_method = _build_dataset_method(method, dataset)
else:
test_method = method
return test_method
def _add_method_to_class(
        target_cls,
        method_name,
        func,
        dataset_name,
        dataset,
        dataprovider,
        repeat_suffix,
):
    """Fabricate and attach one generated test method to ``target_cls``.

    :param target_cls:
        Test class to which to add a method.
    :type target_cls:
        `class`
    :param method_name:
        Base name of the method to add.
    :type method_name:
        `unicode`
    :param func:
        The underlying test function to call.
    :type func:
        `callable`
    :param dataset_name:
        Base name of the data set.
    :type dataset_name:
        `unicode` or None
    :param dataset:
        Tuple containing the args of the dataset.
    :type dataset:
        `tuple` or None
    :param dataprovider:
        The unbound function that's responsible for generating the actual
        params that will be passed to the test function. Can be None.
    :type dataprovider:
        `callable`
    :param repeat_suffix:
        Suffix to append to the name of the generated method.
    :type repeat_suffix:
        `unicode` or None
    """
    # pylint: disable=too-many-arguments
    final_name = _build_final_method_name(
        method_name,
        dataset_name,
        dataprovider.__name__ if dataprovider else None,
        repeat_suffix,
    )
    generated_method = _build_test_method(func, dataset, dataprovider)
    # Copy metadata (docstring, module, etc.) from the source function.
    generated_method = functools.update_wrapper(
        generated_method,
        func,
    )
    final_name = encode_non_ascii_string(
        final_name,
    )
    # Override the wrapped __name__ with the composed, dataset-aware name.
    generated_method.__name__ = final_name
    # Mark the method so later genty passes will not expand it again.
    generated_method.genty_generated_test = True
    # Add the method to the class under the proper name
    setattr(target_cls, final_name, generated_method)
|
box/genty
|
genty/genty.py
|
_build_repeat_suffix
|
python
|
def _build_repeat_suffix(iteration, count):
format_width = int(math.ceil(math.log(count + 1, 10)))
new_suffix = 'iteration_{0:0{width}d}'.format(
iteration,
width=format_width
)
return new_suffix
|
Return the suffix string to identify iteration X out of Y.
For example, with a count of 100, this will build strings like
"iteration_053" or "iteration_008".
:param iteration:
Current iteration.
:type iteration:
`int`
:param count:
Total number of iterations.
:type count:
`int`
:return:
Repeat suffix.
:rtype:
`unicode`
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty.py#L214-L239
| null |
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import functools
from itertools import chain
import math
import re
import sys
import types
import six
from .genty_args import GentyArgs
from .private import encode_non_ascii_string
REPLACE_FOR_PERIOD_CHAR = '\xb7'
def genty(target_cls):
"""
This decorator takes the information provided by @genty_dataset,
@genty_dataprovider, and @genty_repeat and generates the corresponding
test methods.
:param target_cls:
Test class whose test methods have been decorated.
:type target_cls:
`class`
"""
tests = _expand_tests(target_cls)
tests_with_datasets = _expand_datasets(tests)
tests_with_datasets_and_repeats = _expand_repeats(tests_with_datasets)
_add_new_test_methods(target_cls, tests_with_datasets_and_repeats)
return target_cls
def _expand_tests(target_cls):
"""
Generator of all the test unbound functions in the given class.
:param target_cls:
Target test class.
:type target_cls:
`class`
:return:
Generator of all the test_methods in the given class yielding
tuples of method name and unbound function.
:rtype:
`generator` of `tuple` of (`unicode`, `function`)
"""
entries = dict(six.iteritems(target_cls.__dict__))
for key, value in six.iteritems(entries):
if key.startswith('test') and isinstance(value, types.FunctionType):
if not hasattr(value, 'genty_generated_test'):
yield key, value
def _expand_datasets(test_functions):
"""
Generator producing test_methods, with an optional dataset.
:param test_functions:
Iterator over tuples of test name and test unbound function.
:type test_functions:
`iterator` of `tuple` of (`unicode`, `function`)
:return:
Generator yielding a tuple of
- method_name : Name of the test method
- unbound function : Unbound function that will be the test method.
- dataset name : String representation of the given dataset
- dataset : Tuple representing the args for a test
- param factory : Function that returns params for the test method
:rtype:
`generator` of `tuple` of (
`unicode`,
`function`,
`unicode` or None,
`tuple` or None,
`function` or None,
)
"""
for name, func in test_functions:
dataset_tuples = chain(
[(None, getattr(func, 'genty_datasets', {}))],
getattr(func, 'genty_dataproviders', []),
)
no_datasets = True
for dataprovider, datasets in dataset_tuples:
for dataset_name, dataset in six.iteritems(datasets):
no_datasets = False
yield name, func, dataset_name, dataset, dataprovider
if no_datasets:
# yield the original test method, unaltered
yield name, func, None, None, None
def _expand_repeats(test_functions):
    """
    Generator producing test_methods, with any repeat count unrolled.

    :param test_functions:
        Sequence of tuples of
        - method_name : Name of the test method
        - unbound function : Unbound function that will be the test method.
        - dataset name : String representation of the given dataset
        - dataset : Tuple representing the args for a test
        - param factory : Function that returns params for the test method
    :type test_functions:
        `iterator` of `tuple` of
        (`unicode`, `function`, `unicode` or None, `tuple` or None, `function`)
    :return:
        Generator yielding a tuple of
        (method_name, unbound function, dataset name, dataset,
        dataprovider, repeat_suffix)
    :rtype:
        `generator` of `tuple` of (`unicode`, `function`,
        `unicode` or None, `tuple` or None, `function`, `unicode`)
    """
    for name, func, dataset_name, dataset, dataprovider in test_functions:
        # @genty_repeat records the run count on the function; a missing
        # attribute means the test is not repeated.
        repeat_count = getattr(func, 'genty_repeat_count', 0)
        if repeat_count:
            # Unroll into one entry per iteration, each tagged with a
            # zero-padded "iteration_XXX" suffix.
            for i in range(1, repeat_count + 1):
                repeat_suffix = _build_repeat_suffix(i, repeat_count)
                yield (
                    name,
                    func,
                    dataset_name,
                    dataset,
                    dataprovider,
                    repeat_suffix,
                )
        else:
            # No repeat requested: pass through with a None suffix.
            yield name, func, dataset_name, dataset, dataprovider, None
def _add_new_test_methods(target_cls, tests_with_datasets_and_repeats):
"""Define the given tests in the given class.
:param target_cls:
Test class where to define the given test methods.
:type target_cls:
`class`
:param tests_with_datasets_and_repeats:
Sequence of tuples describing the new test to add to the class.
(method_name, unbound function, dataset name, dataset,
dataprovider, repeat_suffix)
:type tests_with_datasets_and_repeats:
Sequence of `tuple` of (`unicode`, `function`,
`unicode` or None, `tuple` or None, `function`, `unicode`)
"""
for test_info in tests_with_datasets_and_repeats:
(
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
) = test_info
# Remove the original test_method as it's superseded by this
# generated method.
is_first_reference = _delete_original_test_method(
target_cls,
method_name,
)
# However, if that test_method is referenced by name in sys.argv
# Then take 1 of the generated methods (we take the first) and
# give that generated method the original name... so that the reference
# can find an actual test method.
if is_first_reference and _is_referenced_in_argv(method_name):
dataset_name = None
repeat_suffix = None
_add_method_to_class(
target_cls,
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
)
def _is_referenced_in_argv(method_name):
"""
Various test runners allow one to run a specific test like so:
python -m unittest -v <test_module>.<test_name>
Return True is the given method name is so referenced.
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:return:
Is the given method referenced by the command line.
:rtype:
`bool`
"""
expr = '.*[:.]{0}$'.format(method_name)
regex = re.compile(expr)
return any(regex.match(arg) for arg in sys.argv)
def _delete_original_test_method(target_cls, name):
"""
Delete an original test method with the given name.
:param target_cls:
Target class.
:type target_cls:
`class`
:param name:
Name of the method to remove.
:type name:
`unicode`
:return:
True if the original method existed
:rtype:
`bool`
"""
attribute = getattr(target_cls, name, None)
if attribute and not getattr(attribute, 'genty_generated_test', None):
try:
delattr(target_cls, name)
except AttributeError:
pass
return True
else:
return False
def _build_final_method_name(
method_name,
dataset_name,
dataprovider_name,
repeat_suffix,
):
"""
Return a nice human friendly name, that almost looks like code.
Example: a test called 'test_something' with a dataset of (5, 'hello')
Return: "test_something(5, 'hello')"
Example: a test called 'test_other_stuff' with dataset of (9) and repeats
Return: "test_other_stuff(9) iteration_<X>"
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param dataprovider_name:
If there's a dataprovider involved, then this is its name.
:type dataprovider_name:
`unicode` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
:return:
The fully composed name of the generated test method.
:rtype:
`unicode`
"""
# For tests using a dataprovider, append "_<dataprovider_name>" to
# the test method name
suffix = ''
if dataprovider_name:
suffix = '_{0}'.format(dataprovider_name)
if not dataset_name and not repeat_suffix:
return '{0}{1}'.format(method_name, suffix)
if dataset_name:
# Nosetest multi-processing code parses the full test name
# to discern package/module names. Thus any periods in the test-name
# causes that code to fail. So replace any periods with the unicode
# middle-dot character. Yes, this change is applied independent
# of the test runner being used... and that's fine since there is
# no real contract as to how the fabricated tests are named.
dataset_name = dataset_name.replace('.', REPLACE_FOR_PERIOD_CHAR)
# Place data_set info inside parens, as if it were a function call
suffix = '{0}({1})'.format(suffix, dataset_name or "")
if repeat_suffix:
suffix = '{0} {1}'.format(suffix, repeat_suffix)
test_method_name_for_dataset = "{0}{1}".format(
method_name,
suffix,
)
return test_method_name_for_dataset
def _build_dataset_method(method, dataset):
    """
    Return a fabricated method that marshals the dataset into parameters
    for given 'method'.

    :param method:
        The underlying test method.
    :type method:
        `callable`
    :param dataset:
        Tuple or GentyArgs instance containing the args of the dataset.
    :type dataset:
        `tuple` or :class:`GentyArgs`
    :return:
        Return an unbound function that will become a test method
    :rtype:
        `function`
    """
    # Use named 'def's rather than lambdas assigned to a name (PEP 8,
    # flake8 E731): behavior is identical, but tracebacks and debuggers
    # see a meaningful frame name instead of '<lambda>'.
    if isinstance(dataset, GentyArgs):
        # GentyArgs carries both positional and keyword arguments.
        def test_method(my_self):
            return method(my_self, *dataset.args, **dataset.kwargs)
    else:
        # Plain tuple dataset: positional arguments only.
        def test_method(my_self):
            return method(my_self, *dataset)
    return test_method
def _build_dataprovider_method(method, dataset, dataprovider):
    """
    Return a fabricated method that calls the dataprovider with the given
    dataset, and marshals the return value from that into params to the
    underlying test 'method'.

    :param method:
        The underlying test method.
    :type method:
        `callable`
    :param dataset:
        Tuple or GentyArgs instance containing the args of the dataset.
    :type dataset:
        `tuple` or :class:`GentyArgs`
    :param dataprovider:
        The unbound function that's responsible for generating the actual
        params that will be passed to the test function.
    :type dataprovider:
        `callable`
    :return:
        Return an unbound function that will become a test method
    :rtype:
        `function`
    """
    # Split the dataset into the positional/keyword args that will be fed
    # to the dataprovider itself.
    if isinstance(dataset, GentyArgs):
        provider_args, provider_kwargs = dataset.args, dataset.kwargs
    else:
        provider_args, provider_kwargs = dataset, {}

    def test_method_wrapper(my_self):
        # Ask the dataprovider for the actual test parameters ...
        produced = dataprovider(my_self, *provider_args, **provider_kwargs)

        # ... then normalize its return value into (args, kwargs).
        call_kwargs = {}
        if isinstance(produced, GentyArgs):
            call_kwargs = produced.kwargs
            produced = produced.args
        elif not isinstance(produced, (tuple, list)):
            # A single bare value becomes a one-element argument tuple.
            produced = (produced, )
        return method(my_self, *produced, **call_kwargs)

    return test_method_wrapper
def _build_test_method(method, dataset, dataprovider=None):
"""
Return a fabricated method that marshals the dataset into parameters
for given 'method'
:param method:
The underlying test method.
:type method:
`callable`
:param dataset:
Tuple or GentyArgs instance containing the args of the dataset.
:type dataset:
`tuple` or :class:`GentyArgs` or None
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function. Can be None
:type dataprovider:
`callable` or None
:return:
Return an unbound function that will become a test method
:rtype:
`function`
"""
if dataprovider:
test_method = _build_dataprovider_method(method, dataset, dataprovider)
elif dataset:
test_method = _build_dataset_method(method, dataset)
else:
test_method = method
return test_method
def _add_method_to_class(
    target_cls,
    method_name,
    func,
    dataset_name,
    dataset,
    dataprovider,
    repeat_suffix,
):
    """
    Add the described generated test method to the given class.

    :param target_cls:
        Test class to which to add a method.
    :type target_cls:
        `class`
    :param method_name:
        Base name of the method to add.
    :type method_name:
        `unicode`
    :param func:
        The underlying test function to call.
    :type func:
        `callable`
    :param dataset_name:
        Base name of the data set.
    :type dataset_name:
        `unicode` or None
    :param dataset:
        Tuple containing the args of the dataset.
    :type dataset:
        `tuple` or None
    :param dataprovider:
        The unbound function that's responsible for generating the actual
        params that will be passed to the test function. Can be None.
    :type dataprovider:
        `callable` or None
    :param repeat_suffix:
        Suffix to append to the name of the generated method.
    :type repeat_suffix:
        `unicode` or None
    """
    # pylint: disable=too-many-arguments
    provider_name = dataprovider.__name__ if dataprovider else None
    final_name = _build_final_method_name(
        method_name,
        dataset_name,
        provider_name,
        repeat_suffix,
    )

    # Fabricate the method, then copy func's metadata (docstring, module,
    # etc.) onto it so test runners report it sensibly.
    generated = _build_test_method(func, dataset, dataprovider)
    generated = functools.update_wrapper(generated, func)

    encoded_name = encode_non_ascii_string(final_name)
    generated.__name__ = encoded_name
    # Marker used elsewhere to recognize (and clean up) generated tests.
    generated.genty_generated_test = True

    # Bind the method onto the class under its composed name.
    setattr(target_cls, encoded_name, generated)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.