| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
"""
:mod:`bitdeli.widgets`: Dashboard toolkit
=========================================
The :mod:`bitdeli.widgets` module contains *widgets* that are used to compose
a dashboard in the :ref:`card-script`.
Add a card called *Widget Gallery* to see a live example of all widgets. The
source code is available on `GitHub at bitdeli/bd-toydata-widget-gallery
<https://github.com/bitdeli/bd-toydata-widget-gallery>`_.
Dashboard Layout
----------------
The widgets are laid out on a grid that has 12 columns and infinite
rows. The size attribute of a widget corresponds to these grid units,
making 12 the maximum width of a widget.
Note that :ref:`editor` shows a ruler in the preview that helps you design
dashboards that fit fully on a 16:9 display in full-screen mode.
The widgets are shown on the board in the order they are created in the script.
You can override the default order with the :class:`Group` object.
.. autoclass:: Group
Themes
------
.. autofunction:: set_theme
.. autofunction:: get_theme
.. autofunction:: get_themes
Title & Description
-------------------
In addition to a dashboard, a card can generate a title and a description based
on the current :ref:`profiles`. This allows cards to be used as paragraphs in a
dynamically generated report.
Both :class:`Title` and :class:`Description` take a single required parameter,
*template*, which specifies a template for the text. The dictionary
*values* is used to populate the template using the `string.format()
<http://docs.python.org/2/library/stdtypes.html#str.format>`_ function.
If a placeholder used in the template is missing from *values*, the
*default* value is used instead, as illustrated in the example below.
See also :mod:`bitdeli.textutil` for utilities that help generate readable
descriptions.
.. autoclass:: Title
.. autoclass:: Description
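For example, a card might produce a dynamic title and description like this
(a minimal sketch; ``num_users`` and ``top_country`` are hypothetical values
computed earlier in the card script):
.. code-block:: python
    Title('{num_users} users visited today', {'num_users': num_users})
    Description('Most visitors came from {top_country}.',
                {'top_country': top_country},
                default='[unknown]')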
Widgets
-------
These keyword arguments are common to all widgets (see the example after the list):
- **label**: A string that will be shown in the top left corner of a widget.
- **data**: The data to be displayed, format depends on the widget type.
- **size**: The size of the widget on the board: `size=(w, h)` where *0 < w < 13*
and *h > 0*.
- **color**: An integer between 1 and 3 that picks a color from the current theme.
- **group**: Define the widget :class:`Group` for this widget.
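For example, the following creates two widgets inside a horizontal group
(a minimal sketch; the labels and data values are made up):
.. code-block:: python
    g = Group(layout='horizontal')
    Text(head='42', text='Answers computed today', size=(3, 2), color=2, group=g)
    Bar(label='Daily signups',
        data=[('Mon', 10), ('Tue', 8), ('Wed', 12)],
        size=(6, 2),
        group=g)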
.. autoclass:: Bar
.. autoclass:: Line
.. autoclass:: Map
.. autoclass:: Table
.. autoclass:: Text
.. autoclass:: Timeline
.. autoclass:: Users
Utilities
---------
.. autofunction:: make_widget
.. autofunction:: gravatar_hash
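For example (a minimal sketch with made-up values):
.. code-block:: python
    make_widget('text', head='Hello', size=(3, 2))
    avatar = gravatar_hash('user@example.com')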
"""
from collections import Mapping, OrderedDict
from itertools import chain
from uuid import uuid4
from bencode import BenJson
import json
import inspect
import re
import md5
MAIN = '/tmp/worker/__main__.py'
THEMES =\
["bluered",
"phosphor",
"dream",
"beach",
"builder",
"june",
"i3",
"lime",
"arctic",
"lipstick",
"eighties",
"safari",
"bright",
"bank",
"sail",
"casino",
"clouds",
"valentine",
"fed",
"space",
"purple",
"playground",
"vintage",
"gray",
"flamingo"]
_widgets = OrderedDict()
_title = None
_description = None
_meta = {'description': 'No description',
'title': 'Untitled',
'theme': 'bluered'}
def set_text(title=None, description=None):
if title != None:
_meta['title'] = title
if description != None:
_meta['description'] = description
def get_text():
return _meta['title'], _meta['description']
def set_theme(theme):
"""
Sets the current theme (color scheme). The parameter *theme* is one
of the following predefined themes:
.. code-block:: python
bluered, phosphor, dream, beach, builder, june, i3, lime, arctic, lipstick,
eighties, safari, bright, bank, sail, casino, clouds, valentine, fed, space,
purple, playground, vintage, gray, flamingo.
"""
if theme in THEMES:
_meta['theme'] = theme
else:
raise ValueError("Unknown theme")
def get_theme():
"""
Get the current theme.
"""
return _meta['theme']
def get_themes():
"""
Get a list of available themes.
"""
return THEMES
def make_widget(wtype, *args, **kwargs):
"""
Make a widget of type *wtype*, given as the lowercase name of the widget
class (a string). The remaining parameters are passed to the widget as-is.
"""
return TYPES[wtype](*args, **kwargs)
def flush(output):
def encode(x):
return BenJson(json.dumps(x))
if _widgets:
set_text(title=_title.flush() if _title else None,
description=_description.flush() if _description else None)
output(map(encode, chain([_meta], _widgets.itervalues())))
def line_number():
return [frame[2] for frame in inspect.stack() if frame[1] == MAIN][0]
class Summary(object):
def __init__(self, template, values=None, default='[none]'):
self.template = template
# Use a fresh dict per instance to avoid sharing a mutable default argument.
self.values = {} if values is None else values
self.default = default
def flush(self):
if callable(self.template):
return self.template()
else:
for key in re.findall('{(\w+).*?}', self.template):
if key not in self.values:
self.values[key] = self.default
return self.template.format(**self.values)
class Title(Summary):
"""
Sets the title of the card.
"""
def __init__(self, template, values=None, default='[none]'):
global _title
_title = self
super(Title, self).__init__(template, values, default)
class Description(Summary):
"""
Sets the description of the card.
"""
def __init__(self, template, values=None, default='[none]'):
global _description
_description = self
super(Description, self).__init__(template, values, default)
class Widget(object):
defaults = {}
def __init__(self, **kwargs):
kwargs['id'] = kwargs.get('id', uuid4().hex)
kwargs['type'] = self.__class__.__name__.lower()
if '_line_no' not in kwargs:
kwargs['_line_no'] = line_number()
for k, v in self.defaults.iteritems():
kwargs[k] = kwargs.get(k, v)
self.output(kwargs)
def output(self, kwargs):
group = kwargs.pop('group', None)
if group:
group._add(kwargs)
else:
_widgets[kwargs['id']] = kwargs
class Group(Widget):
"""
A widget :class:`Group` can be used to take better control of how
the widgets are positioned on a board. A widget group behaves like a
single widget in the board layout.
To add widgets to a group, use the group option when creating
other widgets.
Note that the size of a group is determined by its contents and cannot
be set manually.
:param layout: 'vertical' or 'horizontal'
"""
def __init__(self, group=None, id=None, layout='horizontal'):
self.id = id if id else uuid4().hex
self.group = group
self.layout = layout
self.widgets = OrderedDict()
self._output()
def _add(self, widget):
self.widgets[widget['id']] = widget
self._output()
def _output(self):
self.output({'id': self.id,
'type': 'group',
'group': self.group,
'layout': self.layout,
'data': self.widgets.values()})
class Map(Widget):
"""
Displays a map with countries colored according to given data.
The color scale and map position are determined automatically.
- **data:**
A dictionary where keys are `2-letter country codes
<http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2>`_
and values are numbers.
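For example (a minimal sketch with made-up values):
.. code-block:: python
    Map(label='Visitors by country', data={'US': 120, 'FI': 45, 'DE': 30})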
"""
defaults = {'size': [3,3]}
class Line(Widget):
"""
Displays a line chart of a time series.
- **data:**
A list of `(timestamp, value)` tuples, where timestamp is a string
in the `ISO 8601 format <http://en.wikipedia.org/wiki/ISO_8601>`_
and value is a number.
OR to show multiple series on the same chart:
A list of `{"label": label, "data": data}` objects, where
*label* is a string shown in the chart legend
and *data* is a list of tuples as defined above.
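For example, two series on the same chart (a minimal sketch with made-up values):
.. code-block:: python
    Line(label='Visitors',
         data=[{'label': 'new',
                'data': [('2013-05-01', 10), ('2013-05-02', 12)]},
               {'label': 'returning',
                'data': [('2013-05-01', 3), ('2013-05-02', 5)]}])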
"""
defaults = {'size': [3,3]}
class Users(Widget):
"""
Displays a list of users using avatar images from
`Gravatar <http://gravatar.com>`_.
:param data: A list of *user dictionaries*.
:param large: If *True*, use double-size avatars (default: *False*).
*User dictionary:*
- **gravatar_hash**: A MD5 hash of the user's email address (use :func:`gravatar_hash`).
- **username**: A string shown when hovering over the avatar.
"""
defaults = {'size': [3,3]}
class Timeline(Widget):
"""
Displays a list of messages with optional avatars and timestamps.
- **data:** A list of timeline *event dictionaries*.
*Event dictionary:*
- **gravatar_hash**: A MD5 hash of the user's email address (use :func:`gravatar_hash`).
- **username**: A string shown before the message.
- **message**: A string that describes the event.
- **color**: A theme color (integer between 1-3).
- **timestamp**: An `ISO 8601 timestamp <http://en.wikipedia.org/wiki/ISO_8601>`_
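For example (a minimal sketch with a made-up user and message):
.. code-block:: python
    Timeline(data=[{'gravatar_hash': gravatar_hash('user@example.com'),
                    'username': 'alice',
                    'message': 'signed up',
                    'color': 2,
                    'timestamp': '2013-05-01T12:00:00Z'}])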
"""
defaults = {'size': [3,3]}
class Text(Widget):
"""
Displays a large colored text and/or a paragraph.
:param head: A string that will be colored and fitted to be
as large as the widget size allows.
:param text: A string that will be shown as a normal-sized paragraph
below the heading.
"""
defaults = {'color': 1,
'size': [3,3]}
def __init__(self, **kwargs):
fields = ('text', 'head')
kwargs.setdefault('data', {}).update((k, kwargs.pop(k))
for k in fields if k in kwargs)
super(Text, self).__init__(**kwargs)
class Bar(Widget):
"""
Displays an ordinal bar chart.
- **data:** A list of `(label, value)` tuples, where *label* is the label
shown for each bar on the x-axis and *value* is a number determining the
height of the bar, OR a Python dictionary.
"""
defaults = {'size': [3,2],
'data': []}
def __init__(self, **kwargs):
if isinstance(kwargs.get('data', None), Mapping):
kwargs['data'] = sorted([[k, v] for k, v in kwargs['data'].items()])
super(Bar, self).__init__(**kwargs)
class Table(Widget):
"""
Displays a table of dictionaries with keys as headers and values
as cell contents.
The :class:`Table` widget can be used to export data from a card
either as CSV or JSON. Choose the desired format by setting
`json_export` and/or `csv_export` to *True*.
You can export rows of a table either manually by clicking the export
button on the table or by fetching data programmatically from the URL
linked to the export button. The `id` of the table is used as the
file name for the exported data.
:param data: A list of dictionaries.
:param json_export: (boolean) enable exporting rows of the table as JSON.
:param csv_export: (boolean) enable exporting rows of the table as CSV.
:param chart: To visualize numbers inside the table, provide
a dictionary with `{header_name: chart_type}` pairings.
The values in the corresponding column must be
normalized between 0 and 1. The only allowed type
for *chart_type* is currently `bar`.
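For example (a minimal sketch; the rows and the `score` column are made up):
.. code-block:: python
    Table(label='Top pages',
          data=[{'page': '/home', 'score': 0.9},
                {'page': '/about', 'score': 0.4}],
          chart={'score': 'bar'},
          csv_export=True)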
"""
defaults = {'size': [3,2]}
def gravatar_hash(email):
"""
Return a `Gravatar <http://gravatar.com>`_ hash for the given email address.
"""
return md5.md5(email.lower().strip()).hexdigest()
TYPES = {'map': Map,
'line': Line,
'text': Text,
'bar': Bar,
'table': Table,
'users': Users,
'timeline': Timeline}
if __name__ == '__main__':
MAIN = 'widgets.py'
g = Group()
g1 = Group(group=g)
Text(data='top-level')
g2 = Group(group=g, id='subgroup')
Text(group=g2, data='subgroup-text')
g3 = Group(group=g2)
Text(group=g3, data='subsubgroup-text')
print json.dumps(_widgets.values())
|
{
"content_hash": "6aaff2d5109e1fa45ad3d72eb3839e5c",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 93,
"avg_line_length": 29.758454106280194,
"alnum_prop": 0.6122564935064935,
"repo_name": "bitdeli/bitdeli-py",
"id": "2a3725c15de160832012b7052db6ebe8458a6961",
"size": "12320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bitdeli/widgets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51390"
}
],
"symlink_target": ""
}
|
import argparse
__author__ = 'kinpa200296'
def check_if_sorted(filename, ascending=True):
with open(filename, 'r') as f:
s = f.readline()
if s == '':
return True
prev = int(s)
s = f.readline()
while s != '':
cur = int(s)
if ascending:
if cur >= prev:
prev = cur
else:
return False
else:
if cur <= prev:
prev = cur
else:
return False
s = f.readline()
return True
def execute_from_command_line():
p = argparse.ArgumentParser()
p.description = 'checks if numbers in the specified file are sorted in ascending order'
p.add_argument('input_file', help='specify input file')
args = p.parse_args()
print 'Starting check of file {}...'.format(args.input_file)
if check_if_sorted(args.input_file):
print 'Numbers are sorted ascending'
else:
print 'Numbers are not sorted ascending'
if __name__ == '__main__':
execute_from_command_line()
|
{
"content_hash": "6ca5ac783083815ab0b8a875fcf39cf7",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 86,
"avg_line_length": 25.266666666666666,
"alnum_prop": 0.5083553210202286,
"repo_name": "kinpa200296/python_labs",
"id": "6426a7aa1144603b703bbace9e0249804b2e5c54",
"size": "1160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lab2/myutils/lab2_task1/checker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "734"
},
{
"name": "Python",
"bytes": "52669"
}
],
"symlink_target": ""
}
|
"""\
Dynamic inventory for Terraform - finds all `.tfstate` files below the working
directory and generates an inventory based on them.
"""
from __future__ import unicode_literals, print_function
import argparse
from collections import defaultdict
from functools import wraps
import json
import os
import re
def tfstates(root=None):
root = root or os.getcwd()
for dirpath, _, filenames in os.walk(root):
for name in filenames:
if os.path.splitext(name)[-1] == '.tfstate':
yield os.path.join(dirpath, name)
def iterresources(filenames):
for filename in filenames:
with open(filename, 'r') as json_file:
state = json.load(json_file)
for module in state['modules']:
for key, resource in module['resources'].items():
yield key, resource
## READ RESOURCES
PARSERS = {}
def _clean_dc(dcname):
# Consul DCs are strictly alphanumeric with underscores and hyphens -
# ensure that the consul_dc attribute meets these requirements.
return re.sub('[^\w_\-]', '-', dcname)
def iterhosts(resources):
'''yield host tuples of (name, attributes, groups)'''
for key, resource in resources:
resource_type, name = key.split('.', 1)
try:
parser = PARSERS[resource_type]
except KeyError:
continue
yield parser(resource)
def parses(prefix):
def inner(func):
PARSERS[prefix] = func
return func
return inner
def calculate_mi_vars(func):
"""calculate microservices-infrastructure vars"""
@wraps(func)
def inner(*args, **kwargs):
name, attrs, groups = func(*args, **kwargs)
# attrs
if attrs.get('role', '') == 'control':
attrs['consul_is_server'] = True
else:
attrs['consul_is_server'] = False
# groups
if attrs.get('publicly_routable', False):
groups.append('publicly_routable')
return name, attrs, groups
return inner
def _parse_prefix(source, prefix, sep='.'):
for compkey, value in source.items():
try:
curprefix, rest = compkey.split(sep, 1)
except ValueError:
continue
if curprefix != prefix or rest == '#':
continue
yield rest, value
def parse_attr_list(source, prefix, sep='.'):
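"""Collect a flattened Terraform attribute list back into a list of dicts.
A hedged sketch of the expected shapes (the key names are illustrative only):
a source such as {'network.#': '1', 'network.0.name': 'net0'} with
prefix 'network' yields [{'name': 'net0'}].
"""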
size_key = '%s%s#' % (prefix, sep)
try:
size = int(source[size_key])
except KeyError:
return []
attrs = [{} for _ in range(size)]
for compkey, value in _parse_prefix(source, prefix, sep):
nth, key = compkey.split(sep, 1)
attrs[int(nth)][key] = value
return attrs
def parse_dict(source, prefix, sep='.'):
return dict(_parse_prefix(source, prefix, sep))
def parse_list(source, prefix, sep='.'):
return [value for _, value in _parse_prefix(source, prefix, sep)]
def parse_bool(string_form):
token = string_form.lower()[0]
if token == 't':
return True
elif token == 'f':
return False
else:
raise ValueError('could not convert %r to a bool' % string_form)
@parses('openstack_compute_instance_v2')
@calculate_mi_vars
def openstack_host(resource, tfvars=None):
raw_attrs = resource['primary']['attributes']
name = raw_attrs['name']
groups = []
attrs = {
'access_ip_v4': raw_attrs['access_ip_v4'],
'access_ip_v6': raw_attrs['access_ip_v6'],
'flavor': parse_dict(raw_attrs, 'flavor',
sep='_'),
'id': raw_attrs['id'],
'image': parse_dict(raw_attrs, 'image',
sep='_'),
'key_pair': raw_attrs['key_pair'],
'metadata': parse_dict(raw_attrs, 'metadata'),
'network': parse_attr_list(raw_attrs, 'network'),
'region': raw_attrs['region'],
'security_groups': parse_list(raw_attrs, 'security_groups'),
#ansible
'ansible_ssh_port': 22,
'ansible_ssh_user': 'centos',
# workaround for an OpenStack bug where hosts have a different domain
# after they're restarted
'host_domain': 'novalocal',
'use_host_domain': True,
}
try:
attrs.update({
'ansible_ssh_host': raw_attrs['access_ip_v4'],
'publicly_routable': True,
})
except (KeyError, ValueError):
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
# attrs specific to microservices-infrastructure
attrs.update({
'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
'role': attrs['metadata'].get('role', 'none')
})
# add groups based on attrs
groups.append('os_image=' + attrs['image']['name'])
groups.append('os_flavor=' + attrs['flavor']['name'])
groups.extend('os_metadata_%s=%s' % item
for item in attrs['metadata'].items())
groups.append('os_region=' + attrs['region'])
# groups specific to microservices-infrastructure
groups.append('role=' + attrs['metadata'].get('role', 'none'))
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
@parses('aws_instance')
@calculate_mi_vars
def aws_host(resource, tfvars=None):
name = resource['primary']['attributes']['tags.Name']
raw_attrs = resource['primary']['attributes']
groups = []
attrs = {
'ami': raw_attrs['ami'],
'availability_zone': raw_attrs['availability_zone'],
'ebs_block_device': parse_attr_list(raw_attrs, 'ebs_block_device'),
'ebs_optimized': parse_bool(raw_attrs['ebs_optimized']),
'ephemeral_block_device': parse_attr_list(raw_attrs,
'ephemeral_block_device'),
'id': raw_attrs['id'],
'key_name': raw_attrs['key_name'],
'private': parse_dict(raw_attrs, 'private',
sep='_'),
'public': parse_dict(raw_attrs, 'public',
sep='_'),
'root_block_device': parse_attr_list(raw_attrs, 'root_block_device'),
'security_groups': parse_attr_list(raw_attrs, 'security_groups'),
'subnet': parse_dict(raw_attrs, 'subnet',
sep='_'),
'tags': parse_dict(raw_attrs, 'tags'),
'tenancy': raw_attrs['tenancy'],
'vpc_security_group_ids': parse_list(raw_attrs,
'vpc_security_group_ids'),
# ansible-specific
'ansible_ssh_port': 22,
'ansible_ssh_user': raw_attrs['tags.sshUser'],
'ansible_ssh_host': raw_attrs['public_ip'],
}
# attrs specific to microservices-infrastructure
attrs.update({
'consul_dc': attrs['tags'].get('dc'),
'role': attrs['tags'].get('role', 'none')
})
# groups specific to microservices-infrastructure
groups.extend(['aws_ami=' + attrs['ami'],
'aws_az=' + attrs['availability_zone'],
'aws_key_name=' + attrs['key_name'],
'aws_tenancy=' + attrs['tenancy']])
groups.extend('aws_tag_%s=%s' % item for item in attrs['tags'].items())
groups.extend('aws_vpc_security_group=' + group
for group in attrs['vpc_security_group_ids'])
groups.extend('aws_subnet_%s=%s' % subnet
for subnet in attrs['subnet'].items())
# groups specific to microservices-infrastructure
groups.append('role=' + attrs['role'])
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
@parses('google_compute_instance')
@calculate_mi_vars
def gce_host(resource, tfvars=None):
name = resource['primary']['id']
raw_attrs = resource['primary']['attributes']
groups = []
# network interfaces
interfaces = parse_attr_list(raw_attrs, 'network_interface')
for interface in interfaces:
interface['access_config'] = parse_attr_list(interface,
'access_config')
for key in list(interface.keys()):
if '.' in key:
del interface[key]
# general attrs
attrs = {
'can_ip_forward': raw_attrs['can_ip_forward'] == 'true',
'disks': parse_attr_list(raw_attrs, 'disk'),
'machine_type': raw_attrs['machine_type'],
'metadata': parse_dict(raw_attrs, 'metadata'),
'network': parse_attr_list(raw_attrs, 'network'),
'network_interface': interfaces,
'self_link': raw_attrs['self_link'],
'service_account': parse_attr_list(raw_attrs, 'service_account'),
'tags': parse_list(raw_attrs, 'tags'),
'zone': raw_attrs['zone'],
# ansible
'ansible_ssh_port': 22,
'ansible_ssh_user': 'deploy',
}
# attrs specific to microservices-infrastructure
attrs.update({
'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['zone'])),
'role': attrs['metadata'].get('role', 'none')
})
try:
attrs.update({
'ansible_ssh_host': interfaces[0]['access_config'][0]['nat_ip'],
'publicly_routable': True,
})
except (KeyError, ValueError):
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
# add groups based on attrs
groups.extend('gce_image=' + disk['image'] for disk in attrs['disks'])
groups.append('gce_machine_type=' + attrs['machine_type'])
groups.extend('gce_metadata_%s=%s' % (key, value)
for (key, value) in attrs['metadata'].items()
if key not in set(['sshKeys']))
groups.extend('gce_tag=' + tag for tag in attrs['tags'])
groups.append('gce_zone=' + attrs['zone'])
if attrs['can_ip_forward']:
groups.append('gce_ip_forward')
if attrs['publicly_routable']:
groups.append('gce_publicly_routable')
# groups specific to microservices-infrastructure
groups.append('role=' + attrs['metadata'].get('role', 'none'))
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
## QUERY TYPES
def query_host(hosts, target):
for name, attrs, _ in hosts:
if name == target:
return attrs
return {}
def query_list(hosts):
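"""Build the structure returned for --list.
A hedged sketch of the output shape expected by Ansible dynamic inventories
(the group and host names are illustrative only):
{'some_group': {'hosts': ['host-1']}, '_meta': {'hostvars': {'host-1': {...}}}}
"""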
groups = defaultdict(dict)
meta = {}
for name, attrs, hostgroups in hosts:
for group in set(hostgroups):
groups[group].setdefault('hosts', [])
groups[group]['hosts'].append(name)
meta[name] = attrs
groups['_meta'] = {'hostvars': meta}
return groups
def main():
parser = argparse.ArgumentParser(
prog=__file__,
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
modes = parser.add_mutually_exclusive_group(required=True)
modes.add_argument('--list',
action='store_true',
help='list all variables')
modes.add_argument('--host', help='list variables for a single host')
parser.add_argument('--pretty',
action='store_true',
help='pretty-print output JSON')
parser.add_argument('--nometa',
action='store_true',
help='with --list, exclude hostvars')
default_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', ))
parser.add_argument('--root',
default=default_root,
help='custom root to search for `.tfstate`s in')
args = parser.parse_args()
hosts = iterhosts(iterresources(tfstates(args.root)))
if args.list:
output = query_list(hosts)
if args.nometa:
del output['_meta']
else:
output = query_host(hosts, args.host)
print(json.dumps(output, indent=4 if args.pretty else None))
parser.exit()
if __name__ == '__main__':
main()
|
{
"content_hash": "98e97c2d4b61150c8b83c0f90e34d4dc",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 78,
"avg_line_length": 31.68617021276596,
"alnum_prop": 0.5683229813664596,
"repo_name": "cmgc/microservices-infrastructure",
"id": "ad6c8ac44a4d220921eacb468fb1932e5e1d06a8",
"size": "11936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/inventory/terraform.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "82749"
},
{
"name": "Ruby",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "17519"
}
],
"symlink_target": ""
}
|
"""Command line tool to run the Google API library generator.
Usage:
$ PYTHONPATH=$(/bin/pwd)/src \
$(/bin/pwd)/src/googleapis/codegen/generate_library.py \
--api_name=plus --api_version=v1 --output_dir=plus_lib
"""
__author__ = 'aiuto@google.com (Tony Aiuto)'
import collections
import json
import logging
import os
import httplib2
from google.apputils import app
import gflags as flags
from google.apputils import resources
from googleapis.codegen import generator_lookup
from googleapis.codegen.filesys import package_writer_foundry
from googleapis.codegen.targets import Targets
FLAGS = flags.FLAGS
flags.DEFINE_string(
'api_name',
None,
'API to generate for. E.g. "plus".')
flags.DEFINE_string(
'api_version',
None,
'version of "api_name" to generate for. E.g. "v1".')
flags.DEFINE_string(
'discovery_server',
'www.googleapis.com',
'The discovery server to use for loading "api_name"')
flags.DEFINE_string(
'discovery_version',
'v1',
'The discovery version to use for loading "api_name"')
flags.DEFINE_boolean(
'include_timestamp',
False,
'Adds a timestamp to the generated source files.')
flags.DEFINE_string(
'input',
None,
'A discovery document captured from a discovery service.')
flags.DEFINE_enum(
'language',
'java',
generator_lookup.SupportedLanguages(),
'Target language for the generated library')
flags.DEFINE_string(
'language_variant',
'default',
'which variant of "language" to generate for. E.g. "stable" vs. "head".')
flags.DEFINE_string(
'output_dir',
None,
'A path to a directory where the generated files will be created.')
flags.DEFINE_string(
'output_file',
None,
'An output file path to contain the archive for the generated library.'
' The contents of the file are determined by the output_format parameter')
flags.DEFINE_enum(
'output_format',
'zip',
['zip', 'tgz', 'tar'],
'What format to use for --output_file.')
flags.DEFINE_enum(
'output_type',
'plain',
['plain', 'full'],
'What kind of output to make.'
' plain=just the source,'
' full=turn on all the optional parts (useful for testing the generator).'
)
flags.DEFINE_string(
'package_path',
None,
'Use an alternate path for the generated code. This must be a file path'
' using "/" as a separator, not "."'
)
flags.DEFINE_bool('version_package', False, 'Put API version in package paths')
flags.DEFINE_bool('verbose', False, 'Enable verbose logging')
flags.DECLARE_key_flag('api_name')
flags.DECLARE_key_flag('api_version')
flags.DECLARE_key_flag('include_timestamp')
flags.DECLARE_key_flag('input')
flags.DECLARE_key_flag('language')
flags.DECLARE_key_flag('language_variant')
flags.DECLARE_key_flag('output_dir')
flags.DECLARE_key_flag('output_file')
flags.DECLARE_key_flag('output_format')
flags.DECLARE_key_flag('output_type')
flags.DECLARE_key_flag('package_path')
flags.DECLARE_key_flag('version_package')
def main(unused_argv):
if not (FLAGS.api_name or FLAGS.input):
raise app.UsageError('You must specify one of --api_name or --input')
if not (FLAGS.output_dir or FLAGS.output_file):
raise app.UsageError(
'You must specify one of --output_dir or --output_file')
if FLAGS.output_dir and FLAGS.output_file:
raise app.UsageError(
'You can only specify one of --output_dir or --output_file')
if FLAGS.verbose:
logging.basicConfig(level=logging.DEBUG)
# Get the discovery document
if FLAGS.api_name:
if not FLAGS.api_version:
raise app.UsageError('You must specify --api_version with --api_name')
content = GetApiDiscovery(FLAGS.api_name, FLAGS.api_version)
else:
f = open(FLAGS.input)
content = f.read()
f.close()
discovery_doc = json.loads(content, object_pairs_hook=collections.OrderedDict)
package_writer = package_writer_foundry.GetPackageWriter(
output_dir=FLAGS.output_dir, output_file=FLAGS.output_file,
output_format=FLAGS.output_format)
Generate(discovery_doc=discovery_doc,
package_writer=package_writer,
include_timestamp=FLAGS.include_timestamp,
version_package=FLAGS.version_package,
package_path=FLAGS.package_path,
output_type=FLAGS.output_type,
language=FLAGS.language,
language_variant=FLAGS.language_variant)
return 0
def Generate(discovery_doc, package_writer,
include_timestamp=False,
version_package=False,
package_path=None,
output_type='plain',
language='java',
language_variant='default',
callback=None):
"""Generate a library package from discovery and options."""
options = {
# Include other files needed to compile (e.g. base jar files)
'include_dependencies': False,
# Include the timestamp in the generated library
'include_timestamp': include_timestamp,
# Put API version in the package
'version_package': version_package,
# Custom package name
'package_path': package_path,
}
if output_type == 'full':
options['include_dependencies'] = True
# determine language version from language variant.
language_variations = Targets().VariationsForLanguage(language)
if not language_variations:
raise app.UsageError('Language %s missing from '
'apiserving/libgen/gen/targets.json' %
language)
features = language_variations.GetFeatures(language_variant)
if not features:
raise app.UsageError('Unsupported language variant: '
'%s/%s/features.json is missing' %
(language, language_variant))
try:
generator_class = generator_lookup.GetGeneratorByLanguage(
features.get('generator', language))
except ValueError:
raise app.UsageError('Unsupported language: %s' % language)
generator = generator_class(discovery_doc, options=options)
generator.SetTemplateDir(features.template_dir)
generator.SetFeatures(features)
generator.GeneratePackage(package_writer)
package_writer.DoneWritingArchive()
if callback:
callback(discovery_doc=discovery_doc,
package_writer=package_writer,
include_timestamp=include_timestamp,
version_package=version_package,
package_path=package_path,
output_type=output_type,
language=language,
language_variant=language_variant)
def GetApiDiscovery(api_name, api_version):
"""Get a discovery doc from the discovery server."""
api_path = 'apis/%s/%s/rest' % (api_name, api_version)
discovery_url = 'https://%s/discovery/%s/%s' % (
FLAGS.discovery_server, FLAGS.discovery_version, api_path)
http = httplib2.Http()
_, content = http.request(discovery_url)
discovery_doc = json.loads(content)
error = discovery_doc.get('error')
if error:
raise app.Error(error)
return content
if __name__ == '__main__':
app.run()
|
{
"content_hash": "a68f5e33c2fb7c8598b427641209bd8c",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 80,
"avg_line_length": 32.53917050691244,
"alnum_prop": 0.6765330689704008,
"repo_name": "Byclosure/google-apis-client-generator",
"id": "72e4adeecd0c3966bcfcc74e99e827a098ac206b",
"size": "7689",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/googleapis/codegen/generate_library.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "547044"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class URLDecode(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the URLDecode Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(URLDecode, self).__init__(temboo_session, '/Library/Utilities/Encoding/URLDecode')
def new_input_set(self):
return URLDecodeInputSet()
def _make_result_set(self, result, path):
return URLDecodeResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return URLDecodeChoreographyExecution(session, exec_id, path)
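# A hedged usage sketch, following the usual Temboo SDK pattern. The session
# import path, constructor arguments, and the execute_with_results() call are
# assumptions based on Temboo's documented conventions, not verified against
# this SDK version:
#
#     from temboo.core.session import TembooSession
#     session = TembooSession('ACCOUNT_NAME', 'APP_NAME', 'APP_KEY')
#     choreo = URLDecode(session)
#     inputs = choreo.new_input_set()
#     inputs.set_Text('hello%20world')
#     results = choreo.execute_with_results(inputs)
#     print(results.get_URLDecodedText())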
class URLDecodeInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the URLDecode
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Text(self, value):
"""
Set the value of the Text input for this Choreo. ((required, string) The text that should be URL decoded.)
"""
super(URLDecodeInputSet, self)._set_input('Text', value)
class URLDecodeResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the URLDecode Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_URLDecodedText(self):
"""
Retrieve the value for the "URLDecodedText" output from this Choreo execution. ((string) The URL decoded text.)
"""
return self._output.get('URLDecodedText', None)
class URLDecodeChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return URLDecodeResultSet(response, path)
|
{
"content_hash": "0182124105c41933b731e2fdd3a18f13",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 119,
"avg_line_length": 35.714285714285715,
"alnum_prop": 0.704,
"repo_name": "jordanemedlock/psychtruths",
"id": "df6c27d0286e024ecd425781b2c466b1672edf13",
"size": "2867",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/Library/Utilities/Encoding/URLDecode.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
}
|
"""Abortable Tasks.
Abortable tasks overview
=========================
For long-running :class:`Task`'s, it can be desirable to support
aborting during execution. Of course, these tasks should be built to
support abortion specifically.
The :class:`AbortableTask` serves as a base class for all :class:`Task`
objects that should support abortion by producers.
* Producers may invoke the :meth:`abort` method on
:class:`AbortableAsyncResult` instances, to request abortion.
* Consumers (workers) should periodically check (and honor!) the
:meth:`is_aborted` method at controlled points in their task's
:meth:`run` method. The more often, the better.
The necessary intermediate communication is dealt with by the
:class:`AbortableTask` implementation.
Usage example
-------------
In the consumer:
.. code-block:: python
from __future__ import absolute_import
from celery.contrib.abortable import AbortableTask
from celery.utils.log import get_task_logger
from proj.celery import app
logger = get_task_logger(__name__)
@app.task(bind=True, base=AbortableTask)
def long_running_task(self):
results = []
for i in range(100):
# check after every 5 iterations...
# (or alternatively, check when some timer is due)
if not i % 5:
if self.is_aborted():
# respect aborted state, and terminate gracefully.
logger.warning('Task aborted')
return
value = do_something_expensive(i)
results.append(value)
logger.info('Task complete')
return results
In the producer:
.. code-block:: python
from __future__ import absolute_import
import time
from proj.tasks import long_running_task
def myview(request):
# result is of type AbortableAsyncResult
result = long_running_task.delay()
# abort the task after 10 seconds
time.sleep(10)
result.abort()
After the `result.abort()` call, the task execution isn't
aborted immediately. In fact, it's not guaranteed to abort at all.
Keep checking `result.state`, or call `result.get(timeout=...)` to
have it block until the task is finished.
.. note::
In order to abort tasks, there needs to be communication between the
producer and the consumer. This is currently implemented through the
database backend. Therefore, this class will only work with the
database backends.
"""
from __future__ import absolute_import, unicode_literals
from celery import Task
from celery.result import AsyncResult
__all__ = ('AbortableAsyncResult', 'AbortableTask')
"""
Task States
-----------
.. state:: ABORTED
ABORTED
~~~~~~~
Task is aborted (typically by the producer) and should be
aborted as soon as possible.
"""
ABORTED = 'ABORTED'
class AbortableAsyncResult(AsyncResult):
"""Represents an abortable result.
Specifically, this gives the `AsyncResult` an :meth:`abort()` method,
that sets the state of the underlying Task to `'ABORTED'`.
"""
def is_aborted(self):
"""Return :const:`True` if the task is (being) aborted."""
return self.state == ABORTED
def abort(self):
"""Set the state of the task to :const:`ABORTED`.
Abortable tasks monitor their state at regular intervals and
terminate execution if the state has been set to :const:`ABORTED`.
Warning:
Be aware that invoking this method does not guarantee when the
task will be aborted (or even if the task will be aborted at all).
"""
# TODO: store_result requires all four arguments to be set,
# but only state should be updated here
return self.backend.store_result(self.id, result=None,
state=ABORTED, traceback=None)
class AbortableTask(Task):
"""Task that can be aborted.
This serves as a base class for all :class:`Task`'s
that support aborting during execution.
All subclasses of :class:`AbortableTask` must call the
:meth:`is_aborted` method periodically and act accordingly when
the call evaluates to :const:`True`.
"""
abstract = True
def AsyncResult(self, task_id):
"""Return the accompanying AbortableAsyncResult instance."""
return AbortableAsyncResult(task_id, backend=self.backend)
def is_aborted(self, **kwargs):
"""Return true if task is aborted.
Checks against the backend whether this
:class:`AbortableAsyncResult` is :const:`ABORTED`.
Always return :const:`False` in case the `task_id` parameter
refers to a regular (non-abortable) :class:`Task`.
Be aware that invoking this method will cause a hit in the
backend (for example a database query), so find a good balance
between calling it regularly (for responsiveness), but not too
often (for performance).
"""
task_id = kwargs.get('task_id', self.request.id)
result = self.AsyncResult(task_id)
if not isinstance(result, AbortableAsyncResult):
return False
return result.is_aborted()
|
{
"content_hash": "3af5e5437baaf92a7d3c2466a03d9b92",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 78,
"avg_line_length": 30.11111111111111,
"alnum_prop": 0.6599339677607302,
"repo_name": "mdworks2016/work_development",
"id": "36cce30dd69a5e752a98310c3b665f9ce4ee5799",
"size": "5173",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Python/20_Third_Certification/venv/lib/python3.7/site-packages/celery/contrib/abortable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "142"
},
{
"name": "Kotlin",
"bytes": "68744"
},
{
"name": "Python",
"bytes": "1080"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, patterns, url
from rest_framework.routers import SimpleRouter
import mkt.feed.views as views
from mkt.api.base import SubRouterWithFormat
from mkt.api.v1.urls import urlpatterns as v1_urls
from mkt.api.views import endpoint_removed
from mkt.search.views import RocketbarViewV2
feed = SimpleRouter()
feed.register(r'apps', views.FeedAppViewSet, base_name='feedapps')
feed.register(r'brands', views.FeedBrandViewSet, base_name='feedbrands')
feed.register(r'collections', views.FeedCollectionViewSet,
base_name='feedcollections')
feed.register(r'shelves', views.FeedShelfViewSet, base_name='feedshelves')
feed.register(r'items', views.FeedItemViewSet, base_name='feeditems')
subfeedapp = SubRouterWithFormat()
subfeedapp.register('image', views.FeedAppImageViewSet,
base_name='feed-app-image')
subfeedcollection = SubRouterWithFormat()
subfeedcollection.register('image', views.FeedCollectionImageViewSet,
base_name='feed-collection-image')
subfeedshelf = SubRouterWithFormat()
subfeedshelf.register('image', views.FeedShelfImageViewSet,
base_name='feed-shelf-image')
urlpatterns = patterns('',
url(r'^apps/search/rocketbar/', RocketbarViewV2.as_view(),
name='rocketbar-search-api'),
url(r'^rocketfuel/collections/.*', endpoint_removed),
url(r'^feed/builder/$', views.FeedBuilderView.as_view(),
name='feed.builder'),
url(r'^feed/elements/search/$', views.FeedElementSearchView.as_view(),
name='feed.element-search'),
url(r'^feed/get/', views.FeedView.as_view(), name='feed.get'),
url(r'^feed/', include(feed.urls)),
url(r'^feed/apps/', include(subfeedapp.urls)),
url(r'^feed/collections/', include(subfeedcollection.urls)),
url(r'^feed/shelves/', include(subfeedshelf.urls)),
url(r'^feed/shelves/(?P<pk>[^/.]+)/publish/$',
views.FeedShelfPublishView.as_view(),
name='feed-shelf-publish'),
url(r'^consumer/feed/(?P<item_type>[\w]+)/(?P<slug>[^/.]+)/$',
views.FeedElementGetView.as_view(), name='feed.feed_element_get'),
# Remove fireplace version once fireplace has been updated to use
# consumer/feed/ with ?app_serializer=fireplace.
url(r'^fireplace/feed/(?P<item_type>[\w]+)/(?P<slug>[^/.]+)/$',
views.FeedElementGetView.as_view(), name='feed.fire_feed_element_get'),
url(r'^transonic/feed/(?P<item_type>[\w]+)/$',
views.FeedElementListView.as_view(), name='feed.feed_element_list'),
) + v1_urls
|
{
"content_hash": "9fd49d67f8d7b2591f432a43d937bd81",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 79,
"avg_line_length": 45.375,
"alnum_prop": 0.6938213301849665,
"repo_name": "andymckay/zamboni",
"id": "822eb818fd68bbc9bc0fbb7a1807a5b5637ec182",
"size": "2541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/api/v2/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "357533"
},
{
"name": "JavaScript",
"bytes": "524153"
},
{
"name": "Python",
"bytes": "3863676"
},
{
"name": "Shell",
"bytes": "14980"
}
],
"symlink_target": ""
}
|
from film_crawler.base import logger, Helper, Config
from film_crawler.crawler import DoubanCrawler, FilmDownloadInfoCrawlerFactory
from film_crawler.data import PersistenceFilmQueryInfo, StatFilmQueryInfo
def film_query_info_wrapper():
logger.info('Films of Douban user [ID:{}][{}]'.format(Config.DOUBAN_USER_ID, Config.DOUBAN_FILM_QUERY_INFO_TYPE_READABLE))
films = []
if Config.IS_LOAD_FILM_QUERY_INFO_FROM_FILE:
logger.info('> Loading films from the local file...')
films = PersistenceFilmQueryInfo.load_from_file(Config.DOUBAN_FILM_QUERY_INFO_FILENAME)
logger.info('< Finished loading, [{}] films in total.'.format(len(films)))
else:
logger.info('> Starting to crawl...')
douban_crawler = DoubanCrawler()
films = douban_crawler.crawl(Config.DOUBAN_USER_ID, Config.DOUBAN_FILM_QUERY_INFO_TYPE,
Config.IS_FETCH_FILM_QUERY_DETAIL_INFO)
if Config.IS_SAVE_FILM_QUERY_INFO_TO_FILE:
PersistenceFilmQueryInfo.save_to_file(films, Config.DOUBAN_FILM_QUERY_INFO_FILENAME)
logger.info('< Finished crawling, [{}] films in total.'.format(len(films)))
if Config.IS_STAT_FILM_QUERY_INFO:
StatFilmQueryInfo.stat_type_list(films, 'genre_wordcloud.png')
StatFilmQueryInfo.stat_cast_list(films, 'cast_wordcloud.png')
StatFilmQueryInfo.stat_director_list(films, 'director_wordcloud.png')
StatFilmQueryInfo.stat_writer_list(films, 'writer_wordcloud.png')
StatFilmQueryInfo.stat_country_list(films, 'country_wordcloud.png')
return films
def film_download_info_wrapper(films):
download_info_crawlers = [
# FilmDownloadInfoCrawlerFactory('dy2018'),
FilmDownloadInfoCrawlerFactory('dytt8'),
FilmDownloadInfoCrawlerFactory('btrenren')
]
for film in films:
logger.info('> Crawling download pages for film [{}] ({} {})...'.format(film.name, film.detail_info.douban_rate,
film.detail_info.douban_rate_people))
for c in download_info_crawlers:
film.download_info_list = c.crawl(film.name)
for download_info in film.download_info_list:
logger.info(' {} - {}'.format(download_info.title, download_info.url))
Helper.sleep_uniform(Config.CRAWL_INTERVAL_MIN_SEC, Config.CRAWL_INTERVAL_MAX_SEC)
if __name__ == '__main__':
films = film_query_info_wrapper()
if Config.IS_FETCH_FILM_DOWNLOAD_INFO:
film_download_info_wrapper(films)
logger.info('bye...')
|
{
"content_hash": "07d696628524fefaf1ef96e184f4a2c0",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 112,
"avg_line_length": 46.36538461538461,
"alnum_prop": 0.6507673164661966,
"repo_name": "q191201771/film_crawler",
"id": "3ca4faba0ceef51abd2648077b99034a68734272",
"size": "2650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "film_crawler/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27881"
}
],
"symlink_target": ""
}
|
'''
Contains a LinkedList class for constructing linked lists out of Node objects.
Partly adapted from:
http://en.literateprograms.org/Singly_linked_list_%28Python%29
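A minimal usage sketch (the values are illustrative):
    my_list = LinkedList()
    my_list.insert('a')
    my_list.insert('b')
    assert my_list.size() == 2
    assert my_list.pop() == 'b'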
'''
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
self.number_of_created_nodes = 0
self.number_of_deleted_nodes = 0
def insert(self, val):
''' Insert a new node with given data at the head of the list. '''
# The specifications explicitly said "at the head of the list",
# so this function will have no way to insert at arbitrary indices.
new_node = Node()
new_node.data = val
new_node.next_node = self.head
if self.head is None:
self.tail = new_node
self.head = new_node
self.number_of_created_nodes += 1
def pop(self):
''' Remove the value off the head of the list and return it. '''
value_to_return = self.head.data
# This is simply moving the next_node redirect forward.
# Deletes nothing but the reference.
self.head = self.head.next_node
# Note that "None" is the redirect value for the very first Node.
# The list will self-maintain its start this way.
# A more thorough implementation would include a dummy node
# with special functions to make every LinkedList function bounce off
# the floor instead of sending errors due to the use of None.
# If the node being removed is the final node,
# remember to set self.tail to None.
# This behavior will default to making the tail equal to
# the head if the snake is decapitated. This will more readily
# reveal any possible future errors relating to removing
# the head when you're not supposed to.
if self.head is None:
self.tail = None
self.number_of_deleted_nodes += 1
return value_to_return
def size(self):
''' Return length of the list. '''
return (self.number_of_created_nodes - self.number_of_deleted_nodes)
def search(self, val):
''' Traverse the list and return the node containing
the supplied value if present; otherwise, return None. '''
if self.head is None:
return None
else:
return self.head.search_self_or_next_node_for_a_value(val)
def remove(self, node):
''' Remove a given Node from the list, wherever it might be. '''
# This operation is unusually complex due to the number of
# things that need to be kept track of. The head and the tail are both
# special cases, each with their own re-tagging considerations.
# If the list has no nodes, it's still possible for a node
# reference to continue existing.
# To preempt this, return None if the list is headless.
if self.head is None:
return None
else:
# The head always has None for a previous node.
node_before_the_node_to_remove, node_to_remove = \
self.head.search_self_or_next_node_for_identity_match(None, node)
# If the node being removed is neither the head nor the tail,
# it needs to bridge the gap in order to be
# "removed" from the list.
if (node_to_remove != self.tail) and (node_to_remove != self.head):
node_after_the_node_to_remove = node_to_remove.next_node
# This bridges the list, effectively
# removing the node_to_remove.
node_before_the_node_to_remove.next_node = \
node_after_the_node_to_remove
if node_to_remove == self.tail:
# This sort of operation always feels wrong to type
# since it isn't removing anything but the reference,
# but that's how this LinkedList works.
# It deletes nothing, only swapping references.
if node_before_the_node_to_remove is not None:
node_before_the_node_to_remove.next_node = None
self.tail = node_before_the_node_to_remove
if node_to_remove == self.head:
self.head = node_to_remove.next_node
self.number_of_deleted_nodes += 1
def __str__(self):
''' Return the entirety of the list
represented as a Python tuple literal. '''
if self.head is None:
string_to_return = "()"
else:
node_to_check = self.head
incrementor_for_node_printing = 0
string_to_return = "("
while True:
# If there's only one thing in the list, it doesn't need
# the leading comma.
# Or if the thing we're about to add is the first thing.
if incrementor_for_node_printing != 0:
string_to_return += ", "
if isinstance(node_to_check.data, str):
# If it's a string, wrap it in quotes so it prints as a string literal.
string_to_return += "'" + str(node_to_check.data) + "'"
else:
string_to_return += str(node_to_check.data)
incrementor_for_node_printing += 1
# Terminate assembling the string at the tail of the list.
if node_to_check != self.tail:
node_to_check = node_to_check.next_node
else:
break
string_to_return += ")"
return string_to_return
class Node:
def __init__(self):
self.data = None
self.next_node = None
def search_self_or_next_node_for_a_value(self, value):
# This method is the recursive part of LinkedList.search()
if self.data == value:
return self
elif self.next_node is None:
return None
else:
return self.next_node.search_self_or_next_node_for_a_value(value)
def search_self_or_next_node_for_identity_match(self, previous_node,
supplied_node):
# This method is the recursive part of LinkedList.remove()
if self == supplied_node:
return previous_node, self
# Leaving this in because it's possible a node reference will remain
# even after it has had its ties deleted.
elif self.next_node is None:
return previous_node, None
else:
# This should return the previous_node identity
# for the last call.
# It drags up the returned elements from the very bottom.
return self.next_node \
.search_self_or_next_node_for_identity_match(
self, supplied_node)
|
{
"content_hash": "d2dbbe066443bf8d972c155db5150d5f",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 81,
"avg_line_length": 30.130434782608695,
"alnum_prop": 0.5721500721500722,
"repo_name": "BFriedland/data-structures",
"id": "1f444181be51badae1b03a8536d48a3117d8a9f7",
"size": "6930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linked_list/linked_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "321102"
}
],
"symlink_target": ""
}
|
"""Contains the base Layer class, from which all layers inherit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import functools
import itertools
import threading
import warnings
import weakref
import numpy as np
import six
from six.moves import zip # pylint: disable=redefined-builtin
from google.protobuf import json_format
from tensorflow.core.framework import node_def_pb2
from tensorflow.python import tf2
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.engine import keras_tensor
from tensorflow.python.keras.engine import node as node_module
from tensorflow.python.keras.mixed_precision import autocast_variable
from tensorflow.python.keras.mixed_precision import loss_scale_optimizer
from tensorflow.python.keras.mixed_precision import policy
from tensorflow.python.keras.saving.saved_model import layer_serialization
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import object_identity
from tensorflow.python.keras.utils import tf_inspect
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils import version_utils
# A module that only depends on `keras.layers` import these from here.
from tensorflow.python.keras.utils.generic_utils import to_snake_case # pylint: disable=unused-import
from tensorflow.python.keras.utils.tf_utils import is_tensor_or_tensor_list # pylint: disable=unused-import
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import get_canonical_name_for_symbol
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
# pylint: disable=g-inconsistent-quotes
metrics_mod = generic_utils.LazyLoader(
"metrics_mod", globals(),
"tensorflow.python.keras.metrics")
# pylint: enable=g-inconsistent-quotes
# Prefix that is added to the TF op layer names.
_TF_OP_LAYER_NAME_PREFIX = 'tf_op_layer_'
# TODO(mdan): Should we have a single generic type for types that can be passed
# to tf.cast?
_AUTOCAST_TYPES = (ops.Tensor, sparse_tensor.SparseTensor,
ragged_tensor.RaggedTensor)
keras_layers_gauge = monitoring.BoolGauge('/tensorflow/api/keras/layers',
'keras layers usage', 'method')
keras_models_gauge = monitoring.BoolGauge(
'/tensorflow/api/keras/models', 'keras model usage', 'method')
keras_api_gauge = monitoring.BoolGauge('/tensorflow/api/keras',
'keras api usage', 'method')
keras_premade_model_gauge = monitoring.BoolGauge(
'/tensorflow/api/keras/premade_models', 'premade keras model usage', 'type')
@keras_export('keras.layers.Layer')
class Layer(module.Module, version_utils.LayerVersionSelector):
"""This is the class from which all layers inherit.
A layer is a callable object that takes as input one or more tensors and
that outputs one or more tensors. It involves *computation*, defined
in the `call()` method, and a *state* (weight variables), defined
either in the constructor `__init__()` or in the `build()` method.
Users will just instantiate a layer and then treat it as a callable.
Args:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: The dtype of the layer's computations and weights. Can also be a
`tf.keras.mixed_precision.Policy`, which allows the computation and weight
dtype to differ. Default of `None` means to use
`tf.keras.mixed_precision.global_policy()`, which is a float32 policy
unless set to a different value.
dynamic: Set this to `True` if your layer should only be run eagerly, and
should not be used to generate a static computation graph.
This would be the case for a Tree-RNN or a recursive network,
for example, or generally for any layer that manipulates tensors
using Python control flow. If `False`, we assume that the layer can
safely be used to generate a static computation graph.
Attributes:
name: The name of the layer (string).
dtype: The dtype of the layer's weights.
variable_dtype: Alias of `dtype`.
compute_dtype: The dtype of the layer's computations. Layers automatically
cast inputs to this dtype which causes the computations and output to also
be in this dtype. When mixed precision is used with a
`tf.keras.mixed_precision.Policy`, this will be different than
`variable_dtype`.
dtype_policy: The layer's dtype policy. See the
`tf.keras.mixed_precision.Policy` documentation for details.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
trainable: Whether the layer should be trained (boolean), i.e. whether
its potentially-trainable weights should be returned as part of
`layer.trainable_weights`.
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
We recommend that descendants of `Layer` implement the following methods:
* `__init__()`: Defines custom layer attributes, and creates layer state
variables that do not depend on input shapes, using `add_weight()`.
* `build(self, input_shape)`: This method can be used to create weights that
depend on the shape(s) of the input(s), using `add_weight()`. `__call__()`
will automatically build the layer (if it has not been built yet) by
calling `build()`.
* `call(self, inputs, *args, **kwargs)`: Called in `__call__` after making
sure `build()` has been called. `call()` performs the logic of applying the
layer to the input tensors (which should be passed in as argument).
Two reserved keyword arguments you can optionally use in `call()` are:
- `training` (boolean, whether the call is in inference mode or training
mode). See more details in [the layer/model subclassing guide](
https://www.tensorflow.org/guide/keras/custom_layers_and_models#privileged_training_argument_in_the_call_method)
- `mask` (boolean tensor encoding masked timesteps in the input, used
in RNN layers). See more details in [the layer/model subclassing guide](
https://www.tensorflow.org/guide/keras/custom_layers_and_models#privileged_mask_argument_in_the_call_method)
A typical signature for this method is `call(self, inputs)`, and users can
optionally add `training` and `mask` if the layer needs them. `*args` and
`**kwargs` are only useful for future extension when more input parameters
are planned to be added.
* `get_config(self)`: Returns a dictionary containing the configuration used
to initialize this layer. If the keys differ from the arguments
in `__init__()`, then override `from_config()` as well.
This method is used when saving
the layer or a model that contains this layer.
Examples:
Here's a basic example: a layer with two variables, `w` and `b`,
that returns `y = w . x + b`.
It shows how to implement `build()` and `call()`.
Variables set as attributes of a layer are tracked as weights
of the layers (in `layer.weights`).
```python
class SimpleDense(Layer):
def __init__(self, units=32):
super(SimpleDense, self).__init__()
self.units = units
def build(self, input_shape): # Create the state of the layer (weights)
w_init = tf.random_normal_initializer()
self.w = tf.Variable(
initial_value=w_init(shape=(input_shape[-1], self.units),
dtype='float32'),
trainable=True)
b_init = tf.zeros_initializer()
self.b = tf.Variable(
initial_value=b_init(shape=(self.units,), dtype='float32'),
trainable=True)
def call(self, inputs): # Defines the computation from inputs to outputs
return tf.matmul(inputs, self.w) + self.b
# Instantiates the layer.
linear_layer = SimpleDense(4)
# This will also call `build(input_shape)` and create the weights.
y = linear_layer(tf.ones((2, 2)))
assert len(linear_layer.weights) == 2
# These weights are trainable, so they're listed in `trainable_weights`:
assert len(linear_layer.trainable_weights) == 2
```
Note that the method `add_weight()` offers a shortcut to create weights:
```python
class SimpleDense(Layer):
def __init__(self, units=32):
super(SimpleDense, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(shape=(input_shape[-1], self.units),
initializer='random_normal',
trainable=True)
self.b = self.add_weight(shape=(self.units,),
initializer='random_normal',
trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
```
Besides trainable weights, updated via backpropagation during training,
layers can also have non-trainable weights. These weights are meant to
be updated manually during `call()`. Here's an example layer that computes
the running sum of its inputs:
```python
class ComputeSum(Layer):
def __init__(self, input_dim):
super(ComputeSum, self).__init__()
# Create a non-trainable weight.
self.total = tf.Variable(initial_value=tf.zeros((input_dim,)),
trainable=False)
def call(self, inputs):
self.total.assign_add(tf.reduce_sum(inputs, axis=0))
return self.total
my_sum = ComputeSum(2)
x = tf.ones((2, 2))
y = my_sum(x)
print(y.numpy()) # [2. 2.]
y = my_sum(x)
print(y.numpy()) # [4. 4.]
assert my_sum.weights == [my_sum.total]
assert my_sum.non_trainable_weights == [my_sum.total]
assert my_sum.trainable_weights == []
```
For more information about creating layers, see the guide
[Making new Layers and Models via subclassing](
https://www.tensorflow.org/guide/keras/custom_layers_and_models)
"""
# See tf.Module for the usage of this property.
# The key for _obj_reference_counts_dict is a Trackable, which could be a
# variable or layer etc. tf.Module._flatten will fail to flatten the key
# since it is trying to convert Trackable to a string. This attribute can be
# ignored even after the fix of nest lib, since the trackable object should
# already been available as individual attributes. _obj_reference_counts_dict
# just contains a copy of them.
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
('_obj_reference_counts_dict',),
module.Module._TF_MODULE_IGNORED_PROPERTIES
))
# When loading from a SavedModel, Layers typically can be revived into a
# generic Layer wrapper. Sometimes, however, layers may implement methods
# that go beyond this wrapper, as in the case of PreprocessingLayers'
# `adapt` method. When this is the case, layer implementers can override
# must_restore_from_config to return True; layers with this property must
# be restored into their actual objects (and will fail if the object is
# not available to the restoration code).
_must_restore_from_config = False
def _get_cell_name(self):
canonical_name = get_canonical_name_for_symbol(
self.__class__, api_name='keras', add_prefix_to_v1_names=True)
if canonical_name is not None:
return 'tf.{}'.format(canonical_name)
return self.__class__.__module__ + '.' + self.__class__.__name__
def _instrument_layer_creation(self):
self._instrumented_keras_api = False
self._instrumented_keras_layer_class = False
self._instrumented_keras_model_class = False
if not getattr(self, '_disable_keras_instrumentation', False):
keras_api_gauge.get_cell('layer').set(True)
self._instrumented_keras_api = True
if getattr(self, '_is_model_for_instrumentation', False):
keras_models_gauge.get_cell(self._get_cell_name()).set(True)
self._instrumented_keras_model_class = True
else:
keras_layers_gauge.get_cell(self._get_cell_name()).set(True)
self._instrumented_keras_layer_class = True
@trackable.no_automatic_dependency_tracking
def __init__(self,
trainable=True,
name=None,
dtype=None,
dynamic=False,
**kwargs):
self._instrument_layer_creation()
# These properties should be set by the user via keyword arguments.
# note that 'dtype', 'input_shape' and 'batch_input_shape'
# are only applicable to input layers: do not pass these keywords
# to non-input layers.
allowed_kwargs = {
'input_dim',
'input_shape',
'batch_input_shape',
'batch_size',
'weights',
'activity_regularizer',
'autocast',
'implementation',
}
# Validate optional keyword arguments.
generic_utils.validate_kwargs(kwargs, allowed_kwargs)
# Mutable properties
# Indicates whether the layer's weights are updated during training
# and whether the layer's updates are run during training.
self._trainable = trainable
# A stateful layer is a layer whose updates are run during inference too,
# for instance stateful RNNs.
self._stateful = False
# Indicates whether `build` needs to be called upon layer call, to create
# the layer's weights.
self.built = False
# Provides information about which inputs are compatible with the layer.
self._input_spec = None
# SavedModel-related attributes.
# Record the build input shape for loading purposes.
# TODO(kathywu): Move this to Layer._set_save_spec once cl/290121460 is
# submitted.
self._build_input_shape = None
self._saved_model_inputs_spec = None
# `Layer.compute_mask` will be called at the end of `Layer.__call__` if
# `Layer.compute_mask` is overridden, or if the `Layer` subclass sets
# `self.supports_masking=True`.
self._supports_masking = not generic_utils.is_default(self.compute_mask)
self._init_set_name(name)
self._activity_regularizer = regularizers.get(
kwargs.pop('activity_regularizer', None))
self._maybe_create_attribute('_trainable_weights', [])
self._maybe_create_attribute('_non_trainable_weights', [])
self._updates = []
# Object to store all thread local layer properties.
self._thread_local = threading.local()
# A list of zero-argument lambdas which return Tensors, used for variable
# regularizers.
self._callable_losses = []
# A list of symbolic Tensors containing activity regularizers and losses
# manually added through `add_loss` in graph-building mode.
self._losses = []
# A list of metric instances corresponding to the symbolic metric tensors
# added using the `add_metric` API.
self._metrics = []
# Ensures the same metric is not added multiple times in `MirroredStrategy`.
self._metrics_lock = threading.Lock()
# Both graph and subclassed networks have a dtype policy. For graph
# networks, the policy's compute and variable dtypes are ignored. Such
# networks only use the policy if it is a PolicyV1, in which case it uses
# the PolicyV1's loss_scale (Policy does not have a loss_scale). For
# subclassed networks, the compute and variable dtypes are used as in any
# ordinary layer.
self._set_dtype_policy(dtype)
# Boolean indicating whether the layer automatically casts its inputs to the
# layer's compute_dtype.
self._autocast = kwargs.get('autocast',
base_layer_utils.v2_dtype_behavior_enabled())
# Tracks `TrackableDataStructure`s, `Module`s, and `Layer`s.
# Ordered by when the object was assigned as an attr.
# Entries are unique.
self._maybe_create_attribute('_self_tracked_trackables', [])
# These lists will be filled via successive calls
# to self._add_inbound_node().
# Used in symbolic mode only, only in conjunction with graph-networks
self._inbound_nodes_value = []
self._outbound_nodes_value = []
self._init_call_fn_args()
# Whether the `call` method can be used to build a TF graph without issues.
# This attribute has no effect if the model is created using the Functional
# API. Instead, `model.dynamic` is determined based on the internal layers.
self._dynamic = dynamic
# Manage input shape information if passed.
if 'input_dim' in kwargs and 'input_shape' not in kwargs:
# Backwards compatibility: alias 'input_dim' to 'input_shape'.
kwargs['input_shape'] = (kwargs['input_dim'],)
if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:
# In this case we will later create an input layer
# to insert before the current layer
if 'batch_input_shape' in kwargs:
batch_input_shape = tuple(kwargs['batch_input_shape'])
elif 'input_shape' in kwargs:
if 'batch_size' in kwargs:
batch_size = kwargs['batch_size']
else:
batch_size = None
batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
self._batch_input_shape = batch_input_shape
# Manage initial weight values if passed.
self._initial_weights = kwargs.get('weights', None)
    # Whether the layer will track any layers that are set as attributes on
    # itself as sub-layers; the weights from the sub-layers will be included in
    # the parent layer's variables() as well.
    # Defaults to True, which means auto-tracking is turned on. Certain
    # subclasses might want to turn it off, like the Sequential model.
self._auto_track_sub_layers = True
# For backwards compat reasons, most built-in layers do not guarantee
# that they will 100% preserve the structure of input args when saving
# / loading configs. E.g. they may un-nest an arg that is
# a list with one element.
self._preserve_input_structure_in_config = False
@trackable.no_automatic_dependency_tracking
@generic_utils.default
def build(self, input_shape):
"""Creates the variables of the layer (optional, for subclass implementers).
This is a method that implementers of subclasses of `Layer` or `Model`
can override if they need a state-creation step in-between
layer instantiation and layer call.
This is typically used to create the weights of `Layer` subclasses.
Args:
input_shape: Instance of `TensorShape`, or list of instances of
`TensorShape` if the layer expects a list of inputs
(one instance per input).
"""
# Only record the build input shapes of overridden build methods.
if not hasattr(self.build, '_is_default'):
self._build_input_shape = input_shape
self.built = True
@doc_controls.for_subclass_implementers
def call(self, inputs, *args, **kwargs): # pylint: disable=unused-argument
"""This is where the layer's logic lives.
Note that the `call()` method in `tf.keras` is a little bit different
from the `keras` API. In the `keras` API, you could pass masking support for
layers as additional arguments, whereas `tf.keras` has a `compute_mask()`
method to support masking.
Args:
inputs: Input tensor, or list/tuple of input tensors.
*args: Additional positional arguments. Currently unused.
**kwargs: Additional keyword arguments. Currently unused.
Returns:
A tensor or list/tuple of tensors.
"""
return inputs
@doc_controls.for_subclass_implementers
def _add_trackable(self, trackable_object, trainable):
"""Adds a Trackable object to this layer's state.
Args:
trackable_object: The tf.tracking.Trackable object to add.
trainable: Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases) or
"non_trainable_variables" (e.g. BatchNorm mean and variance).
Returns:
The TrackableWeightHandler used to track this object.
"""
handler = base_layer_utils.TrackableWeightHandler(trackable_object)
if trainable:
self._trainable_weights.append(handler)
else:
self._non_trainable_weights.append(handler)
return handler
@doc_controls.for_subclass_implementers
def add_weight(self,
name=None,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
constraint=None,
use_resource=None,
synchronization=tf_variables.VariableSynchronization.AUTO,
aggregation=tf_variables.VariableAggregation.NONE,
**kwargs):
"""Adds a new variable to the layer.
Args:
name: Variable name.
shape: Variable shape. Defaults to scalar if unspecified.
dtype: The type of the variable. Defaults to `self.dtype`.
initializer: Initializer instance (callable).
regularizer: Regularizer instance (callable).
trainable: Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean and variance).
Note that `trainable` cannot be `True` if `synchronization`
is set to `ON_READ`.
constraint: Constraint instance (callable).
use_resource: Whether to use `ResourceVariable`.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
**kwargs: Additional keyword arguments. Accepted values are `getter`,
`collections`, `experimental_autocast` and `caching_device`.
Returns:
The variable created.
Raises:
ValueError: When giving unsupported dtype and no initializer or when
trainable has been set to True with synchronization set as `ON_READ`.
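    Example (a minimal sketch of typical `add_weight` usage inside `build()`;
    the `Linear` layer below is hypothetical and not part of Keras):
    ```python
    class Linear(tf.keras.layers.Layer):

      def build(self, input_shape):
        # Trainable kernel with a weight regularizer attached at creation time.
        self.kernel = self.add_weight(
            name='kernel',
            shape=(input_shape[-1], 8),
            initializer='glorot_uniform',
            regularizer=tf.keras.regularizers.l2(1e-4),
            trainable=True)
        # Trainable bias, zero-initialized.
        self.bias = self.add_weight(
            name='bias', shape=(8,), initializer='zeros', trainable=True)

      def call(self, inputs):
        return tf.matmul(inputs, self.kernel) + self.bias
    ```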
"""
if shape is None:
shape = ()
kwargs.pop('partitioner', None) # Ignored.
# Validate optional keyword arguments.
for kwarg in kwargs:
if kwarg not in ['collections', 'experimental_autocast',
'caching_device', 'getter']:
raise TypeError('Unknown keyword argument:', kwarg)
collections_arg = kwargs.pop('collections', None)
# 'experimental_autocast' can be set to False by the caller to indicate an
# AutoCastVariable should never be created.
autocast = kwargs.pop('experimental_autocast', True)
# See the docstring for tf.Variable about the details for caching_device.
caching_device = kwargs.pop('caching_device', None)
if dtype is None:
dtype = self.dtype or backend.floatx()
dtype = dtypes.as_dtype(dtype)
if self._dtype_policy.variable_dtype is None:
# The policy is "_infer", so we infer the policy from the variable dtype.
self._set_dtype_policy(policy.Policy(dtype.base_dtype.name))
initializer = initializers.get(initializer)
regularizer = regularizers.get(regularizer)
constraint = constraints.get(constraint)
if synchronization == tf_variables.VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
'Synchronization value can be set to '
'VariableSynchronization.ON_READ only for non-trainable variables. '
'You have specified trainable=True and '
'synchronization=VariableSynchronization.ON_READ.')
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
# Initialize variable when no initializer provided
if initializer is None:
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = initializers.get('glorot_uniform')
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
initializer = initializers.get('zeros')
# NOTE: Do we need to support handling DT_STRING and DT_COMPLEX here?
else:
raise ValueError('An initializer for variable %s of type %s is required'
' for layer %s' % (name, dtype.base_dtype, self.name))
getter = kwargs.pop('getter', base_layer_utils.make_variable)
if (autocast and
self._dtype_policy.compute_dtype != self._dtype_policy.variable_dtype
and dtype.is_floating):
old_getter = getter
# Wrap variable constructor to return an AutoCastVariable.
def getter(*args, **kwargs): # pylint: disable=function-redefined
variable = old_getter(*args, **kwargs)
return autocast_variable.create_autocast_variable(variable)
# Also the caching_device does not work with the mixed precision API,
# disable it if it is specified.
# TODO(b/142020079): Reenable it once the bug is fixed.
if caching_device is not None:
tf_logging.warn('`caching_device` does not work with mixed precision '
'API. Ignoring user specified `caching_device`.')
caching_device = None
variable = self._add_variable_with_custom_getter(
name=name,
shape=shape,
# TODO(allenl): a `make_variable` equivalent should be added as a
# `Trackable` method.
getter=getter,
# Manage errors in Layer rather than Trackable.
overwrite=True,
initializer=initializer,
dtype=dtype,
constraint=constraint,
trainable=trainable,
use_resource=use_resource,
collections=collections_arg,
synchronization=synchronization,
aggregation=aggregation,
caching_device=caching_device)
if regularizer is not None:
# TODO(fchollet): in the future, this should be handled at the
# level of variable creation, and weight regularization losses
# should be variable attributes.
name_in_scope = variable.name[:variable.name.find(':')]
self._handle_weight_regularization(name_in_scope,
variable,
regularizer)
if base_layer_utils.is_split_variable(variable):
for v in variable:
backend.track_variable(v)
if trainable:
self._trainable_weights.append(v)
else:
self._non_trainable_weights.append(v)
else:
backend.track_variable(variable)
if trainable:
self._trainable_weights.append(variable)
else:
self._non_trainable_weights.append(variable)
return variable
@generic_utils.default
def get_config(self):
"""Returns the config of the layer.
A layer config is a Python dictionary (serializable)
containing the configuration of a layer.
The same layer can be reinstantiated later
(without its trained weights) from this configuration.
The config of a layer does not include connectivity
information, nor the layer class name. These are handled
by `Network` (one layer of abstraction above).
Note that `get_config()` is not guaranteed to return a fresh copy of the dict
every time it is called. Callers should make a copy of the returned dict
if they want to modify it.
Returns:
Python dictionary.
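    Example (a minimal sketch; `SimpleDense` mirrors the hypothetical layer
    from the class docstring and is not part of Keras):
    ```python
    class SimpleDense(tf.keras.layers.Layer):

      def __init__(self, units=32, **kwargs):
        super(SimpleDense, self).__init__(**kwargs)
        self.units = units

      def get_config(self):
        # Start from the base config (name, trainable, dtype, ...) and add
        # the constructor arguments specific to this layer.
        config = super(SimpleDense, self).get_config()
        config.update({'units': self.units})
        return config
    ```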
"""
all_args = tf_inspect.getfullargspec(self.__init__).args
config = {
'name': self.name,
'trainable': self.trainable,
}
if hasattr(self, '_batch_input_shape'):
config['batch_input_shape'] = self._batch_input_shape
config['dtype'] = policy.serialize(self._dtype_policy)
if hasattr(self, 'dynamic'):
# Only include `dynamic` in the `config` if it is `True`
if self.dynamic:
config['dynamic'] = self.dynamic
elif 'dynamic' in all_args:
all_args.remove('dynamic')
expected_args = config.keys()
# Finds all arguments in the `__init__` that are not in the config:
extra_args = [arg for arg in all_args if arg not in expected_args]
# Check that either the only argument in the `__init__` is `self`,
# or that `get_config` has been overridden:
if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):
raise NotImplementedError('Layer %s has arguments in `__init__` and '
'therefore must override `get_config`.' %
self.__class__.__name__)
return config
@classmethod
def from_config(cls, config):
"""Creates a layer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same layer from the config
dictionary. It does not handle layer connectivity
(handled by Network), nor weights (handled by `set_weights`).
Args:
config: A Python dictionary, typically the
output of get_config.
Returns:
A layer instance.
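    Example (a minimal sketch of the `get_config`/`from_config` round trip,
    using a built-in layer):
    ```python
    layer = tf.keras.layers.Dense(4, activation='relu')
    config = layer.get_config()
    # Re-create a fresh, untrained copy of the layer from its config.
    clone = tf.keras.layers.Dense.from_config(config)
    assert clone.units == layer.units
    ```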
"""
return cls(**config)
def compute_output_shape(self, input_shape):
"""Computes the output shape of the layer.
If the layer has not been built, this method will call `build` on the
layer. This assumes that the layer will later be used with inputs that
match the input shape provided here.
Args:
input_shape: Shape tuple (tuple of integers)
or list of shape tuples (one per output tensor of the layer).
Shape tuples can include None for free dimensions,
instead of an integer.
Returns:
An output shape tuple.
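    Example (a minimal sketch; the `OuterProduct` layer below is hypothetical
    and only illustrates declaring the output shape explicitly):
    ```python
    class OuterProduct(tf.keras.layers.Layer):

      def call(self, inputs):
        # (batch, n) -> (batch, n, n)
        return tf.einsum('bi,bj->bij', inputs, inputs)

      def compute_output_shape(self, input_shape):
        n = input_shape[-1]
        return tf.TensorShape([input_shape[0], n, n])
    ```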
"""
if context.executing_eagerly():
# In this case we build the model first in order to do shape inference.
# This is acceptable because the framework only calls
# `compute_output_shape` on shape values that the layer would later be
# built for. It would however cause issues in case a user attempts to
# use `compute_output_shape` manually with shapes that are incompatible
# with the shape the Layer will be called on (these users will have to
# implement `compute_output_shape` themselves).
self._maybe_build(input_shape)
with func_graph.FuncGraph(str(self.name) + '_scratch_graph').as_default():
input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
def _make_placeholder_like(shape):
ph = backend.placeholder(shape=shape, dtype=self.dtype)
ph._keras_mask = None
return ph
inputs = nest.map_structure(_make_placeholder_like, input_shape)
try:
outputs = self(inputs, training=False)
except TypeError as e:
six.raise_from(
NotImplementedError(
'We could not automatically infer the static shape of the '
'layer\'s output. Please implement the '
'`compute_output_shape` method on your layer (%s).' %
self.__class__.__name__), e)
return nest.map_structure(lambda t: t.shape, outputs)
raise NotImplementedError(
'Please run in eager mode or implement the `compute_output_shape` '
'method on your layer (%s).' % self.__class__.__name__)
@doc_controls.for_subclass_implementers
def compute_output_signature(self, input_signature):
"""Compute the output tensor signature of the layer based on the inputs.
Unlike a TensorShape object, a TensorSpec object contains both shape
and dtype information for a tensor. This method allows layers to provide
output dtype information if it is different from the input dtype.
For any layer that doesn't implement this function,
the framework will fall back to use `compute_output_shape`, and will
assume that the output dtype matches the input dtype.
Args:
input_signature: Single TensorSpec or nested structure of TensorSpec
objects, describing a candidate input for the layer.
Returns:
Single TensorSpec or nested structure of TensorSpec objects, describing
how the layer would transform the provided input.
Raises:
TypeError: If input_signature contains a non-TensorSpec object.
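    Example (a minimal sketch; the `CastToInt` layer below is hypothetical and
    only illustrates reporting an output dtype that differs from the input):
    ```python
    class CastToInt(tf.keras.layers.Layer):

      def call(self, inputs):
        return tf.cast(inputs, tf.int32)

      def compute_output_signature(self, input_signature):
        # Same shapes as the inputs, but the dtype changes to int32.
        return tf.nest.map_structure(
            lambda s: tf.TensorSpec(shape=s.shape, dtype=tf.int32),
            input_signature)
    ```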
"""
def check_type_return_shape(s):
if not isinstance(s, tensor_spec.TensorSpec):
raise TypeError('Only TensorSpec signature types are supported, '
'but saw signature entry: {}.'.format(s))
return s.shape
input_shape = nest.map_structure(check_type_return_shape, input_signature)
output_shape = self.compute_output_shape(input_shape)
dtype = self._compute_dtype
if dtype is None:
input_dtypes = [s.dtype for s in nest.flatten(input_signature)]
# Default behavior when self.dtype is None, is to use the first input's
# dtype.
dtype = input_dtypes[0]
return nest.map_structure(
lambda s: tensor_spec.TensorSpec(dtype=dtype, shape=s),
output_shape)
def _keras_tensor_symbolic_call(self, inputs, input_masks, args, kwargs):
if self.dynamic:
# We will use static shape inference to return symbolic tensors
# matching the specifications of the layer outputs.
# Since `self.dynamic` is True, we will never attempt to
# run the underlying TF graph (which is disconnected).
# TODO(fchollet): consider py_func as an alternative, which
# would enable us to run the underlying graph if needed.
input_signature = nest.map_structure(
lambda x: tensor_spec.TensorSpec(shape=x.shape, dtype=x.dtype),
inputs)
output_signature = self.compute_output_signature(input_signature)
return nest.map_structure(keras_tensor.KerasTensor, output_signature)
else:
return self._infer_output_signature(inputs, args, kwargs, input_masks)
def _infer_output_signature(self, inputs, args, kwargs, input_masks):
"""TODO(kaftan): Docstring."""
call_fn = self.call
# Wrapping `call` function in autograph to allow for dynamic control
# flow and control dependencies in call. We are limiting this to
# subclassed layers as autograph is strictly needed only for
# subclassed layers and models.
# tf_convert will respect the value of autograph setting in the
# enclosing tf.function, if any.
if (base_layer_utils.is_subclassed(self) and
not base_layer_utils.from_saved_model(self)):
call_fn = autograph.tf_convert(self.call, ag_ctx.control_status_ctx())
# We enter a scratch graph and build placeholder inputs inside of it that
# match the input args.
# We then call the layer inside of the scratch graph to identify the
# output signatures, then we build KerasTensors corresponding to those
# outputs.
scratch_graph = func_graph.FuncGraph(str(self.name) + '_scratch_graph')
with scratch_graph.as_default():
inputs = nest.map_structure(
keras_tensor.keras_tensor_to_placeholder, inputs)
args = nest.map_structure(
keras_tensor.keras_tensor_to_placeholder, args)
kwargs = nest.map_structure(
keras_tensor.keras_tensor_to_placeholder, kwargs)
input_masks = nest.map_structure(
keras_tensor.keras_tensor_to_placeholder, input_masks)
with backend.name_scope(self._name_scope()):
with autocast_variable.enable_auto_cast_variables(
self._compute_dtype_object):
# Build layer if applicable (if the `build` method has been
# overridden).
# TODO(kaftan): do we maybe_build here, or have we already done it?
self._maybe_build(inputs)
inputs = self._maybe_cast_inputs(inputs)
outputs = call_fn(inputs, *args, **kwargs)
self._handle_activity_regularization(inputs, outputs)
self._set_mask_metadata(inputs, outputs, input_masks,
build_graph=False)
outputs = nest.map_structure(
keras_tensor.keras_tensor_from_tensor, outputs)
if hasattr(self, '_set_inputs') and not self.inputs:
# TODO(kaftan): figure out if we need to do this at all
# Subclassed network: explicitly set metadata normally set by
# a call to self._set_inputs().
self._set_inputs(inputs, outputs)
del scratch_graph
return outputs
@generic_utils.default
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
"""Computes an output mask tensor.
Args:
inputs: Tensor or list of tensors.
mask: Tensor or list of tensors.
Returns:
None or a tensor (or list of tensors,
one per output tensor of the layer).
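    Example (a minimal sketch; the `AddOne` layer below is hypothetical): a
    layer that opts into mask propagation only needs to set
    `supports_masking`; the default `compute_mask` then forwards the incoming
    mask unchanged.
    ```python
    class AddOne(tf.keras.layers.Layer):

      def __init__(self, **kwargs):
        super(AddOne, self).__init__(**kwargs)
        self.supports_masking = True  # default compute_mask forwards masks

      def call(self, inputs):
        return inputs + 1.0
    ```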
"""
if not self._supports_masking:
if any(m is not None for m in nest.flatten(mask)):
raise TypeError('Layer ' + self.name + ' does not support masking, '
'but was passed an input_mask: ' + str(mask))
# masking not explicitly supported: return None as mask.
return None
# if masking is explicitly supported, by default
# carry over the input mask
return mask
def __call__(self, *args, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps.
Args:
*args: Positional arguments to be passed to `self.call`.
**kwargs: Keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
Note:
- The following optional keyword arguments are reserved for specific uses:
* `training`: Boolean scalar tensor of Python boolean indicating
whether the `call` is meant for training or inference.
* `mask`: Boolean input mask.
- If the layer's `call` method takes a `mask` argument (as some Keras
layers do), its default value will be set to the mask generated
for `inputs` by the previous layer (if `inputs` did come from
a layer that generated a corresponding mask, i.e. if it came from
a Keras layer with masking support).
- If the layer is not built, the method will call `build`.
Raises:
ValueError: if the layer's `call` method returns None (an invalid value).
RuntimeError: if `super().__init__()` was not called in the constructor.
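    Example (a minimal sketch of passing the reserved `training` argument when
    invoking a layer):
    ```python
    layer = tf.keras.layers.Dropout(0.5)
    x = tf.ones((2, 4))
    y_train = layer(x, training=True)   # dropout is active
    y_infer = layer(x, training=False)  # identity pass-through
    ```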
"""
if not hasattr(self, '_thread_local'):
raise RuntimeError(
'You must call `super().__init__()` in the layer constructor.')
# `inputs` (the first arg in the method spec) is special cased in
# layer call due to historical reasons.
# This special casing currently takes the form of:
# - 'inputs' must be explicitly passed. A layer cannot have zero arguments,
# and inputs cannot have been provided via the default value of a kwarg.
# - numpy/scalar values in `inputs` get converted to tensors
# - implicit masks / mask metadata are only collected from 'inputs`
# - Layers are built using shape info from 'inputs' only
# - input_spec compatibility is only checked against `inputs`
# - mixed precision casting (autocast) is only applied to `inputs`,
# not to any other argument.
# - setting the SavedModel saving spec.
inputs, args, kwargs = self._split_out_first_arg(args, kwargs)
input_list = nest.flatten(inputs)
# Functional Model construction mode is invoked when `Layer`s are called on
# symbolic `KerasTensor`s, i.e.:
# >> inputs = tf.keras.Input(10)
# >> outputs = MyLayer()(inputs) # Functional construction mode.
# >> model = tf.keras.Model(inputs, outputs)
if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):
return self._functional_construction_call(inputs, args, kwargs,
input_list)
# Maintains info about the `Layer.call` stack.
call_context = base_layer_utils.call_context()
# Accept NumPy and scalar inputs by converting to Tensors.
if any(isinstance(x, (
np_arrays.ndarray, np.ndarray, float, int)) for x in input_list):
inputs = nest.map_structure(_convert_numpy_or_python_types, inputs)
input_list = nest.flatten(inputs)
# Handle `mask` propagation from previous layer to current layer. Masks can
# be propagated explicitly via the `mask` argument, or implicitly via
# setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed
# explicitly take priority.
input_masks, mask_is_implicit = self._get_input_masks(
inputs, input_list, args, kwargs)
if self._expects_mask_arg and mask_is_implicit:
kwargs['mask'] = input_masks
# Training mode for `Layer.call` is set via (in order of priority):
# (1) The `training` argument passed to this `Layer.call`, if it is not None
# (2) The training mode of an outer `Layer.call`.
# (3) The default mode set by `tf.keras.backend.set_learning_phase` (if set)
# (4) Any non-None default value for `training` specified in the call
# signature
# (5) False (treating the layer as if it's in inference)
args, kwargs, training_mode = self._set_training_mode(
args, kwargs, call_context)
# Losses are cleared for all sublayers on the outermost `Layer.call`.
# Losses are not cleared on inner `Layer.call`s, because sublayers can be
# called multiple times.
if not call_context.in_call:
self._clear_losses()
eager = context.executing_eagerly()
with call_context.enter(
layer=self,
inputs=inputs,
build_graph=not eager,
training=training_mode):
input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
if eager:
call_fn = self.call
name_scope = self._name
else:
name_scope = self._name_scope() # Avoid autoincrementing.
call_fn = self._autographed_call()
with ops.name_scope_v2(name_scope):
if not self.built:
self._maybe_build(inputs)
if self._autocast:
inputs = self._maybe_cast_inputs(inputs, input_list)
with autocast_variable.enable_auto_cast_variables(
self._compute_dtype_object):
outputs = call_fn(inputs, *args, **kwargs)
if self._activity_regularizer:
self._handle_activity_regularization(inputs, outputs)
if self._supports_masking:
self._set_mask_metadata(inputs, outputs, input_masks, not eager)
if self._saved_model_inputs_spec is None:
self._set_save_spec(inputs)
return outputs
def _functional_construction_call(self, inputs, args, kwargs, input_list):
call_context = base_layer_utils.call_context()
# Accept NumPy and scalar inputs by converting to Tensors.
if any(isinstance(x, (
np_arrays.ndarray, np.ndarray, float, int)) for x in input_list):
def _convert_non_tensor(x):
# Don't call `ops.convert_to_tensor` on all `inputs` because
# `SparseTensors` can't be converted to `Tensor`.
if isinstance(x, (np_arrays.ndarray, np.ndarray, float, int)):
return ops.convert_to_tensor_v2_with_dispatch(x)
return x
inputs = nest.map_structure(_convert_non_tensor, inputs)
input_list = nest.flatten(inputs)
# Handle `mask` propagation from previous layer to current layer. Masks can
# be propagated explicitly via the `mask` argument, or implicitly via
# setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed
# explicitly take priority.
mask_arg_passed_by_framework = False
input_masks, mask_is_implicit = self._get_input_masks(
inputs, input_list, args, kwargs)
if self._expects_mask_arg and mask_is_implicit:
kwargs['mask'] = input_masks
mask_arg_passed_by_framework = True
# If `training` argument is None or not explicitly passed,
# propagate `training` value from this layer's calling layer.
training_value = None
training_arg_passed_by_framework = False
# Priority 1: `training` was explicitly passed a non-None value.
if self._call_arg_was_passed('training', args, kwargs):
training_value = self._get_call_arg_value('training', args, kwargs)
if not self._expects_training_arg:
kwargs.pop('training')
if training_value is None:
# Priority 2: `training` was passed to a parent layer.
if call_context.training is not None:
training_value = call_context.training
# Priority 3: `learning_phase()` has been set.
elif backend.global_learning_phase_is_set():
training_value = backend.learning_phase()
# Force the training_value to be bool type, which matches the contract
# for layer/model call args.
if tensor_util.is_tf_type(training_value):
training_value = math_ops.cast(training_value, dtypes.bool)
else:
training_value = bool(training_value)
# Priority 4: trace layer with the default training argument specified
# in the `call` signature (or in inference mode if the `call` signature
# specifies no non-None default).
else:
training_value = self._default_training_arg
# In cases (2), (3), (4) the training argument is passed automatically
# by the framework, and will not be hard-coded into the model.
if self._expects_training_arg:
args, kwargs = self._set_call_arg_value('training', training_value,
args, kwargs)
training_arg_passed_by_framework = True
with call_context.enter(
layer=self, inputs=inputs, build_graph=True, training=training_value):
# Check input assumptions set after layer building, e.g. input shape.
outputs = self._keras_tensor_symbolic_call(
inputs, input_masks, args, kwargs)
if outputs is None:
raise ValueError('A layer\'s `call` method should return a '
'Tensor or a list of Tensors, not None '
'(layer: ' + self.name + ').')
if training_arg_passed_by_framework:
args, kwargs = self._set_call_arg_value(
'training', None, args, kwargs, pop_kwarg_if_none=True)
if mask_arg_passed_by_framework:
kwargs.pop('mask')
# Node connectivity does not special-case the first argument.
outputs = self._set_connectivity_metadata((inputs,) + args, kwargs,
outputs)
return outputs
def _set_training_mode(self, args, kwargs, call_context):
training_mode = None
if self._expects_training_arg:
# (1) `training` was passed to this `Layer.call`.
if self._call_arg_was_passed('training', args, kwargs):
training_mode = self._get_call_arg_value('training', args, kwargs)
# If no `training` arg was passed, or `None` was explicitly passed,
# the framework will decide what the training mode should be.
if training_mode is None:
call_ctx_training = call_context.training
# (2) `training` mode is inferred from an outer `Layer.call`.
if call_ctx_training is not None:
training_mode = call_ctx_training
# (3) User set `tf.keras.backend.set_learning_phase`.
elif backend.global_learning_phase_is_set():
training_mode = backend.learning_phase()
# Ensure value is a `bool` or `tf.bool`.
if isinstance(training_mode, bool):
pass
elif tensor_util.is_tf_type(training_mode):
training_mode = math_ops.cast(training_mode, dtypes.bool)
else:
training_mode = bool(training_mode)
# (4) We default to using `call`'s default value for `training`,
# or treating the layer as if it is in inference if no non-None default
# is specified in the `call` signature.
else:
training_mode = self._default_training_arg
# For case (2), (3), (4) `training` arg is passed by framework.
args, kwargs = self._set_call_arg_value('training', training_mode, args,
kwargs)
else:
if 'training' in kwargs:
# `training` was passed to this `Layer` but is not needed for
# `Layer.call`. It will set the default mode for inner `Layer.call`s.
training_mode = kwargs.pop('training')
else:
# Grab the current `training` mode from any outer `Layer.call`.
training_mode = call_context.training
return args, kwargs, training_mode
def _autographed_call(self):
# Wrapping `call` function in autograph to allow for dynamic control
# flow and control dependencies in call. We are limiting this to
# subclassed layers as autograph is strictly needed only for
# subclassed layers and models.
# tf_convert will respect the value of autograph setting in the
# enclosing tf.function, if any.
if (base_layer_utils.is_subclassed(self) and
not base_layer_utils.from_saved_model(self)):
return autograph.tf_convert(self.call, ag_ctx.control_status_ctx())
else:
return self.call
@property
def dtype(self):
"""The dtype of the layer weights.
This is equivalent to `Layer.dtype_policy.variable_dtype`. Unless
mixed precision is used, this is the same as `Layer.compute_dtype`, the
dtype of the layer's computations.
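    Example (a minimal sketch, assuming the mixed precision API is available
    as `tf.keras.mixed_precision`):
    ```python
    policy = tf.keras.mixed_precision.Policy('mixed_float16')
    layer = tf.keras.layers.Dense(4, dtype=policy)
    assert layer.dtype == 'float32'          # weights are kept in float32
    assert layer.compute_dtype == 'float16'  # computations run in float16
    ```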
"""
return self._dtype_policy.variable_dtype
@property
def name(self):
"""Name of the layer (string), set in the constructor."""
return self._name
@property
def supports_masking(self):
"""Whether this layer supports computing a mask using `compute_mask`."""
return self._supports_masking
@supports_masking.setter
def supports_masking(self, value):
self._supports_masking = value
@property
def dynamic(self):
"""Whether the layer is dynamic (eager-only); set in the constructor."""
return any(layer._dynamic for layer in self._flatten_layers())
@property
@doc_controls.do_not_doc_inheritable
def stateful(self):
return any(layer._stateful for layer in self._flatten_layers())
@stateful.setter
def stateful(self, value):
self._stateful = value
@property
def trainable(self):
return self._trainable
@trainable.setter
def trainable(self, value):
for layer in self._flatten_layers():
layer._trainable = value
@property
def activity_regularizer(self):
"""Optional regularizer function for the output of this layer."""
return self._activity_regularizer
@activity_regularizer.setter
def activity_regularizer(self, regularizer):
"""Optional regularizer function for the output of this layer."""
self._activity_regularizer = regularizer
@property
def input_spec(self):
"""`InputSpec` instance(s) describing the input format for this layer.
When you create a layer subclass, you can set `self.input_spec` to enable
the layer to run input compatibility checks when it is called.
Consider a `Conv2D` layer: it can only be called on a single input tensor
of rank 4. As such, you can set, in `__init__()`:
```python
self.input_spec = tf.keras.layers.InputSpec(ndim=4)
```
Now, if you try to call the layer on an input that isn't rank 4
(for instance, an input of shape `(2,)`), it will raise a nicely-formatted
error:
```
ValueError: Input 0 of layer conv2d is incompatible with the layer:
expected ndim=4, found ndim=1. Full shape received: [2]
```
Input checks that can be specified via `input_spec` include:
- Structure (e.g. a single input, a list of 2 inputs, etc)
- Shape
- Rank (ndim)
- Dtype
For more information, see `tf.keras.layers.InputSpec`.
Returns:
A `tf.keras.layers.InputSpec` instance, or nested structure thereof.
"""
return self._input_spec
@input_spec.setter
# Must be decorated to prevent tracking, since the input_spec can be nested
# InputSpec objects.
@trackable.no_automatic_dependency_tracking
def input_spec(self, value):
for v in nest.flatten(value):
if v is not None and not isinstance(v, InputSpec):
raise TypeError('Layer input_spec must be an instance of InputSpec. '
'Got: {}'.format(v))
self._input_spec = value
@property
def trainable_weights(self):
"""List of all trainable weights tracked by this layer.
Trainable weights are updated via gradient descent during training.
Returns:
A list of trainable variables.
"""
if self.trainable:
children_weights = self._gather_children_attribute('trainable_variables')
return self._dedup_weights(self._trainable_weights + children_weights)
else:
return []
@property
def non_trainable_weights(self):
"""List of all non-trainable weights tracked by this layer.
Non-trainable weights are *not* updated during training. They are expected
to be updated manually in `call()`.
Returns:
A list of non-trainable variables.
"""
if self.trainable:
children_weights = self._gather_children_attribute(
'non_trainable_variables')
non_trainable_weights = self._non_trainable_weights + children_weights
else:
children_weights = self._gather_children_attribute('variables')
non_trainable_weights = (
self._trainable_weights + self._non_trainable_weights +
children_weights)
return self._dedup_weights(non_trainable_weights)
@property
def weights(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self.trainable_weights + self.non_trainable_weights
@property
@doc_controls.do_not_generate_docs
def updates(self):
warnings.warn('`layer.updates` will be removed in a future version. '
'This property should not be used in TensorFlow 2.0, '
'as `updates` are applied automatically.')
return []
@property
def losses(self):
"""List of losses added using the `add_loss()` API.
Variable regularization tensors are created when this property is accessed,
so it is eager safe: accessing `losses` under a `tf.GradientTape` will
propagate gradients back to the corresponding variables.
Examples:
>>> class MyLayer(tf.keras.layers.Layer):
... def call(self, inputs):
... self.add_loss(tf.abs(tf.reduce_mean(inputs)))
... return inputs
>>> l = MyLayer()
>>> l(np.ones((10, 1)))
>>> l.losses
[1.0]
>>> inputs = tf.keras.Input(shape=(10,))
>>> x = tf.keras.layers.Dense(10)(inputs)
>>> outputs = tf.keras.layers.Dense(1)(x)
>>> model = tf.keras.Model(inputs, outputs)
>>> # Activity regularization.
>>> len(model.losses)
0
>>> model.add_loss(tf.abs(tf.reduce_mean(x)))
>>> len(model.losses)
1
>>> inputs = tf.keras.Input(shape=(10,))
>>> d = tf.keras.layers.Dense(10, kernel_initializer='ones')
>>> x = d(inputs)
>>> outputs = tf.keras.layers.Dense(1)(x)
>>> model = tf.keras.Model(inputs, outputs)
>>> # Weight regularization.
>>> model.add_loss(lambda: tf.reduce_mean(d.kernel))
>>> model.losses
[<tf.Tensor: shape=(), dtype=float32, numpy=1.0>]
Returns:
A list of tensors.
"""
collected_losses = []
for layer in self._flatten_layers():
# If any eager losses are present, we assume the model to be part of an
# eager training loop (either a custom one or the one used when
# `run_eagerly=True`) and so we always return just the eager losses.
if layer._eager_losses:
# Filter placeholder losses that may have been added by revived layers.
# (see base_layer_utils for details).
if (layer._eager_losses[0] is
not base_layer_utils.REVIVED_LOSS_PLACEHOLDER):
collected_losses.extend(layer._eager_losses)
else:
collected_losses.extend(layer._losses)
for regularizer in layer._callable_losses:
loss_tensor = regularizer()
if loss_tensor is not None:
collected_losses.append(loss_tensor)
return collected_losses
def add_loss(self, losses, **kwargs):
"""Add loss tensor(s), potentially dependent on layer inputs.
Some losses (for instance, activity regularization losses) may be dependent
on the inputs passed when calling a layer. Hence, when reusing the same
layer on different inputs `a` and `b`, some entries in `layer.losses` may
be dependent on `a` and some on `b`. This method automatically keeps track
of dependencies.
This method can be used inside a subclassed layer or model's `call`
function, in which case `losses` should be a Tensor or list of Tensors.
Example:
```python
class MyLayer(tf.keras.layers.Layer):
def call(self, inputs):
self.add_loss(tf.abs(tf.reduce_mean(inputs)))
return inputs
```
This method can also be called directly on a Functional Model during
construction. In this case, any loss Tensors passed to this Model must
be symbolic and be able to be traced back to the model's `Input`s. These
losses become part of the model's topology and are tracked in `get_config`.
Example:
```python
inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
# Activity regularization.
model.add_loss(tf.abs(tf.reduce_mean(x)))
```
If this is not the case for your loss (if, for example, your loss references
a `Variable` of one of the model's layers), you can wrap your loss in a
zero-argument lambda. These losses are not tracked as part of the model's
topology since they can't be serialized.
Example:
```python
inputs = tf.keras.Input(shape=(10,))
d = tf.keras.layers.Dense(10)
x = d(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
# Weight regularization.
model.add_loss(lambda: tf.reduce_mean(d.kernel))
```
Args:
losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses
may also be zero-argument callables which create a loss tensor.
**kwargs: Additional keyword arguments for backward compatibility.
Accepted values:
inputs - Deprecated, will be automatically inferred.
"""
kwargs.pop('inputs', None)
if kwargs:
raise TypeError('Unknown keyword arguments: %s' % (kwargs.keys(),))
def _tag_callable(loss):
"""Tags callable loss tensor as `_unconditional_loss`."""
if callable(loss):
# We run the loss without autocasting, as regularizers are often
# numerically unstable in float16.
with autocast_variable.enable_auto_cast_variables(None):
loss = loss()
if loss is None:
return None # Will be filtered out when computing the .losses property
if not tensor_util.is_tf_type(loss):
loss = ops.convert_to_tensor_v2_with_dispatch(
loss, dtype=backend.floatx())
loss._unconditional_loss = True # pylint: disable=protected-access
return loss
losses = nest.flatten(losses)
callable_losses = []
eager_losses = []
symbolic_losses = []
for loss in losses:
if callable(loss):
callable_losses.append(functools.partial(_tag_callable, loss))
continue
if loss is None:
continue
if not tensor_util.is_tf_type(loss) and not isinstance(
loss, keras_tensor.KerasTensor):
loss = ops.convert_to_tensor_v2_with_dispatch(
loss, dtype=backend.floatx())
# TF Functions should take the eager path.
if ((tf_utils.is_symbolic_tensor(loss) or
isinstance(loss, keras_tensor.KerasTensor)) and
not base_layer_utils.is_in_tf_function()):
symbolic_losses.append(loss)
elif tensor_util.is_tf_type(loss):
eager_losses.append(loss)
self._callable_losses.extend(callable_losses)
in_call_context = base_layer_utils.call_context().in_call
if eager_losses and not in_call_context:
raise ValueError(
'Expected a symbolic Tensor or a callable for the loss value. '
'Please wrap your loss computation in a zero argument `lambda`.')
self._eager_losses.extend(eager_losses)
for symbolic_loss in symbolic_losses:
if getattr(self, '_is_graph_network', False):
self._graph_network_add_loss(symbolic_loss)
else:
# Possibly a loss was added in a Layer's `build`.
self._losses.append(symbolic_loss)
def _clear_losses(self):
"""Used every step in eager to reset losses."""
# Set to thread local directly to avoid Layer.__setattr__ overhead.
if not getattr(self, '_self_tracked_trackables',
None): # Fast path for single Layer.
self._thread_local._eager_losses = []
else:
for layer in self._flatten_layers():
layer._thread_local._eager_losses = []
@property
def metrics(self):
"""List of metrics added using the `add_metric()` API.
Example:
>>> input = tf.keras.layers.Input(shape=(3,))
>>> d = tf.keras.layers.Dense(2)
>>> output = d(input)
>>> d.add_metric(tf.reduce_max(output), name='max')
>>> d.add_metric(tf.reduce_min(output), name='min')
>>> [m.name for m in d.metrics]
['max', 'min']
Returns:
A list of `Metric` objects.
"""
collected_metrics = []
for layer in self._flatten_layers():
with layer._metrics_lock:
collected_metrics.extend(layer._metrics)
return collected_metrics
def add_metric(self, value, name=None, **kwargs):
"""Adds metric tensor to the layer.
This method can be used inside the `call()` method of a subclassed layer
or model.
```python
class MyMetricLayer(tf.keras.layers.Layer):
def __init__(self):
super(MyMetricLayer, self).__init__(name='my_metric_layer')
self.mean = tf.keras.metrics.Mean(name='metric_1')
def call(self, inputs):
self.add_metric(self.mean(inputs))
self.add_metric(tf.reduce_sum(inputs), name='metric_2')
return inputs
```
This method can also be called directly on a Functional Model during
construction. In this case, any tensor passed to this Model must
be symbolic and be able to be traced back to the model's `Input`s. These
metrics become part of the model's topology and are tracked when you
save the model via `save()`.
```python
inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
model.add_metric(math_ops.reduce_sum(x), name='metric_1')
```
Note: Calling `add_metric()` with the result of a metric object on a
Functional Model, as shown in the example below, is not supported. This is
because we cannot trace the metric result tensor back to the model's inputs.
```python
inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
```
Args:
value: Metric tensor.
name: String metric name.
**kwargs: Additional keyword arguments for backward compatibility.
Accepted values:
`aggregation` - When the `value` tensor provided is not the result of
calling a `keras.Metric` instance, it will be aggregated by default
using a `keras.metrics.Mean`.
"""
kwargs_keys = list(kwargs.keys())
if (len(kwargs_keys) > 1 or
(len(kwargs_keys) == 1 and kwargs_keys[0] != 'aggregation')):
raise TypeError('Unknown keyword arguments: ', str(kwargs.keys()))
from_metric_obj = hasattr(value, '_metric_obj')
is_symbolic = isinstance(value, keras_tensor.KerasTensor)
in_call_context = base_layer_utils.call_context().in_call
if name is None and not from_metric_obj:
# Eg. `self.add_metric(math_ops.reduce_sum(x))`
# In eager mode, we use metric name to lookup a metric. Without a name,
# a new Mean metric wrapper will be created on every model/layer call.
# So, we raise an error when no name is provided.
# We will do the same for symbolic mode for consistency although a name
# will be generated if no name is provided.
# We will not raise this error in the following use case for the sake of
# consistency, as the name is provided in the metric constructor.
# mean = metrics.Mean(name='my_metric')
# model.add_metric(mean(outputs))
raise ValueError('Please provide a name for your metric like '
'`self.add_metric(tf.reduce_sum(inputs), '
'name=\'mean_activation\')`')
elif from_metric_obj:
name = value._metric_obj.name
if not in_call_context and not is_symbolic:
raise ValueError('Expected a symbolic Tensor for the metric value, '
'received: ' + str(value))
# If a metric was added in a Layer's `call` or `build`.
if in_call_context or not getattr(self, '_is_graph_network', False):
# TF Function path should take the eager path.
# If the given metric is available in `metrics` list we just update state
# on it, otherwise we create a new metric instance and
# add it to the `metrics` list.
metric_obj = getattr(value, '_metric_obj', None)
# Tensors that come from a Metric object already updated the Metric state.
should_update_state = not metric_obj
name = metric_obj.name if metric_obj else name
with self._metrics_lock:
match = self._get_existing_metric(name)
if match:
metric_obj = match
elif metric_obj:
self._metrics.append(metric_obj)
else:
# Build the metric object with the value's dtype if it defines one
metric_obj = metrics_mod.Mean(
name=name, dtype=getattr(value, 'dtype', None))
self._metrics.append(metric_obj)
if should_update_state:
metric_obj(value)
else:
if from_metric_obj:
raise ValueError('Using the result of calling a `Metric` object '
'when calling `add_metric` on a Functional '
'Model is not supported. Please pass the '
'Tensor to monitor directly.')
# Insert layers into the Keras Graph Network.
aggregation = None if from_metric_obj else 'mean'
self._graph_network_add_metric(value, aggregation, name)
@doc_controls.do_not_doc_inheritable
def add_update(self, updates, inputs=None):
"""Add update op(s), potentially dependent on layer inputs.
Weight updates (for instance, the updates of the moving mean and variance
in a BatchNormalization layer) may be dependent on the inputs passed
when calling a layer. Hence, when reusing the same layer on
different inputs `a` and `b`, some entries in `layer.updates` may be
dependent on `a` and some on `b`. This method automatically keeps track
of dependencies.
This call is ignored when eager execution is enabled (in that case, variable
updates are run on the fly and thus do not need to be tracked for later
execution).
Args:
updates: Update op, or list/tuple of update ops, or zero-arg callable
that returns an update op. A zero-arg callable should be passed in
order to disable running the updates by setting `trainable=False`
on this Layer, when executing in Eager mode.
inputs: Deprecated, will be automatically inferred.
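    Example (a minimal sketch; the `MovingAverage` layer below is
    hypothetical):
    ```python
    class MovingAverage(tf.keras.layers.Layer):

      def build(self, input_shape):
        self.moving_mean = self.add_weight(
            name='moving_mean', shape=input_shape[-1:], initializer='zeros',
            trainable=False)

      def call(self, inputs):
        batch_mean = tf.reduce_mean(inputs, axis=0)
        # Zero-arg callable, so the update is skipped if the layer is frozen.
        self.add_update(
            lambda: self.moving_mean.assign(
                0.9 * self.moving_mean + 0.1 * batch_mean))
        return inputs
    ```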
"""
if inputs is not None:
tf_logging.warning(
'`add_update` `inputs` kwarg has been deprecated. You no longer need '
'to pass a value to `inputs` as it is being automatically inferred.')
call_context = base_layer_utils.call_context()
# No need to run updates during Functional API construction.
if call_context.in_keras_graph:
return
# Callable updates are disabled by setting `trainable=False`.
if not call_context.frozen:
for update in nest.flatten(updates):
if callable(update):
update() # pylint: disable=not-callable
def set_weights(self, weights):
"""Sets the weights of the layer, from NumPy arrays.
The weights of a layer represent the state of the layer. This function
sets the weight values from numpy arrays. The weight values should be
passed in the order they are created by the layer. Note that the layer's
weights must be instantiated before calling this function, by calling
the layer.
For example, a `Dense` layer returns a list of two values: the kernel matrix
and the bias vector. These can be used to set the weights of another
`Dense` layer:
>>> layer_a = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(1.))
>>> a_out = layer_a(tf.convert_to_tensor([[1., 2., 3.]]))
>>> layer_a.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
>>> layer_b = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(2.))
>>> b_out = layer_b(tf.convert_to_tensor([[10., 20., 30.]]))
>>> layer_b.get_weights()
[array([[2.],
[2.],
[2.]], dtype=float32), array([0.], dtype=float32)]
>>> layer_b.set_weights(layer_a.get_weights())
>>> layer_b.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
Args:
weights: a list of NumPy arrays. The number
of arrays and their shapes must match the
number and shapes of the weights
of the layer (i.e. it should match the
output of `get_weights`).
Raises:
ValueError: If the provided weights list does not match the
layer's specifications.
"""
params = self.weights
expected_num_weights = 0
for param in params:
if isinstance(param, base_layer_utils.TrackableWeightHandler):
expected_num_weights += param.num_tensors
else:
expected_num_weights += 1
if expected_num_weights != len(weights):
raise ValueError(
'You called `set_weights(weights)` on layer "%s" '
'with a weight list of length %s, but the layer was '
'expecting %s weights. Provided weights: %s...' %
(self.name, len(weights), expected_num_weights, str(weights)[:50]))
weight_index = 0
weight_value_tuples = []
for param in params:
if isinstance(param, base_layer_utils.TrackableWeightHandler):
num_tensors = param.num_tensors
tensors = weights[weight_index:weight_index + num_tensors]
param.set_weights(tensors)
weight_index += num_tensors
else:
weight = weights[weight_index]
ref_shape = param.shape
if not ref_shape.is_compatible_with(weight.shape):
raise ValueError(
'Layer weight shape %s not compatible with provided weight '
'shape %s' % (ref_shape, weight.shape))
weight_value_tuples.append((param, weight))
weight_index += 1
backend.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current weights of the layer, as NumPy arrays.
The weights of a layer represent the state of the layer. This function
returns both trainable and non-trainable weight values associated with this
layer as a list of NumPy arrays, which can in turn be used to load state
into similarly parameterized layers.
For example, a `Dense` layer returns a list of two values: the kernel matrix
and the bias vector. These can be used to set the weights of another
`Dense` layer:
>>> layer_a = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(1.))
>>> a_out = layer_a(tf.convert_to_tensor([[1., 2., 3.]]))
>>> layer_a.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
>>> layer_b = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(2.))
>>> b_out = layer_b(tf.convert_to_tensor([[10., 20., 30.]]))
>>> layer_b.get_weights()
[array([[2.],
[2.],
[2.]], dtype=float32), array([0.], dtype=float32)]
>>> layer_b.set_weights(layer_a.get_weights())
>>> layer_b.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
Returns:
Weights values as a list of NumPy arrays.
"""
weights = self.weights
output_weights = []
for weight in weights:
if isinstance(weight, base_layer_utils.TrackableWeightHandler):
output_weights.extend(weight.get_tensors())
else:
output_weights.append(weight)
return backend.batch_get_value(output_weights)
@doc_controls.do_not_generate_docs
def get_updates_for(self, inputs):
"""Deprecated, do NOT use!
Retrieves updates relevant to a specific set of inputs.
Args:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of update ops of the layer that depend on `inputs`.
"""
warnings.warn('`layer.get_updates_for` is deprecated and '
'will be removed in a future version. '
'Please use `layer.updates` method instead.')
return self.updates
@doc_controls.do_not_generate_docs
def get_losses_for(self, inputs):
"""Deprecated, do NOT use!
Retrieves losses relevant to a specific set of inputs.
Args:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of loss tensors of the layer that depend on `inputs`.
"""
warnings.warn('`layer.get_losses_for` is deprecated and '
'will be removed in a future version. '
'Please use `layer.losses` instead.')
return self.losses
@doc_controls.do_not_doc_inheritable
def get_input_mask_at(self, node_index):
"""Retrieves the input mask tensor(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple inputs).
"""
inputs = self.get_input_at(node_index)
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
@doc_controls.do_not_doc_inheritable
def get_output_mask_at(self, node_index):
"""Retrieves the output mask tensor(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple outputs).
"""
output = self.get_output_at(node_index)
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
@property
@doc_controls.do_not_doc_inheritable
def input_mask(self):
"""Retrieves the input mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Input mask tensor (potentially None) or list of input
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layers.
"""
inputs = self.input
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
@property
@doc_controls.do_not_doc_inheritable
def output_mask(self):
"""Retrieves the output mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Output mask tensor (potentially None) or list of output
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layers.
"""
output = self.output
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
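  # Hedged usage sketch (not part of the original source): retrieving the mask
  # that a `Masking` layer attaches to its output, which is the same
  # `_keras_mask` lookup the mask properties above perform. Assumes eager
  # TF 2.x with `tf.keras` available; values are illustrative.
  #
  #   masking = tf.keras.layers.Masking(mask_value=0.)
  #   x = tf.constant([[[1.], [0.], [2.]]])       # shape (1, 3, 1)
  #   y = masking(x)
  #   getattr(y, '_keras_mask', None)             # -> [[True, False, True]]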
@doc_controls.do_not_doc_inheritable
def get_input_shape_at(self, node_index):
"""Retrieves the input shape(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'input_shapes',
'input shape')
@doc_controls.do_not_doc_inheritable
def get_output_shape_at(self, node_index):
"""Retrieves the output shape(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'output_shapes',
'output shape')
@doc_controls.do_not_doc_inheritable
def get_input_at(self, node_index):
"""Retrieves the input tensor(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first input node of the layer.
Returns:
A tensor (or list of tensors if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'input_tensors',
'input')
@doc_controls.do_not_doc_inheritable
def get_output_at(self, node_index):
"""Retrieves the output tensor(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first output node of the layer.
Returns:
A tensor (or list of tensors if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'output_tensors',
'output')
@property
def input(self):
"""Retrieves the input tensor(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer.
Returns:
Input tensor or list of input tensors.
Raises:
RuntimeError: If called in Eager mode.
AttributeError: If no inbound nodes are found.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name +
' is not connected, no input to return.')
return self._get_node_attribute_at_index(0, 'input_tensors', 'input')
@property
def output(self):
"""Retrieves the output tensor(s) of a layer.
Only applicable if the layer has exactly one output,
i.e. if it is connected to one incoming layer.
Returns:
Output tensor or list of output tensors.
Raises:
AttributeError: if the layer is connected to more than one incoming
layers.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')
return self._get_node_attribute_at_index(0, 'output_tensors', 'output')
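  # Hedged usage sketch (not part of the original source): `layer.input` and
  # `layer.output` are only defined once the layer has been called on symbolic
  # (Functional API) inputs, which creates an inbound node. Assumes `tf.keras`.
  #
  #   inp = tf.keras.Input(shape=(4,))
  #   dense = tf.keras.layers.Dense(2)
  #   out = dense(inp)
  #   dense.input    # -> the symbolic tensor fed to the layer (`inp`)
  #   dense.output   # -> the symbolic tensor produced by the layer (`out`)
  #   # Calling the layer on a second Input would add a second node, and
  #   # `get_input_at(1)` / `get_output_at(1)` would be needed instead.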
@property
@doc_controls.do_not_doc_inheritable
def input_shape(self):
"""Retrieves the input shape(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer, or if all inputs
have the same shape.
Returns:
Input shape, as an integer shape tuple
(or list of shape tuples, one tuple per input tensor).
Raises:
AttributeError: if the layer has no defined input_shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined input shape.')
all_input_shapes = set(
[str(node.input_shapes) for node in self._inbound_nodes])
if len(all_input_shapes) == 1:
return self._inbound_nodes[0].input_shapes
else:
      raise AttributeError('The layer "' + str(self.name) +
                           '" has multiple inbound nodes, '
'with different input shapes. Hence '
'the notion of "input shape" is '
'ill-defined for the layer. '
'Use `get_input_shape_at(node_index)` '
'instead.')
def count_params(self):
"""Count the total number of scalars composing the weights.
Returns:
An integer count.
Raises:
ValueError: if the layer isn't yet built
(in which case its weights aren't yet defined).
"""
if not self.built:
if getattr(self, '_is_graph_network', False):
with tf_utils.maybe_init_scope(self):
self._maybe_build(self.inputs)
else:
raise ValueError('You tried to call `count_params` on ' + self.name +
', but the layer isn\'t built. '
'You can build it manually via: `' + self.name +
'.build(batch_input_shape)`.')
return layer_utils.count_params(self.weights)
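  # Hedged usage sketch (not part of the original source): `count_params`
  # requires the layer to be built so that its weight shapes are known.
  # Assumes `tf.keras`.
  #
  #   layer = tf.keras.layers.Dense(4)
  #   layer.build((None, 3))      # creates kernel (3, 4) and bias (4,)
  #   layer.count_params()        # -> 16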
@property
@doc_controls.do_not_doc_inheritable
def output_shape(self):
"""Retrieves the output shape(s) of a layer.
Only applicable if the layer has one output,
or if all outputs have the same shape.
Returns:
Output shape, as an integer shape tuple
(or list of shape tuples, one tuple per output tensor).
Raises:
AttributeError: if the layer has no defined output shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined output shape.')
all_output_shapes = set(
[str(node.output_shapes) for node in self._inbound_nodes])
if len(all_output_shapes) == 1:
return self._inbound_nodes[0].output_shapes
else:
raise AttributeError('The layer "%s"'
' has multiple inbound nodes, '
'with different output shapes. Hence '
'the notion of "output shape" is '
'ill-defined for the layer. '
'Use `get_output_shape_at(node_index)` '
'instead.' % self.name)
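  # Hedged usage sketch (not part of the original source): `input_shape` and
  # `output_shape` are only well-defined once the layer has been called with a
  # single, consistent shape in a Functional graph. Assumes `tf.keras`.
  #
  #   inp = tf.keras.Input(shape=(8,))
  #   dense = tf.keras.layers.Dense(3)
  #   _ = dense(inp)
  #   dense.input_shape     # -> (None, 8)
  #   dense.output_shape    # -> (None, 3)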
@property
@doc_controls.do_not_doc_inheritable
def inbound_nodes(self):
"""Deprecated, do NOT use! Only for compatibility with external Keras."""
return self._inbound_nodes
@property
@doc_controls.do_not_doc_inheritable
def outbound_nodes(self):
"""Deprecated, do NOT use! Only for compatibility with external Keras."""
return self._outbound_nodes
##############################################################################
# Methods & attributes below are public aliases of other methods. #
##############################################################################
@doc_controls.do_not_doc_inheritable
def apply(self, inputs, *args, **kwargs):
"""Deprecated, do NOT use!
This is an alias of `self.__call__`.
Args:
inputs: Input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
warnings.warn('`layer.apply` is deprecated and '
'will be removed in a future version. '
'Please use `layer.__call__` method instead.')
return self.__call__(inputs, *args, **kwargs)
@doc_controls.do_not_doc_inheritable
def add_variable(self, *args, **kwargs):
"""Deprecated, do NOT use! Alias for `add_weight`."""
warnings.warn('`layer.add_variable` is deprecated and '
'will be removed in a future version. '
'Please use `layer.add_weight` method instead.')
return self.add_weight(*args, **kwargs)
@property
@doc_controls.do_not_generate_docs
def variables(self):
"""Returns the list of all layer variables/weights.
Alias of `self.weights`.
Note: This will not track the weights of nested `tf.Modules` that are not
themselves Keras layers.
Returns:
A list of variables.
"""
return self.weights
@property
@doc_controls.do_not_generate_docs
def trainable_variables(self):
return self.trainable_weights
@property
@doc_controls.do_not_generate_docs
def non_trainable_variables(self):
return self.non_trainable_weights
##############################################################################
# Methods & attributes below are all private and only used by the framework. #
##############################################################################
@property
def _inbound_nodes(self):
return self._inbound_nodes_value
@_inbound_nodes.setter
@trackable.no_automatic_dependency_tracking
def _inbound_nodes(self, value):
self._inbound_nodes_value = value
@property
def _outbound_nodes(self):
return self._outbound_nodes_value
@_outbound_nodes.setter
@trackable.no_automatic_dependency_tracking
def _outbound_nodes(self, value):
self._outbound_nodes_value = value
def _set_dtype_policy(self, dtype):
"""Sets self._dtype_policy."""
if isinstance(dtype, policy.Policy):
self._dtype_policy = dtype
elif isinstance(dtype, dict):
self._dtype_policy = policy.deserialize(dtype)
elif isinstance(dtype, str) and dtype in ('mixed_float16',
'mixed_bfloat16'):
# The isinstance check is required since np.dtype raises an error if
# compared to a non-dtype string.
self._dtype_policy = policy.Policy(dtype)
elif dtype:
self._dtype_policy = policy.Policy(dtypes.as_dtype(dtype).name)
else:
self._dtype_policy = policy.global_policy()
if (self._dtype_policy.name == 'mixed_float16' and
not loss_scale_optimizer.strategy_supports_loss_scaling()):
# Although only loss scaling doesn't support certain strategies, to avoid
# confusion, we disallow the 'mixed_float16' policy with unsupported
# strategies. This is because 'mixed_float16' requires loss scaling for
# numeric stability.
strategy = ds_context.get_strategy()
raise ValueError('Mixed precision is not supported with the '
'tf.distribute.Strategy: %s. Either stop using mixed '
'precision by removing the use of the "%s" policy or '
'use a different Strategy, e.g. a MirroredStrategy.' %
(strategy.__class__.__name__, self._dtype_policy.name))
# Performance optimization: cache the compute dtype as a Dtype object or
# None, so that str to Dtype conversion doesn't happen in Layer.__call__.
# TODO(b/157486353): Investigate returning DTypes in Policy.
if self._dtype_policy.compute_dtype:
self._compute_dtype_object = dtypes.as_dtype(
self._dtype_policy.compute_dtype)
else:
self._compute_dtype_object = None
@property
def dtype_policy(self):
"""The dtype policy associated with this layer.
This is an instance of a `tf.keras.mixed_precision.Policy`.
"""
return self._dtype_policy
@property
def compute_dtype(self):
"""The dtype of the layer's computations.
This is equivalent to `Layer.dtype_policy.compute_dtype`. Unless
mixed precision is used, this is the same as `Layer.dtype`, the dtype of
the weights.
Layers automatically cast their inputs to the compute dtype, which causes
computations and the output to be in the compute dtype as well. This is done
by the base Layer class in `Layer.__call__`, so you do not have to insert
these casts if implementing your own layer.
Layers often perform certain internal computations in higher precision when
`compute_dtype` is float16 or bfloat16 for numeric stability. The output
will still typically be float16 or bfloat16 in such cases.
Returns:
The layer's compute dtype.
"""
return self._dtype_policy.compute_dtype
@property
def _compute_dtype(self):
"""Deprecated alias of `compute_dtype`."""
return self._dtype_policy.compute_dtype
@property
def variable_dtype(self):
"""Alias of `Layer.dtype`, the dtype of the weights."""
return self.dtype
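  # Hedged usage sketch (not part of the original source): under a mixed
  # precision policy the variable dtype and compute dtype diverge, which is
  # exactly what the properties above report. Assumes `tf.keras` with the
  # mixed precision API available.
  #
  #   layer = tf.keras.layers.Dense(2, dtype='mixed_float16')
  #   layer.dtype             # -> 'float32' (variable dtype)
  #   layer.variable_dtype    # -> 'float32'
  #   layer.compute_dtype     # -> 'float16' (computation/output dtype)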
def _maybe_cast_inputs(self, inputs, input_list=None):
"""Maybe casts the inputs to the compute dtype.
    If self._compute_dtype is floating-point, and self._autocast is True,
    floating-point inputs are cast to self._compute_dtype.
Args:
inputs: Input tensor, or structure of input tensors.
input_list: Flat list of input tensors.
Returns:
      `inputs`, but tensors may have been cast to self._compute_dtype
"""
if not input_list:
input_list = nest.flatten(inputs)
compute_dtype_object = self._compute_dtype_object
should_autocast = (
self._autocast and compute_dtype_object and
compute_dtype_object.is_floating)
if (should_autocast and
any(map(self._should_cast_single_input, input_list))):
# Only perform expensive `nest` operation when needed.
return nest.map_structure(self._cast_single_input, inputs)
else:
return inputs
def _should_cast_single_input(self, x):
if isinstance(x, _AUTOCAST_TYPES):
return (self._compute_dtype_object and
x.dtype != self._compute_dtype_object and x.dtype.is_floating)
return False
def _cast_single_input(self, x):
"""Cast a single Tensor or TensorSpec to the compute dtype."""
if self._should_cast_single_input(x):
return math_ops.cast(x, self._compute_dtype_object)
else:
return x
# _dtype used to be an attribute set in the constructor. We still expose it
# because some clients still use it.
# TODO(reedwm): Deprecate, then remove the _dtype property.
@property
def _dtype(self):
# This is equivalent to returning self.dtype . We do not return self.dtype
# as it would cause infinite recursion in a few subclasses, which override
# "dtype" to return self._dtype.
return self._dtype_policy.variable_dtype
@_dtype.setter
def _dtype(self, value):
value = dtypes.as_dtype(value).name
self._set_dtype_policy(policy.Policy(value))
def _name_scope(self):
if not tf2.enabled():
return self.name
name_scope = self.name
current_name_scope = ops.get_name_scope()
if current_name_scope:
name_scope = current_name_scope + '/' + name_scope
if name_scope:
      # Note that the trailing `/` prevents autogenerated
      # numerical suffixes from being appended. It will also fully reset
# nested name scope (i.e. the outer name scope has no effect).
name_scope += '/'
return name_scope
def _init_set_name(self, name, zero_based=True):
if not name:
self._name = backend.unique_object_name(
generic_utils.to_snake_case(self.__class__.__name__),
zero_based=zero_based)
else:
backend.observe_object_name(name)
self._name = name
def _get_existing_metric(self, name=None):
match = [m for m in self._metrics if m.name == name]
if not match:
return
if len(match) > 1:
raise ValueError(
'Please provide different names for the metrics you have added. '
'We found {} metrics with the name: "{}"'.format(len(match), name))
return match[0]
def _handle_weight_regularization(self, name, variable, regularizer):
"""Create lambdas which compute regularization losses."""
def _loss_for_variable(v):
"""Creates a regularization loss `Tensor` for variable `v`."""
with backend.name_scope(name + '/Regularizer'):
regularization = regularizer(v)
return regularization
if base_layer_utils.is_split_variable(variable):
for v in variable:
self.add_loss(functools.partial(_loss_for_variable, v))
else:
self.add_loss(functools.partial(_loss_for_variable, variable))
def _handle_activity_regularization(self, inputs, outputs):
# Apply activity regularization.
# Note that it should be applied every time the layer creates a new
# output, since it is output-specific.
if self._activity_regularizer:
output_list = nest.flatten(outputs)
with backend.name_scope('ActivityRegularizer'):
for output in output_list:
activity_loss = self._activity_regularizer(output)
batch_size = math_ops.cast(
array_ops.shape(output)[0], activity_loss.dtype)
# Make activity regularization strength batch-agnostic.
mean_activity_loss = activity_loss / batch_size
self.add_loss(mean_activity_loss)
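  # Hedged usage sketch (not part of the original source): an activity
  # regularizer adds one loss per call, scaled by the batch size as done above.
  # Assumes eager TF 2.x with `tf.keras`; the numbers are illustrative.
  #
  #   layer = tf.keras.layers.Dense(
  #       1, kernel_initializer='ones',
  #       activity_regularizer=tf.keras.regularizers.l2(0.1))
  #   _ = layer(tf.ones((2, 3)))   # every output value is 3.0
  #   layer.losses                 # -> [0.1 * (3.0**2) * 2 / 2] = [~0.9]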
def _set_mask_metadata(self, inputs, outputs, previous_mask, build_graph):
# Many `Layer`s don't need to call `compute_mask`.
# This method is optimized to do as little work as needed for the common
# case.
if not self._supports_masking:
return
flat_outputs = nest.flatten(outputs)
mask_already_computed = (
getattr(self, '_compute_output_and_mask_jointly', False) or
all(getattr(x, '_keras_mask', None) is not None for x in flat_outputs))
if mask_already_computed:
if build_graph:
self._set_mask_keras_history_checked(flat_outputs)
return
output_masks = self.compute_mask(inputs, previous_mask)
if output_masks is None:
return
flat_masks = nest.flatten(output_masks)
for tensor, mask in zip(flat_outputs, flat_masks):
try:
tensor._keras_mask = mask
except AttributeError:
# C Type such as np.ndarray.
pass
if build_graph:
self._set_mask_keras_history_checked(flat_outputs)
def _set_mask_keras_history_checked(self, flat_outputs):
for output in flat_outputs:
if getattr(output, '_keras_mask', None) is not None:
# Do not track masks for `TensorFlowOpLayer` construction.
output._keras_mask._keras_history_checked = True
def _get_input_masks(self, inputs, input_list, args, kwargs):
if not self._supports_masking and not self._expects_mask_arg:
# Input masks only need to be retrieved if they are needed for `call`
# or `compute_mask`.
input_masks = None
implicit_mask = False
elif self._call_arg_was_passed('mask', args, kwargs):
input_masks = self._get_call_arg_value('mask', args, kwargs)
implicit_mask = False
else:
input_masks = [getattr(t, '_keras_mask', None) for t in input_list]
if all(mask is None for mask in input_masks):
input_masks = None
implicit_mask = False
else:
# Only do expensive `nest` op when masking is actually being used.
input_masks = nest.pack_sequence_as(inputs, input_masks)
implicit_mask = True
return input_masks, implicit_mask
def _call_arg_was_passed(self, arg_name, args, kwargs, inputs_in_args=False):
# Performance optimization: do no work in most common case.
if not args and not kwargs:
return False
if arg_name in kwargs:
return True
call_fn_args = self._call_fn_args
if not inputs_in_args:
# Ignore `inputs` arg.
call_fn_args = call_fn_args[1:]
return arg_name in dict(zip(call_fn_args, args))
def _get_call_arg_value(self, arg_name, args, kwargs, inputs_in_args=False):
if arg_name in kwargs:
return kwargs[arg_name]
call_fn_args = self._call_fn_args
if not inputs_in_args:
# Ignore `inputs` arg.
call_fn_args = call_fn_args[1:]
args_dict = dict(zip(call_fn_args, args))
return args_dict[arg_name]
def _set_call_arg_value(
self, arg_name, new_value, args,
kwargs, inputs_in_args=False, pop_kwarg_if_none=False):
arg_pos = self._call_fn_arg_positions.get(arg_name, None)
if arg_pos is not None:
if not inputs_in_args:
# Ignore `inputs` arg.
arg_pos = arg_pos - 1
if len(args) > arg_pos:
args = list(args)
args[arg_pos] = new_value
return tuple(args), kwargs
if new_value is None and pop_kwarg_if_none:
kwargs.pop(arg_name, None)
else:
kwargs[arg_name] = new_value
return args, kwargs
def _set_connectivity_metadata(self, args, kwargs, outputs):
# If the layer returns tensors from its inputs unmodified,
# we copy them to avoid loss of KerasHistory metadata.
flat_outputs = nest.flatten(outputs)
flat_inputs = nest.flatten((args, kwargs))
input_ids_set = {id(i) for i in flat_inputs}
outputs_copy = []
for x in flat_outputs:
if id(x) in input_ids_set:
with backend.name_scope(self.name):
x = array_ops.identity(x)
outputs_copy.append(x)
outputs = nest.pack_sequence_as(outputs, outputs_copy)
# Create node, Node wires itself to inbound and outbound layers.
# The Node constructor actually updates this layer's self._inbound_nodes,
# sets _keras_history on the outputs, and adds itself to the
# `_outbound_nodes` of the layers that produced the inputs to this
# layer call.
node_module.Node(self, call_args=args, call_kwargs=kwargs, outputs=outputs)
return outputs
def _get_node_attribute_at_index(self, node_index, attr, attr_name):
"""Private utility to retrieves an attribute (e.g. inputs) from a node.
This is used to implement the methods:
- get_input_shape_at
- get_output_shape_at
- get_input_at
etc...
Args:
node_index: Integer index of the node from which
to retrieve the attribute.
attr: Exact node attribute name.
attr_name: Human-readable attribute name, for error messages.
Returns:
The layer's attribute `attr` at the node of index `node_index`.
Raises:
RuntimeError: If the layer has no inbound nodes, or if called in Eager
mode.
ValueError: If the index provided does not match any node.
"""
if not self._inbound_nodes:
raise RuntimeError('The layer has never been called '
'and thus has no defined ' + attr_name + '.')
if not len(self._inbound_nodes) > node_index:
raise ValueError('Asked to get ' + attr_name + ' at node ' +
str(node_index) + ', but the layer has only ' +
str(len(self._inbound_nodes)) + ' inbound nodes.')
values = getattr(self._inbound_nodes[node_index], attr)
if isinstance(values, list) and len(values) == 1:
return values[0]
else:
return values
def _maybe_build(self, inputs):
# Check input assumptions set before layer building, e.g. input rank.
if not self.built:
input_spec.assert_input_compatibility(
self.input_spec, inputs, self.name)
input_list = nest.flatten(inputs)
if input_list and self._dtype_policy.compute_dtype is None:
try:
dtype = input_list[0].dtype.base_dtype.name
except AttributeError:
pass
else:
self._set_dtype_policy(policy.Policy(dtype))
input_shapes = None
# Converts Tensors / CompositeTensors to TensorShapes.
if all(hasattr(x, 'shape') for x in input_list):
input_shapes = tf_utils.get_shapes(inputs)
else:
# Converts input shape to TensorShapes.
try:
input_shapes = tf_utils.convert_shapes(inputs, to_tuples=False)
except ValueError:
pass
# Only call `build` if the user has manually overridden the build method.
if not hasattr(self.build, '_is_default'):
# Any setup work performed only once should happen in an `init_scope`
# to avoid creating symbolic Tensors that will later pollute any eager
# operations.
with tf_utils.maybe_init_scope(self):
self.build(input_shapes) # pylint:disable=not-callable
      # We must also ensure that the layer is marked as built and that the
      # build shape is stored, since user-defined build functions may not be
      # calling `super().build()`.
Layer.build(self, input_shapes)
# Optionally load weight values specified at layer instantiation.
if self._initial_weights is not None:
with ops.init_scope():
# Using `init_scope` since we want variable assignment in
# `set_weights` to be treated like variable initialization.
self.set_weights(self._initial_weights)
self._initial_weights = None
def _symbolic_call(self, inputs):
input_shapes = nest.map_structure(lambda x: x.shape, inputs)
output_shapes = self.compute_output_shape(input_shapes)
# Convert to TensorShape so that nest.map_structure will not map into
# individual dim of the shape.
output_shapes = tf_utils.convert_shapes(output_shapes, to_tuples=False)
def _make_placeholder_like(shape):
ph = backend.placeholder(shape=shape, dtype=self.dtype)
ph._keras_mask = None
return ph
return nest.map_structure(_make_placeholder_like, output_shapes)
def _get_trainable_state(self):
"""Get the `trainable` state of each sublayer.
Returns:
A dict mapping all sublayers to their `trainable` value.
"""
trainable_state = weakref.WeakKeyDictionary()
for layer in self._flatten_layers():
trainable_state[layer] = layer.trainable
return trainable_state
def _set_trainable_state(self, trainable_state):
"""Set `trainable` state for each sublayer."""
for layer in self._flatten_layers():
if layer in trainable_state:
layer.trainable = trainable_state[layer]
@property
def _obj_reference_counts(self):
"""A dictionary counting the number of attributes referencing an object."""
self._maybe_create_attribute('_obj_reference_counts_dict',
object_identity.ObjectIdentityDictionary())
return self._obj_reference_counts_dict
@trackable.no_automatic_dependency_tracking
def _maybe_create_attribute(self, name, default_value):
"""Create the attribute with the default value if it hasn't been created.
    This is useful for fields that are used for tracking purposes, such as
    _trainable_weights or _layers. Note that a user could create a layer
    subclass and assign an internal field before invoking Layer.__init__();
    in that case __setattr__() needs to create the tracking fields and
    __init__() must not override them.
Args:
name: String, the name of the attribute.
default_value: Object, the default value of the attribute.
"""
if not hasattr(self, name):
self.__setattr__(name, default_value)
def __delattr__(self, name):
# For any super.__delattr__() call, we will directly use the implementation
    # in Trackable and skip the behavior in AutoTrackable. The Layer originally
    # used Trackable as its base class; the change to using Module as the base
    # class forced us to have AutoTrackable in the class hierarchy.
#
# TODO(b/180760306) Keeping the status quo of skipping _delattr__ and
# __setattr__ in AutoTrackable may be unsustainable.
existing_value = getattr(self, name, None)
# If this value is replacing an existing object assigned to an attribute, we
# should clean it out to avoid leaking memory. First we check if there are
# other attributes referencing it.
reference_counts = self._obj_reference_counts
if existing_value not in reference_counts:
super(tracking.AutoTrackable, self).__delattr__(name)
return
reference_count = reference_counts[existing_value]
if reference_count > 1:
# There are other remaining references. We can't remove this object from
# _layers etc.
reference_counts[existing_value] = reference_count - 1
super(tracking.AutoTrackable, self).__delattr__(name)
return
else:
# This is the last remaining reference.
del reference_counts[existing_value]
super(tracking.AutoTrackable, self).__delattr__(name)
if (isinstance(existing_value, Layer)
or base_layer_utils.has_weights(existing_value)):
super(tracking.AutoTrackable, self).__setattr__(
'_self_tracked_trackables',
[l for l in self._self_tracked_trackables if l is not existing_value])
if isinstance(existing_value, tf_variables.Variable):
super(tracking.AutoTrackable, self).__setattr__(
'_trainable_weights',
[w for w in self._trainable_weights if w is not existing_value])
super(tracking.AutoTrackable, self).__setattr__(
'_non_trainable_weights',
[w for w in self._non_trainable_weights if w is not existing_value])
def __setattr__(self, name, value):
if (name == '_self_setattr_tracking' or
not getattr(self, '_self_setattr_tracking', True) or
# Exclude @property.setters from tracking
hasattr(self.__class__, name)):
try:
super(tracking.AutoTrackable, self).__setattr__(name, value)
except AttributeError:
raise AttributeError(
('Can\'t set the attribute "{}", likely because it conflicts with '
'an existing read-only @property of the object. Please choose a '
'different name.').format(name))
return
# Wraps data structures in `Trackable`, unwraps `NoDependency` objects.
value = data_structures.sticky_attribute_assignment(
trackable=self, value=value, name=name)
reference_counts = self._obj_reference_counts
reference_counts[value] = reference_counts.get(value, 0) + 1
# Clean out the old attribute, which clears _layers and _trainable_weights
# if necessary.
try:
self.__delattr__(name)
except AttributeError:
pass
# Keep track of metric instance created in subclassed layer.
for val in nest.flatten(value):
if isinstance(val, metrics_mod.Metric) and hasattr(self, '_metrics'):
self._metrics.append(val)
# Append value to self._self_tracked_trackables if relevant
if (getattr(self, '_auto_track_sub_layers', True) and
(isinstance(value, module.Module) or
base_layer_utils.has_weights(value))):
self._maybe_create_attribute('_self_tracked_trackables', [])
# We need to check object identity to avoid de-duplicating empty
# container types which compare equal.
if not any((layer is value for layer in self._self_tracked_trackables)):
self._self_tracked_trackables.append(value)
if hasattr(value, '_use_resource_variables'):
# Legacy layers (V1 tf.layers) must always use
# resource variables.
value._use_resource_variables = True
# Append value to list of trainable / non-trainable weights if relevant
# TODO(b/125122625): This won't pick up on any variables added to a
# list/dict after creation.
for val in nest.flatten(value, expand_composites=True):
if not isinstance(val, tf_variables.Variable):
continue
# Users may add extra weights/variables
# simply by assigning them to attributes (invalid for graph networks)
self._maybe_create_attribute('_trainable_weights', [])
self._maybe_create_attribute('_non_trainable_weights', [])
if val.trainable:
if any(val is w for w in self._trainable_weights):
continue
self._trainable_weights.append(val)
else:
if any(val is w for w in self._non_trainable_weights):
continue
self._non_trainable_weights.append(val)
backend.track_variable(val)
# TODO(b/180760306) Skip the auto trackable from tf.Module to keep status
# quo. See the comment at __delattr__.
super(tracking.AutoTrackable, self).__setattr__(name, value)
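  # Hedged usage sketch (not part of the original source): the tracking done in
  # `__setattr__` above is what makes plain attribute assignment enough for
  # sublayer and variable discovery in subclassed layers. Assumes `tf.keras`.
  #
  #   class MyBlock(tf.keras.layers.Layer):
  #     def __init__(self):
  #       super().__init__()
  #       self.dense = tf.keras.layers.Dense(2)   # tracked as a sublayer
  #       self.scale = tf.Variable(1.0)           # tracked as a weight
  #     def call(self, x):
  #       return self.dense(x) * self.scale
  #
  #   block = MyBlock()
  #   _ = block(tf.ones((1, 3)))
  #   len(block.weights)   # -> 3 (kernel, bias, scale)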
def _gather_children_attribute(self, attribute):
assert attribute in {
'variables', 'trainable_variables', 'non_trainable_variables'
}
if hasattr(self, '_self_tracked_trackables'):
nested_layers = self._flatten_modules(include_self=False, recursive=False)
return list(
itertools.chain.from_iterable(
getattr(layer, attribute) for layer in nested_layers))
return []
def _flatten_layers(self, recursive=True, include_self=True):
for m in self._flatten_modules(
recursive=recursive, include_self=include_self):
if isinstance(m, Layer):
yield m
def _flatten_modules(self, recursive=True, include_self=True):
"""Flattens `tf.Module` instances (excluding `Metrics`).
Args:
recursive: Whether to recursively flatten through submodules.
include_self: Whether to include this `Layer` instance.
Yields:
`tf.Module` instance tracked by this `Layer`.
"""
if include_self:
yield self
# Only instantiate set and deque if needed.
trackables = getattr(self, '_self_tracked_trackables', None)
if trackables:
seen_object_ids = set()
deque = collections.deque(trackables)
while deque:
trackable_obj = deque.popleft()
trackable_id = id(trackable_obj)
if trackable_id in seen_object_ids:
continue
seen_object_ids.add(trackable_id)
# Metrics are not considered part of the Layer's topology.
if (isinstance(trackable_obj, module.Module) and
not isinstance(trackable_obj, metrics_mod.Metric)):
yield trackable_obj
# Introspect recursively through sublayers.
if recursive:
subtrackables = getattr(trackable_obj, '_self_tracked_trackables',
None)
if subtrackables:
deque.extendleft(reversed(subtrackables))
elif isinstance(trackable_obj, data_structures.TrackableDataStructure):
# Data structures are introspected even with `recursive=False`.
tracked_values = trackable_obj._values
if tracked_values:
deque.extendleft(reversed(tracked_values))
# This is a hack so that the is_layer (within
# training/trackable/layer_utils.py) check doesn't get the weights attr.
# TODO(b/110718070): Remove when fixed.
def _is_layer(self):
return True
def _init_call_fn_args(self, expects_training_arg=None):
# Clear cached call function arguments.
self.__class__._call_full_argspec.fget.cache.pop(self, None)
self.__class__._call_fn_args.fget.cache.pop(self, None)
self.__class__._call_accepts_kwargs.fget.cache.pop(self, None)
call_fn_args = self._call_fn_args
call_fn_args += self._call_full_argspec.kwonlyargs or []
if expects_training_arg is None:
self._expects_training_arg = ('training' in call_fn_args or
self._call_accepts_kwargs)
else:
# Use value encoded into the metadata when loading from the SavedModel.
self._expects_training_arg = expects_training_arg
# The default training arg will be any (non-None) default specified in the
# method signature, or None if no value is specified.
call_fn_arg_defaults = self._call_fn_arg_defaults.copy()
call_fn_arg_defaults.update(self._call_full_argspec.kwonlydefaults or {})
self._default_training_arg = call_fn_arg_defaults.get('training')
self._expects_mask_arg = ('mask' in call_fn_args or
self._call_accepts_kwargs)
@property
@layer_utils.cached_per_instance
def _call_full_argspec(self):
# Argspec inspection is expensive and the call spec is used often, so it
# makes sense to cache the result.
return tf_inspect.getfullargspec(self.call)
@property
@layer_utils.cached_per_instance
def _call_fn_args(self):
all_args = self._call_full_argspec.args
# Scrub `self` that appears if a decorator was applied.
if all_args and all_args[0] == 'self':
return all_args[1:]
return all_args
@property
@layer_utils.cached_per_instance
def _call_fn_arg_defaults(self):
call_fn_args = self._call_fn_args
call_fn_defaults = self._call_full_argspec.defaults or []
defaults = dict()
# The call arg defaults are an n-tuple of the last n elements of the args
# list. (n = # of elements that have a default argument)
for i in range(-1 * len(call_fn_defaults), 0):
defaults[call_fn_args[i]] = call_fn_defaults[i]
return defaults
@property
@layer_utils.cached_per_instance
def _call_fn_arg_positions(self):
call_fn_arg_positions = dict()
for pos, arg in enumerate(self._call_fn_args):
call_fn_arg_positions[arg] = pos
return call_fn_arg_positions
@property
@layer_utils.cached_per_instance
def _call_accepts_kwargs(self):
return self._call_full_argspec.varkw is not None
@property
def _eager_losses(self):
# A list of loss values containing activity regularizers and losses
# manually added through `add_loss` during eager execution. It is cleared
# after every batch.
    # Because we plan on eventually allowing the same model instance to be
    # trained in eager mode or graph mode alternately, we need to keep track of
# eager losses and symbolic losses via separate attributes.
if not hasattr(self._thread_local, '_eager_losses'):
self._thread_local._eager_losses = []
return self._thread_local._eager_losses
@_eager_losses.setter
def _eager_losses(self, losses):
self._thread_local._eager_losses = losses
def _dedup_weights(self, weights):
"""Dedupe weights while maintaining order as much as possible."""
output, seen_ids = [], set()
for w in weights:
if id(w) not in seen_ids:
output.append(w)
# Track the Variable's identity to avoid __eq__ issues.
seen_ids.add(id(w))
return output
def _split_out_first_arg(self, args, kwargs):
# Grab the argument corresponding to the first argument in the
# layer's `call` method spec. This will either be the first positional
# argument, or it will be provided as a keyword argument.
if args:
inputs = args[0]
args = args[1:]
elif self._call_fn_args[0] in kwargs:
kwargs = copy.copy(kwargs)
inputs = kwargs.pop(self._call_fn_args[0])
else:
raise ValueError(
'The first argument to `Layer.call` must always be passed.')
return inputs, args, kwargs
# SavedModel properties. Please see keras/saving/saved_model for details.
@trackable.no_automatic_dependency_tracking
def _set_save_spec(self, inputs):
if self._saved_model_inputs_spec is not None:
return # Already set.
self._saved_model_inputs_spec = nest.map_structure(tf_utils.get_tensor_spec,
inputs)
def _get_save_spec(self, dynamic_batch=True):
if self._saved_model_inputs_spec is None:
return None
return nest.map_structure(
lambda t: tf_utils.get_tensor_spec(t, dynamic_batch=dynamic_batch),
self._saved_model_inputs_spec)
@property
def _trackable_saved_model_saver(self):
return layer_serialization.LayerSavedModelSaver(self)
@property
def _object_identifier(self):
return self._trackable_saved_model_saver.object_identifier
@property
def _tracking_metadata(self):
return self._trackable_saved_model_saver.tracking_metadata
def _list_extra_dependencies_for_serialization(self, serialization_cache):
return (self._trackable_saved_model_saver
.list_extra_dependencies_for_serialization(serialization_cache))
def _list_functions_for_serialization(self, serialization_cache):
return (self._trackable_saved_model_saver
.list_functions_for_serialization(serialization_cache))
@property
def _use_input_spec_as_call_signature(self):
# Whether input spec can be used as the call signature when tracing the
# Layer for SavedModel. By default, this is set to `True` for layers
# exported from the Keras library, because the layers more rigidly define
# the `input_specs` property (many custom layers only set the `ndims`)
return get_canonical_name_for_symbol(type(self),
api_name='keras') is not None
def __getstate__(self):
# Override to support `copy.deepcopy` and pickling.
# Thread-local objects cannot be copied in Python 3, so pop these.
# Thread-local objects are used to cache losses in MirroredStrategy, and
# so shouldn't be copied.
state = self.__dict__.copy()
state.pop('_thread_local', None)
state.pop('_metrics_lock', None)
return state
def __setstate__(self, state):
state['_thread_local'] = threading.local()
state['_metrics_lock'] = threading.Lock()
# Bypass Trackable logic as `__dict__` already contains this info.
object.__setattr__(self, '__dict__', state)
class TensorFlowOpLayer(Layer):
"""Wraps a TensorFlow Operation in a Layer.
This class is used internally by the Functional API. When a user
uses a raw TensorFlow Operation on symbolic tensors originating
from an `Input` Layer, the resultant operation will be wrapped
with this Layer object in order to make the operation compatible
with the Keras API.
This Layer will create a new, identical operation (except for inputs
and outputs) every time it is called. If `run_eagerly` is `True`,
the op creation and calculation will happen inside an Eager function.
Instances of this Layer are created when `autolambda` is called, which
is whenever a Layer's `__call__` encounters symbolic inputs that do
not have Keras metadata, or when a Network's `__init__` encounters
outputs that do not have Keras metadata.
Attributes:
node_def: String, the serialized NodeDef of the Op this layer will wrap.
name: String, the name of the Layer.
constants: Dict of NumPy arrays, the values of any Tensors needed for this
Operation that do not originate from a Keras `Input` Layer. Since all
placeholders must come from Keras `Input` Layers, these Tensors must be
treated as constant in the Functional API.
trainable: Bool, whether this Layer is trainable. Currently Variables are
not supported, and so this parameter has no effect.
    dtype: The default dtype of this Layer. Inherited from `Layer` and has no
      effect on this class; however, it is used in `get_config`.
"""
@trackable.no_automatic_dependency_tracking
def __init__(self,
node_def,
name,
constants=None,
trainable=True,
dtype=None):
# Pass autocast=False, as if inputs are cast, input types might not match
# Operation type.
super(TensorFlowOpLayer, self).__init__(
name=_TF_OP_LAYER_NAME_PREFIX + name, trainable=trainable, dtype=dtype,
autocast=False)
if isinstance(node_def, dict):
self.node_def = json_format.ParseDict(node_def, node_def_pb2.NodeDef())
else:
if not isinstance(node_def, bytes):
node_def = node_def.encode('utf-8')
self.node_def = node_def_pb2.NodeDef.FromString(node_def)
# JSON serialization stringifies keys which are integer input indices.
self.constants = ({
int(index): constant for index, constant in constants.items()
} if constants is not None else {})
# Layer uses original op unless it is called on new inputs.
# This means `built` is not set in `__call__`.
self.built = True
# Do not individually trace TensorflowOpLayers in the SavedModel.
self._must_restore_from_config = True
def call(self, inputs):
if context.executing_eagerly():
return self._defun_call(inputs)
return self._make_op(inputs)
def _make_node_def(self, graph):
node_def = node_def_pb2.NodeDef()
node_def.CopyFrom(self.node_def)
# Used in TPUReplicateContext to indicate whether this node has been cloned
# and to not add TPU attributes.
node_def.attr['_cloned'].b = True
node_def.name = graph.unique_name(node_def.name)
return node_def
def _make_op(self, inputs):
inputs = nest.flatten(inputs)
graph = inputs[0].graph
node_def = self._make_node_def(graph)
with graph.as_default():
for index, constant in self.constants.items():
# Recreate constant in graph to add distribution context.
value = tensor_util.constant_value(constant)
if value is not None:
constant = constant_op.constant(value, name=node_def.input[index])
inputs.insert(index, constant)
c_op = ops._create_c_op(graph, node_def, inputs, control_inputs=[])
op = graph._create_op_from_tf_operation(c_op)
op._control_flow_post_processing()
# Record the gradient because custom-made ops don't go through the
# code-gen'd eager call path
op_type = compat.as_str(op.op_def.name)
attr_names = [compat.as_str(attr.name) for attr in op.op_def.attr]
attrs = []
for attr_name in attr_names:
attrs.append(attr_name)
attrs.append(op.get_attr(attr_name))
attrs = tuple(attrs)
execute.record_gradient(op_type, op.inputs, attrs, op.outputs)
if len(op.outputs) == 1:
return op.outputs[0]
return op.outputs
@def_function.function
def _defun_call(self, inputs):
"""Wraps the op creation method in an Eager function for `run_eagerly`."""
return self._make_op(inputs)
def get_config(self):
config = super(TensorFlowOpLayer, self).get_config()
config.update({
# `__init__` prefixes the name. Revert to the constructor argument.
'name': config['name'][len(_TF_OP_LAYER_NAME_PREFIX):],
'node_def': json_format.MessageToDict(self.node_def),
'constants': {
i: backend.get_value(c) for i, c in self.constants.items()
}
})
return config
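# Hedged illustration (not part of the original source): the kind of Functional
# API usage that triggers an op-wrapping layer like the one above. Whether the
# wrapper is `TensorFlowOpLayer` or a newer equivalent depends on the TF/Keras
# version, so treat this purely as a sketch of the concept.
#
#   inp = tf.keras.Input(shape=(3,))
#   doubled = inp * 2.0                      # raw TF op on a symbolic tensor
#   model = tf.keras.Model(inp, doubled)     # the op gets wrapped into a Layer
#   [type(l).__name__ for l in model.layers]
#   # -> e.g. ['InputLayer', 'TFOpLambda'] or a TensorFlowOpLayer,
#   #    depending on the version.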
class AddLoss(Layer):
"""Adds its inputs as a loss.
Attributes:
unconditional: Whether or not the loss should be conditioned on the inputs.
"""
def __init__(self, unconditional, **kwargs):
# Pass autocast=False, as there is no reason to cast loss to a different
# dtype.
kwargs['autocast'] = False
super(AddLoss, self).__init__(**kwargs)
self.unconditional = unconditional
def call(self, inputs):
self.add_loss(inputs, inputs=(not self.unconditional))
return inputs
def get_config(self):
config = super(AddLoss, self).get_config()
config.update({'unconditional': self.unconditional})
return config
class AddMetric(Layer):
"""Adds its inputs as a metric.
Attributes:
aggregation: 'mean' or None. How the inputs should be aggregated.
metric_name: The name to use for this metric.
"""
def __init__(self, aggregation=None, metric_name=None, **kwargs):
super(AddMetric, self).__init__(**kwargs)
self.aggregation = aggregation
self.metric_name = metric_name
def call(self, inputs):
self.add_metric(inputs, aggregation=self.aggregation, name=self.metric_name)
return inputs
def get_config(self):
config = super(AddMetric, self).get_config()
config.update({
'aggregation': self.aggregation,
'metric_name': self.metric_name
})
return config
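# Hedged illustration (not part of the original source): `AddLoss` and
# `AddMetric` are inserted by the framework when symbolic tensors are passed to
# `model.add_loss` / `model.add_metric` in the Functional API; user code
# normally never instantiates them directly.
#
#   inp = tf.keras.Input(shape=(4,))
#   out = tf.keras.layers.Dense(1)(inp)
#   model = tf.keras.Model(inp, out)
#   model.add_loss(tf.reduce_mean(out))                     # routed through AddLoss
#   model.add_metric(tf.reduce_mean(out), name='mean_out')  # routed through AddMetric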
def _in_functional_construction_mode(layer, inputs, args, kwargs, input_list): # pylint: disable=unused-argument
"""Check the arguments to see if we are constructing a functional model."""
# We are constructing a functional model if any of the inputs
# are KerasTensors
return any(
isinstance(tensor, keras_tensor.KerasTensor)
for tensor in nest.flatten([inputs, args, kwargs]))
def _convert_numpy_or_python_types(x):
if isinstance(x, (np_arrays.ndarray, np.ndarray, float, int)):
return ops.convert_to_tensor_v2_with_dispatch(x)
return x
# Avoid breaking users who directly import this symbol from this file.
# TODO(fchollet): remove this.
InputSpec = input_spec.InputSpec # pylint:disable=invalid-name
|
{
"content_hash": "70bf5cf14cfcf4efacf5925067ae8c60",
"timestamp": "",
"source": "github",
"line_count": 3219,
"max_line_length": 120,
"avg_line_length": 39.62224293258776,
"alnum_prop": 0.6588863451044346,
"repo_name": "petewarden/tensorflow",
"id": "1a9d701b5d368f9330fdb250974b3340120db530",
"size": "128268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/engine/base_layer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31796"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "895451"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82100676"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867248"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "984477"
},
{
"name": "Jupyter Notebook",
"bytes": "550862"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1982867"
},
{
"name": "Makefile",
"bytes": "66496"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "317461"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37425809"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700106"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3613406"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
import functools
import itertools
import warnings
import numpy as np
import pandas as pd
from . import common
from . import indexing
from . import ops
from . import utils
from .pycompat import basestring, OrderedDict, zip, dask_array_type
from .indexing import (PandasIndexAdapter, orthogonally_indexable)
import xray # only for Dataset and DataArray
def as_variable(obj, key=None, strict=True):
"""Convert an object into an Variable
- If the object is already an `Variable`, return it.
- If the object is a `DataArray`, return it if `strict=False` or return
its variable if `strict=True`.
- Otherwise, if the object has 'dims' and 'data' attributes, convert
it into a new `Variable`.
    - If all else fails, attempt to convert the object into a `Variable` by
      unpacking it into the arguments for `Variable.__init__`.
"""
# TODO: consider extending this method to automatically handle Iris and
# pandas objects.
if strict and hasattr(obj, 'variable'):
# extract the primary Variable from DataArrays
obj = obj.variable
if not isinstance(obj, (Variable, xray.DataArray)):
if hasattr(obj, 'dims') and (hasattr(obj, 'data') or
hasattr(obj, 'values')):
obj = Variable(obj.dims, getattr(obj, 'data', obj.values),
getattr(obj, 'attrs', None),
getattr(obj, 'encoding', None))
elif isinstance(obj, tuple):
try:
obj = Variable(*obj)
except TypeError:
raise TypeError('cannot convert argument into an Variable')
elif utils.is_scalar(obj):
obj = Variable([], obj)
elif getattr(obj, 'name', None) is not None:
obj = Variable(obj.name, obj)
elif key is not None:
obj = Variable(key, obj)
else:
raise TypeError('cannot infer Variable dimensions')
return obj
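# Hedged usage sketch (not part of the original source): a few of the
# conversion rules documented above, written against this (historical) xray
# API.
#
#   as_variable(Variable(('x',), [1, 2, 3]))     # returned unchanged
#   as_variable((('x',), [1, 2, 3]))             # tuple unpacked via Variable(*obj)
#   as_variable(pd.Series([1, 2], name='y'))     # uses obj.name as the dimension
#   as_variable([1, 2, 3], key='z')              # falls back to the provided key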
def _maybe_wrap_data(data):
"""
Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure
they can be indexed properly.
NumpyArrayAdapter, PandasIndexAdapter and LazilyIndexedArray should
all pass through unmodified.
"""
if isinstance(data, pd.Index):
return PandasIndexAdapter(data)
return data
def _as_compatible_data(data, fastpath=False):
"""Prepare and wrap data to put in a Variable.
- If data does not have the necessary attributes, convert it to ndarray.
- If data has dtype=datetime64, ensure that it has ns precision. If it's a
pandas.Timestamp, convert it to datetime64.
- If data is already a pandas or xray object (other than an Index), just
use the values.
Finally, wrap it up with an adapter if necessary.
"""
if fastpath and getattr(data, 'ndim', 0) > 0:
# can't use fastpath (yet) for scalars
return _maybe_wrap_data(data)
# add a custom fast-path for dask.array to avoid expensive checks for the
# dtype attribute
if isinstance(data, dask_array_type):
return data
if isinstance(data, pd.Index):
if isinstance(data, pd.MultiIndex):
raise NotImplementedError(
'no support yet for using a pandas.MultiIndex in an '
'xray.Coordinate')
return _maybe_wrap_data(data)
if isinstance(data, pd.Timestamp):
# TODO: convert, handle datetime objects, too
data = np.datetime64(data.value, 'ns')
if isinstance(data, timedelta):
data = np.timedelta64(getattr(data, 'value', data), 'ns')
if (not hasattr(data, 'dtype') or not hasattr(data, 'shape') or
isinstance(data, (np.string_, np.unicode_,
np.datetime64, np.timedelta64))):
# data must be ndarray-like
data = np.asarray(data)
# we don't want nested self-described arrays
data = getattr(data, 'values', data)
if isinstance(data, np.ma.MaskedArray):
mask = np.ma.getmaskarray(data)
if mask.any():
dtype, fill_value = common._maybe_promote(data.dtype)
data = np.asarray(data, dtype=dtype)
data[mask] = fill_value
else:
data = np.asarray(data)
if isinstance(data, np.ndarray):
if data.dtype.kind == 'O':
data = common._possibly_convert_objects(data)
elif data.dtype.kind == 'M':
data = np.asarray(data, 'datetime64[ns]')
elif data.dtype.kind == 'm':
data = np.asarray(data, 'timedelta64[ns]')
return _maybe_wrap_data(data)
def _as_array_or_item(data):
"""Return the given values as a numpy array, or as an individual item if
it's a 0-dimensional object array or datetime64.
Importantly, this function does not copy data if it is already an ndarray -
otherwise, it will not be possible to update Variable values in place.
"""
data = np.asarray(data)
if data.ndim == 0:
if data.dtype.kind == 'O':
# unpack 0d object arrays to be consistent with numpy
data = data.item()
elif data.dtype.kind == 'M':
# convert to a np.datetime64 object, because 0-dimensional ndarrays
# with dtype=datetime64 are broken :(
data = np.datetime64(data, 'ns')
elif data.dtype.kind == 'm':
data = np.timedelta64(data, 'ns')
return data
class Variable(common.AbstractArray, utils.NdimSizeLenMixin):
"""A netcdf-like variable consisting of dimensions, data and attributes
which describe a single Array. A single Variable object is not fully
described outside the context of its parent Dataset (if you want such a
fully described object, use a DataArray instead).
The main functional difference between Variables and numpy arrays is that
numerical operations on Variables implement array broadcasting by dimension
    name. For example, adding a Variable with dimensions `('time',)` to
another Variable with dimensions `('space',)` results in a new Variable
with dimensions `('time', 'space')`. Furthermore, numpy reduce operations
like ``mean`` or ``sum`` are overwritten to take a "dimension" argument
instead of an "axis".
Variables are light-weight objects used as the building block for datasets.
They are more primitive objects, so operations with them provide marginally
higher performance than using DataArrays. However, manipulating data in the
form of a Dataset or DataArray should almost always be preferred, because
they can use more complete metadata in context of coordinate labels.
"""
def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
"""
Parameters
----------
dims : str or sequence of str
            Name(s) of the data dimension(s). Must be either a string (only
for 1D data) or a sequence of strings with length equal to the
number of dimensions.
data : array_like
Data array which supports numpy-like data access.
attrs : dict_like or None, optional
Attributes to assign to the new variable. If None (default), an
empty attribute dictionary is initialized.
encoding : dict_like or None, optional
Dictionary specifying how to encode this array's data into a
serialized format like netCDF4. Currently used keys (for netCDF)
include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'.
            Well-behaved code to serialize a Variable should ignore
unrecognized encoding items.
"""
self._data = _as_compatible_data(data, fastpath=fastpath)
self._dims = self._parse_dimensions(dims)
self._attrs = None
self._encoding = None
if attrs is not None:
self.attrs = attrs
if encoding is not None:
self.encoding = encoding
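    # Hedged usage sketch (not part of the original source): construction plus
    # the broadcasting-by-dimension-name behaviour described in the class
    # docstring.
    #
    #   v = Variable(('time',), np.arange(3), attrs={'units': 'seconds'})
    #   w = Variable(('space',), np.array([10., 20.]))
    #   (v + w).dims    # -> ('time', 'space'); values broadcast to shape (3, 2)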
@property
def dtype(self):
return self._data.dtype
@property
def shape(self):
return self._data.shape
@property
def nbytes(self):
return self.size * self.dtype.itemsize
@property
def _in_memory(self):
return isinstance(self._data, (np.ndarray, PandasIndexAdapter))
@property
def data(self):
if isinstance(self._data, dask_array_type):
return self._data
else:
return self.values
@data.setter
def data(self, data):
data = _as_compatible_data(data)
if data.shape != self.shape:
raise ValueError(
"replacement data must match the Variable's shape")
self._data = data
def _data_cached(self):
if not isinstance(self._data, np.ndarray):
self._data = np.asarray(self._data)
return self._data
@property
def _indexable_data(self):
return orthogonally_indexable(self._data)
def load(self):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return this variable.
Normally, it should not be necessary to call this method in user code,
because all xray functions should either work on deferred data or
load data automatically.
"""
self._data_cached()
return self
def load_data(self): # pragma: no cover
warnings.warn('the Variable method `load_data` has been deprecated; '
'use `load` instead',
FutureWarning, stacklevel=2)
return self.load()
def __getstate__(self):
"""Always cache data as an in-memory array before pickling"""
self._data_cached()
# self.__dict__ is the default pickle object, we don't need to
# implement our own __setstate__ method to make pickle work
return self.__dict__
@property
def values(self):
"""The variable's data as a numpy.ndarray"""
return _as_array_or_item(self._data_cached())
@values.setter
def values(self, values):
self.data = values
def to_variable(self):
"""Return this variable as a base xray.Variable"""
return Variable(self.dims, self._data, self._attrs,
encoding=self._encoding, fastpath=True)
def to_coord(self):
"""Return this variable as an xray.Coordinate"""
return Coordinate(self.dims, self._data, self._attrs,
encoding=self._encoding, fastpath=True)
def to_index(self):
"""Convert this variable to a pandas.Index"""
return self.to_coord().to_index()
@property
def dims(self):
"""Tuple of dimension names with which this variable is associated.
"""
return self._dims
def _parse_dimensions(self, dims):
if isinstance(dims, basestring):
dims = (dims,)
dims = tuple(dims)
if len(dims) != self.ndim:
raise ValueError('dimensions %s must have the same length as the '
'number of data dimensions, ndim=%s'
% (dims, self.ndim))
return dims
@dims.setter
def dims(self, value):
self._dims = self._parse_dimensions(value)
def _item_key_to_tuple(self, key):
if utils.is_dict_like(key):
return tuple(key.get(dim, slice(None)) for dim in self.dims)
else:
return key
def __getitem__(self, key):
"""Return a new Array object whose contents are consistent with
getting the provided key from the underlying data.
NB. __getitem__ and __setitem__ implement "orthogonal indexing" like
netCDF4-python, where the key can only include integers, slices
(including `Ellipsis`) and 1d arrays, each of which are applied
orthogonally along their respective dimensions.
The difference does not matter in most cases unless you are using
numpy's "fancy indexing," which can otherwise result in data arrays
whose shapes are inconsistent with (or simply uninterpretable in terms
of) the variable's dimensions.
If you really want to do indexing like `x[x > 0]`, manipulate the numpy
array `x.values` directly.
"""
key = self._item_key_to_tuple(key)
key = indexing.expanded_indexer(key, self.ndim)
dims = tuple(dim for k, dim in zip(key, self.dims)
if not isinstance(k, (int, np.integer)))
values = self._indexable_data[key]
# orthogonal indexing should ensure the dimensionality is consistent
if hasattr(values, 'ndim'):
assert values.ndim == len(dims), (values.ndim, len(dims))
else:
assert len(dims) == 0, len(dims)
return type(self)(dims, values, self._attrs, self._encoding,
fastpath=True)
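# Editor's illustration, not part of the original module: a minimal sketch of the
# orthogonal indexing described in __getitem__ above, assuming numpy is available as np.
#
#     v = Variable(('x', 'y'), np.arange(12).reshape(3, 4))
#     v[[0, 2], [1, 3]].shape      # (2, 2): each array indexes its own dimension
#     v.values[[0, 2], [1, 3]]     # shape (2,): numpy's pointwise "fancy" indexing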
def __setitem__(self, key, value):
"""__setitem__ is overloaded to access the underlying numpy values with
orthogonal indexing.
See __getitem__ for more details.
"""
key = self._item_key_to_tuple(key)
if isinstance(self._data, dask_array_type):
raise TypeError("this variable's data is stored in a dask array, "
'which does not support item assignment. To '
'assign to this variable, you must first load it '
'into memory explicitly using the .load_data() '
'method or accessing its .values attribute.')
data = orthogonally_indexable(self._data_cached())
data[key] = value
@property
def attrs(self):
"""Dictionary of local attributes on this variable.
"""
if self._attrs is None:
self._attrs = OrderedDict()
return self._attrs
@attrs.setter
def attrs(self, value):
self._attrs = OrderedDict(value)
@property
def encoding(self):
"""Dictionary of encodings on this variable.
"""
if self._encoding is None:
self._encoding = {}
return self._encoding
@encoding.setter
def encoding(self, value):
try:
self._encoding = dict(value)
except ValueError:
raise ValueError('encoding must be castable to a dictionary')
def copy(self, deep=True):
"""Returns a copy of this object.
If `deep=True`, the data array is loaded into memory and copied onto
the new object. Dimensions, attributes and encodings are always copied.
"""
data = self.values.copy() if deep else self._data
# note:
# dims is already an immutable tuple
# attributes and encoding will be copied when the new Array is created
return type(self)(self.dims, data, self._attrs, self._encoding,
fastpath=True)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
# mutable objects should not be hashable
__hash__ = None
@property
def chunks(self):
"""Block dimensions for this array's data or None if it's not a dask
array.
"""
return getattr(self._data, 'chunks', None)
_array_counter = itertools.count()
def chunk(self, chunks=None, name=None, lock=False):
"""Coerce this array's data into a dask arrays with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
If neither chunks is not provided for one or more dimensions, chunk
sizes along that dimension will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``.
name : str, optional
Used to generate the name for this array in the internal dask
graph. Does not need to be unique.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
already a dask array.
Returns
-------
chunked : xray.Variable
"""
import dask.array as da
if utils.is_dict_like(chunks):
chunks = dict((self.get_axis_num(dim), chunk)
for dim, chunk in chunks.items())
if chunks is None:
chunks = self.chunks or self.shape
data = self._data
if isinstance(data, da.Array):
data = data.rechunk(chunks)
else:
if utils.is_dict_like(chunks):
chunks = tuple(chunks.get(n, s)
for n, s in enumerate(self.shape))
data = da.from_array(data, chunks, name=name, lock=lock)
return type(self)(self.dims, data, self._attrs, self._encoding,
fastpath=True)
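# Editor's illustration (hedged sketch, assumes dask is installed): chunking leaves
# unmentioned dimensions as a single block, as described in the docstring above.
#
#     v = Variable(('x', 'y'), np.zeros((100, 100)))
#     v.chunk({'x': 25}).chunks    # ((25, 25, 25, 25), (100,))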
def isel(self, **indexers):
"""Return a new array indexed along the specified dimension(s).
Parameters
----------
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by integers, slice objects or arrays.
Returns
-------
obj : Array object
A new Array with the selected data and dimensions. In general,
the new variable's data will be a view of this variable's data,
unless numpy fancy indexing was triggered by using an array
indexer, in which case the data will be a copy.
"""
invalid = [k for k in indexers if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
key = [slice(None)] * self.ndim
for i, dim in enumerate(self.dims):
if dim in indexers:
key[i] = indexers[dim]
return self[tuple(key)]
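# Editor's illustration of isel, which simply builds the positional key used by
# __getitem__ above:
#
#     v = Variable(('x', 'y'), np.arange(12).reshape(3, 4))
#     v.isel(x=0).dims             # ('y',): integer indexers drop their dimension
#     v.isel(y=slice(2)).shape     # (3, 2)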
def transpose(self, *dims):
"""Return a new Variable object with transposed dimensions.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
Returns
-------
transposed : Variable
The returned object has transposed data and dimensions with the
same attributes as the original.
Notes
-----
Although this operation returns a view of this variable's data, it is
not lazy -- the data will be fully loaded.
See Also
--------
numpy.transpose
"""
if len(dims) == 0:
dims = self.dims[::-1]
axes = self.get_axis_num(dims)
data = ops.transpose(self.data, axes)
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
def squeeze(self, dim=None):
"""Return a new Variable object with squeezed data.
Parameters
----------
dim : None or str or tuple of str, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
Returns
-------
squeezed : Variable
This array, but with all or a subset of the dimensions of
length 1 removed.
Notes
-----
Although this operation returns a view of this variable's data, it is
not lazy -- the data will be fully loaded.
See Also
--------
numpy.squeeze
"""
dims = dict(zip(self.dims, self.shape))
return common.squeeze(self, dims, dim)
def expand_dims(self, dims, shape=None):
"""Return a new variable with expanded dimensions.
When possible, this operation does not copy this variable's data.
Parameters
----------
dims : str or sequence of str or dict
Dimensions to include on the new variable. If a dict, values are
used to provide the sizes of new dimensions; otherwise, new
dimensions are inserted with length 1.
Returns
-------
Variable
"""
if isinstance(dims, basestring):
dims = [dims]
if shape is None and utils.is_dict_like(dims):
shape = dims.values()
missing_dims = set(self.dims) - set(dims)
if missing_dims:
raise ValueError('new dimensions must be a superset of existing '
'dimensions')
self_dims = set(self.dims)
expanded_dims = tuple(
d for d in dims if d not in self_dims) + self.dims
if shape is not None:
dims_map = dict(zip(dims, shape))
tmp_shape = [dims_map[d] for d in expanded_dims]
expanded_data = ops.broadcast_to(self.data, tmp_shape)
else:
expanded_data = self.data[
(None,) * (len(expanded_dims) - self.ndim)]
expanded_var = Variable(expanded_dims, expanded_data, self._attrs,
self._encoding, fastpath=True)
return expanded_var.transpose(*dims)
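# Editor's illustration of expand_dims ('time' is a hypothetical new dimension):
#
#     v = Variable(('x',), np.arange(3))
#     v.expand_dims(['time', 'x']).shape    # (1, 3): new dims are inserted with length 1
#     # passing a dict instead, e.g. {'time': 5, 'x': 3}, supplies sizes for the new dims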
def fillna(self, value):
return self._fillna(value)
def where(self, cond):
return self._where(cond)
def reduce(self, func, dim=None, axis=None, keep_attrs=False,
allow_lazy=False, **kwargs):
"""Reduce this array by applying `func` along some dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
the reduction is calculated over the flattened array (by calling
`func(x)` without an axis argument).
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
if dim is not None and axis is not None:
raise ValueError("cannot supply both 'axis' and 'dim' arguments")
if dim is not None:
axis = self.get_axis_num(dim)
data = func(self.data if allow_lazy else self.values,
axis=axis, **kwargs)
removed_axes = (range(self.ndim) if axis is None
else np.atleast_1d(axis) % self.ndim)
dims = [dim for n, dim in enumerate(self.dims)
if n not in removed_axes]
attrs = self._attrs if keep_attrs else None
return Variable(dims, data, attrs=attrs)
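# Editor's illustration of reduce with a plain numpy aggregation:
#
#     v = Variable(('x', 'y'), np.arange(12).reshape(3, 4))
#     v.reduce(np.sum, dim='y').shape    # (3,): the reduced dimension is dropped
#     v.reduce(np.sum).shape             # (): no dim/axis reduces over everything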
@classmethod
def concat(cls, variables, dim='concat_dim', positions=None,
shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Array
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
shortcut : bool, optional
This option is used internally to speed-up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
"""
if not isinstance(dim, basestring):
dim, = dim.dims
# can't do this lazily: we need to loop through variables at least
# twice
variables = list(variables)
first_var = variables[0]
arrays = [v.data for v in variables]
# TODO: use our own type promotion rules to ensure that
# [str, float] -> object, not str like numpy
if dim in first_var.dims:
axis = first_var.get_axis_num(dim)
dims = first_var.dims
if positions is None:
data = ops.concatenate(arrays, axis=axis)
else:
data = ops.interleaved_concat(arrays, positions, axis=axis)
else:
axis = 0
dims = (dim,) + first_var.dims
data = ops.stack(arrays, axis=axis)
attrs = OrderedDict(first_var.attrs)
if not shortcut:
for var in variables:
if var.dims != first_var.dims:
raise ValueError('inconsistent dimensions')
utils.remove_incompatible_items(attrs, var.attrs)
return cls(dims, data, attrs)
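# Editor's illustration of concat along a new versus an existing dimension
# ('run' is a hypothetical dimension name):
#
#     a = Variable(('x',), np.array([1, 2]))
#     b = Variable(('x',), np.array([3, 4]))
#     Variable.concat([a, b], dim='run').dims    # ('run', 'x'), shape (2, 2)
#     Variable.concat([a, b], dim='x').shape     # (4,): the existing dim grows instead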
def _data_equals(self, other):
return (self._data is other._data or
ops.array_equiv(self.data, other.data))
def equals(self, other):
"""True if two Variables have the same dimensions and values;
otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for Variables
does element-wise comparisons (like numpy.ndarrays).
"""
other = getattr(other, 'variable', other)
try:
return (self.dims == other.dims and self._data_equals(other))
except (TypeError, AttributeError):
return False
def broadcast_equals(self, other):
"""True if two Variables have the values after being broadcast against
each other; otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
"""
try:
self, other = broadcast_variables(self, other)
except (ValueError, AttributeError):
return False
return self.equals(other)
def identical(self, other):
"""Like equals, but also checks attributes.
"""
try:
return (utils.dict_equiv(self.attrs, other.attrs) and
self.equals(other))
except (TypeError, AttributeError):
return False
@property
def real(self):
return type(self)(self.dims, self.data.real, self._attrs)
@property
def imag(self):
return type(self)(self.dims, self.data.imag, self._attrs)
def __array_wrap__(self, obj, context=None):
return Variable(self.dims, obj)
@staticmethod
def _unary_op(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
return self.__array_wrap__(f(self.data, *args, **kwargs))
return func
@staticmethod
def _binary_op(f, reflexive=False, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
if isinstance(other, (xray.DataArray, xray.Dataset)):
return NotImplemented
self_data, other_data, dims = _broadcast_compat_data(self, other)
new_data = (f(self_data, other_data)
if not reflexive
else f(other_data, self_data))
result = Variable(dims, new_data)
return result
return func
@staticmethod
def _inplace_binary_op(f):
@functools.wraps(f)
def func(self, other):
if isinstance(other, xray.Dataset):
raise TypeError('cannot add a Dataset to a Variable in-place')
self_data, other_data, dims = _broadcast_compat_data(self, other)
if dims != self.dims:
raise ValueError('dimensions cannot change for in-place '
'operations')
self.values = f(self_data, other_data)
return self
return func
ops.inject_all_ops_and_reduce_methods(Variable)
class Coordinate(Variable):
"""Wrapper around pandas.Index that adds xray specific functionality.
The most important difference is that Coordinate objects must always have a
name, which is the dimension along which they index values.
Coordinates must always be 1-dimensional. In addition to Variable methods
and properties (attributes, encoding, broadcasting), they support some
pandas.Index methods directly (e.g., get_indexer), even though pandas does
not (yet) support duck-typing for indexes.
"""
def __init__(self, name, data, attrs=None, encoding=None, fastpath=False):
super(Coordinate, self).__init__(name, data, attrs, encoding, fastpath)
if self.ndim != 1:
raise ValueError('%s objects must be 1-dimensional' %
type(self).__name__)
def _data_cached(self):
if not isinstance(self._data, PandasIndexAdapter):
self._data = PandasIndexAdapter(self._data)
return self._data
def __getitem__(self, key):
key = self._item_key_to_tuple(key)
values = self._indexable_data[key]
if not hasattr(values, 'ndim') or values.ndim == 0:
return Variable((), values, self._attrs, self._encoding)
else:
return type(self)(self.dims, values, self._attrs, self._encoding,
fastpath=True)
def __setitem__(self, key, value):
raise TypeError('%s values cannot be modified' % type(self).__name__)
def copy(self, deep=True):
"""Returns a copy of this object.
If `deep=True`, the values array is loaded into memory and copied onto
the new object. Dimensions, attributes and encodings are always copied.
"""
# there is no need to copy the index values here even if deep=True
# since pandas.Index objects are immutable
data = PandasIndexAdapter(self) if deep else self._data
return type(self)(self.dims, data, self._attrs, self._encoding,
fastpath=True)
def _data_equals(self, other):
return self.to_index().equals(other.to_index())
def to_coord(self):
"""Return this variable as an xray.Coordinate"""
return self
def to_index(self):
"""Convert this variable to a pandas.Index"""
# n.b. creating a new pandas.Index from an old pandas.Index is
# basically free as pandas.Index objects are immutable
assert self.ndim == 1
return pd.Index(self._data_cached().array, name=self.dims[0])
# pandas.Index like properties:
@property
def name(self):
return self.dims[0]
@name.setter
def name(self, value):
raise AttributeError('cannot modify name of Coordinate in-place')
def get_indexer(self, label):
return self.to_index().get_indexer(label)
def slice_indexer(self, start=None, stop=None, step=None):
return self.to_index().slice_indexer(start, stop, step)
def slice_locs(self, start=None, stop=None):
return self.to_index().slice_locs(start, stop)
def get_loc(self, label):
return self.to_index().get_loc(label)
@property
def is_monotonic(self):
return self.to_index().is_monotonic
def is_numeric(self):
return self.to_index().is_numeric()
def _unified_dims(variables):
# validate dimensions
all_dims = OrderedDict()
for var in variables:
var_dims = var.dims
if len(set(var_dims)) < len(var_dims):
raise ValueError('broadcasting cannot handle duplicate '
'dimensions: %r' % list(var_dims))
for d, s in zip(var_dims, var.shape):
if d not in all_dims:
all_dims[d] = s
elif all_dims[d] != s:
raise ValueError('operands cannot be broadcast together '
'with mismatched lengths for dimension %r: %s'
% (d, (all_dims[d], s)))
return all_dims
def _broadcast_compat_variables(*variables):
dims = tuple(_unified_dims(variables))
return tuple(var.expand_dims(dims) if var.dims != dims else var
for var in variables)
def broadcast_variables(*variables):
"""Given any number of variables, return variables with matching dimensions
and broadcast data.
The data on the returned variables will be a view of the data on the
corresponding original arrays, but dimensions will be reordered and
inserted so that both broadcast arrays have the same dimensions. The new
dimensions are sorted in order of appearance in the first variable's
dimensions followed by the second variable's dimensions.
"""
dims_map = _unified_dims(variables)
dims_tuple = tuple(dims_map)
return tuple(var.expand_dims(dims_map) if var.dims != dims_tuple else var
for var in variables)
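# Editor's illustration of broadcast_variables (hedged sketch):
#
#     a = Variable(('x',), np.arange(3))
#     b = Variable(('y',), np.arange(4))
#     a2, b2 = broadcast_variables(a, b)
#     a2.dims, a2.shape    # ('x', 'y'), (3, 4): dims ordered by first appearance
#     b2.dims, b2.shape    # ('x', 'y'), (3, 4)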
def _broadcast_compat_data(self, other):
if all(hasattr(other, attr) for attr
in ['dims', 'data', 'shape', 'encoding']):
# `other` satisfies the necessary Variable API for broadcast_variables
new_self, new_other = _broadcast_compat_variables(self, other)
self_data = new_self.data
other_data = new_other.data
dims = new_self.dims
else:
# rely on numpy broadcasting rules
self_data = self.data
other_data = other
dims = self.dims
return self_data, other_data, dims
|
{
"content_hash": "712ca6278195320d4333482d7a33110a",
"timestamp": "",
"source": "github",
"line_count": 951,
"max_line_length": 81,
"avg_line_length": 36.840168243953734,
"alnum_prop": 0.6000285428856857,
"repo_name": "petercable/xray",
"id": "af600ab0fe64bb525358e7461d8a952c9320aadd",
"size": "35035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xray/core/variable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "767771"
}
],
"symlink_target": ""
}
|
"""
Network resource backend.
"""
# W0613:unused arguments,R0201:mth could be func,R0903:too few pub mthd.
# W0232:no init
# pylint: disable=W0613,R0201,R0903,W0232
from occi import backend
from occi_os_api.extensions import os_addon
from occi_os_api.nova_glue import net
class NetworkBackend(backend.KindBackend, backend.ActionBackend):
"""
Backend to handle network resources.
"""
def create(self, entity, extras):
"""
Currently unsupported.
"""
raise AttributeError('Currently not supported.')
def action(self, entity, action, attributes, extras):
"""
Currently unsupported.
"""
raise AttributeError('Currently not supported.')
class IpNetworkBackend(backend.MixinBackend):
"""
A mixin backend for IP networking.
"""
def create(self, entity, extras):
"""
Currently unsupported.
"""
raise AttributeError('Currently not supported.')
class IpNetworkInterfaceBackend(backend.MixinBackend):
"""
A mixin backend for the IpNetworkingInterface (covered by
NetworkInterfaceBackend).
"""
pass
class NetworkInterfaceBackend(backend.KindBackend):
"""
A backend for network links.
"""
def create(self, link, extras):
"""
As nova does not support creation of L2 networks, neither do we.
"""
if link.target.identifier == '/network/public':
# public means floating IP in OS!
# if the os_net_link mixin is avail. a pool must be provided:
if 'org.openstack.network.floating.pool' not in link.attributes\
and os_addon.OS_NET_LINK in link.mixins:
raise AttributeError('Please specify the pool name when using'
' this mixin!')
elif os_addon.OS_NET_LINK in link.mixins:
pool = link.attributes['org.openstack.network.floating.pool']
else:
pool = None
address = net.add_floating_ip(link.source.attributes['occi.'
'core.id'],
pool,
extras['nova_ctx'])
link.attributes['occi.networkinterface.interface'] = 'eth0'
link.attributes['occi.networkinterface.mac'] = 'aa:bb:cc:dd:ee:ff'
link.attributes['occi.networkinterface.state'] = 'active'
link.attributes['occi.networkinterface.address'] = address
link.attributes['occi.networkinterface.gateway'] = '0.0.0.0'
link.attributes['occi.networkinterface.allocation'] = 'static'
else:
raise AttributeError('Currently not supported.')
def update(self, old, new, extras):
"""
Allows for the update of network links.
"""
raise AttributeError('Currently not supported.')
def delete(self, link, extras):
"""
Remove a floating ip!
"""
if link.target.identifier == '/network/public':
# public means floating IP in OS!
net.remove_floating_ip(link.source.attributes['occi.core.id'],
link.attributes['occi.networkinterface.'
'address'],
extras['nova_ctx'])
|
{
"content_hash": "8682a0b33db07ec6b97f4912cea962f6",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 78,
"avg_line_length": 32.264150943396224,
"alnum_prop": 0.5681286549707603,
"repo_name": "EGI-FCTF/occi-os",
"id": "733d1a5a850c547ec8187ae06d6bad4e17cb64f8",
"size": "4124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "occi_os_api/backends/network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "127592"
},
{
"name": "Shell",
"bytes": "1564"
}
],
"symlink_target": ""
}
|
from Queue import Queue
from contextlib import contextmanager
class ObjectPool(object):
"""
TODO: implement spin-down of unused objects
"""
class Remove(Exception):
pass
def __init__(self, obj, size=None, maxsize=None, *args, **kwargs):
self.queue = Queue(maxsize = maxsize)
self.maxsize = maxsize
self.size = size
self.obj = obj
self.args = args
self.kwargs = kwargs
[self.queue.put(obj(*self.args, **self.kwargs)) for i in range(size)]
self.cursize = size
@contextmanager
def get(self, block=True, timeout=None):
# acquire outside the try block so a failed _get never leaves obj unbound
obj = self._get(block, timeout)
try:
yield obj
except self.Remove:
self.remove(obj)
except Exception as e:
self.release(obj)
raise e
else:
self.release(obj)
def remove(self, obj):
self.release(self.obj(*self.args, **self.kwargs))
def _get(self, block, timeout):
if self.queue.empty() and self.cursize < self.maxsize:
self.cursize += 1
self.queue.put(self.obj(*self.args, **self.kwargs))
return self.queue.get(block=block, timeout=timeout)
def release(self, obj):
assert isinstance(obj, self.obj)
self.queue.put(obj)
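# Editor's illustration, not part of the original module: a minimal usage sketch of
# ObjectPool with its context-manager API. `Conn` below is a hypothetical stand-in
# for an expensive-to-create resource.
#
#     pool = ObjectPool(Conn, size=2, maxsize=4)
#     with pool.get() as conn:
#         ...                      # use the pooled object
#     # raising ObjectPool.Remove inside the block discards the object and a
#     # freshly constructed replacement is put back into the pool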
|
{
"content_hash": "8487d51c23076b136cc49dfb1677e0f3",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 77,
"avg_line_length": 27.25,
"alnum_prop": 0.581039755351682,
"repo_name": "timcherry/protobuf-rpc",
"id": "f2912798aed8561a6d8111af874f7115d436b29d",
"size": "1308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "protobuf_rpc/pool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26517"
}
],
"symlink_target": ""
}
|
import imp
import os
import inspect
import logging
class YAS3FSPlugin (object):
@staticmethod
def load_from_file(yas3fs, filepath, expected_class = None):
class_inst = None
try:
mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1])
if file_ext.lower() == '.py':
py_mod = imp.load_source(mod_name, filepath)
elif file_ext.lower() == '.pyc':
py_mod = imp.load_compiled(mod_name, filepath)
else:
raise Exception("unsupported plugin file extension: " + file_ext)
if not py_mod:
raise Exception("could not load plugin module from " + filepath)
for klass in inspect.getmembers(py_mod,inspect.isclass):
if not issubclass(klass[1], YAS3FSPlugin):
continue
if expected_class == None or expected_class == klass[0]:
class_inst = klass[1](yas3fs)
break
except Exception, e:
raise Exception("cannot load plugin file " + filepath + ": " + str(e))
if not class_inst:
raise Exception("cannot load plugin class " + str(expected_class) + " from " + filepath)
return class_inst
@staticmethod
def load_from_class(yas3fs, expected_class):
try:
module_name = 'yas3fs.' + expected_class
# i = imp.find_module(module_name)
module = __import__(module_name)
klass = getattr(module.__dict__[expected_class], expected_class)
class_inst = klass(yas3fs)
return class_inst
except Exception, e:
print str(e)
raise Exception("cannot load plugin class " + expected_class + " " + str(e))
def __init__(self, yas3fs, logger=None):
self.logger = logger
if (not self.logger):
self.logger = logging.getLogger('yas3fsPlugin')
def do_cmd_on_s3_now_w_retries(self, fn):
# self, key, pub, action, args, kargs, retries = 1
def wrapper(*args, **kargs):
try:
return fn(*args, **kargs)
except Exception as e:
selfless_args = None
if args[1]:
selfless_args = args[1:]
self.logger.info("PLUGIN do_cmd_on_s3_now_w_retries FAILED" + " " + str(selfless_args))
return args[2] #????
return wrapper
|
{
"content_hash": "290c4cac01bd099f66ab9c71d66151cc",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 91,
"avg_line_length": 26.464788732394368,
"alnum_prop": 0.6546035125066525,
"repo_name": "padde/yas3fs",
"id": "0f6e2f81c2c57364361fac9e740da764024bd5fe",
"size": "1899",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "yas3fs/YAS3FSPlugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "153374"
}
],
"symlink_target": ""
}
|
"""
A script to import initial data from ProEco into Theros
"""
import logging
import argparse
import sys
import re
import hashlib
import os
parser=argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-w","--works", help="the csv file containing the raw works export from ProEco", metavar="CSV_FILE" , default="travaux.csv", dest="worksFile")
parser.add_argument("-s","--subjects", help="the csv file containing the subjects export from ProEco", metavar="CSV_FILE" , default="cours.csv", dest="subjectsFile")
parser.add_argument("--teachers", help="the csv file containing the teachers export from ProEco", metavar="CSV_FILE" , default="profs.csv", dest="teachersFile")
parser.add_argument("-v","--verbose", action="store_const", dest="logging", const=logging.DEBUG, default=logging.INFO, help="show debug logs")
parser.add_argument("--dsn", help="the dsn to use for db operations", action="store", dest="dsn", default="theros_dev")
parser.add_argument("-i", "--insert", help="insert (ignoring duplicates) data into database (see --dsn)", action="store_true", dest="insertData")
parser.add_argument("-y", "--shoolyear", help="specify the current school year", action="store", dest="schoolyear", default="2016-17")
parser.add_argument("-t", "--truncate", help="truncate all tables", action="store_true", dest="truncate")
parser.add_argument("-d", "--drop", help="drop all tables", action="store_true", dest="drop")
parser.add_argument("--no-tutors", help="do not process tutors information", action="store_false", dest="tutors")
args=parser.parse_args()
logging.basicConfig(level=args.logging, stream=sys.stdout, format="%(levelname)7s : %(message)s")
logger=logging.getLogger()
if not args.dsn and args.insertData:
parser.error("missing --dsn specification")
def iterCsv(fname, header=True):
if not os.path.exists(fname):
logger.warn("%s does not exist", fname)
return
with open(fname) as fh:
for line in fh:
if header:
header=False
continue
line=line.decode("utf8")
yield map(lambda s:s.strip(), line.split("\t"))
worksFile=args.worksFile
works=[]
classes=set()
students={}
compositions=[]
class Work:
def __init__(self, klass, student, desc, line):
self.klass=klass
self.student=student
self.desc=desc
self.line=line
for i,line in enumerate(iterCsv(worksFile)):
klass,student, dummy, foo, desc, tutor, address, zipCode, city = line[:9]
klass=klass.replace(" ","").upper()
if not re.search(r"^\d[A-Z]+$", klass):
logger.warn("discard line %i of %s : bad class: %s",i+1, worksFile, klass)
continue
student=student.replace(" "," ")
classes.add(klass)
students[student]=(student, tutor, address, zipCode, city)
compositions.append(Work(klass, student, desc, i+1))
if desc:
works.append(compositions[-1])
logger.debug("keep %s", works[-1])
else:
logger.debug("discarded line %s", line)
logger.info("got %i works, %i students, %i classes", len(works), len(students), len(classes))
subjects={}
for i,line in enumerate(iterCsv(args.subjectsFile, False)):
logging.debug("%s: %s", i, line)
code=line[2].strip().upper()
desc=line[5]
subjects[code]=desc
logger.info("got %i subjects", len(subjects))
teachings=set()
teachers={}
for i,line in enumerate(iterCsv(args.teachersFile)):
first,last,dob = line[:3]
subject=line[-1].strip().upper()
klass=line[-3].replace(" ","").upper()
name=first+" "+last
teachers[name]=hashlib.md5(dob).hexdigest()
localClasses=[ c for c in classes if c.startswith(klass)]
logger.debug("%s teaches %s in %s (%i match, %s)", name, subject, klass, len(localClasses), subject in subjects)
if not klass:
logger.warn("%s teaches %s in no class", name, subject)
elif subject not in subjects:
logger.warn("%s teaches %s in %s : unknown subject", name, subject, klass)
elif len(localClasses) == 0:
logger.warn("%s teaches %s in %s : unknown class", name, subject, klass)
else:
for c in localClasses:
teachings.add((name, subject, c))
logger.info("got %i teachers and %i teachings", len(teachers), len(teachings))
teachings=sorted(teachings)
if args.insertData or args.truncate or args.drop:
logging.info("connecting to database")
import pyodbc
conn=pyodbc.connect(dsn=args.dsn)
try:
db=conn.cursor()
if args.truncate or args.drop:
result=db.execute("SHOW TABLES").fetchall()
db.execute("set foreign_key_checks=0")
if args.drop:
logging.info("drop tables")
cmd="DROP TABLE"
else:
logging.info("truncate tables")
cmd="TRUNCATE"
for row in result:
db.execute("%s %s"%(cmd,row[0]))
db.execute("set foreign_key_checks=1")
if args.insertData:
logging.info("inserting students")
students=sorted(students.values(), key=lambda t:t[0])
if args.tutors:
params=[s+s[1:] for s in students]
db.executemany("""
INSERT INTO student(st_name, st_tutor, st_address, st_zip, st_city)
VALUES (?,?,?,?,?)
ON DUPLICATE KEY UPDATE st_tutor = ?, st_address = ?, st_zip = ?, st_city = ?
""", params)
else:
params=[(s[0],) for s in students]
db.executemany("INSERT IGNORE INTO student(st_name) VALUES (?)", params)
logging.info("inserting classes")
params=[(c,) for c in sorted(classes)]
db.executemany("INSERT IGNORE INTO class(cl_desc) VALUES (?)", params)
logging.info("inserting classes compositions")
db.execute("INSERT IGNORE INTO schoolyear(sy_desc) VALUES (?)", (args.schoolyear,))
schoolyear = db.execute("SELECT sy_id FROM schoolyear WHERE sy_desc = ?", (args.schoolyear,)).fetchone().sy_id
classes=db.execute("SELECT * FROM class").fetchall()
classes=dict([(c.cl_desc, c.cl_id) for c in classes])
students=db.execute("SELECT * FROM student").fetchall()
students=dict([(s.st_name, s.st_id) for s in students])
db.executemany("INSERT IGNORE INTO student_class(sc_st_id, sc_cl_id, sc_sy_id) VALUES (?,?,?)",[
(students[c.student], classes[c.klass], schoolyear) for c in compositions
])
logging.info("inserting works")
query = "INSERT IGNORE INTO raw_data(rd_st_id, rd_cl_id, rd_desc, rd_sy_id) VALUES (?,?,?,?)"
params=[]
for w in works:
if w.klass not in classes:
logger.error("no such class: %s for work at line %i",w.klass, w.line)
continue
if w.student not in students:
logger.error("no such student: %s for work at line %i",w.student, w.line)
continue
classId = classes[w.klass]
studentId = students[w.student]
params.append((studentId, classId, w.desc, schoolyear))
if len(params):
db.executemany(query, params)
else:
logger.warn("no work inserted")
if len(subjects):
logging.info("inserting subjects")
params=sorted(subjects.items())
db.executemany("INSERT IGNORE INTO subject(sub_code, sub_desc) VALUES (?,?)", params)
else:
logging.info("no subjects to insert")
if teachers:
logging.info("inserting teachers")
params=sorted(teachers.items())
params=[ (name, password, password) for name,password in params]
db.executemany("INSERT IGNORE INTO teacher(tea_fullname, tea_password, tea_pwd_changed) VALUES (?,?, 1) ON DUPLICATE KEY UPDATE tea_password = ?, tea_pwd_changed = 1", params)
else:
logging.info("no teachers to insert")
if teachings:
logging.info("inserting teachings")
subjects=dict([ (r.sub_code, r.sub_id) for r in db.execute("SELECT * FROM subject").fetchall() ])
teachers=dict([ (r.tea_fullname, r.tea_id) for r in db.execute("SELECT * FROM teacher").fetchall() ])
params=[ (teachers[name], subjects[code], schoolyear, classes[klass]) for name, code, klass in teachings ]
db.executemany("INSERT IGNORE INTO teacher_subject(ts_tea_id, ts_sub_id, ts_sy_id, ts_cl_id) VALUES (?,?,?,?)", params)
else:
logging.info("no teachings to insert")
conn.commit()
finally:
conn.close()
|
{
"content_hash": "860c9eae096dcee17a81bee319d0e38e",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 191,
"avg_line_length": 45.27918781725889,
"alnum_prop": 0.6045964125560538,
"repo_name": "doc212/theros",
"id": "e99c6476d9928d1743a022d20ec60012629036cf",
"size": "8942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "import/import.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "3607"
},
{
"name": "CSS",
"bytes": "5433"
},
{
"name": "HTML",
"bytes": "24256"
},
{
"name": "JavaScript",
"bytes": "39354"
},
{
"name": "PHP",
"bytes": "99976"
},
{
"name": "Python",
"bytes": "10766"
},
{
"name": "Shell",
"bytes": "518"
}
],
"symlink_target": ""
}
|
import unittest
import os
from unittest import TextTestResult
from django.test.runner import DiscoverRunner
from django.db import NotSupportedError
from djangae.utils import find_project_root
from google.appengine.ext import testbed
# Many Django tests require saving instances with a PK
# of zero. App Engine doesn't allow this (it treats the key
# as incomplete in this case) so we skip those tests here
DJANGO_TESTS_WHICH_REQUIRE_ZERO_PKS = {
'model_forms.tests.ModelMultipleChoiceFieldTests.test_model_multiple_choice_required_false',
'model_forms.tests.ModelChoiceFieldTests.test_modelchoicefield',
'custom_pk.tests.CustomPKTests.test_zero_non_autoincrement_pk',
'bulk_create.tests.BulkCreateTests.test_zero_as_autoval'
}
# These tests only work if you haven't changed AUTH_USER_MODEL
# This is probably a bug in Django (the tests should use skipIfCustomUser)
# but I haven't had a chance to see if it's fixed in master (and it's not fixed in
# 1.7, so this needs to exist either way)
DJANGO_TESTS_WHICH_REQUIRE_AUTH_USER = {
'proxy_models.tests.ProxyModelAdminTests.test_cascade_delete_proxy_model_admin_warning',
'proxy_models.tests.ProxyModelAdminTests.test_delete_str_in_model_admin',
'proxy_models.tests.ProxyModelTests.test_permissions_created' # Requires permissions created
}
DJANGO_TESTS_WHICH_HAVE_BUGS = {
'one_to_one.tests.OneToOneTests.test_foreign_key', # Uses the wrong IDs, fixed in 1.8+
}
# This is potentially fixable by us. sql_with_params returns a tuple of
# our Select/Insert/UpdateCommand, and an empty list (because the params
# are stored in the where tree. Some tests assume that we'll be returning the
# params separately, and so they fail. We could fix this by actually returning the
# values that went into the where, but that's for another day.
DJANGO_TESTS_WHICH_EXPECT_SQL_PARAMS = {
'model_forms.tests.ModelMultipleChoiceFieldTests.test_clean_does_deduplicate_values',
'ordering.tests.OrderingTests.test_order_by_f_expression_duplicates'
}
# Django 1.8 removed the supports_select_related flag, so we have to manually skip
# tests which depend on it
DJANGO_TESTS_WHICH_USE_SELECT_RELATED = {
'defer.tests.DeferTests.test_defer_with_select_related',
'defer.tests.DeferTests.test_defer_select_related_raises_invalid_query',
'defer.tests.DeferTests.test_only_select_related_raises_invalid_query',
'defer.tests.DeferTests.test_only_with_select_related',
'model_inheritance.tests.ModelInheritanceDataTests.test_select_related_works_on_parent_model_fields'
}
DJANGO_TESTS_TO_SKIP = DJANGO_TESTS_WHICH_REQUIRE_ZERO_PKS.union(
DJANGO_TESTS_WHICH_REQUIRE_AUTH_USER).union(
DJANGO_TESTS_WHICH_HAVE_BUGS).union(
DJANGO_TESTS_WHICH_EXPECT_SQL_PARAMS).union(
DJANGO_TESTS_WHICH_USE_SELECT_RELATED
)
def init_testbed():
# We don't initialize the datastore stub here; that needs to be done by Django's create_test_db and destroy_test_db.
IGNORED_STUBS = [ "init_datastore_v3_stub" ]
stub_kwargs = {
"init_taskqueue_stub": {
"root_path": find_project_root()
}
}
bed = testbed.Testbed()
bed.activate()
for init_name in testbed.INIT_STUB_METHOD_NAMES.values():
if init_name in IGNORED_STUBS:
continue
getattr(bed, init_name)(**stub_kwargs.get(init_name, {}))
return bed
def bed_wrap(test):
def _wrapped(*args, **kwargs):
bed = None
try:
# Init test stubs
bed = init_testbed()
return test(*args, **kwargs)
finally:
if bed:
bed.deactivate()
bed = None
return _wrapped
class SkipUnsupportedTestResult(TextTestResult):
def addError(self, test, err):
skip = os.environ.get("SKIP_UNSUPPORTED", True)
if skip and err[0] in (NotSupportedError,):
self.addExpectedFailure(test, err)
else:
super(SkipUnsupportedTestResult, self).addError(test, err)
class DjangaeTestSuiteRunner(DiscoverRunner):
def _discover_additional_tests(self):
"""
Django's DiscoverRunner only detects apps that are below
manage.py, which isn't particularly useful if you have other apps
on the path that need testing (arguably all INSTALLED_APPS should be tested
as they all form part of your project and a bug in them could bring your site down).
This method looks for a setting called DJANGAE_ADDITIONAL_TEST_APPS in your
settings and will add extra test cases found in those apps. By default this
adds the djangae tests to your test run, but you can of course override that
(see the illustration just below).
"""
from django.conf import settings
from importlib import import_module
ADDITIONAL_APPS = getattr(settings, "DJANGAE_ADDITIONAL_TEST_APPS", ("djangae",))
extra_tests = []
for app in ADDITIONAL_APPS:
mod = import_module(app)
if mod:
folder = mod.__path__[0]
new_tests = self.test_loader.discover(start_dir=folder, top_level_dir=os.path.dirname(folder))
extra_tests.extend(new_tests._tests)
self.test_loader._top_level_dir = None
return extra_tests
def build_suite(self, *args, **kwargs):
extra_tests = self._discover_additional_tests()
args = list(args)
args[1] = extra_tests
suite = super(DjangaeTestSuiteRunner, self).build_suite(*args, **kwargs)
new_tests = []
# Django's DiscoverRunner can create duplicate tests when passed the
# extra_tests argument. Getting rid of those:
suite._tests = list(set(suite._tests))
for i, test in enumerate(suite._tests):
# https://docs.djangoproject.com/en/1.7/topics/testing/advanced/#django.test.TransactionTestCase.available_apps
# available_apps is part of an internal API that allows Django to speed up
# its own internal tests, but it breaks the integration with
# Djangae models and tests, so we disable it here
if hasattr(test, 'available_apps'):
test.available_apps = None
if args[0] and not any([test.id().startswith(x) for x in args[0]]):
continue
if test.id() in DJANGO_TESTS_TO_SKIP:
continue #FIXME: It would be better to wrap this in skipTest or something
new_tests.append(bed_wrap(test))
suite._tests[:] = new_tests
return suite
class SkipUnsupportedRunner(DjangaeTestSuiteRunner):
def run_suite(self, suite, **kwargs):
return unittest.TextTestRunner(
verbosity=self.verbosity,
failfast=self.failfast,
resultclass=SkipUnsupportedTestResult
).run(suite)
|
{
"content_hash": "df18d81b38106845370696d36b9d5f9d",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 123,
"avg_line_length": 38.443820224719104,
"alnum_prop": 0.6773345024112232,
"repo_name": "trik/djangae",
"id": "09d0a255b4b93f6885fcdd426e0cc68c6350216f",
"size": "6843",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "djangae/test_runner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "277"
},
{
"name": "Python",
"bytes": "624663"
},
{
"name": "Shell",
"bytes": "368"
}
],
"symlink_target": ""
}
|
"""library to store the fingeprints along with the document info into the DB
"""
__authors__ = [
'"Kailash Budhathoki" <kailash.buki@gmail.com>'
]
import datetime
import errno
import os
import shutil
from pymongo import Connection
STORAGE_DIR = '%s/volumes/predator/files/' % os.environ['PREDATOR_HOME']
def _create_db_handler():
"""Creates a pymongo db handler
Args: None
Returns: None
"""
connection = Connection()
return connection['predator']
def _prepare_path(path):
"""ensures the parent directories for the given path
Args:
path: Absolute path of the destination
Returns:
None
Raises: IOError
"""
head, tail = os.path.split(path)
if len(head) == 0:
return
try:
os.makedirs(head)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def _extract_filename(path):
"""Extracts the file name from the file path
Args:
path: Absolute file path
Returns:
filename
"""
head, tail = os.path.split(path)
if len(head) == 0:
return
return tail
def archive_text_file(text_path):
"""Copies the text file from the temporary location to the archive path
Args:
text_path: The absolute path of the temporary location of the text file
Returns: The relative path of the archived file. For example: '20110112/linux.txt'
"""
today = datetime.date.today()
dirname = datetime.datetime.strftime(today, '%Y%m%d')
src, dst = text_path, STORAGE_DIR + dirname + '/'
_prepare_path(dst)
try:
shutil.move(src, dst)
except IOError, e:
src_renamed = src + '_copy'
os.rename(src, src_renamed)
src = src_renamed
shutil.move(src_renamed, dst)
except:
# catches other errors like "raise Error, "Destination path '%s' already exists" % real_dst"
pass
filename = _extract_filename(src)
relative_path = dirname + '/' + filename
return relative_path
def save_fp(fingerprint, doc_info):
"""Saves the fingerprints along with the document information in the db
Args:
fingerprint: the document fingerprint
doc_info: document information
Returns: None
"""
db = _create_db_handler()
fingerprint_in_db = db.fingerprint.find_one({'fingerprint': fingerprint})
if fingerprint_in_db:
dinfo = fingerprint_in_db['doc_info']
dinfo.append(doc_info)
db.fingerprint.save(dict(fingerprint = fingerprint, doc_info = dinfo))
else:
db.fingerprint.save({
'fingerprint': fingerprint,
'doc_info': [doc_info]})
if __name__ == '__main__':
archive_text_file('/Users/sagardh/Documents/Kernighan_Ritchie_Language_C.txt')
|
{
"content_hash": "d6a1711c9e063bb39f4165e8cb573a13",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 100,
"avg_line_length": 24.5,
"alnum_prop": 0.6126029359112066,
"repo_name": "kailashbuki/predator",
"id": "1faa87e829cad70c6cbd9d883db849549cf4b9ce",
"size": "3922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "installed/storekeeper/apps/fpkeeper/lib/keeper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "24151"
},
{
"name": "Python",
"bytes": "73388"
},
{
"name": "Shell",
"bytes": "7401"
}
],
"symlink_target": ""
}
|
extensions = [
'oslosphinx',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'snap_openstack Release Notes'
copyright = u'2016, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
release = ''
# The short X.Y version.
version = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GlanceReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'GlanceReleaseNotes.tex', u'Glance Release Notes Documentation',
u'Glance Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'glancereleasenotes', u'Glance Release Notes Documentation',
[u'Glance Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GlanceReleaseNotes', u'Glance Release Notes Documentation',
u'Glance Developers', 'GlanceReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
|
{
"content_hash": "4a517fb8e1fa0c56992f8840730407b4",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 79,
"avg_line_length": 31.661016949152543,
"alnum_prop": 0.6992773019271948,
"repo_name": "coreycb/snap.openstack",
"id": "e0bfc4c833516794d059329a237fbd8edf7517de",
"size": "9051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "releasenotes/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "39170"
}
],
"symlink_target": ""
}
|
"""
Created on Thursday June 23 14:42:21 2016
Tracks a moving object using the method of cross-correlation to find the shift
between successive images in video from a hand-held infrared camera
@author: foresterd
"""
from sys import path as spath
spath.append('/home/foresterd/drfpy/plotting')
import show_plots
spath.append('/home/foresterd/drfpy/grave_IO')
import read_grave as rg
import pickle
import numpy as np
from skimage.feature import register_translation
from scipy.ndimage.interpolation import shift
import cv2
from scipy.misc import bytescale
from skimage.morphology import binary_opening
from scipy import ndimage
from skimage.color import gray2rgb
#------------------------------------------------------------------------------
nFramesToRead = -1 # any positive number, or -1 to read all frames in file
fgbg = cv2.createBackgroundSubtractorMOG2() # Adaptive Background Subtractor
showPlots = True
infile = 'drone_movie.raw'
outdir = 'outmovie'
zrpad = 10
zcpad = 10
# mark bad pixels (overperforming or underperforming) for this sensor
ylst = [248,238,251,283,286,305,405,448,117,398,348]
xlst = [197,301,309,309,425,432,377,587,497,239,63]
badPixels = (np.array(ylst, dtype=np.uint16), # y (rows)
np.array(xlst, dtype=np.uint16)) # x (cols)
#------------------------------------------------------------------------------
if showPlots:
plt, fig, ax = show_plots.getFig(1,1)
# READ THE LWIR CAMERA DATA
frameList, frameHdrList = rg.read_grave(infile, nFramesToRead)
rows, cols = frameList[0].shape
# frames subset
ijump = 120
nskip = 1
frameList = frameList[ijump+nskip:] # start after frame ijump
iframeList = [ii for ii in range(len(frameList)) if ii%nskip==0] # skip every nskip-th frame
icount = 0
for iframe in iframeList[:-1]:
Img = frameList[iframe].copy()
Img[badPixels] = np.mean(Img) # replace the listed bad pixels with the frame mean
#if showPlots: show_plots.showPlot(Img, iframe, ax, wait=True)
print('frame '+str(icount))
#print frame.dtype, np.max(frame), np.shape(frame)
tframe = np.left_shift(Img, 4) # scale 12-bit camera data (native bit depth) up to the 16-bit range
tframe = bytescale(tframe) # convert to 8-bit image
#if showPlots: show_plots.showPlot(tframe, iframe, ax, wait=True)
if icount > 0:
# use phase correlation for image registration
#http://scikit-image.org/docs/dev/auto_examples/plot_register_translation.html
shft, error, diffphase = register_translation(tframePrev, tframe, 100)
if len(shft)!=0:
shifted = shift(tframe, shft)
diff = tframePrev - shifted
fgmask = fgbg.apply(diff)
fgmask[:,0:zcpad] = 0 # left side
fgmask[:,cols-zcpad:cols] = 0 # right side
fgmask[0:zrpad,:] = 0 # top
fgmask[rows-zrpad:rows,:] = 0 # bottom
if shft[1]>0:
fgmask[:,0:+shft[1]] = 0 # left side
else:
fgmask[:,cols+shft[1]:cols] = 0 # right side
cImg = np.zeros((rows,cols), np.bool)
cImg[fgmask>0] = 1
cImg = binary_opening(cImg)
#if showPlots: show_plots.showPlot(cImg, iframe, ax, wait=True)
Idisp = np.zeros((rows, cols, 3), np.uint8)
idx = np.where(cImg)
Idisp = gray2rgb(bytescale(Img))
Idisp[:,:,:][idx] = 0
Idisp[:,:,0][idx] = 255
#Idisp[:,:,0] = rimg
if showPlots: show_plots.showPlot(Idisp, iframe, ax, wait=False)
else:
print('Shift Not Found.')
icount += 1
tframePrev = tframe # save previous frame
|
{
"content_hash": "b24e45afbc4eb0c2eb9af8616ecf2e60",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 92,
"avg_line_length": 35.254901960784316,
"alnum_prop": 0.6218020022246941,
"repo_name": "drforester/drone-tracker",
"id": "0784d91da0bbade91014d323c807f59c14f2a79e",
"size": "3620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drone_track.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3620"
}
],
"symlink_target": ""
}
|
""" Provides support utilities for checking cubes."""
from typing import List, Optional, Union
import iris
import numpy as np
from iris.cube import Cube, CubeList
from iris.exceptions import CoordinateNotFoundError
def check_for_x_and_y_axes(cube: Cube, require_dim_coords: bool = False) -> None:
"""
Check whether the cube has an x and y axis, otherwise raise an error.
Args:
cube:
Cube to be checked for x and y axes.
require_dim_coords:
If true the x and y coordinates must be dimension coordinates.
Raises:
ValueError : Raise an error if non-uniform increments exist between
grid points.
"""
for axis in ["x", "y"]:
if require_dim_coords:
coord = cube.coords(axis=axis, dim_coords=True)
else:
coord = cube.coords(axis=axis)
if not coord:
msg = "The cube does not contain the expected {} coordinates.".format(axis)
raise ValueError(msg)
def check_cube_coordinates(
cube: Cube, new_cube: Cube, exception_coordinates: Optional[List[str]] = None
) -> Cube:
"""Find and promote to dimension coordinates any scalar coordinates in
new_cube that were originally dimension coordinates in the progenitor
cube. If a coordinate is present in new_cube but not in the original
cube, it is kept in its current position.
Args:
cube:
The input cube that will be checked to identify the preferred
coordinate order for the output cube.
new_cube:
The cube that must be checked and adjusted using the coordinate
order from the original cube.
exception_coordinates:
The names of the coordinates that are permitted to be within the
new_cube but are not available within the original cube.
Returns:
Modified cube with relevant scalar coordinates promoted to
dimension coordinates with the dimension coordinates re-ordered,
as best as can be done based on the original cube.
Raises:
CoordinateNotFoundError : Raised if the final dimension
coordinates of the returned cube do not match the input cube.
        CoordinateNotFoundError : If a coordinate is within the permitted
exceptions but is not in the new_cube.
"""
if exception_coordinates is None:
exception_coordinates = []
# Promote available and relevant scalar coordinates
cube_dim_names = [coord.name() for coord in cube.dim_coords]
for coord in new_cube.aux_coords[::-1]:
if coord.name() in cube_dim_names:
new_cube = iris.util.new_axis(new_cube, coord)
new_cube_dim_names = [coord.name() for coord in new_cube.dim_coords]
# If we have the wrong number of dimensions then raise an error.
if len(cube.dim_coords) + len(exception_coordinates) != len(new_cube.dim_coords):
        msg = (
            "The number of dimension coordinates within the new cube "
            "does not match the number of dimension coordinates within the "
            "original cube plus the number of exception coordinates. "
"\n input cube dimensions {}, new cube dimensions {}".format(
cube_dim_names, new_cube_dim_names
)
)
raise CoordinateNotFoundError(msg)
# Ensure dimension order matches
new_cube_dimension_order = {
coord.name(): new_cube.coord_dims(coord.name())[0]
for coord in new_cube.dim_coords
}
correct_order = []
new_cube_only_dims = []
for coord_name in cube_dim_names:
correct_order.append(new_cube_dimension_order[coord_name])
for coord_name in exception_coordinates:
try:
new_coord_dim = new_cube.coord_dims(coord_name)[0]
new_cube_only_dims.append(new_coord_dim)
except CoordinateNotFoundError:
msg = (
"All permitted exception_coordinates must be on the"
" new_cube. In this case, coordinate {0} within the list "
"of permitted exception_coordinates ({1}) is not available"
" on the new_cube."
).format(coord_name, exception_coordinates)
raise CoordinateNotFoundError(msg)
correct_order = np.array(correct_order)
for dim in new_cube_only_dims:
correct_order = np.insert(correct_order, dim, dim)
new_cube.transpose(correct_order)
return new_cube
def find_dimension_coordinate_mismatch(
first_cube: Cube, second_cube: Cube, two_way_mismatch: bool = True
) -> List[str]:
"""Determine if there is a mismatch between the dimension coordinates in
two cubes.
Args:
first_cube:
First cube to compare.
second_cube:
Second cube to compare.
two_way_mismatch:
If True, a two way mismatch is calculated e.g.
second_cube - first_cube AND
first_cube - second_cube
If False, a one way mismatch is calculated e.g.
second_cube - first_cube
Returns:
List of the dimension coordinates that are only present in
one out of the two cubes.
"""
first_dim_names = [coord.name() for coord in first_cube.dim_coords]
second_dim_names = [coord.name() for coord in second_cube.dim_coords]
if two_way_mismatch:
mismatch = list(set(second_dim_names) - set(first_dim_names)) + list(
set(first_dim_names) - set(second_dim_names)
)
else:
mismatch = list(set(second_dim_names) - set(first_dim_names))
return mismatch
def spatial_coords_match(cubes: Union[List, CubeList]) -> bool:
"""
Determine if the x and y coords of all the input cubes are the same.
Args:
cubes:
A list of cubes to compare.
Returns:
        True if the x and y coords are exactly the same to the
precision of the floating-point values (this should be true for
any cubes derived using cube.regrid()), otherwise False.
"""
ref = cubes[0]
match = True
for cube in cubes[1:]:
match = (
cube.coord(axis="x") == ref.coord(axis="x")
and cube.coord(axis="y") == ref.coord(axis="y")
and match
)
return match
def assert_time_coords_valid(inputs: List[Cube], time_bounds: bool):
"""
Raises appropriate ValueError if
- Any input cube has or is missing time bounds (depending on time_bounds)
- Input cube times do not match
- Input cube forecast_reference_times do not match (unless blend_time is present)
Note that blend_time coordinates do not have to match as it is likely that data
from nearby blends will be used together.
Args:
inputs:
List of Cubes where times should match
time_bounds:
When True, time bounds are checked for and compared on the input cubes.
When False, the absence of time bounds is checked for.
Raises:
ValueError: If any of the stated checks fail.
"""
if len(inputs) <= 1:
raise ValueError(f"Need at least 2 cubes to check. Found {len(inputs)}")
cubes_not_matching_time_bounds = [
c.name() for c in inputs if c.coord("time").has_bounds() != time_bounds
]
if cubes_not_matching_time_bounds:
str_bool = "" if time_bounds else "not "
msg = f"{' and '.join(cubes_not_matching_time_bounds)} must {str_bool}have time bounds"
raise ValueError(msg)
if inputs[0].coords("blend_time"):
time_coords_to_check = ["time"]
else:
time_coords_to_check = ["time", "forecast_reference_time"]
for time_coord_name in time_coords_to_check:
time_coords = [c.coord(time_coord_name) for c in inputs]
if not all([tc == time_coords[0] for tc in time_coords[1:]]):
            msg = f"{time_coord_name} coordinates do not match. \n " + "\n ".join(
                [f"{c.name()}: {c.coord(time_coord_name)}" for c in inputs]
)
raise ValueError(msg)
def assert_spatial_coords_match(cubes: Union[List, CubeList]):
"""
Raises an Exception if `spatial_coords_match` returns False.
Args:
cubes:
A list of cubes to compare.
Raises:
ValueError if spatial coords do not match.
"""
if not spatial_coords_match(cubes):
raise ValueError(
f"Mismatched spatial coords for {', '.join([c.name() for c in cubes])}"
)
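
# Minimal usage sketch of the checkers above on two tiny synthetic cubes.
# It relies only on the iris/numpy imports at the top of this module plus
# iris.coords.DimCoord; the cube names and values are illustrative.
def _cube_checker_demo():
    from iris.coords import DimCoord

    def _tiny_cube(name):
        y = DimCoord(np.arange(2, dtype=np.float32), standard_name="latitude", units="degrees")
        x = DimCoord(np.arange(3, dtype=np.float32), standard_name="longitude", units="degrees")
        return Cube(
            np.zeros((2, 3), dtype=np.float32),
            long_name=name,
            dim_coords_and_dims=[(y, 0), (x, 1)],
        )

    first = _tiny_cube("air_temperature")
    second = _tiny_cube("relative_humidity")
    check_for_x_and_y_axes(first, require_dim_coords=True)  # passes silently
    assert spatial_coords_match([first, second])  # identical grids
    assert find_dimension_coordinate_mismatch(first, second) == []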
|
{
"content_hash": "57cb740d847c011c5793c92a0bde1e33",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 95,
"avg_line_length": 36.0042194092827,
"alnum_prop": 0.6238134302121177,
"repo_name": "metoppv/improver",
"id": "ef548c6231890af349ec74292a60071f08109109",
"size": "10190",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "improver/utilities/cube_checker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5073745"
},
{
"name": "Shell",
"bytes": "9493"
}
],
"symlink_target": ""
}
|
from .forms import CustomAuthenticationForm
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib.auth import views as auth_views
### Static url patterns; these urls aren't handled by the navigation system
static_patterns = patterns('',
url(r'^admin/', include('bangoo.admin.urls')),
url(r'^media/', include('bangoo.media.admin.urls')),
url(r'^accounts/login/$', auth_views.login, {'authentication_form': CustomAuthenticationForm}, name='login'),
url(r'^accounts/logout/$', auth_views.logout_then_login, name='logout'),
url(r'^accounts/change_password/$', auth_views.password_change, {'post_change_redirect' : '/accounts/change_password/done/'}, name='change-password'),
url(r'^accounts/change_password/done/$', auth_views.password_change_done),
url(r'^accounts/reset_password/$', auth_views.password_reset, name='password-reset'),
url(r'^accounts/reset_password/done/$', auth_views.password_reset_done),
url(r'^accounts/reset_password/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', auth_views.password_reset_confirm),
url(r'^accounts/reset_password/complete/$', auth_views.password_reset_complete),
)
### If we are in DEBUG mode, then handle static files also
if settings.DEBUG:
static_patterns += staticfiles_urlpatterns()
static_patterns += patterns('',
url(r'^uploads/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT})
)
### Any other urls handled by the menu system.
menu_patterns = i18n_patterns('',
url(r'', include('bangoo.navigation.urls')),
)
### Concatenate url configs
urlpatterns = static_patterns + menu_patterns
|
{
"content_hash": "44ea06c4d049d9b00158f2c7a774973d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 154,
"avg_line_length": 51.457142857142856,
"alnum_prop": 0.7129372570794004,
"repo_name": "pombredanne/bangoo",
"id": "63dd381763dacedbb1c24807fae1cb865846364b",
"size": "1801",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examplesite/examplesite/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13260"
},
{
"name": "HTML",
"bytes": "22675"
},
{
"name": "JavaScript",
"bytes": "76481"
},
{
"name": "Python",
"bytes": "79841"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from api.webview.models import Document
class DocumentSerializer(serializers.ModelSerializer):
class Meta:
model = Document
fields = ('id', 'providerUpdatedDateTime', 'source', 'docID', 'raw', 'normalized')
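
# Typical DRF usage sketch (assumes a configured Django project and a saved
# Document instance; the names below are placeholders):
#
#   serializer = DocumentSerializer(some_document)
#   serializer.data                      # dict with the fields listed in Meta.fields
#
#   serializer = DocumentSerializer(data=incoming_payload)
#   if serializer.is_valid():
#       serializer.save()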
|
{
"content_hash": "8cb668045ad35b4685d1ddff249dac85",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 90,
"avg_line_length": 27,
"alnum_prop": 0.7185185185185186,
"repo_name": "mehanig/scrapi",
"id": "decdb7cba4bfba2e448c97bde87680833fe9d483",
"size": "270",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api/webview/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "964"
},
{
"name": "HTML",
"bytes": "2300"
},
{
"name": "Python",
"bytes": "312117"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_traffic_selector import ApiParameters
from library.modules.bigip_traffic_selector import ModuleParameters
from library.modules.bigip_traffic_selector import ModuleManager
from library.modules.bigip_traffic_selector import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_traffic_selector import ApiParameters
from ansible.modules.network.f5.bigip_traffic_selector import ModuleParameters
from ansible.modules.network.f5.bigip_traffic_selector import ModuleManager
from ansible.modules.network.f5.bigip_traffic_selector import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='selector1',
)
p = ModuleParameters(params=args)
assert p.name == 'selector1'
def test_api_parameters(self):
args = dict(
name='selector1',
)
p = ApiParameters(params=args)
assert p.name == 'selector1'
class TestUntypedManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
set_module_args(dict(
name='selector1',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
|
{
"content_hash": "bfe59ab13d86aef6cc0a5c8db485c2f6",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 82,
"avg_line_length": 27.9126213592233,
"alnum_prop": 0.664,
"repo_name": "thaim/ansible",
"id": "63ac7b3a88bb33095d5a3dae268bb58da01b01d7",
"size": "3034",
"binary": false,
"copies": "21",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/f5/test_bigip_traffic_selector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from netmiko.mellanox.mellanox_ssh import MellanoxSSH
__all__ = ['MellanoxSSH']
|
{
"content_hash": "ba627432c3080e69013376503c488b21",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 53,
"avg_line_length": 30.25,
"alnum_prop": 0.7603305785123967,
"repo_name": "isidroamv/netmiko",
"id": "2df6bbc954dd8d0b5ab2c0b67e6d82b07c404286",
"size": "121",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "netmiko/mellanox/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "244012"
},
{
"name": "Shell",
"bytes": "10760"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
#----------------------------------------------------------------------
# Purpose: This test exercises HDFS operations from python.
#----------------------------------------------------------------------
def hdfs_basic():
# Check if we are running inside the H2O network by seeing if we can touch
# the namenode.
hadoop_namenode_is_accessible = pyunit_utils.hadoop_namenode_is_accessible()
if hadoop_namenode_is_accessible:
hdfs_name_node = pyunit_utils.hadoop_namenode()
hdfs_iris_file = "/datasets/runit/iris_wheader.csv"
hdfs_iris_dir = "/datasets/runit/iris_test_train"
#----------------------------------------------------------------------
# Single file cases.
#----------------------------------------------------------------------
print("Testing single file importHDFS")
url = "hdfs://{0}{1}".format(hdfs_name_node, hdfs_iris_file)
iris_h2o = h2o.import_file(url)
iris_h2o.head()
iris_h2o.tail()
n = iris_h2o.nrow
print("rows: {0}".format(n))
assert n == 150, "Wrong number of rows. Got {0}. Should have got {1}".format(n, 150)
assert isinstance(iris_h2o, h2o.H2OFrame), "Wrong type. Expected H2OFrame, but got {0}".format(type(iris_h2o))
print("Import worked")
#----------------------------------------------------------------------
# Directory file cases.
#----------------------------------------------------------------------
print("Testing directory importHDFS")
urls = ["hdfs://{0}{1}/iris_test.csv".format(hdfs_name_node, hdfs_iris_dir),
"hdfs://{0}{1}/iris_train.csv".format(hdfs_name_node, hdfs_iris_dir)]
iris_dir_h2o = h2o.import_file(urls)
iris_dir_h2o.head()
iris_dir_h2o.tail()
n = iris_dir_h2o.nrow
print("rows: {0}".format(n))
assert n == 150, "Wrong number of rows. Got {0}. Should have got {1}".format(n, 150)
assert isinstance(iris_dir_h2o, h2o.H2OFrame), "Wrong type. Expected H2OFrame, but got {0}".\
format(type(iris_dir_h2o))
print("Import worked")
else:
        raise EnvironmentError("Hadoop namenode is not accessible from this host.")
if __name__ == "__main__":
pyunit_utils.standalone_test(hdfs_basic)
else:
hdfs_basic()
|
{
"content_hash": "61848d47becdd743058cb76cac788034",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 118,
"avg_line_length": 37.41538461538462,
"alnum_prop": 0.4979440789473684,
"repo_name": "h2oai/h2o-dev",
"id": "899b808dda4755c4599fcd505dfa7c32961529af",
"size": "2432",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_hdfs/pyunit_INTERNAL_HDFS_basic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "162399"
},
{
"name": "CoffeeScript",
"bytes": "267048"
},
{
"name": "Emacs Lisp",
"bytes": "6465"
},
{
"name": "HTML",
"bytes": "140849"
},
{
"name": "Java",
"bytes": "6216622"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Jupyter Notebook",
"bytes": "5585408"
},
{
"name": "Makefile",
"bytes": "34105"
},
{
"name": "Python",
"bytes": "2644394"
},
{
"name": "R",
"bytes": "1848754"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "22830"
},
{
"name": "Shell",
"bytes": "47513"
},
{
"name": "TeX",
"bytes": "579960"
}
],
"symlink_target": ""
}
|
def caches(cap, sources, srs_grids):
caches = {}
for name, source in sources.iteritems():
conf = for_source(name, source, srs_grids)
if not conf:
continue
caches[name[:-len('_wms')] + '_cache'] = conf
return caches
def for_source(name, source, srs_grids):
cache = {
'sources': [name]
}
grids = []
for srs in source['supported_srs']:
if srs in srs_grids:
grids.append(srs_grids[srs])
if not grids:
return None
cache['grids'] = grids
return cache
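
# Minimal illustration of the two helpers above with a made-up source
# definition (the source and grid names are hypothetical, not mapproxy
# defaults).
def _caches_demo():
    srs_grids = {'EPSG:3857': 'GLOBAL_WEBMERCATOR', 'EPSG:4326': 'GLOBAL_GEODETIC'}
    sources = {
        'osm_wms': {'supported_srs': ['EPSG:3857']},
        'dem_wms': {'supported_srs': ['EPSG:31467']},  # no matching grid -> skipped
    }
    # -> {'osm_cache': {'sources': ['osm_wms'], 'grids': ['GLOBAL_WEBMERCATOR']}}
    return caches(None, sources, srs_grids)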
|
{
"content_hash": "718775a2116830c7af1d79a5981c1045",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 53,
"avg_line_length": 20.88888888888889,
"alnum_prop": 0.5478723404255319,
"repo_name": "Anderson0026/mapproxy",
"id": "c874af30933c2a8e819cbe3c376c095ad4ffd9ac",
"size": "1234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mapproxy/script/conf/caches.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12401"
},
{
"name": "Python",
"bytes": "1477825"
},
{
"name": "Shell",
"bytes": "3087"
}
],
"symlink_target": ""
}
|
import SP_paras
import test_algorithm as ta
#This experiment extracts parameters from SP data, feeds them into SAM and gets results. The discard_AAWP switch controls whether the AA, AW and AP parameters are used (their values are much bigger than AS's).
# Switch~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
discard_AAWP = 1
check_paras = 1
check_ori_ABP_bands = 0
#Data-processing algorithm that processes SP_testing (array or list).
def extract_SP_paras(SP_testing, discard_AAWP = discard_AAWP):
    # Extract parameters from SP and use these parameters to classify.
para_testing_dict = SP_paras.SP_paras(SP_testing)
if discard_AAWP == 1:
for i in para_testing_dict:
para_testing_dict[i].pop('AA')
para_testing_dict[i].pop('AW')
para_testing_dict[i].pop('AP')
para_testing_list = SP_paras.dict_to_list(para_testing_dict)
SP_testing = para_testing_list
return SP_testing
SP_training_sulf = ta.load_training_SP('sulfuro')
SP_training_oxi = ta.load_training_SP('oxido')
# Algorithm that converts SP data into the original ABP bands.
def ori_ABP_bands(sp):
#########################################this is for testing original sp ABP bands' result################################################################
sp = SP_paras.choose_ABP_bands(sp, choose_band = [1,0,0])
spList = []
for i in range(len(sp)):
for j in range(len(sp[i])):
spList.append(sp[i][j])
return spList
# Algorithm that converts SP data into ABP parameters.
def para_ABP_bands(sp):
para_dict = SP_paras.SP_paras(sp,choose_band = [0,0,1])
if discard_AAWP == 1:
for i in para_dict :
para_dict[i].pop('AA')
para_dict[i].pop('AP')
para_dict[i].pop('AW')
para_list = SP_paras.dict_to_list(para_dict)
return para_list
if check_ori_ABP_bands == 1:
SP_training_sulf = ori_ABP_bands(SP_training_sulf)
SP_training_oxi = ori_ABP_bands(SP_training_oxi)
if check_paras == 1:
SP_training_sulf = para_ABP_bands(SP_training_sulf)
SP_training_oxi = para_ABP_bands(SP_training_oxi)
if check_ori_ABP_bands ==1 :
ta.check(SP_training_oxi, SP_training_sulf, check_all = 1, dataProcess_alg = ori_ABP_bands)
if check_paras == 1:
ta.check(SP_training_oxi, SP_training_sulf, check_all = 1, dataProcess_alg = para_ABP_bands,file_acc_name = '3paras_ABP_band3_SAM.txt')
|
{
"content_hash": "8a597966f323aeeed0d1a5ecdfc4c8d6",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 174,
"avg_line_length": 35,
"alnum_prop": 0.621321961620469,
"repo_name": "Vincentyao1995/Globalink2017-UBC",
"id": "bc87276a38143f67012ac03fe29838f4bc3ad8cb",
"size": "2347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Vincent/before meeting DT/para_extract/Experiment3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10099"
},
{
"name": "Python",
"bytes": "329767"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
import datetime
from mapApp.models import Incident, Hazard, Theft, AlertArea
@login_required
def recentReports(request):
user = request.user
    # Get all alertable points by type; the user-area and last-week filters are applied below
collisions = Incident.objects.filter(p_type__exact="collision") | Incident.objects.filter(p_type__exact="fall")
nearmisses = Incident.objects.filter(p_type__exact="nearmiss")
hazards = Hazard.objects.all()
thefts = Theft.objects.all()
    # Get the user's alert areas
    rois = AlertArea.objects.filter(user=user.id)
    # Recent sets = points that intersect a user-defined alert area and were reported in the last week
collisionsInPoly = Incident.objects.none()
nearmissesInPoly = Incident.objects.none()
hazardsInPoly = Hazard.objects.none()
theftsInPoly = Theft.objects.none()
# Find intersecting points
for g in rois:
collisionsInPoly = collisionsInPoly | collisions.filter(geom__intersects=g.geom)
nearmissesInPoly = nearmissesInPoly | nearmisses.filter(geom__intersects=g.geom)
hazardsInPoly = hazardsInPoly | hazards.filter(geom__intersects=g.geom)
theftsInPoly = theftsInPoly | thefts.filter(geom__intersects=g.geom)
now = datetime.datetime.now()
lastweek = now - datetime.timedelta(days=7)
context = {
'collisions': collisionsInPoly.filter(date__range=[lastweek, now]).order_by('-date'),
'nearmisses': nearmissesInPoly.filter(date__range=[lastweek, now]).order_by('-date'),
'hazards': hazardsInPoly.filter(date__range=[lastweek, now]).order_by('-date'),
'thefts': theftsInPoly.filter(date__range=[lastweek, now]).order_by('-date'),
'geofences': rois
}
return render(request, 'mapApp/recent_reports.html', context)
|
{
"content_hash": "b7fa410a96ae4f474fd002a52fcdeab2",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 115,
"avg_line_length": 43.5,
"alnum_prop": 0.7136886102403344,
"repo_name": "SPARLab/BikeMaps",
"id": "dda8863250344b4d823b0062efbe870ae22f0d0b",
"size": "1914",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mapApp/views/recentReports.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15111"
},
{
"name": "HTML",
"bytes": "134960"
},
{
"name": "JavaScript",
"bytes": "73792"
},
{
"name": "Python",
"bytes": "965817"
}
],
"symlink_target": ""
}
|
"""
This module handles messages.
"""
import logging
import json
import random
import yaml
from google.appengine.ext import deferred
from google.appengine.api import urlfetch # we've locked in the data so why not
import data.models
import data.tools
from flask import Flask
import flask
app = Flask(__name__)
USERNAME = "ml" # actually it's 'slackbot' but docs say this should work, so do both
POSITIVE_RESPONSES = [
':yellow_heart:',
':smile_cat:',
'got it',
'thanks',
':black_joker:',
':pray:',
':unicorn_face:',
':godmode:'
]
def get_conf(name):
"""Gets a value out of the secret config file secrets.yaml"""
# we are just going to read it every time. This is probably unwise
with open('secrets.yaml') as buf:
        conf = yaml.safe_load(buf)  # safe_load avoids arbitrary object construction
return conf[name]
@app.route('/new', methods=['POST'])
def new_msg():
"""Stores new messages"""
# check it's legit
if flask.request.data:
msg_data = json.loads(flask.request.data)
else:
msg_data = flask.request.form
if msg_data['token'] == get_conf('outbound_token') \
and msg_data['user_name'] != USERNAME \
and msg_data['user_name'] != 'slackbot': # avoid feedback
msg = msg_data['text']
logging.info('Received message: "%s" from %s',
msg, msg_data['user_name'])
# quickly process it, we want to fail here if things are missing
msg_data = {
'username': msg_data['user_name'],
'text': msg_data['text'],
'timestamp': msg_data['timestamp']
}
# defer handling it
deferred.defer(data.tools.store_msg, msg_data)
# return some kind of confirmation
return flask.Response(
json.dumps({'text':random.choice(POSITIVE_RESPONSES)}),
mimetype='application/json'
)
return 'nope'
def format_slackmsg(text):
return text.replace('\n', '\\n')
@app.route('/post', methods=['POST'])
def post_msg():
"""Posts a message to the webhook in the secret file"""
webhook_url = get_conf('incoming_webhook')
# get the message content from the request
msg_data = json.loads(flask.request.data)
text = msg_data['text']
secret = msg_data.get('secret', '')
if secret != get_conf('post_secret'):
return 'not allowed', 401
logging.info('posting: %s', text)
# make sure the text is appropriately escaped
body = {
'text':format_slackmsg(text)
}
# use urlfetch to POST it to the webhook
result = urlfetch.fetch(url=webhook_url,
payload=json.dumps(body),
method=urlfetch.POST,
headers={'Content-Type': 'application/json'})
logging.info('attempted...')
if result.status_code != 200:
logging.error('possible issue: %d, %s', result.status_code,
result.content)
else:
        logging.info('...successfully')
return '<p>{}, {}</p>'.format(result.status_code, result.content)
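
# Example layout of the secrets.yaml file read by get_conf(); the keys come
# from the calls above, the values are placeholders only:
#
#   outbound_token: "token-of-the-outgoing-webhook"
#   incoming_webhook: "https://hooks.slack.com/services/T000/B000/XXXX"
#   post_secret: "shared-secret-required-by-the-/post-endpoint"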
|
{
"content_hash": "2cdc724f6041169f02c0c5d54c83ba39",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 83,
"avg_line_length": 30.287128712871286,
"alnum_prop": 0.5946387708401438,
"repo_name": "PFCM/slack-ml",
"id": "1ab6e19f6f9a4698f5f3cc51a279dd2161de2e4e",
"size": "3059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "msg/msg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Python",
"bytes": "16020"
},
{
"name": "Shell",
"bytes": "1464"
}
],
"symlink_target": ""
}
|
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
from virtool.subtractions.models import SubtractionFile
from virtool.subtractions.tasks import AddSubtractionFilesTask
from virtool.tasks.models import Task
async def test_add_subtraction_files_task(
snapshot,
tmp_path,
spawn_client,
mongo,
pg: AsyncEngine,
static_time,
):
client = await spawn_client(authorize=True)
client.app["config"].data_path = tmp_path
test_dir = tmp_path / "subtractions" / "foo"
test_dir.mkdir(parents=True)
test_dir.joinpath("subtraction.fa.gz").write_text("FASTA file")
test_dir.joinpath("subtraction.1.bt2").write_text("Bowtie2 file")
subtraction = {
"_id": "foo",
"name": "Foo",
"nickname": "Foo Subtraction",
"deleted": False,
}
await client.db.subtraction.insert_one(subtraction)
task = Task(
id=1,
complete=False,
context={},
count=0,
progress=0,
step="rename_index_files",
type="add_subtraction_files",
created_at=static_time.datetime,
)
async with AsyncSession(pg) as session:
session.add(task)
await session.commit()
add_files_task = AddSubtractionFilesTask(client.app, 1)
await add_files_task.run()
async with AsyncSession(pg) as session:
assert (
await session.execute(select(SubtractionFile))
).scalars().all() == snapshot
|
{
"content_hash": "9b4da7d87c8a39892a6d79c797db6324",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 69,
"avg_line_length": 26.571428571428573,
"alnum_prop": 0.6478494623655914,
"repo_name": "igboyes/virtool",
"id": "8efe90658ed677cd047a922d5e8f1f550b2c3b93",
"size": "1488",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/subtractions/test_tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "961"
},
{
"name": "HTML",
"bytes": "44858"
},
{
"name": "Python",
"bytes": "1316464"
}
],
"symlink_target": ""
}
|
"""
Django settings for server project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import base64
import sys
from urllib import parse
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'os5^q5qs3%ch()09f7l$sy+p6^mue@+*r(l*hv_5z-87jngttq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False if 'DATABASE_URL' in os.environ else True
ALLOWED_HOSTS = [
'glacial-shore-18891.herokuapp.com',
'localhost'
]
# Application definition
INSTALLED_APPS = [
'corsheaders',
'movies.apps.MoviesConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'server.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'django_angular2',
'USER': 'root',
'PASSWORD': 'password',
'HOST': '127.0.0.1'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
}
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Rome'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
CSRF_COOKIE_SECURE = False
CSRF_TRUSTED_ORIGINS = ['django-angular2-movies.firebaseapp.com']
# custom settings
JWT_SECRET = base64.b64encode(b'ScaredCherriesEatSurelySimpleVulcansParticipateIntensely')
# heroku database settings
# Register database schemes in URLs.
parse.uses_netloc.append('mysql')
try:
# Check to make sure DATABASES is set in settings.py file.
# If not default to {}
if 'DATABASES' not in locals():
DATABASES = {}
if 'DATABASE_URL' in os.environ:
url = parse.urlparse(os.environ['DATABASE_URL'])
# Ensure default database exists.
DATABASES['default'] = DATABASES.get('default', {})
# Update with environment configuration.
DATABASES['default'].update({
'NAME': url.path[1:],
'USER': url.username,
'PASSWORD': url.password,
'HOST': url.hostname,
'PORT': url.port,
})
if url.scheme == 'mysql':
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
except Exception:
print('Unexpected error:', sys.exc_info())
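
# Illustration of the DATABASE_URL handling above, using a made-up URL
# (not a real credential):
#
#   url = parse.urlparse('mysql://dbuser:s3cret@db.example.com:3306/movies')
#   url.path[1:]  -> 'movies'          (NAME)
#   url.username  -> 'dbuser'          (USER)
#   url.password  -> 's3cret'          (PASSWORD)
#   url.hostname  -> 'db.example.com'  (HOST)
#   url.port      -> 3306              (PORT)
#   url.scheme    -> 'mysql'           (selects django.db.backends.mysql)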
|
{
"content_hash": "028805f382056d6ad664831d09b5f61a",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 91,
"avg_line_length": 26.421965317919074,
"alnum_prop": 0.6718442353970685,
"repo_name": "damnko/angular2-django-movies",
"id": "2e721ecc4b173a8a387748d4a52543117515c3da",
"size": "4571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django-server/server/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12356"
},
{
"name": "HTML",
"bytes": "14376"
},
{
"name": "JavaScript",
"bytes": "10917"
},
{
"name": "Python",
"bytes": "27305"
},
{
"name": "TypeScript",
"bytes": "44092"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
from distutils.command.install import INSTALL_SCHEMES
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
setup(name='DITA Generator',
version='@version@',
description='DITA Generator library',
author='Jarno Elovirta',
author_email='jarno@elovirta.com',
url='http://code.google.com/p/dita-generator/',
packages=['ditagen', 'ditagen.dita'],
#data_files = [('ditagen-data', ['../resources/lgpl.txt'])]
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Text Processing :: Markup :: XML',
'Topic :: Software Development :: Code Generators',
],
)
|
{
"content_hash": "71c99208f2ed4bca603b70adabb5b8fe",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 65,
"avg_line_length": 36.96296296296296,
"alnum_prop": 0.6032064128256514,
"repo_name": "jelovirt/dita-generator",
"id": "1da7488729d678f615ededec486c07293925bfc4",
"size": "1671",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5503"
},
{
"name": "HTML",
"bytes": "76618"
},
{
"name": "JavaScript",
"bytes": "34658"
},
{
"name": "Python",
"bytes": "269295"
}
],
"symlink_target": ""
}
|
import requests, os, re
from urllib.parse import urljoin, urlparse
from bs4 import BeautifulSoup
class NHKNewsParser():
nhkTopStoriesJSONUrl = "http://www3.nhk.or.jp/news/json16/accessranking.json"
nhkNewsBaseUrl = "http://www3.nhk.or.jp/news/"
oldNewsFile = "oldnews.txt"
def __init__(self):
# Ensure old news file exists
f = open(self.oldNewsFile, "a")
f.close()
self.nhkTopStories = self.update_articles()
self.filter_oldnews()
print(len(self.nhkTopStories))
# Calls the NHK api and retrieves an array of metadata for top stories.
# The parser will store a copy in self.nhkTopStories on instantiation
def update_articles(self):
response = requests.get(self.nhkTopStoriesJSONUrl)
if response.status_code != 200:
            raise ConnectionError("Could not access NHK JSON, HTTP error code " + str(response.status_code))
items = response.json().get("channel").get("item")
return items
def filter_oldnews(self):
with open(self.oldNewsFile, "r") as f:
oldnews = [line.rstrip('\n') for line in f]
self.nhkTopStories = list(filter(lambda x: self.get_url(x) not in oldnews, self.nhkTopStories))
def get_url(self, story_metadata):
return self.nhkNewsBaseUrl + story_metadata.get("link")
# Gets a full article from story metadata
def get_article(self, story_metadata):
print("Get "+story_metadata.get("title"))
article_data = {}
article_data["title"] = story_metadata.get("title")
if(story_metadata.get("imgPath")):
article_data["image"] = self.nhkNewsBaseUrl + story_metadata.get("imgPath")
if(story_metadata.get("videoPath")):
article_data["video"] = self.nhkNewsBaseUrl + story_metadata.get("videoPath")
article_data["url"] = self.get_url(story_metadata)
article_data["pub_date"] = story_metadata.get("pubDate")
response = requests.get(article_data["url"])
if response.status_code != 200:
            raise ConnectionError("Could not access NHK, HTTP error code " + str(response.status_code))
article_soup = BeautifulSoup(response.content.decode("utf-8"), "html.parser")
for br in article_soup.find_all("br"):
br.replace_with("\n\n")
# Get the basic article content
article_data["summary"] = "".join([div.get_text() for div in article_soup.find_all("div", {"id":"news_textbody"})])
article_data["body"] = "".join([div.get_text() for div in article_soup.find_all("div", {"id": "news_textmore"})])
# Get any additional content that might exist
additional = article_soup.find_all("div", {"class": "news_add"})
adds = []
for add in additional:
build = {}
title = add.find(re.compile('^h[1-6]$'))
if(title):
build["title"] = title.get_text()
build["content"] = "".join([div.get_text() for div in add.find_all("div")])
img = add.find("img")
if(img):
src = img["src"]
if(urlparse(src).netloc != ""):
build["image"] = src
else:
build["image"] = urljoin(article_data["url"], src)
adds.append(build)
article_data["adds"] = adds
with open(self.oldNewsFile, "a") as f:
f.write(article_data["url"] + "\n")
return article_data
# Gets the top num_articles articles
def get_top_articles(self, num_articles):
self.filter_oldnews()
return [self.get_article(x) for x in self.nhkTopStories[:num_articles]]
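
# Minimal usage sketch; it performs live HTTP requests against NHK and appends
# to oldnews.txt, so it only runs when the module is executed directly.
if __name__ == "__main__":
    parser = NHKNewsParser()
    for article in parser.get_top_articles(3):
        print(article["title"], article["url"])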
|
{
"content_hash": "d2494938f380ec68e43256852c53d936",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 123,
"avg_line_length": 36.86138613861386,
"alnum_prop": 0.5928015041633091,
"repo_name": "KinRyuuXIV/NHKNews",
"id": "9fca18725d96eab57cdcd95694f85e405b0a4b66",
"size": "3723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/NHKNewsParser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6168"
}
],
"symlink_target": ""
}
|
from numpy import zeros
from gwlfe.Input.WaterBudget.Percolation import Percolation
from gwlfe.Input.WaterBudget.Percolation import Percolation_f
from gwlfe.Memoization import memoize
try:
from .DeepSeep_inner_compiled import DeepSeep_inner
except ImportError:
print("Unable to import compiled DeepSeep_inner, using slower version")
from gwlfe.Input.WaterBudget.DeepSeep_inner import DeepSeep_inner
@memoize
def GrFlow(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0, AntMoist_0, Grow_0, CNP_0, Imper,
ISRR, ISRA, CN, UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0, RecessionCoef, SeepCoef):
result = zeros((NYrs, 12, 31))
deepseep = zeros((NYrs, 12, 31))
satstor = zeros((NYrs, 12, 31))
percolation = Percolation(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0, AntMoist_0, Grow_0,
CNP_0,
Imper, ISRR, ISRA, CN, UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap)
satstor_carryover = SatStor_0
for Y in range(NYrs):
for i in range(12):
for j in range(DaysMonth[Y][i]):
satstor[Y][i][j] = satstor_carryover
result[Y][i][j] = RecessionCoef * satstor[Y][i][j]
deepseep[Y][i][j] = SeepCoef * satstor[Y][i][j]
satstor[Y][i][j] = satstor[Y][i][j] + percolation[Y][i][j] - result[Y][i][j] - deepseep[Y][i][j]
if satstor[Y][i][j] < 0:
satstor[Y][i][j] = 0
satstor_carryover = satstor[Y][i][j]
return result
@memoize
def GrFlow_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0, AntMoist_0, Grow_0, CNP_0, Imper,
ISRR, ISRA, CN, UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0, RecessionCoef, SeepCoef):
percolation = Percolation_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0, AntMoist_0, Grow_0,
CNP_0,
Imper, ISRR, ISRA, CN, UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap)
return DeepSeep_inner(NYrs, SatStor_0, DaysMonth, RecessionCoef, SeepCoef, percolation)[1]
|
{
"content_hash": "31fd42c22d2c2c64116671e5d6fc12fa",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 117,
"avg_line_length": 50.627906976744185,
"alnum_prop": 0.6159853008727607,
"repo_name": "WikiWatershed/gwlf-e",
"id": "11f8e105876613d70d4eff94148cb13c10eebf9a",
"size": "2177",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "gwlfe/Input/WaterBudget/GrFlow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAMS",
"bytes": "5930291"
},
{
"name": "Python",
"bytes": "775719"
}
],
"symlink_target": ""
}
|
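# Frame layout, as inferred from the index usage in aggregate() below (the
# protocol is otherwise undocumented here):
#   [magic=0xFA, id, funcType, storageType, sequence, payloadLength, payload bytes ...]
# A payload whose last byte is 0x0A (newline) ends a message and triggers
# process() for that id.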
def aggregate(data, dataDict):
magic = data[0]
if validateMagic(magic):
id = data[1]
funcType = data[2]
storageType = data[3]
sequence = data[4]
payloadLength = data[5]
payload = []
print(str(payloadLength))
for x in range(6,payloadLength + 6):
payload.append(data[x])
        print("payload %s" % payload)
dataRep = dataDict.get(id)
if dataRep is None:
payloads = []
payloads.insert(sequence, payload)
dataRep = {'function' : getFunction(funcType), 'storageType' : storageType, 'payloads' : payloads}
else:
payloads = dataRep['payloads']
payloads.insert(sequence, payload)
dataRep['payloads'] = payloads
dataDict[id] = dataRep
lastByte = payload[len(payload) - 1]
if lastByte == 0x0A:
process(dataDict, id)
else:
print("Incorrect Magic")
def process(dataDict,id):
dataRep = dataDict[id]
#TODO: process data based on function
return True
def validateMagic(magic):
if magic == 0xFA:
return True
return False
def getFunction(funcType):
if funcType == 0x00:
return "Search"
elif funcType == 0x01:
return "Sort"
elif funcType == 0x02:
return "Store"
elif funcType == 0x03:
return "Retrieve"
else:
return "Unknown"
debug = False
if debug:
dataDict = {}
data = [0xFA, 0x01, 0x02, 0x01, 0x01, 0x01, 0x0F]
aggregate(data, dataDict)
print(dataDict)
data = [0xFA, 0x01, 0x02, 0x01, 0x01, 0x01, 0x0B]
aggregate(data, dataDict)
print(dataDict)
data = [0xFA, 0x02, 0x02, 0x01, 0x01, 0x01, 0x0F]
aggregate(data, dataDict)
print(dataDict)
|
{
"content_hash": "d041826c73424a8125f08ca0e6ac17ae",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 110,
"avg_line_length": 27.875,
"alnum_prop": 0.5795964125560538,
"repo_name": "BoxBlue/Box",
"id": "4f6d36bfbdea37d693b18163775808d1bcccb44b",
"size": "1786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "protocol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3780"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django_unsaved_changes.admin import UnsavedChangesAdmin
from .models import *
class MatchUserInline(admin.TabularInline):
model = MatchUser
extra = 0
autocomplete_lookup_fields = {
'fk': ['user'],
}
raw_id_fields = ('user', )
class MatchAdmin(UnsavedChangesAdmin):
#prepopulated_fields = {"slug": ("title",)}
#inlines = [MatchUserInline]
pass
class MatchUserAdmin(UnsavedChangesAdmin):
autocomplete_lookup_fields = {
'fk': ['match', 'user'],
}
raw_id_fields = ('match', 'user', )
class MatchLogsAdmin(UnsavedChangesAdmin):
autocomplete_lookup_fields = {
'fk': ['match', 'player', 'affected_player', 'logtype'],
}
raw_id_fields = ('match', 'player', 'affected_player', 'logtype', )
class PlayerAdmin(UnsavedChangesAdmin):
autocomplete_lookup_fields = {
'fk': ['current_match'],
}
raw_id_fields = ('current_match', )
class LogTypeAdmin(UnsavedChangesAdmin):
pass
admin.site.register(Match, MatchAdmin)
admin.site.register(MatchUser, MatchUserAdmin)
admin.site.register(MatchLogs, MatchLogsAdmin)
admin.site.register(Player, PlayerAdmin)
admin.site.register(LogType, LogTypeAdmin)
|
{
"content_hash": "f5940738b0ec1f1a9ccf4e5f24d5f457",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 72,
"avg_line_length": 23.58490566037736,
"alnum_prop": 0.6704,
"repo_name": "ninapavlich/celadon",
"id": "f4f2af9e052e02cfd64be8ba18b8fc2986f2cffd",
"size": "1250",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "celadon/apps/celadon/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1172385"
},
{
"name": "HTML",
"bytes": "52123"
},
{
"name": "JavaScript",
"bytes": "86783"
},
{
"name": "Python",
"bytes": "295373"
}
],
"symlink_target": ""
}
|
import random
def computeNextMove(floors, elevators):
numFloors = len(floors)
numElevs = len(elevators)
# the trailing , is required so that python doesn't reduce ((1,2)) to (1,2)
# the C++ code on the other end of this function expects a tuple of tuples
# of size 2, not a tuple of ints.
# write state of input to the file for verification
dataOut = open("pythonScriptData.txt", "a")
dataOut.write( "floors: " + str(floors) + " elevators: " + str(elevators) + "\n" )
dataOut.close()
# return a random dispatch (for now)
return ( ( random.randint(0, numElevs-1),
random.randint(0, numFloors-1) ), )
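
# Example call with a synthetic building state (five floors, two elevators);
# this placeholder AI picks the dispatch at random:
#
#   computeNextMove([0, 1, 0, 2, 0], [3, 0])
#   # -> e.g. ((1, 4),)  i.e. send elevator 1 to floor 4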
|
{
"content_hash": "d84067ff2416b0c6af7dd89e2d36efc4",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 85,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.6439393939393939,
"repo_name": "maxdeliso/elevatorSim",
"id": "c8a6dbce22db8ce709ea973587c940dd1bb14cb1",
"size": "682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "defaultElevatorAi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "513967"
},
{
"name": "C++",
"bytes": "86522751"
},
{
"name": "Objective-C",
"bytes": "2174"
},
{
"name": "Perl",
"bytes": "6080"
},
{
"name": "PowerShell",
"bytes": "1487"
},
{
"name": "Python",
"bytes": "19028751"
},
{
"name": "Shell",
"bytes": "728"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
def QUIT(parent, r):
"""Close PyBot server"""
parent.run = False
return 'BYE'
def REFRESH(parent, r):
"""Search for new devices"""
parent.robot.refresh()
return ''
def OPEN(parent, r):
"""Open an 'openable' module such as motors, butia.."""
if len(r) == 1:
module = r[0]
return parent.robot.moduleOpen(module)
return ''
def CLOSE(parent, r):
"""Close an 'openable' module such as motors, butia.."""
if len(r) == 1:
module = r[0]
return parent.robot.moduleClose(module)
return ''
def DESCRIBE(parent, r):
"""Get the list of functions and parameters of a module"""
if len(r) == 1:
module = r[0]
return parent.robot.describe(module)
return ''
def BUTIA_COUNT(parent, r):
"""Get the number of boards connected"""
return parent.robot.getButiaCount()
def LISTI(parent, r):
    """Get a list of instantiable modules of the board"""
board = 0
if len(r) >= 1:
board = r[0]
l = parent.robot.getListi(board)
return ','.join(l)
def LIST(parent, r):
"""Get a list of open modules in a board"""
l = parent.robot.getModulesList()
return ','.join(l)
def CLIENTS(parent, r):
"""Get a list of current clients in PyBot server"""
l = []
for c in parent.clients:
addr = parent.clients[c]
l.append(str(addr[0]) + ', ' + str(addr[1]))
return '\n'.join(l)
def CALL(parent, r): # parent is the PyBot server; its robot is usb4butia or chotox
"""Call a function of certain module"""
if len(r) >= 2:
split = parent.robot._split_module(r[0])
return parent.robot.callModule(split[1], split[2], split[0], r[1], r[2:])
return ''
def HELP(parent, r):
"""Return a list of commands or the use of specific one"""
a = dir(parent.comms)
l = a[:]
if '__builtins__' in a:
i = a.index('__builtins__')
l = a[:i]
if len(r) == 0:
return ', '.join(l)
else:
com = r[0].upper()
if com in l:
f = getattr(parent.comms, com)
return f.__doc__
return ""
|
{
"content_hash": "356b599688336ac59ac6199241a1b4c6",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 81,
"avg_line_length": 26.810126582278482,
"alnum_prop": 0.5651558073654391,
"repo_name": "nvazquez/Turtlebots",
"id": "f4a5f8934f1613df14a664712e675933896b59a7",
"size": "3099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/butia/pybot/server_functions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11536"
},
{
"name": "C++",
"bytes": "673"
},
{
"name": "Makefile",
"bytes": "1519"
},
{
"name": "Python",
"bytes": "3582442"
},
{
"name": "Shell",
"bytes": "356"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListUserIdentities(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListUserIdentities Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListUserIdentities, self).__init__(temboo_session, '/Library/Zendesk/UserIdentities/ListUserIdentities')
def new_input_set(self):
return ListUserIdentitiesInputSet()
def _make_result_set(self, result, path):
return ListUserIdentitiesResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListUserIdentitiesChoreographyExecution(session, exec_id, path)
class ListUserIdentitiesInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListUserIdentities
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Email(self, value):
"""
Set the value of the Email input for this Choreo. ((required, string) The email address you use to login to your Zendesk account.)
"""
super(ListUserIdentitiesInputSet, self)._set_input('Email', value)
def set_ID(self, value):
"""
Set the value of the ID input for this Choreo. ((conditional, string) The ID of the user.)
"""
super(ListUserIdentitiesInputSet, self)._set_input('ID', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) The page number of the results to be returned. Used together with the PerPage parameter to paginate a large set of results.)
"""
super(ListUserIdentitiesInputSet, self)._set_input('Page', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) Your Zendesk password.)
"""
super(ListUserIdentitiesInputSet, self)._set_input('Password', value)
def set_PerPage(self, value):
"""
Set the value of the PerPage input for this Choreo. ((optional, integer) The number of results to return per page. Maximum is 100 and default is 100.)
"""
super(ListUserIdentitiesInputSet, self)._set_input('PerPage', value)
def set_Server(self, value):
"""
Set the value of the Server input for this Choreo. ((required, string) Your Zendesk domain and subdomain (e.g., temboocare.zendesk.com).)
"""
super(ListUserIdentitiesInputSet, self)._set_input('Server', value)
class ListUserIdentitiesResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListUserIdentities Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Zendesk.)
"""
return self._output.get('Response', None)
def get_NextPage(self):
"""
Retrieve the value for the "NextPage" output from this Choreo execution. ((integer) The index for the next page of results.)
"""
return self._output.get('NextPage', None)
def get_PreviousPage(self):
"""
Retrieve the value for the "PreviousPage" output from this Choreo execution. ((integer) The index for the previous page of results.)
"""
return self._output.get('PreviousPage', None)
class ListUserIdentitiesChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListUserIdentitiesResultSet(response, path)
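
# Invocation sketch. The session constructor and execute_with_results call
# follow the Temboo Python SDK as commonly documented -- treat them as
# assumptions; all credential values below are placeholders.
#
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = ListUserIdentities(session)
#   inputs = choreo.new_input_set()
#   inputs.set_Email('agent@example.com')
#   inputs.set_Password('********')
#   inputs.set_Server('example.zendesk.com')
#   inputs.set_ID('12345')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())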
|
{
"content_hash": "6cef6f0aba5dff3048d01e1d5bf471fc",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 202,
"avg_line_length": 43.956043956043956,
"alnum_prop": 0.685,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "11844007dbb82dc15a259af85ffa4826e6083ac2",
"size": "4866",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "home/pi/GrovePi/Software/Python/others/temboo/Library/Zendesk/UserIdentities/ListUserIdentities.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
}
|
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import types
import numpy as np
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"ReparameterizationType",
"FULLY_REPARAMETERIZED",
"NOT_REPARAMETERIZED",
"Distribution",
]
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape",
"batch_shape_tensor",
"cdf",
"covariance",
"cross_entropy",
"entropy",
"event_shape",
"event_shape_tensor",
"kl_divergence",
"log_cdf",
"log_prob",
"log_survival_function",
"mean",
"mode",
"prob",
"sample",
"stddev",
"survival_function",
"variance",
]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str = old_str or ""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
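
# A small illustration of _update_docstring with synthetic strings -- the
# appended text is indented by two spaces and inserted just before the final
# "Args:" section:
#
#   _update_docstring("Computes X.\n\nArgs:\n  value: input.",
#                     "Extra note from the subclass.")
#   # -> "Computes X.\n\n\n  Extra note from the subclass.\n\nArgs:\n  value: input."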
def _convert_to_tensor(value, name=None, preferred_dtype=None):
"""Converts to tensor avoiding an eager bug that loses float precision."""
# TODO(b/116672045): Remove this function.
if (context.executing_eagerly() and preferred_dtype is not None and
(preferred_dtype.is_integer or preferred_dtype.is_bool)):
v = ops.convert_to_tensor(value, name=name)
if v.dtype.is_floating:
return v
return ops.convert_to_tensor(
value, name=name, preferred_dtype=preferred_dtype)
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = tf_inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = tf_inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
@tf_export("distributions.ReparameterizationType")
class ReparameterizationType(object):
"""Instances of this class represent how sampling is reparameterized.
Two static instances exist in the distributions library, signifying
one of two possible properties for samples from a distribution:
`FULLY_REPARAMETERIZED`: Samples from the distribution are fully
reparameterized, and straight-through gradients are supported.
`NOT_REPARAMETERIZED`: Samples from the distribution are not fully
reparameterized, and straight-through gradients are either partially
unsupported or are not supported at all. In this case, for purposes of
e.g. RL or variational inference, it is generally safest to wrap the
    sample results in a `stop_gradient` call and use policy
gradients / surrogate loss instead.
"""
def __init__(self, rep_type):
self._rep_type = rep_type
def __repr__(self):
return "<Reparameteriation Type: %s>" % self._rep_type
def __eq__(self, other):
"""Determine if this `ReparameterizationType` is equal to another.
    Since ReparameterizationType instances are constant static global
instances, equality checks if two instances' id() values are equal.
Args:
other: Object to compare against.
Returns:
`self is other`.
"""
return self is other
# Fully reparameterized distribution: samples from a fully
# reparameterized distribution support straight-through gradients with
# respect to all parameters.
FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED")
tf_export("distributions.FULLY_REPARAMETERIZED").export_constant(
__name__, "FULLY_REPARAMETERIZED")
# Not reparameterized distribution: samples from a non-
# reparameterized distribution do not support straight-through gradients for
# at least some of the parameters.
NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED")
tf_export("distributions.NOT_REPARAMETERIZED").export_constant(
__name__, "NOT_REPARAMETERIZED")
@six.add_metaclass(_DistributionMeta)
@tf_export("distributions.Distribution")
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
  (e.g., mean, variance) of random variables (e.g., Bernoulli, Gaussian).
#### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
  docstring. This is implemented as a simple decorator to avoid the Python
  linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
#### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.event_shape
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape_tensor()
# Sampling returns a sample per distribution. `samples` has shape
# [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5,
# batch_shape=[2, 2], and event_shape=[].
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
#### Shapes
There are three important concepts associated with TensorFlow Distributions
shapes:
- Event shape describes the shape of a single draw from the distribution;
it may be dependent across dimensions. For scalar distributions, the event
shape is `[]`. For a 5-dimensional MultivariateNormal, the event shape is
`[5]`.
- Batch shape describes independent, not identically distributed draws, aka a
"collection" or "bunch" of distributions.
- Sample shape describes independent, identically distributed draws of batches
from the distribution family.
The event shape and the batch shape are properties of a Distribution object,
whereas the sample shape is associated with a specific call to `sample` or
`log_prob`.
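  As an illustration (using `Normal` as an assumed concrete subclass):

  ```python
  # Three independent scalar normals: batch_shape is [3], event_shape is [].
  dist = tf.distributions.Normal(loc=tf.zeros([3]), scale=1.)
  # Drawing 7 samples prepends the sample shape: `x` has shape [7, 3].
  x = dist.sample([7])
  ```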
For detailed usage examples of TensorFlow Distributions shapes, see
[this tutorial](
https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb)
#### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `concentration1` and `concentration0`, and does not have
well-defined mode if `concentration1 < 1` or `concentration0 < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
reparameterization_type,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
reparameterization_type: Instance of `ReparameterizationType`.
If `distributions.FULLY_REPARAMETERIZED`, this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution. If `distributions.NOT_REPARAMETERIZED`,
then no such reparameterization is available.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
parameters: Python `dict` of parameters used to instantiate this
`Distribution`.
graph_parents: Python `list` of graph prerequisites of this
`Distribution`.
name: Python `str` name prefixed to Ops created by this class. Default:
subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
if not name or name[-1] != "/": # `name` is not a name scope
non_unique_name = name or type(self).__name__
with ops.name_scope(non_unique_name) as name:
pass
self._dtype = dtype
self._reparameterization_type = reparameterization_type
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters or {}
self._graph_parents = graph_parents
self._name = name
@property
def _parameters(self):
return self._parameter_dict
@_parameters.setter
def _parameters(self, value):
"""Intercept assignments to self._parameters to avoid reference cycles.
Parameters are often created using locals(), so we need to clean out any
references to `self` before assigning it to an attribute.
Args:
value: A dictionary of parameters to assign to the `_parameters` property.
"""
if "self" in value:
del value["self"]
self._parameter_dict = value
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`.
Subclasses should override class method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. `TensorShape`) shapes.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`. Assumes that the sample's
shape is known statically.
Subclasses should override class method `_param_shapes` to return
constant-valued tensors when constant values are fed.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
# Remove "self", "__class__", or other special variables. These can appear
# if the subclass used:
# `parameters = dict(locals())`.
return {k: v for k, v in self._parameters.items()
if not k.startswith("__") and k != "self"}
@property
def reparameterization_type(self):
"""Describes how samples from the distribution are reparameterized.
Currently this is one of the static instances
`distributions.FULLY_REPARAMETERIZED`
or `distributions.NOT_REPARAMETERIZED`.
Returns:
An instance of `ReparameterizationType`.
"""
return self._reparameterization_type
@property
def allow_nan_stats(self):
"""Python `bool` describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance of a
Cauchy distribution is infinity. However, sometimes the statistic is
undefined, e.g., if a distribution's pdf does not achieve a maximum within
the support of the distribution, the mode is undefined. If the mean is
undefined, then by definition the variance is undefined. E.g. the mean for
Student's T for df = 1 is undefined (no clear way to say it is either + or -
infinity), so the variance = E[(X - mean)**2] is also undefined.
Returns:
allow_nan_stats: Python `bool`.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python `bool` indicating possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
return type(self)(**parameters)
def _batch_shape_tensor(self):
raise NotImplementedError(
"batch_shape_tensor is not implemented: {}".format(type(self).__name__))
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
if self.batch_shape.is_fully_defined():
return ops.convert_to_tensor(self.batch_shape.as_list(),
dtype=dtypes.int32,
name="batch_shape")
return self._batch_shape_tensor()
def _batch_shape(self):
return tensor_shape.TensorShape(None)
@property
def batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
May be partially defined or unknown.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return tensor_shape.as_shape(self._batch_shape())
def _event_shape_tensor(self):
raise NotImplementedError(
"event_shape_tensor is not implemented: {}".format(type(self).__name__))
def event_shape_tensor(self, name="event_shape_tensor"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
if self.event_shape.is_fully_defined():
return ops.convert_to_tensor(self.event_shape.as_list(),
dtype=dtypes.int32,
name="event_shape")
return self._event_shape_tensor()
def _event_shape(self):
return tensor_shape.TensorShape(None)
@property
def event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return tensor_shape.as_shape(self._event_shape())
def is_scalar_event(self, name="is_scalar_event"):
"""Indicates that `event_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_event: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
name="is_scalar_event")
def is_scalar_batch(self, name="is_scalar_batch"):
"""Indicates that `batch_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_batch: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor),
name="is_scalar_batch")
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented: {}".format(
type(self).__name__))
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
samples = self._sample_n(n, seed, **kwargs)
batch_event_shape = array_ops.shape(samples)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
samples = array_ops.reshape(samples, final_shape)
samples = self._set_sample_static_shape(samples, sample_shape)
return samples
def sample(self, sample_shape=(), seed=None, name="sample"):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
return self._call_sample_n(sample_shape, seed, name)
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented: {}".format(
type(self).__name__))
def _call_log_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._log_prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_prob(self, value, name="log_prob"):
"""Log probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_prob(value, name)
def _prob(self, value):
raise NotImplementedError("prob is not implemented: {}".format(
type(self).__name__))
def _call_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def prob(self, value, name="prob"):
"""Probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_prob(value, name)
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented: {}".format(
type(self).__name__))
def _call_log_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._log_cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_cdf(self, value, name="log_cdf"):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_cdf(value, name)
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented: {}".format(
type(self).__name__))
def _call_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def cdf(self, value, name="cdf"):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_cdf(value, name)
def _log_survival_function(self, value):
raise NotImplementedError(
"log_survival_function is not implemented: {}".format(
type(self).__name__))
def _call_log_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._log_survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log1p(-self.cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_survival_function(self, value, name="log_survival_function"):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```none
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_log_survival_function(value, name)
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented: {}".format(
type(self).__name__))
def _call_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **kwargs)
except NotImplementedError:
raise original_exception
def survival_function(self, value, name="survival_function"):
"""Survival function.
Given random variable `X`, the survival function is defined:
```none
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_survival_function(value, name)
def _entropy(self):
raise NotImplementedError("entropy is not implemented: {}".format(
type(self).__name__))
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented: {}".format(
type(self).__name__))
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _quantile(self, value):
raise NotImplementedError("quantile is not implemented: {}".format(
type(self).__name__))
def _call_quantile(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
return self._quantile(value, **kwargs)
def quantile(self, value, name="quantile"):
"""Quantile function. Aka "inverse cdf" or "percent point function".
Given random variable `X` and `p in [0, 1]`, the `quantile` is:
```none
quantile(p) := x such that P[X <= x] == p
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_quantile(value, name)
def _variance(self):
raise NotImplementedError("variance is not implemented: {}".format(
type(self).__name__))
def variance(self, name="variance"):
"""Variance.
Variance is defined as,
```none
Var = E[(X - E[X])**2]
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `Var.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
variance: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._variance()
except NotImplementedError as original_exception:
try:
return math_ops.square(self._stddev())
except NotImplementedError:
raise original_exception
def _stddev(self):
raise NotImplementedError("stddev is not implemented: {}".format(
type(self).__name__))
def stddev(self, name="stddev"):
"""Standard deviation.
Standard deviation is defined as,
```none
stddev = E[(X - E[X])**2]**0.5
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `stddev.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
stddev: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._stddev()
except NotImplementedError as original_exception:
try:
return math_ops.sqrt(self._variance())
except NotImplementedError:
raise original_exception
def _covariance(self):
raise NotImplementedError("covariance is not implemented: {}".format(
type(self).__name__))
def covariance(self, name="covariance"):
"""Covariance.
Covariance is (possibly) defined only for non-scalar-event distributions.
For example, for a length-`k`, vector-valued distribution, it is calculated
as,
```none
Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
```
where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
denotes expectation.
Alternatively, for non-vector, multivariate distributions (e.g.,
matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
under some vectorization of the events, i.e.,
```none
Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
```
where `Cov` is a (batch of) `k' x k'` matrices,
`0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
mapping indices of this distribution's event dimensions to indices of a
length-`k'` vector.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
where the first `n` dimensions are batch coordinates and
`k' = reduce_prod(self.event_shape)`.
"""
with self._name_scope(name):
return self._covariance()
def _mode(self):
raise NotImplementedError("mode is not implemented: {}".format(
type(self).__name__))
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
def _cross_entropy(self, other):
return kullback_leibler.cross_entropy(
self, other, allow_nan_stats=self.allow_nan_stats)
def cross_entropy(self, other, name="cross_entropy"):
"""Computes the (Shannon) cross entropy.
Denote this distribution (`self`) by `P` and the `other` distribution by
`Q`. Assuming `P, Q` are absolutely continuous with respect to
    one another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shannon)
cross entropy is defined as:
```none
H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
```
where `F` denotes the support of the random variable `X ~ P`.
Args:
other: `tfp.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
cross_entropy: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
        representing `n` different calculations of (Shannon) cross entropy.
"""
with self._name_scope(name):
return self._cross_entropy(other)
def _kl_divergence(self, other):
return kullback_leibler.kl_divergence(
self, other, allow_nan_stats=self.allow_nan_stats)
def kl_divergence(self, other, name="kl_divergence"):
"""Computes the Kullback--Leibler divergence.
Denote this distribution (`self`) by `p` and the `other` distribution by
`q`. Assuming `p, q` are absolutely continuous with respect to reference
measure `r`, the KL divergence is defined as:
```none
KL[p, q] = E_p[log(p(X)/q(X))]
= -int_F p(x) log q(x) dr(x) + int_F p(x) log p(x) dr(x)
= H[p, q] - H[p]
```
where `F` denotes the support of the random variable `X ~ p`, `H[., .]`
    denotes (Shannon) cross entropy, and `H[.]` denotes (Shannon) entropy.
Args:
other: `tfp.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
kl_divergence: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
representing `n` different calculations of the Kullback-Leibler
divergence.
"""
with self._name_scope(name):
return self._kl_divergence(other)
def __str__(self):
return ("tfp.distributions.{type_name}("
"\"{self_name}\""
"{maybe_batch_shape}"
"{maybe_event_shape}"
", dtype={dtype})".format(
type_name=type(self).__name__,
self_name=self.name,
maybe_batch_shape=(", batch_shape={}".format(self.batch_shape)
if self.batch_shape.ndims is not None
else ""),
maybe_event_shape=(", event_shape={}".format(self.event_shape)
if self.event_shape.ndims is not None
else ""),
dtype=self.dtype.name))
def __repr__(self):
return ("<tfp.distributions.{type_name} "
"'{self_name}'"
" batch_shape={batch_shape}"
" event_shape={event_shape}"
" dtype={dtype}>".format(
type_name=type(self).__name__,
self_name=self.name,
batch_shape=self.batch_shape,
event_shape=self.event_shape,
dtype=self.dtype.name))
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
([] if values is None else values) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape_to_vector(self, x, name):
"""Helper to `sample` which ensures input is 1D."""
x_static_val = tensor_util.constant_value(x)
if x_static_val is None:
prod = math_ops.reduce_prod(x)
else:
prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())
ndims = x.get_shape().ndims # != sample_ndims
if ndims is None:
# Maybe expand_dims.
ndims = array_ops.rank(x)
expanded_shape = util.pick_vector(
math_ops.equal(ndims, 0),
np.array([1], dtype=np.int32), array_ops.shape(x))
x = array_ops.reshape(x, expanded_shape)
elif ndims == 0:
# Definitely expand_dims.
if x_static_val is not None:
x = ops.convert_to_tensor(
np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
name=name)
else:
x = array_ops.reshape(x, [1])
elif ndims != 1:
raise ValueError("Input is neither scalar nor vector.")
return x, prod
def _set_sample_static_shape(self, x, sample_shape):
"""Helper to `sample`; sets static shape info."""
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(sample_shape))
ndims = x.get_shape().ndims
sample_ndims = sample_shape.ndims
batch_ndims = self.batch_shape.ndims
event_ndims = self.event_shape.ndims
# Infer rank(x).
if (ndims is None and
sample_ndims is not None and
batch_ndims is not None and
event_ndims is not None):
ndims = sample_ndims + batch_ndims + event_ndims
x.set_shape([None] * ndims)
# Infer sample shape.
if ndims is not None and sample_ndims is not None:
shape = sample_shape.concatenate([None]*(ndims - sample_ndims))
x.set_shape(x.get_shape().merge_with(shape))
# Infer event shape.
if ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape(
[None]*(ndims - event_ndims)).concatenate(self.event_shape)
x.set_shape(x.get_shape().merge_with(shape))
# Infer batch shape.
if batch_ndims is not None:
if ndims is not None:
if sample_ndims is None and event_ndims is not None:
sample_ndims = ndims - batch_ndims - event_ndims
elif event_ndims is None and sample_ndims is not None:
event_ndims = ndims - batch_ndims - sample_ndims
if sample_ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate(
self.batch_shape).concatenate([None]*event_ndims)
x.set_shape(x.get_shape().merge_with(shape))
return x
def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if static_shape.ndims is not None:
return static_shape.ndims == 0
shape = dynamic_shape_fn()
if (shape.get_shape().ndims is not None and
shape.get_shape()[0].value is not None):
# If the static_shape is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return shape.get_shape().as_list() == [0]
return math_ops.equal(array_ops.shape(shape)[0], 0)
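# Illustrative usage sketch (not part of the original module): any concrete
# subclass inherits the public wrappers defined above. `Normal` below refers
# to tensorflow.python.ops.distributions.normal and is an assumption made for
# demonstration, not something defined in this file.
def _example_distribution_usage():
  from tensorflow.python.ops.distributions import normal as normal_lib
  dist = normal_lib.Normal(loc=0., scale=1.)
  samples = dist.sample([5])          # sample_shape + batch_shape + event_shape
  log_probs = dist.log_prob(samples)  # dispatches to the subclass's _log_prob
  wider = dist.copy(scale=2.)         # dict(dist.parameters, scale=2.)
  return samples, log_probs, wider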
|
{
"content_hash": "2dee794fa5ca6c836cb981e15690c18b",
"timestamp": "",
"source": "github",
"line_count": 1292,
"max_line_length": 158,
"avg_line_length": 35.03792569659443,
"alnum_prop": 0.6515938059157481,
"repo_name": "kobejean/tensorflow",
"id": "12fd0393924221c4b39467221a262454a294b7f8",
"size": "45958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/distributions/distribution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2867"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "341894"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49273038"
},
{
"name": "CMake",
"bytes": "195712"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1253646"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "836009"
},
{
"name": "Jupyter Notebook",
"bytes": "2604741"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52734"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41122917"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "466896"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
from alembic import op
from sqlalchemy.orm import sessionmaker
"""remove WRITE_ONLY from service accounts
Revision ID: 0f1576a4e220
Revises: ed70c144ae46
Create Date: 2018-05-09 14:40:13.818624
"""
# revision identifiers, used by Alembic.
revision = '0f1576a4e220'
down_revision = 'ed70c144ae46'
Session = sessionmaker()
def upgrade():
bind = op.get_bind()
session = Session(bind=bind)
session.execute("""DELETE FROM user_roles ur USING roles r
WHERE r.id = ur.role_id AND ur.user_id IN
(SELECT user_id FROM user_roles JOIN roles ON roles.id = role_id
WHERE roles.name = 'service')
AND r.name = 'write_only'""")
def downgrade():
# no value in restoring that state.
pass
# ### end Alembic commands ###
|
{
"content_hash": "85a1007bd68170b34dd37e6a6c68976b",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 72,
"avg_line_length": 24,
"alnum_prop": 0.6796875,
"repo_name": "uwcirg/true_nth_usa_portal",
"id": "769e97c6bbd41062241a6122346196893033be11",
"size": "768",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "portal/migrations/versions/0f1576a4e220_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1733344"
},
{
"name": "Dockerfile",
"bytes": "947"
},
{
"name": "HTML",
"bytes": "435596"
},
{
"name": "JavaScript",
"bytes": "588006"
},
{
"name": "Mako",
"bytes": "414"
},
{
"name": "Python",
"bytes": "1837126"
},
{
"name": "Shell",
"bytes": "13976"
},
{
"name": "Vue",
"bytes": "62901"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.contrib.auth.models import User
super_user_email_list = [settings.ADMIN_USER_EMAIL]
super_user_password_list = [settings.ADMIN_USER_PASSWORD]
def create_super_user(email, password):
    if User.objects.filter(email=email).count() == 0:
user = User.objects.create_superuser(
username=email, email=email, password=password)
else:
user = User.objects.get(email=email)
return user
# Create super users
super_user_list = []
for email, password in zip(super_user_email_list, super_user_password_list):
    super_user_list.append(create_super_user(email=email, password=password))
|
{
"content_hash": "587c34e65ad54e38a371628e93b98315",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 78,
"avg_line_length": 33.65,
"alnum_prop": 0.711738484398217,
"repo_name": "0PEIN0/drf-logger",
"id": "0ac0bae78ec8da2184d3dfedf9c9fab82c36085f",
"size": "673",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "heroku/drf_logger_init_data_load.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9740"
},
{
"name": "Shell",
"bytes": "617"
}
],
"symlink_target": ""
}
|
import io
import os
import pytest
import six
from anymarkup_core import *
from test import *
class TestSerialize(object):
"""Note: testing serialization is a bit tricky, since serializing dicts can result
    in a different order of values in the serialized string across runs.
That means that we can't just test whether the serialized string equals to expected
string. To solve this, we rather parse the serialized string back and make sure
that it equals the original structure.
"""
fixtures = os.path.join(os.path.dirname(__file__), 'fixtures')
def _read_decode(self, file):
if isinstance(file, six.string_types):
file = open(file, 'rb')
else:
file.seek(0)
return file.read().decode('utf-8')
@pytest.mark.parametrize(('struct', 'format'), [
(example_as_dict, 'ini'),
(example_as_dict, 'json'),
(example_as_dict, 'json5'),
(toml_example_as_dict, 'toml'),
(example_as_ordered_dict, 'xml'),
(example_as_dict, 'yaml'),
(example_as_ordered_dict, 'yaml'),
])
def test_serialize_basic(self, struct, format):
serialized = serialize(struct, format)
parsed_back = parse(serialized, format)
assert parsed_back == struct
assert type(parsed_back) == type(struct)
def test_serialize_works_with_wb_opened_file(self, tmpdir):
f = os.path.join(str(tmpdir), 'foo.xml')
fhandle = open(f, 'wb+')
serialize(example_as_ordered_dict, 'xml', fhandle)
assert self._read_decode(fhandle) == example_xml
def test_serialize_raises_with_unicode_opened_file(self, tmpdir):
# on Python 2, this can only be simulated with io.open
f = os.path.join(str(tmpdir), 'foo.json')
fhandle = io.open(f, 'w+', encoding='utf-8')
with pytest.raises(AnyMarkupError):
serialize(example_as_dict, 'json', fhandle)
@pytest.mark.parametrize(('struct', 'fmt', 'fname'), [
(example_as_dict, None, 'example.ini'),
(example_as_dict, None, 'example.json'),
(example_as_dict, 'json5', 'example.json5'),
(toml_example_as_dict, 'toml', 'example.toml'),
(example_as_ordered_dict, None, 'example.xml'),
(example_as_dict, None, 'example.yaml'),
(example_as_ordered_dict, None, 'example_ordered.yaml'),
])
def test_serialize_file_basic(self, struct, fmt, fname, tmpdir):
f = os.path.join(str(tmpdir), fname)
serialize_file(struct, f)
parsed_back = parse(self._read_decode(f), fmt)
assert parsed_back == struct
assert type(parsed_back) == type(struct)
def test_serialize_file_format_overrides_extension(self, tmpdir):
f = os.path.join(str(tmpdir), 'foo.ini')
serialize_file(example_as_dict, f, 'json')
assert parse(self._read_decode(f)) == example_as_dict
def test_parse_and_serialize_yaml_multiline_string(self):
# https://github.com/bkabrda/anymarkup-core/issues/1
inp = b'foo: |-\n line1\n line2\n line3\n'
assert serialize(parse(inp), 'yaml') == inp
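    # Illustrative sketch (not part of the original test module): the
    # round-trip property the tests above rely on, shown outside of pytest.
    # The format name matches those used in the parametrized cases.
    def _example_round_trip(self):
        data = {'a': 1, 'b': ['x', 'y']}
        text = serialize(data, 'json')
        assert parse(text, 'json') == data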
|
{
"content_hash": "0be0e475eade777f3fd63ff7cd8c126e",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 87,
"avg_line_length": 38.666666666666664,
"alnum_prop": 0.623882503192848,
"repo_name": "bkabrda/anymarkup-core",
"id": "81915611fdfa6181e3366eef9981e82dde0d77fd",
"size": "3156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_serialize.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "34306"
}
],
"symlink_target": ""
}
|
"""
COHORTE Monitor package
Contains all modules and packages specific to the monitor
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
MONITOR_NAME = 'cohorte.internals.monitor'
""" All monitors have the same name """
# ------------------------------------------------------------------------------
SERVICE_STATUS = "cohorte.monitor.status"
""" Specification of the monitor status service """
SERVICE_MONITOR = "cohorte.monitor.core"
""" Specification of the monitor core service """
# ------------------------------------------------------------------------------
__SIGNALS_PLATFORM_PREFIX = "/cohorte/platform"
""" Prefix to all platform signals """
SIGNALS_PLATFORM_PATTERN = "{0}/*".format(__SIGNALS_PLATFORM_PREFIX)
""" Pattern to catch all platform signals """
SIGNAL_STOP_PLATFORM = "{0}/stop".format(__SIGNALS_PLATFORM_PREFIX)
""" Signals requesting the platform to completely stop """
# ######### added by: Bassem D.
SIGNAL_STOP_NODE = "{0}/stopnode".format(__SIGNALS_PLATFORM_PREFIX)
""" Requests the node to stop """
# #########
SIGNAL_PLATFORM_STOPPING = "{0}/platform-stop"\
.format(__SIGNALS_PLATFORM_PREFIX)
"""
Sets the monitors and forkers into platform stopping mode
- sent by monitor or forker, to all monitors and forkers
- no content
"""
# ------------------------------------------------------------------------------
__SIGNALS_ISOLATE_PREFIX = "/cohorte/isolate"
""" Prefix to all isolate signals """
SIGNALS_ISOLATE_PATTERN = "{0}/*".format(__SIGNALS_ISOLATE_PREFIX)
""" Pattern to catch all isolate status signals """
SIGNAL_STOP_ISOLATE = "{0}/stop".format(__SIGNALS_ISOLATE_PREFIX)
""" Requests the isolate to stop """
SIGNAL_ISOLATE_LOST = "{0}/lost".format(__SIGNALS_ISOLATE_PREFIX)
"""
Isolate lost signal:
- sent by the forker
- contains the UID of the lost isolate
"""
SIGNAL_ISOLATE_READY = "{0}/ready".format(__SIGNALS_ISOLATE_PREFIX)
"""
Isolate status: ready
- sent by the isolate itself
- no content
"""
SIGNAL_ISOLATE_STOPPING = "{0}/stopping".format(__SIGNALS_ISOLATE_PREFIX)
"""
Isolate status: stopping (an "isolate lost" signal should follow)
- sent by the isolate itself
- no content
"""
# ------------------------------------------------------------------------------
|
{
"content_hash": "06695504a44a3767a7ca3a078081ad2b",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 80,
"avg_line_length": 30.632653061224488,
"alnum_prop": 0.620253164556962,
"repo_name": "isandlaTech/cohorte-demos",
"id": "0ab6879093c032710f25f86d770663a972a659db",
"size": "3056",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "led/dump/led-demo-raspberry/cohorte/dist/cohorte-1.0.0-20141216.234517-57-python-distribution/repo/cohorte/monitor/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "4339"
},
{
"name": "Batchfile",
"bytes": "3349"
},
{
"name": "CSS",
"bytes": "722861"
},
{
"name": "HTML",
"bytes": "267983"
},
{
"name": "Java",
"bytes": "22060"
},
{
"name": "JavaScript",
"bytes": "11127825"
},
{
"name": "Python",
"bytes": "16153349"
},
{
"name": "Shell",
"bytes": "33275"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DataDisplay'
db.create_table('profiles_datadisplay', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=300)),
('subtitle', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
('subsubtitle', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
('record', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.GeoRecord'], null=True, blank=True)),
('indicator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.Indicator'], null=True, blank=True)),
('image', self.gf('sorl.thumbnail.fields.ImageField')(max_length=100)),
('template', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.DataDisplayTemplate'])),
('source', self.gf('django.db.models.fields.TextField')(blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=100, db_index=True)),
))
db.send_create_signal('profiles', ['DataDisplay'])
# Deleting field 'DataDisplayTemplate.template'
db.delete_column('profiles_datadisplaytemplate', 'template')
def backwards(self, orm):
# Deleting model 'DataDisplay'
db.delete_table('profiles_datadisplay')
# Adding field 'DataDisplayTemplate.template'
db.add_column('profiles_datadisplaytemplate', 'template', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True), keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.datadisplay': {
'Meta': {'object_name': 'DataDisplay'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']", 'null': 'True', 'blank': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'subsubtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataDisplayTemplate']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'profiles.datadisplaytemplate': {
'Meta': {'object_name': 'DataDisplayTemplate'},
'display_type': ('django.db.models.fields.CharField', [], {'default': "'STANDARD'", 'max_length': '11'}),
'domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'symmetrical': 'False', 'blank': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoLevel']", 'symmetrical': 'False', 'blank': 'True'}),
'records': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoRecord']", 'symmetrical': 'False', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'subsubtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'profiles.datadomain': {
'Meta': {'object_name': 'DataDomain'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'})
},
'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataSource']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataDomain']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '10'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
'profiles.indicatordata': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'IndicatorData'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data_as_change_from'", 'null': 'True', 'to': "orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data_as_change_to'", 'null': 'True', 'to': "orm['profiles.Time']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']", 'null': 'True'})
},
'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataDomain']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"})
},
'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']"})
},
'profiles.time': {
'Meta': {'object_name': 'Time'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
}
}
complete_apps = ['profiles']
|
{
"content_hash": "d66f1acbf5aa55b9dcf5d8c32976d5fd",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 192,
"avg_line_length": 81.1413612565445,
"alnum_prop": 0.5632339656729901,
"repo_name": "216software/Profiles",
"id": "570fdf5750a2541b67746c6203726cd498aac3f3",
"size": "15516",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "communityprofiles/profiles/oldmigrations/0029_add_datadisplay.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "147379"
},
{
"name": "HTML",
"bytes": "385453"
},
{
"name": "JavaScript",
"bytes": "375694"
},
{
"name": "Jinja",
"bytes": "35800"
},
{
"name": "PLpgSQL",
"bytes": "669968"
},
{
"name": "Python",
"bytes": "2903090"
},
{
"name": "Shell",
"bytes": "6185"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="x", parent_name="sankey.node", **kwargs):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
|
{
"content_hash": "a82ed32d6acb936da3c28af39e3747b1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 77,
"avg_line_length": 35.833333333333336,
"alnum_prop": 0.5953488372093023,
"repo_name": "plotly/python-api",
"id": "11e796ae004cabbcffc7e9c08a3b2edb837963ec",
"size": "430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/sankey/node/_x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from pyhttp import Request, Response, JsonResponse, MsgpackResponse, Cookie, CookieException
import time
class RequestTestCase(TestCase):
def test_instance(self):
request = Request('GET', 'foo/bar', {'hello': 'world', 'aaa': 111}, {'bbb': 222, 'ccc': 333})
self.assertEqual(request.path, 'foo/bar')
self.assertEqual(request.method, 'GET')
self.assertEqual(request.query.all(), {'hello': 'world', 'aaa': 111})
self.assertEqual(request.data.all(), {'bbb': 222, 'ccc': 333})
def test_trim_path(self):
request = Request('GET', '/foo/bar')
self.assertEqual(request.path, 'foo/bar')
request = Request('GET', '/foo/bar/')
self.assertEqual(request.path, 'foo/bar')
request = Request('GET', 'foo/bar/')
self.assertEqual(request.path, 'foo/bar')
request = Request('GET', 'foo/bar')
self.assertEqual(request.path, 'foo/bar')
def test_upper_method(self):
request = Request('POST')
self.assertEqual(request.method, 'POST')
request = Request('post')
self.assertEqual(request.method, 'POST')
request = Request('PoSt')
self.assertEqual(request.method, 'POST')
class ResponseTestCase(TestCase):
def test_default(self):
response = Response()
self.assertEqual(response.get_status_code(), 200)
self.assertEqual(response.get_status_text(), None)
self.assertEqual(response.data, '')
def test_status(self):
response = Response()
response.set_status(404)
self.assertEqual(response.get_status_code(), 404)
self.assertEqual(response.get_status_text(), 'Not Found')
response.set_status(500)
self.assertEqual(response.get_status_code(), 500)
self.assertEqual(response.get_status_text(), 'Internal Server Error')
response.set_status(307, 'Custom message')
self.assertEqual(response.get_status_code(), 307)
self.assertEqual(response.get_status_text(), 'Custom message')
response.set_status(500)
response.set_status(text='Hello World')
self.assertEqual(response.get_status_code(), 500)
self.assertEqual(response.get_status_text(), 'Hello World')
response.set_status(599)
self.assertEqual(response.get_status_code(), 599)
self.assertEqual(response.get_status_text(), '')
self.assertRaises(Exception, response.set_status, 601)
self.assertRaises(Exception, response.set_status, 99)
def test_get_content(self):
response = Response()
response.data = 'FOO'
self.assertEqual('FOO', response.get_content())
class JsonResponseTestCase(TestCase):
def test_content(self):
response = JsonResponse()
response.data = {'foo': 'bar'}
self.assertEqual(response.get_content(), '{"foo": "bar"}')
response.data = None
self.assertEqual(response.get_content(), 'null')
response.data = ''
self.assertEqual(response.get_content(), '""')
class MsgpackResponseTestCase(TestCase):
def test_content(self):
response = MsgpackResponse()
response.data = {5: True, 2: 0}
self.assertEqual(response.get_content(), b'\x82\x02\x00\x05\xc3')
class CookiesTestCase(TestCase):
def test_instantiation_throws_exception_if_cookie_name_contains_invalid_characters(self):
invalid_names = [
'',
',MyName',
';MyName',
' MyName',
'\tMyName',
'\rMyName',
'\nMyName',
'\013MyName',
'\014MyName'
]
for invalid_name in invalid_names:
self.assertRaises(CookieException, Cookie, *(invalid_name, 'bar'))
def test_invalid_expiration(self):
self.assertRaises(CookieException, Cookie, *('MyCookie', 'foo', 'bar'))
def test_get_value(self):
value = 'MyValue'
cookie = Cookie('MyCookie', value)
self.assertEqual(value, cookie.get_value())
def test_get_path(self):
cookie = Cookie('foo', 'bar')
self.assertEqual('/', cookie.get_path())
def test_get_expires_time(self):
cookie = Cookie('foo', 'bar', 3600)
self.assertEqual(3600, cookie.get_expires_time())
def test_get_domain(self):
cookie = Cookie('foo', 'bar', 3600, '/', 'example.com')
self.assertEqual('example.com', cookie.get_domain())
def test_is_secure(self):
cookie = Cookie('foo', 'bar', 3600, '/', 'example.com', True)
self.assertTrue(cookie.is_secure())
def test_is_http_only(self):
cookie = Cookie('foo', 'bar', 3600, '/', 'example.com', False, True)
self.assertTrue(cookie.is_http_only())
def test_cookie_is_not_cleared(self):
cookie = Cookie('foo', 'bar', int(round(time.time()))+3600*24)
self.assertFalse(cookie.is_cleared())
def test_cookie_is_cleared(self):
cookie = Cookie('foo', 'bar', int(round(time.time()))-20)
self.assertTrue(cookie.is_cleared())
def test_to_string(self):
cookie = Cookie('foo', 'bar', 1, '/', 'example.com', True)
self.assertEqual('foo=bar; expires=Thu, 01-Jan-1970 00:00:01 GMT; path=/; domain=example.com; secure; httponly', str(cookie))
cookie = Cookie('foo', '', 1, '/admin/', 'example.com')
self.assertEqual('foo=deleted; expires=Thu, 01-Jan-1970 00:00:00 GMT; path=/admin/; domain=example.com; httponly', str(cookie))
cookie = Cookie('foo', 'bar', 0, '/', '')
self.assertEqual('foo=bar; path=/; httponly', str(cookie))
|
{
"content_hash": "06e6dc0646293e15c6bd05897a053cf4",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 135,
"avg_line_length": 34.266666666666666,
"alnum_prop": 0.6107180756986205,
"repo_name": "felixcarmona/pyhttp",
"id": "6cf421d520147aa031c7bd5ba31d0f96262a82e7",
"size": "5654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyhttp/tests/test_http.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14143"
}
],
"symlink_target": ""
}
|
"""
MoinMoin - a xmlrpc server and client for the notification bot
@copyright: 2007 by Karol Nowak <grywacz@gmail.com>
@license: GNU GPL, see COPYING for details.
"""
import logging, xmlrpclib, Queue
from SimpleXMLRPCServer import SimpleXMLRPCServer
from threading import Thread
import jabberbot.commands as cmd
class ConfigurationError(Exception):
def __init__(self, message):
Exception.__init__(self)
self.message = message
def _xmlrpc_decorator(function):
"""A decorator function, which adds some maintenance code
This function takes care of preparing a MultiCall object and
an authentication token, and deleting them at the end.
"""
def wrapped_func(self, command):
# Dummy function, so that the string appears in a .po file
_ = lambda x: x
self.token = None
self.multicall = xmlrpclib.MultiCall(self.connection)
jid = command.jid
if type(jid) is not list:
jid = [jid]
try:
try:
self.get_auth_token(command.jid)
if self.token:
self.multicall.applyAuthToken(self.token)
function(self, command)
self.commands_out.put_nowait(command)
except xmlrpclib.Fault, fault:
msg = _("Your request has failed. The reason is:\n%(error)s")
self.log.error(str(fault))
self.report_error(jid, msg, {'error': fault.faultString})
except xmlrpclib.Error, err:
msg = _("A serious error occurred while processing your request:\n%(error)s")
self.log.error(str(err))
self.report_error(jid, msg, {'error': str(err)})
except Exception, exc:
msg = _("An internal error has occurred, please contact the administrator.")
self.log.critical(str(exc))
self.report_error(jid, msg)
finally:
del self.token
del self.multicall
return wrapped_func
class XMLRPCClient(Thread):
"""XMLRPC Client
It's responsible for performing XMLRPC operations on
a wiki, as instructed by command objects received from
the XMPP component"""
def __init__(self, config, commands_in, commands_out):
"""A constructor
@param commands_out: an output command queue (to xmpp)
@param commands_in: an input command queue (from xmpp)
"""
Thread.__init__(self)
self.log = logging.getLogger(__name__)
if not config.secret:
error = "You must set a (long) secret string!"
self.log.critical(error)
raise ConfigurationError(error)
self.commands_in = commands_in
self.commands_out = commands_out
self.config = config
self.url = config.wiki_url + "?action=xmlrpc2"
self.connection = self.create_connection()
self.token = None
self.multicall = None
self.stopping = False
self._cmd_handlers = {cmd.GetPage: self.get_page,
cmd.GetPageHTML: self.get_page_html,
cmd.GetPageList: self.get_page_list,
cmd.GetPageInfo: self.get_page_info,
cmd.GetUserLanguage: self.get_language_by_jid,
cmd.Search: self.do_search,
cmd.RevertPage: self.do_revert}
def run(self):
"""Starts the server / thread"""
while True:
if self.stopping:
break
try:
command = self.commands_in.get(True, 2)
self.execute_command(command)
except Queue.Empty:
pass
def stop(self):
"""Stop the thread"""
self.stopping = True
def create_connection(self):
return xmlrpclib.ServerProxy(self.url, allow_none=True, verbose=self.config.verbose)
def execute_command(self, command):
"""Execute commands coming from the XMPP component"""
cmd_name = command.__class__
try:
handler = self._cmd_handlers[cmd_name]
except KeyError:
self.log.debug("No such command: " + cmd_name.__name__)
return
handler(command)
def report_error(self, jid, text, data={}):
"""Reports an internal error
@param jid: Jabber ID that should be informed about the error condition
@param text: description of the error
@param data: dictionary used to substitute strings in translated message
@type data: dict
"""
# Dummy function, so that the string appears in a .po file
_ = lambda x: x
cmddata = {'text': text, 'data': data}
report = cmd.NotificationCommandI18n(jid, cmddata, msg_type=u"chat", async=False)
self.commands_out.put_nowait(report)
def get_auth_token(self, jid):
"""Get an auth token using user's Jabber ID
@type jid: unicode
"""
# We have to use a bare JID
jid = jid.split('/')[0]
token = self.connection.getJabberAuthToken(jid, self.config.secret)
if token:
self.token = token
def warn_no_credentials(self, jid):
"""Warn a given JID that credentials check failed
@param jid: full JID to notify about failure
@type jid: str
"""
# Dummy function, so that the string appears in a .po file
_ = lambda x: x
cmddata = {'text': _("Credentials check failed, you might be unable to see all information.")}
warning = cmd.NotificationCommandI18n([jid], cmddata, async=False)
self.commands_out.put_nowait(warning)
def _get_multicall_result(self, jid):
"""Returns multicall results and issues a warning if there's an auth error
@param jid: a full JID to use if there's an error
@type jid: str
"""
if not self.token:
result = self.multicall()[0]
token_result = u"FAILURE"
else:
token_result, result = self.multicall()
if token_result != u"SUCCESS":
self.warn_no_credentials(jid)
return result
def get_page(self, command):
"""Returns a raw page"""
self.multicall.getPage(command.pagename)
command.data = self._get_multicall_result(command.jid)
get_page = _xmlrpc_decorator(get_page)
def get_page_html(self, command):
"""Returns a html-formatted page"""
self.multicall.getPageHTML(command.pagename)
command.data = self._get_multicall_result(command.jid)
get_page_html = _xmlrpc_decorator(get_page_html)
def get_page_list(self, command):
"""Returns a list of all accesible pages"""
# Dummy function, so that the string appears in a .po file
_ = lambda x: x
cmd_data = {'text': _("This command may take a while to complete, please be patient...")}
info = cmd.NotificationCommandI18n([command.jid], cmd_data, async=False, msg_type=u"chat")
self.commands_out.put_nowait(info)
self.multicall.getAllPages()
command.data = self._get_multicall_result(command.jid)
get_page_list = _xmlrpc_decorator(get_page_list)
def get_page_info(self, command):
"""Returns detailed information about a given page"""
self.multicall.getPageInfo(command.pagename)
command.data = self._get_multicall_result(command.jid)
get_page_info = _xmlrpc_decorator(get_page_info)
def do_search(self, command):
"""Performs a search"""
# Dummy function, so that the string appears in a .po file
_ = lambda x: x
cmd_data = {'text': _("This command may take a while to complete, please be patient...")}
info = cmd.NotificationCommandI18n([command.jid], cmd_data, async=False, msg_type=u"chat")
self.commands_out.put_nowait(info)
c = command
self.multicall.searchPagesEx(c.term, c.search_type, 30, c.case, c.mtime, c.regexp)
command.data = self._get_multicall_result(command.jid)
do_search = _xmlrpc_decorator(do_search)
def do_revert(self, command):
"""Performs a page revert"""
# Dummy function, so that the string appears in a .po file
_ = lambda x: x
self.multicall.revertPage(command.pagename, command.revision)
data = self._get_multicall_result(command.jid)
if type(data) == bool and data:
cmd_data = {'text': _("Page has been reverted.")}
elif isinstance(data, str) or isinstance(data, unicode):
cmd_data = {'text': _("Revert failed: %(reason)s" % {'reason': data})}
else:
cmd_data = {'text': _("Revert failed.")}
info = cmd.NotificationCommandI18n([command.jid], cmd_data, async=False, msg_type=u"chat")
self.commands_out.put_nowait(info)
do_revert = _xmlrpc_decorator(do_revert)
def get_language_by_jid(self, command):
"""Returns language of the a user identified by the given JID"""
server = xmlrpclib.ServerProxy(self.config.wiki_url + "?action=xmlrpc2")
language = "en"
try:
language = server.getUserLanguageByJID(command.jid)
except xmlrpclib.Fault, fault:
self.log.error(str(fault))
except xmlrpclib.Error, err:
self.log.error(str(err))
except Exception, exc:
self.log.critical(str(exc))
command.language = language
self.commands_out.put_nowait(command)
class XMLRPCServer(Thread):
"""XMLRPC Server
It waits for notifications requests coming from wiki,
creates command objects and puts them on a queue for
later processing by the XMPP component
@param commands: an input command queue
"""
def __init__(self, config, commands):
Thread.__init__(self)
self.commands = commands
self.verbose = config.verbose
self.log = logging.getLogger(__name__)
self.config = config
if config.secret:
self.secret = config.secret
else:
error = "You must set a (long) secret string"
self.log.critical(error)
raise ConfigurationError(error)
self.server = None
def run(self):
"""Starts the server / thread"""
self.server = SimpleXMLRPCServer((self.config.xmlrpc_host, self.config.xmlrpc_port))
# Register methods having an "export" attribute as XML RPC functions and
# decorate them with a check for a shared (wiki-bot) secret.
items = self.__class__.__dict__.items()
methods = [(name, func) for (name, func) in items if callable(func)
and "export" in func.__dict__]
for name, func in methods:
self.server.register_function(self.secret_check(func), name)
self.server.serve_forever()
def secret_check(self, function):
"""Adds a check for a secret to a given function
Using this one does not have to worry about checking for the secret
in every XML RPC function.
"""
def protected_func(secret, *args):
if secret != self.secret:
raise xmlrpclib.Fault(1, "You are not allowed to use this bot!")
else:
return function(self, *args)
return protected_func
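# Illustrative sketch (assumption, not part of the original bot): because every
# exported method is wrapped by secret_check, the wiki-side XML-RPC client has
# to pass the shared secret as the first positional argument, roughly:
#     bot = xmlrpclib.ServerProxy("http://bot-host:port")   # hypothetical URL
#     bot.send_notification(secret, [u"user@example.com"],
#                           {'text': u"Page FrontPage has changed"})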
def send_notification(self, jids, notification):
"""Instructs the XMPP component to send a notification
The notification dict has following entries:
'text' - notification text (REQUIRED)
'subject' - notification subject
'url_list' - a list of dicts describing attached URLs
@param jids: a list of JIDs to send a message to (bare JIDs)
@type jids: a list of str or unicode
@param notification: dictionary with notification data
@type notification: dict
"""
command = cmd.NotificationCommand(jids, notification, async=True)
self.commands.put_nowait(command)
return True
send_notification.export = True
def addJIDToRoster(self, jid):
"""Instructs the XMPP component to add a new JID to its roster
@param jid: a jid to add, this must be a bare jid
@type jid: str or unicode,
"""
command = cmd.AddJIDToRosterCommand(jid)
self.commands.put_nowait(command)
return True
addJIDToRoster.export = True
def removeJIDFromRoster(self, jid):
"""Instructs the XMPP component to remove a JID from its roster
@param jid: a jid to remove, this must be a bare jid
@type jid: str or unicode
"""
command = cmd.RemoveJIDFromRosterCommand(jid)
self.commands.put_nowait(command)
return True
removeJIDFromRoster.export = True
|
{
"content_hash": "a253f25647a6d8c073b5f8747e2320d9",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 102,
"avg_line_length": 32.70126582278481,
"alnum_prop": 0.6021522025238059,
"repo_name": "RealTimeWeb/wikisite",
"id": "94b09b44b24db27ee4735f0b86c3592580f167e5",
"size": "12946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jabberbot/xmlrpcbot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "49395"
},
{
"name": "CSS",
"bytes": "204104"
},
{
"name": "ColdFusion",
"bytes": "142312"
},
{
"name": "Java",
"bytes": "491798"
},
{
"name": "JavaScript",
"bytes": "2107106"
},
{
"name": "Lasso",
"bytes": "23464"
},
{
"name": "Makefile",
"bytes": "4950"
},
{
"name": "PHP",
"bytes": "144585"
},
{
"name": "Perl",
"bytes": "44627"
},
{
"name": "Python",
"bytes": "7647140"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
}
|
import logging
import threading
from ceilometerclient import client as ceilometer_client
from django.conf import settings
from django.utils import datastructures
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import keystone
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)
def get_flavor_names(request):
# TODO(lsmola) The flavors can be set per project,
# so it should show only valid ones.
try:
flavors = nova.flavor_list(request, None)
return [f.name for f in flavors]
except Exception:
return ['m1.tiny', 'm1.small', 'm1.medium',
'm1.large', 'm1.xlarge']
def is_iterable(var):
"""Return True if the given is list or tuple."""
return (isinstance(var, (list, tuple)) or
issubclass(var.__class__, (list, tuple)))
def make_query(user_id=None, tenant_id=None, resource_id=None,
user_ids=None, tenant_ids=None, resource_ids=None):
"""Returns query built form given parameters.
This query can be then used for querying resources, meters and
statistics.
:Parameters:
- `user_id`: user_id, has a priority over list of ids
- `tenant_id`: tenant_id, has a priority over list of ids
- `resource_id`: resource_id, has a priority over list of ids
- `user_ids`: list of user_ids
- `tenant_ids`: list of tenant_ids
- `resource_ids`: list of resource_ids
"""
user_ids = user_ids or []
tenant_ids = tenant_ids or []
resource_ids = resource_ids or []
query = []
if user_id:
user_ids = [user_id]
for u_id in user_ids:
query.append({"field": "user_id", "op": "eq", "value": u_id})
if tenant_id:
tenant_ids = [tenant_id]
for t_id in tenant_ids:
query.append({"field": "project_id", "op": "eq", "value": t_id})
if resource_id:
resource_ids = [resource_id]
for r_id in resource_ids:
query.append({"field": "resource_id", "op": "eq", "value": r_id})
return query
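# Illustrative note (not part of the original module): the query returned by
# make_query() is a list of Ceilometer filter expressions, e.g.
#     make_query(tenant_id="t1", resource_ids=["r1", "r2"])
# evaluates to
#     [{"field": "project_id", "op": "eq", "value": "t1"},
#      {"field": "resource_id", "op": "eq", "value": "r1"},
#      {"field": "resource_id", "op": "eq", "value": "r2"}]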
class Meter(base.APIResourceWrapper):
"""Represents one Ceilometer meter."""
_attrs = ['name', 'type', 'unit', 'resource_id', 'user_id',
'project_id']
def __init__(self, apiresource):
super(Meter, self).__init__(apiresource)
self._label = self.name
self._description = ""
def augment(self, label=None, description=None):
if label:
self._label = label
if description:
self._description = description
@property
def description(self):
return self._description
@property
def label(self):
return self._label
class Resource(base.APIResourceWrapper):
"""Represents one Ceilometer resource."""
_attrs = ['resource_id', 'source', 'user_id', 'project_id', 'metadata',
'links']
def __init__(self, apiresource, ceilometer_usage=None):
super(Resource, self).__init__(apiresource)
# Save empty strings to IDs rather than None, so it gets
# serialized correctly. We don't want 'None' strings.
self.project_id = self.project_id or ""
self.user_id = self.user_id or ""
self.resource_id = self.resource_id or ""
self._id = "%s__%s__%s" % (self.project_id,
self.user_id,
self.resource_id)
# Meters with statistics data
self._meters = {}
# TODO(lsmola) make parallel obtaining of tenant and user
# make the threading here, thread join into resource_list
if ceilometer_usage and self.project_id:
self._tenant = ceilometer_usage.get_tenant(self.project_id)
else:
self._tenant = None
if ceilometer_usage and self.user_id:
self._user = ceilometer_usage.get_user(self.user_id)
else:
self._user = None
self._query = make_query(tenant_id=self.project_id,
user_id=self.user_id,
resource_id=self.resource_id)
@property
def name(self):
name = self.metadata.get("name", None)
display_name = self.metadata.get("display_name", None)
return name or display_name or ""
@property
def id(self):
return self._id
@property
def tenant(self):
return self._tenant
@property
def user(self):
return self._user
@property
def resource(self):
return self.resource_id
@property
def query(self):
return self._query
@property
def meters(self):
return self._meters
def get_meter(self, meter_name):
return self._meters.get(meter_name, None)
def set_meter(self, meter_name, value):
self._meters[meter_name] = value
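# Illustrative note (added, not in the original file): Resource ids are composed
# as "<project_id>__<user_id>__<resource_id>", which is exactly what
# CeilometerUsage.query_from_object_id() further below splits apart again to
# rebuild a Ceilometer query for a single table row.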
class ResourceAggregate(Resource):
"""Represents aggregate of more resources together.
Aggregate of resources can be obtain by specifing
multiple ids in one parameter or by not specifying
one parameter.
Or it can be specified by query directly.
Example:
We obtain can have aggregate of resources by specifying
multiple resource_ids in resource_id parameter in init.
Or we can specify only tenant_id, which will return
all resources of that tenant.
"""
def __init__(self, tenant_id=None, user_id=None, resource_id=None,
tenant_ids=None, user_ids=None, resource_ids=None,
ceilometer_usage=None, query=None, identifier=None):
self._id = identifier
self.tenant_id = None
self.user_id = None
self.resource_id = None
# Meters with statistics data
self._meters = {}
if query:
self._query = query
else:
# TODO(lsmola) make parallel obtaining of tenant and user
# make the threading here, thread join into resource_list
if (ceilometer_usage and tenant_id):
self.tenant_id = tenant_id
self._tenant = ceilometer_usage.get_tenant(tenant_id)
else:
self._tenant = None
if (ceilometer_usage and user_id):
self.user_id = user_id
self._user = ceilometer_usage.get_user(user_id)
else:
self._user = None
if (resource_id):
self.resource_id = resource_id
self._query = make_query(tenant_id=tenant_id, user_id=user_id,
resource_id=resource_id, tenant_ids=tenant_ids,
user_ids=user_ids, resource_ids=resource_ids)
@property
def id(self):
return self._id
class Sample(base.APIResourceWrapper):
"""Represents one Ceilometer sample."""
_attrs = ['counter_name', 'user_id', 'resource_id', 'timestamp',
'resource_metadata', 'source', 'counter_unit', 'counter_volume',
'project_id', 'counter_type', 'resource_metadata']
@property
def instance(self):
display_name = self.resource_metadata.get('display_name', None)
instance_id = self.resource_metadata.get('instance_id', None)
return display_name or instance_id
@property
def name(self):
name = self.resource_metadata.get("name", None)
display_name = self.resource_metadata.get("display_name", None)
return name or display_name or ""
class Statistic(base.APIResourceWrapper):
"""Represents one Ceilometer statistic."""
_attrs = ['period', 'period_start', 'period_end',
'count', 'min', 'max', 'sum', 'avg',
'duration', 'duration_start', 'duration_end']
@memoized
def ceilometerclient(request):
"""Initialization of Ceilometer client."""
endpoint = base.url_for(request, 'metering')
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
LOG.debug('ceilometerclient connection created using token "%s" '
'and endpoint "%s"' % (request.user.token.id, endpoint))
return ceilometer_client.Client('2', endpoint,
token=(lambda: request.user.token.id),
insecure=insecure,
cacert=cacert)
def resource_list(request, query=None, ceilometer_usage_object=None):
"""List the resources."""
resources = ceilometerclient(request).\
resources.list(q=query)
return [Resource(r, ceilometer_usage_object) for r in resources]
def sample_list(request, meter_name, query=None):
"""List the samples for this meters."""
samples = ceilometerclient(request).samples.list(meter_name=meter_name,
q=query)
return [Sample(s) for s in samples]
def meter_list(request, query=None):
"""List the user's meters."""
meters = ceilometerclient(request).meters.list(query)
return [Meter(m) for m in meters]
def statistic_list(request, meter_name, query=None, period=None):
"""List of statistics."""
statistics = ceilometerclient(request).\
statistics.list(meter_name=meter_name, q=query, period=period)
return [Statistic(s) for s in statistics]
class ThreadedUpdateResourceWithStatistics(threading.Thread):
"""Multithread wrapper for update_with_statistics method of
resource_usage.
A join logic is placed in process_list class method. All resources
will have its statistics attribute filled in separate threads.
The resource_usage object is shared between threads. Each thread is
updating one Resource.
:Parameters:
- `resource`: Resource or ResourceAggregate object, that will
be filled by statistic data.
- `resources`: List of Resource or ResourceAggregate object,
that will be filled by statistic data.
- `resource_usage`: Wrapping resource usage object, that holds
all statistics data.
- `meter_names`: List of meter names of the statistics we want.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will be
returned, divided into given periods. Periods with no
data are ignored.
- `stats_attr`: String representing the attribute name of the stats.
E.g. (avg, max, min...) If None is given, whole
statistic object is returned,
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
# TODO(lsmola) Can be removed once Ceilometer supports sample-api
# and group-by, so all of this optimization will not be necessary.
# It is planned somewhere around the I (Icehouse) release.
def __init__(self, resource_usage, resource, meter_names=None,
period=None, filter_func=None, stats_attr=None,
additional_query=None):
super(ThreadedUpdateResourceWithStatistics, self).__init__()
self.resource_usage = resource_usage
self.resource = resource
self.meter_names = meter_names
self.period = period
self.stats_attr = stats_attr
self.additional_query = additional_query
def run(self):
# Run the job
self.resource_usage.update_with_statistics(self.resource,
meter_names=self.meter_names, period=self.period,
stats_attr=self.stats_attr, additional_query=self.additional_query)
@classmethod
def process_list(cls, resource_usage, resources, meter_names=None,
period=None, filter_func=None, stats_attr=None,
additional_query=None):
threads = []
for resource in resources:
# add statistics data into resource
thread = cls(resource_usage, resource, meter_names=meter_names,
period=period, stats_attr=stats_attr,
additional_query=additional_query)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
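# Usage sketch (assumption; meter name chosen for illustration):
#     ThreadedUpdateResourceWithStatistics.process_list(
#         ceilometer_usage, resources, meter_names=['cpu_util'],
#         stats_attr='avg')
# starts one thread per resource and joins them all before returning, so the
# caller sees every resource already populated with its statistics.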
class CeilometerUsage(object):
"""Represents wrapper of any Ceilometer queries.
One instance of this class should be shared between resources
as this class provides a place where users and tenants are
cached. So there are no duplicate queries to API.
This class also wraps Ceilometer API calls and provides parallel
HTTP calls to API.
This class should also serve as reasonable abstraction, that will
cover huge amount of optimization due to optimization of Ceilometer
service, without changing of the interface.
"""
def __init__(self, request):
self._request = request
# Cached users and tenants.
self._users = {}
self._tenants = {}
def get_user(self, user_id):
"""Returns user fetched form API
Caching the result, so it doesn't contact API twice with the
same query
"""
user = self._users.get(user_id, None)
if not user:
user = keystone.user_get(self._request, user_id)
# caching the user, for later use
self._users[user_id] = user
return user
def preload_all_users(self):
"""Preloads all users into dictionary.
It's more effective to preload all users, rather the fetching many
users by separate API get calls.
"""
users = keystone.user_list(self._request)
# Cache all users under the right indexes; this is more effective than
# obtaining a large number of users one by one via keystone.user_get
for u in users:
self._users[u.id] = u
def get_tenant(self, tenant_id):
"""Returns tenant fetched form API.
Caching the result, so it doesn't contact API twice with the
same query
"""
tenant = self._tenants.get(tenant_id, None)
if not tenant:
tenant = keystone.tenant_get(self._request, tenant_id)
# caching the tenant for later use
self._tenants[tenant_id] = tenant
return tenant
def preload_all_tenants(self):
"""Preloads all teannts into dictionary.
It's more effective to preload all tenants, rather the fetching many
tenants by separate API get calls.
"""
tenants, more = keystone.tenant_list(self._request)
# Cache all tenants under the right indexes; this is more effective than
# obtaining a large number of tenants one by one via keystone.tenant_get
for t in tenants:
self._tenants[t.id] = t
def global_data_get(self, used_cls=None, query=None,
with_statistics=False, additional_query=None,
with_users_and_tenants=True):
"""Obtaining a resources for table view.
It obtains resources with statistics data according to declaration
in used_cls class.
:Parameters:
- `used_cls`: Class wrapper for usage data. It acts as a wrapper for
settings needed. See the call of this method for
details.
- `query`: Explicit query definition for fetching the resources. If
no query is provided, it takes a default_query from
used_cls. If no default query is provided, it fetches
all the resources and filters them by meters defined
in used_cls.
- `with_statistics`: Define whether statistics data from the meters
defined in used_cls should be fetched.
Can be used to first obtain only the pure
resources, then with the statistics data by
AJAX.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
default_query = used_cls.default_query
query = query or default_query
filter_func = None
def filter_resources(resource):
"""Method for filtering resources by theirs links.rel attr.
The links.rel attributes contains all meters the resource have.
"""
for link in resource.links:
if link['rel'] in used_cls.meters:
return True
return False
if not query:
# Not all resource types can be obtained by a query; if there is no
# query, we filter all resources with this function.
filter_func = filter_resources
if with_statistics:
# Will add statistic data into resources.
resources = self.resources_with_statistics(
query,
used_cls.meters,
filter_func=filter_func,
stats_attr=used_cls.stats_attr,
additional_query=additional_query,
with_users_and_tenants=with_users_and_tenants)
else:
# Will load only resources without statistical data.
resources = self.resources(query, filter_func=filter_func,
with_users_and_tenants=with_users_and_tenants)
return [used_cls(resource) for resource in resources]
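# Minimal sketch of a used_cls wrapper (an assumption based on the attributes
# read above; the real wrappers live elsewhere in the dashboard code):
#     class GlobalDiskUsage(object):          # hypothetical name
#         meters = ["disk.read.bytes", "disk.write.bytes"]
#         default_query = None
#         stats_attr = "avg"
#         def __init__(self, resource):
#             self.resource = resource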
def query_from_object_id(self, object_id):
"""Obtaining a query from resource id.
Query can be then used to identify a resource in resources or meters
API calls. ID is being built in the Resource initializer, or returned
by Datatable into UpdateRow functionality.
"""
try:
tenant_id, user_id, resource_id = object_id.split("__")
except ValueError:
return []
return make_query(tenant_id=tenant_id, user_id=user_id,
resource_id=resource_id)
def update_with_statistics(self, resource, meter_names=None, period=None,
stats_attr=None, additional_query=None):
"""Adding statistical data into one Resource or ResourceAggregate.
It adds each statistic of each meter_names into the resource
attributes. Attribute name is the meter name with replaced '.' to '_'.
:Parameters:
- `resource`: Resource or ResourceAggregate object, that will
be filled by statistic data.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given a faceted result will be
returned, divided into given periods. Periods with no
data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
if not meter_names:
raise ValueError("meter_names and resource must be defined to be"
"able to obtain the statistics.")
# query for identifying one resource in meters
query = resource.query
if additional_query:
if not is_iterable(additional_query):
raise ValueError("Additional query must be list of"
" conditions. See the docs for format.")
query = query + additional_query
# TODO(lsmola) a thread for each meter will probably be overkill,
# but I should test, let's say, a thread pool with 100 threads
# and apply it only to this code.
# Though I do expect Ceilometer will support bulk requests,
# so all of this optimization will not be necessary.
for meter in meter_names:
statistics = statistic_list(self._request, meter,
query=query, period=period)
meter = meter.replace(".", "_")
if statistics:
if stats_attr:
# I want to load only a specific attribute
resource.set_meter(
meter,
getattr(statistics[0], stats_attr, None))
else:
# I want a dictionary of all statistics
resource.set_meter(meter, statistics)
else:
resource.set_meter(meter, None)
return resource
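# Illustrative note (not in the original file): after
#     usage.update_with_statistics(resource, meter_names=['cpu_util'],
#                                  stats_attr='avg')
# the average utilisation is available as resource.get_meter('cpu_util');
# a meter such as 'disk.read.bytes' would be stored under 'disk_read_bytes'
# because '.' is replaced by '_' above.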
def resources(self, query=None, filter_func=None,
with_users_and_tenants=False):
"""Obtaining resources with the query or filter_func.
Obtains resources and also fetch tenants and users associated
with those resources if with_users_and_tenants flag is true.
:Parameters:
- `query`: Query for fetching the Ceilometer Resources.
- `filter_func`: Callable for filtering of the obtained
resources.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
if with_users_and_tenants:
ceilometer_usage_object = self
else:
ceilometer_usage_object = None
resources = resource_list(self._request,
query=query, ceilometer_usage_object=ceilometer_usage_object)
if filter_func:
resources = [resource for resource in resources if
filter_func(resource)]
return resources
def resources_with_statistics(self, query=None, meter_names=None,
period=None, filter_func=None,
stats_attr=None, additional_query=None,
with_users_and_tenants=False):
"""Obtaining resources with statistics data inside.
:Parameters:
- `query`: Query for fetching the Ceilometer Resources.
- `filter_func`: Callable for filtering of the obtained
resources.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will
be returned, divided into given periods. Periods with
no data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
resources = self.resources(query, filter_func=filter_func,
with_users_and_tenants=with_users_and_tenants)
ThreadedUpdateResourceWithStatistics.process_list(self, resources,
meter_names=meter_names, period=period, stats_attr=stats_attr,
additional_query=additional_query)
return resources
def resource_aggregates(self, queries=None):
"""Obtaining resource aggregates with queries.
Representing a resource aggregate by query is a most general way
how to obtain a resource aggregates.
:Parameters:
- `queries`: Dictionary of named queries that defines a bulk of
resource aggregates.
"""
resource_aggregates = []
for identifier, query in queries.items():
resource_aggregates.append(ResourceAggregate(query=query,
ceilometer_usage=None,
identifier=identifier))
return resource_aggregates
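# Example (illustrative): the queries argument maps an arbitrary identifier to
# a Ceilometer query, e.g.
#     usage.resource_aggregates({'tenant_t1': make_query(tenant_id='t1')})
# which yields one ResourceAggregate whose id is 'tenant_t1'.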
def resource_aggregates_with_statistics(self, queries=None,
meter_names=None, period=None, filter_func=None, stats_attr=None,
additional_query=None):
"""Obtaining resource aggregates with statistics data inside.
:Parameters:
- `queries`: Dictionary of named queries that defines a bulk of
resource aggregates.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will
be returned, divided into given periods. Periods with
no data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
resource_aggregates = self.resource_aggregates(queries)
ThreadedUpdateResourceWithStatistics.process_list(self,
resource_aggregates, meter_names=meter_names, period=period,
stats_attr=stats_attr, additional_query=additional_query)
return resource_aggregates
def diff_lists(a, b):
if not a:
return []
elif not b:
return a
else:
return list(set(a) - set(b))
class Meters(object):
"""Class for listing of available meters
It is listing meters defined in this class that are available
in Ceilometer meter_list.
It is storing information that is not available in Ceilometer, i.e.
label, description.
"""
def __init__(self, request=None, ceilometer_meter_list=None):
# Storing the request.
self._request = request
# Storing the Ceilometer meter list
if ceilometer_meter_list:
self._ceilometer_meter_list = ceilometer_meter_list
else:
try:
self._ceilometer_meter_list = meter_list(request)
except Exception:
self._ceilometer_meter_list = []
exceptions.handle(self._request,
_('Unable to retrieve Ceilometer meter '
'list.'))
# Storing the meters info categorized by their services.
self._nova_meters_info = self._get_nova_meters_info()
self._neutron_meters_info = self._get_neutron_meters_info()
self._glance_meters_info = self._get_glance_meters_info()
self._cinder_meters_info = self._get_cinder_meters_info()
self._swift_meters_info = self._get_swift_meters_info()
self._kwapi_meters_info = self._get_kwapi_meters_info()
# Storing the meters info of all services together.
all_services_meters = (self._nova_meters_info,
self._neutron_meters_info, self._glance_meters_info,
self._cinder_meters_info, self._swift_meters_info,
self._kwapi_meters_info)
self._all_meters_info = {}
for service_meters in all_services_meters:
self._all_meters_info.update(dict([(meter_name, meter_info)
for meter_name,
meter_info in service_meters.items()]))
# Here will be the cached Meter objects, that will be reused for
# repeated listing.
self._cached_meters = {}
def list_all(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names
:Parameters:
- `only_meters`: The list of meter_names we want to show
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=only_meters,
except_meters=except_meters)
def list_nova(self, except_meters=None):
"""Returns a list of meters tied to nova
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._nova_meters_info.keys(),
except_meters=except_meters)
def list_neutron(self, except_meters=None):
"""Returns a list of meters tied to neutron
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._neutron_meters_info.keys(),
except_meters=except_meters)
def list_glance(self, except_meters=None):
"""Returns a list of meters tied to glance
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._glance_meters_info.keys(),
except_meters=except_meters)
def list_cinder(self, except_meters=None):
"""Returns a list of meters tied to cinder
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._cinder_meters_info.keys(),
except_meters=except_meters)
def list_swift(self, except_meters=None):
"""Returns a list of meters tied to swift
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._swift_meters_info.keys(),
except_meters=except_meters)
def list_kwapi(self, except_meters=None):
"""Returns a list of meters tied to kwapi
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._kwapi_meters_info.keys(),
except_meters=except_meters)
def _list(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names
:Parameters:
- `only_meters`: The list of meter_names we want to show
- `except_meters`: The list of meter names we don't want to show
"""
# Get all wanted meter names.
if only_meters:
meter_names = only_meters
else:
meter_names = [meter_name for meter_name
in self._all_meters_info.keys()]
meter_names = diff_lists(meter_names, except_meters)
# Collect meters for wanted meter names.
return self._get_meters(meter_names)
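# Usage sketch (assumption; meter names chosen for illustration):
#     Meters(request).list_nova(except_meters=['vcpus'])
# returns Meter wrappers for the nova-related meters that actually exist in
# the deployment's Ceilometer meter list, minus the excluded names, each
# augmented with the static label/description defined below.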
def _get_meters(self, meter_names):
"""Obtain meters based on meter_names
The meters that do not exist in Ceilometer meter list are left out.
:Parameters:
- `meter_names`: A list of meter names we want to fetch.
"""
meters = []
for meter_name in meter_names:
meter = self._get_meter(meter_name)
if meter:
meters.append(meter)
return meters
def _get_meter(self, meter_name):
"""Obtains a meter
Obtains meter either from cache or from Ceilometer meter list
joined with statically defined meter info like label and description.
:Parameters:
- `meter_name`: A meter name we want to fetch.
"""
meter = self._cached_meters.get(meter_name, None)
if not meter:
meter_candidates = [m for m in self._ceilometer_meter_list
if m.name == meter_name]
if meter_candidates:
meter_info = self._all_meters_info.get(meter_name, None)
if meter_info:
label = meter_info["label"]
description = meter_info["description"]
else:
label = ""
description = ""
meter = meter_candidates[0]
meter.augment(label=label, description=description)
self._cached_meters[meter_name] = meter
return meter
def _get_nova_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will all be supported.
meters_info = datastructures.SortedDict([
("instance", {
'label': '',
'description': _("Duration of instance"),
}),
("instance:<type>", {
'label': '',
'description': _("Duration of instance <type> "
"(openstack types)"),
}),
("memory", {
'label': '',
'description': _("Volume of RAM in MB"),
}),
("cpu", {
'label': '',
'description': _("CPU time used"),
}),
("cpu_util", {
'label': '',
'description': _("Average CPU utilization"),
}),
("vcpus", {
'label': '',
'description': _("Number of VCPUs"),
}),
("disk.read.requests", {
'label': '',
'description': _("Number of read requests"),
}),
("disk.write.requests", {
'label': '',
'description': _("Number of write requests"),
}),
("disk.read.bytes", {
'label': '',
'description': _("Volume of reads in B"),
}),
("disk.write.bytes", {
'label': '',
'description': _("Volume of writes in B"),
}),
("disk.root.size", {
'label': '',
'description': _("Size of root disk in GB"),
}),
("disk.ephemeral.size", {
'label': '',
'description': _("Size of ephemeral disk "
"in GB"),
}),
("network.incoming.bytes", {
'label': '',
'description': _("Number of incoming bytes "
"on the network for a VM interface"),
}),
("network.outgoing.bytes", {
'label': '',
'description': _("Number of outgoing bytes "
"on the network for a VM interface"),
}),
("network.incoming.packets", {
'label': '',
'description': _("Number of incoming "
"packets for a VM interface"),
}),
("network.outgoing.packets", {
'label': '',
'description': _("Number of outgoing "
"packets for a VM interface"),
})
])
# Adding flavor based meters into meters_info dict
# TODO(lsmola) this kind of meter will be probably deprecated
# https://bugs.launchpad.net/ceilometer/+bug/1208365 . Delete it then.
for flavor in get_flavor_names(self._request):
name = 'instance:%s' % flavor
meters_info[name] = dict(meters_info["instance:<type>"])
meters_info[name]['description'] = (
_('Duration of instance type %s (openstack flavor)') %
flavor)
# TODO(lsmola) allow to set specific in local_settings. For all meters
# because users can have their own agents and meters.
return meters_info
def _get_neutron_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will all be supported.
return datastructures.SortedDict([
('network', {
'label': '',
'description': _("Duration of network"),
}),
('network.create', {
'label': '',
'description': _("Creation requests for this network"),
}),
('network.update', {
'label': '',
'description': _("Update requests for this network"),
}),
('subnet', {
'label': '',
'description': _("Duration of subnet"),
}),
('subnet.create', {
'label': '',
'description': _("Creation requests for this subnet"),
}),
('subnet.update', {
'label': '',
'description': _("Update requests for this subnet"),
}),
('port', {
'label': '',
'description': _("Duration of port"),
}),
('port.create', {
'label': '',
'description': _("Creation requests for this port"),
}),
('port.update', {
'label': '',
'description': _("Update requests for this port"),
}),
('router', {
'label': '',
'description': _("Duration of router"),
}),
('router.create', {
'label': '',
'description': _("Creation requests for this router"),
}),
('router.update', {
'label': '',
'description': _("Update requests for this router"),
}),
('ip.floating', {
'label': '',
'description': _("Duration of floating ip"),
}),
('ip.floating.create', {
'label': '',
'description': _("Creation requests for this floating ip"),
}),
('ip.floating.update', {
'label': '',
'description': _("Update requests for this floating ip"),
}),
])
def _get_glance_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will all be supported.
return datastructures.SortedDict([
('image', {
'label': '',
'description': _("Image existence check"),
}),
('image.size', {
'label': '',
'description': _("Uploaded image size"),
}),
('image.update', {
'label': '',
'description': _("Number of update on the image"),
}),
('image.upload', {
'label': '',
'description': _("Number of upload of the image"),
}),
('image.delete', {
'label': '',
'description': _("Number of delete on the image"),
}),
('image.download', {
'label': '',
'description': _("Image is downloaded"),
}),
('image.serve', {
'label': '',
'description': _("Image is served out"),
}),
])
def _get_cinder_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will all be supported.
return datastructures.SortedDict([
('volume', {
'label': '',
'description': _("Duration of volume"),
}),
('volume.size', {
'label': '',
'description': _("Size of volume"),
}),
])
def _get_swift_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will all be supported.
return datastructures.SortedDict([
('storage.objects', {
'label': '',
'description': _("Number of objects"),
}),
('storage.objects.size', {
'label': '',
'description': _("Total size of stored objects"),
}),
('storage.objects.containers', {
'label': '',
'description': _("Number of containers"),
}),
('storage.objects.incoming.bytes', {
'label': '',
'description': _("Number of incoming bytes"),
}),
('storage.objects.outgoing.bytes', {
'label': '',
'description': _("Number of outgoing bytes"),
}),
('storage.api.request', {
'label': '',
'description': _("Number of API requests against swift"),
}),
])
def _get_kwapi_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will all be supported.
return datastructures.SortedDict([
('energy', {
'label': '',
'description': _("Amount of energy"),
}),
('power', {
'label': '',
'description': _("Power consumption"),
}),
])
|
{
"content_hash": "c65438af6f1719e7b7bfc9b0a80351ce",
"timestamp": "",
"source": "github",
"line_count": 1175,
"max_line_length": 79,
"avg_line_length": 37.20340425531915,
"alnum_prop": 0.5541702886946973,
"repo_name": "zouyapeng/horizon_change",
"id": "c2736fd44ec3bf884e611ff7ebca0ed8c7e25070",
"size": "44260",
"binary": false,
"copies": "4",
"ref": "refs/heads/juno",
"path": "openstack_dashboard/api/ceilometer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2270222"
},
{
"name": "HTML",
"bytes": "427249"
},
{
"name": "JavaScript",
"bytes": "270670"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4048852"
},
{
"name": "Shell",
"bytes": "17483"
}
],
"symlink_target": ""
}
|
import base64
import calendar
import datetime
import re
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, quote,
quote_plus, scheme_chars, unquote, unquote_plus,
urlencode as original_urlencode, uses_params,
)
from django.core.exceptions import TooManyFieldsSent
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes
from django.utils.functional import keep_lazy_text
# based on RFC 7232, Appendix C
ETAG_MATCH = re.compile(r'''
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
''', re.X)
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
FIELDS_MATCH = re.compile('[&;]')
@keep_lazy_text
def urlquote(url, safe='/'):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote() function.
(was used for unicode handling on Python 2)
"""
return quote(url, safe)
@keep_lazy_text
def urlquote_plus(url, safe=''):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote_plus()
function. (was used for unicode handling on Python 2)
"""
return quote_plus(url, safe)
@keep_lazy_text
def urlunquote(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote() function.
(was used for unicode handling on Python 2)
"""
return unquote(quoted_url)
@keep_lazy_text
def urlunquote_plus(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote_plus()
function. (was used for unicode handling on Python 2)
"""
return unquote_plus(quoted_url)
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return original_urlencode(
[(k, [str(i) for i in v] if isinstance(v, (list, tuple)) else str(v))
for k, v in query],
doseq
)
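# Illustrative usage (added for clarity; not part of the original module).
# With doseq=True, sequence values expand into repeated keys; dict insertion
# order is assumed to be preserved (Python 3.7+):
#
#     >>> urlencode({'a': 1, 'b': [2, 3]}, doseq=True)
#     'a=1&b=2&b=3'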
def cookie_date(epoch_seconds=None):
"""
Format the time to ensure compatibility with Netscape's cookie standard.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
# email.utils.parsedate() does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
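# Illustrative behaviour (added for clarity; not part of the original module).
# The RFC 7231 example date maps to its Unix timestamp:
#
#     >>> parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')
#     784111777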
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
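# Illustrative round trip (added for clarity; not part of the original module):
#
#     >>> int_to_base36(125)
#     '3h'
#     >>> base36_to_int('3h')
#     125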
def urlsafe_base64_encode(s):
"""
Encode a bytestring in base64 for use in URLs. Strip any trailing equal
signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = force_bytes(s)
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == '*':
return ['*']
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
return [match.group(1) for match in etag_matches if match]
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
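# Illustrative behaviour (added for clarity; not part of the original module).
# Only properly quoted (optionally weak) ETags survive parsing, and
# quote_etag() leaves an already-quoted value untouched:
#
#     >>> parse_etags('"abc", W/"def", ghi')
#     ['"abc"', 'W/"def"']
#     >>> quote_etag('abc')
#     '"abc"'
#     >>> quote_etag('"abc"')
#     '"abc"'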
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
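# Illustrative behaviour (added for clarity; not part of the original module).
# A leading dot matches the domain and all of its subdomains; the host names
# are placeholders:
#
#     >>> is_same_domain('foo.example.com', '.example.com')
#     True
#     >>> is_same_domain('example.com', '.example.com')
#     True
#     >>> is_same_domain('evil.com', '.example.com')
#     False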
def is_safe_url(url, allowed_hosts=None, require_https=False):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return (_is_safe_url(url, allowed_hosts, require_https=require_https) and
_is_safe_url(url.replace('\\', '/'), allowed_hosts, require_https=require_https))
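# Illustrative behaviour (added for clarity; not part of the original module).
# The host names below are placeholders:
#
#     >>> is_safe_url('https://example.com/next', allowed_hosts={'example.com'})
#     True
#     >>> is_safe_url('https://other.com/next', allowed_hosts={'example.com'})
#     False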
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
allow_fragments = bool(allow_fragments)
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _is_safe_url(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
# In that URL, example.com is not the hostname but a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = 'http'
valid_schemes = ['https'] if require_https else ['http', 'https']
return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
(not scheme or scheme in valid_schemes))
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
errors='replace', fields_limit=None):
"""
Return a list of key/value tuples parsed from query string.
Copied from urlparse with an additional "fields_limit" argument.
Copyright (C) 2013 Python Software Foundation (see LICENSE.python).
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
fields_limit: maximum number of fields parsed or an exception
is raised. None means no limit and is the default.
"""
if fields_limit:
pairs = FIELDS_MATCH.split(qs, fields_limit)
if len(pairs) > fields_limit:
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
else:
pairs = FIELDS_MATCH.split(qs)
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
r.append((name, value))
return r
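# Illustrative behaviour (added for clarity; not part of the original module).
# Blank values are kept only when keep_blank_values is True, and fields_limit
# caps the number of parsed fields before TooManyFieldsSent is raised:
#
#     >>> limited_parse_qsl('a=1&b=&c=3', keep_blank_values=True, fields_limit=10)
#     [('a', '1'), ('b', ''), ('c', '3')]
#     >>> limited_parse_qsl('a=1&b=&c=3')
#     [('a', '1'), ('c', '3')]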
|
{
"content_hash": "5052948cc182beda83cf275debe9bdd6",
"timestamp": "",
"source": "github",
"line_count": 416,
"max_line_length": 93,
"avg_line_length": 34.27884615384615,
"alnum_prop": 0.6286115007012623,
"repo_name": "edmorley/django",
"id": "c13f44602bd3347047a621c1cb1251afda2f67eb",
"size": "14260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/utils/http.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55935"
},
{
"name": "HTML",
"bytes": "202902"
},
{
"name": "JavaScript",
"bytes": "252653"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11837174"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
import core
import core.Rpg
from core import utils, config
from models import player, gender, species, saved_game
from core.localisation import _
from core import command, command_factory
import readline
class main:
_debug = False
def __init__(self, debug=False, world=''):
self._engine = core.Rpg.Rpg(debug)
try:
self._engine.initWorld(world)
(saveId, newGame, login) = self._showMainMenu()
self._engine.initSavedGame(saveId)
# new game
if newGame:
(login, genderId, speciesId) = self._interactivePlayerCreation()
self._engine.createPlayer(login, genderId, speciesId)
print(_('PLAYER_CREATION_CONFIRMATION'))
self._engine.initPlayer()
except (KeyboardInterrupt, EOFError):
print("")
return
except BaseException as e:
e = self._engine.renderException(e)
if e is not None:
print(e)
return
self.run()
def _showMainMenu(self):
savedGames = saved_game.saved_game.parseSavedGames(
saved_game.saved_game.loadAll()
)
hasExistingGames = savedGames['has_existing_games']
savedGames = savedGames['saved_games']
newGame = True
choiceGame = 0
if hasExistingGames:
choiceGame = self.choiceMenu(
_('MAIN_MENU_TITLE'), _('CHOICE_QUESTION'),
[_('CHOICE_NEW_GAME'), _('CHOICE_LOAD_GAME')]
)
newGame = (choiceGame == 0)
choiceSave = None
savedGameLogin = None
while choiceSave is None:
choiceSave = self.choiceMenu(
_('SAVED_GAME_MENU_TITLE'),
_('SAVED_GAME_CHOICE_QUESTION'),
[self.formatSavedGameName(s) for s in savedGames]
)
if savedGames[choiceSave]['id_player'] is not None:
savedGameLogin = savedGames[choiceSave]['login']
# new game
# and saved game used
# and no overwrite, let's choose another saved game
if newGame \
and savedGameLogin is not None \
and not self.yesNoQuestion(_('OVERWRITE_SAVEDGAME_QUESTION_{choices}')):
choiceSave = None
savedGameLogin = None
# load game
# and no saved game exists in this slot
elif not newGame \
and savedGameLogin is None:
choiceSave = None
return (
savedGames[choiceSave]['id_saved_game'],
newGame,
savedGameLogin
)
@staticmethod
def formatSavedGameName(s):
if s['id_player'] is None:
return _('EMPTY_SAVED_GAME')
else:
data = {
'login': s['login']
}
return _('SAVED_GAME_INFO_{login}').format(**data)
def _interactivePlayerCreation(self): # pragma: no cover
login = None
while login is None or login == '':
login = utils.read(_('LOGIN_PROMPT'))
if len(player.model.loadBy({'login': login})):
print(_('ERROR_SIGNUP_LOGIN_ALREADY_USED'))
login = None
genders = gender.model.loadAll()
g = self.choiceMenu(
_('GENDER_SELECTION'), _('GENDER_PROMPT'),
[g['name'] for g in genders]
)
genderId = genders[g]['id_gender']
sps = species.model.loadAll()
nbSpecies = len(sps)
if nbSpecies == 1:
speciesIndex = 0
else:
speciesIndex = self.choiceMenu(
_('SPECIES_SELECTION'), _('SPECIES_PROMPT'),
[g['name'] for g in sps]
)
speciesId = sps[speciesIndex]['id_species']
return (login, genderId, speciesId)
def _gameOver(self): # pragma: no cover
print(_('GAME_OVER_TEXT'))
def run(self): # pragma: no cover
'''
Main method of the Rpg Class, will ask the player to enter a command
'''
c = ''
result = 0
while not self._engine.isGameOver():
try:
c = self.readCommand()
except KeyboardInterrupt:
print("")
continue
except EOFError:
print("")
break
if c != "":
self._engine.setAction(main.parseTypedAction(c))
result = self._engine._runAction()
if result == command_factory.quit:
break
elif c != "":
print(result)
print("")
if self._engine.isGameOver():
self._gameOver()
@staticmethod
def parseTypedAction(action):
"""
Method to parse the action typed by the player to detect the action
and the action's arguments
"""
inOption = False
commands, sep, option, optionStart = list(), ' ', '', 0
commandLen = len(action)
for k,i in enumerate(action):
# first letter of the option
if i != ' ' and not inOption:
# Set the start index of the option
optionStart = k
inOption = True
# Set the option delimiter
sep = i if i in ("'", '"') else ' '
if inOption:
# If the current char is the option delimiter, but not the
# starting one
if i == sep and k > optionStart:
# The option is ended
inOption = False
elif i != sep:
option += i
# The option is complete, append it to the list
if not inOption or k == commandLen - 1:
commands.append(str(option))
option = ''
return commands
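# Illustrative behaviour (added for clarity; not part of the original class).
# Arguments wrapped in single or double quotes are kept as one token; the
# command below is a made-up example:
#
#     >>> main.parseTypedAction('look "rusty sword"')
#     ['look', 'rusty sword']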
def readCommand(self): # pragma: no cover
"""
Method to set the autocompleter and run the prompt, from utils
"""
completer = command.completer(
sorted(command_factory.factory.mapping.keys())
)
readline.set_completer(completer.complete)
readline.parse_and_bind('tab: complete')
readline.set_completer_delims('')
return utils.read(_('COMMAND_PROMPT'))
def choiceMenu(self, question, prompt, choices):
print(question)
for k, v in enumerate(choices):
print(str(k + 1).rjust(3) + ' - ' + v)
v = 0
while v <= 0 or v >= len(choices) + 1:
v = utils.read(prompt)
try:
v = int(v)
except:
v = -0
return v - 1
def yesNoQuestion(self, question):
v = None
yesNo = {'yes': _('ANSWER_YES'), 'no': _('ANSWER_NO')}
questionDataFormat = {'choices': '({yes}/{no})'.format(**yesNo)}
while v not in yesNo.values():
v = utils.read(question.format(**questionDataFormat))
return v == _('ANSWER_YES')
|
{
"content_hash": "331918edf0e9b8aa91eabb6709399298",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 76,
"avg_line_length": 24.26839826839827,
"alnum_prop": 0.6476988940420978,
"repo_name": "rrpg/engine",
"id": "9ec2df790c740fefe9d40ced1a2275b962c938c6",
"size": "5631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CLI/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3590"
},
{
"name": "PLpgSQL",
"bytes": "10558"
},
{
"name": "Python",
"bytes": "131475"
},
{
"name": "Shell",
"bytes": "545"
}
],
"symlink_target": ""
}
|
"""
Provides FileStorage implementation for Amazon S3.
This is useful for storing files in S3.
"""
from __future__ import absolute_import
from datetime import datetime
import uuid
from boto.s3.connection import S3Connection
from depot._compat import unicode_text
from .interfaces import FileStorage, StoredFile
from . import utils
CANNED_ACL_PUBLIC_READ = 'public-read'
CANNED_ACL_PRIVATE = 'private'
class S3StoredFile(StoredFile):
def __init__(self, file_id, key):
_check_file_id(file_id)
self._key = key
metadata_info = {'filename': key.get_metadata('x-depot-filename'),
'content_type': key.content_type,
'content_length': key.size,
'last_modified': None}
try:
last_modified = key.get_metadata('x-depot-modified')
if last_modified:
metadata_info['last_modified'] = datetime.strptime(last_modified,
'%Y-%m-%d %H:%M:%S')
except:
pass
super(S3StoredFile, self).__init__(file_id=file_id, **metadata_info)
def read(self, n=-1):
if self.closed:
raise ValueError("cannot read from a closed file")
return self._key.read(n)
def close(self):
self._key.close()
@property
def closed(self):
return self._key.closed
@property
def public_url(self):
# Old boto versions did support never-expiring URLs,
# but the latest seem not to: https://github.com/boto/boto/blob/develop/boto/s3/connection.py#L390
expires_in = 31536000 # 1 YEAR
return self._key.generate_url(expires_in=expires_in, query_auth=False)
class BucketDriver(object):
def __init__(self, bucket, prefix):
self.bucket = bucket
self.prefix = prefix
def get_key(self, key_name):
return self.bucket.get_key('%s%s' % (self.prefix, key_name))
def new_key(self, key_name):
return self.bucket.new_key('%s%s' % (self.prefix, key_name))
def list_key_names(self):
keys = self.bucket.list(prefix=self.prefix)
return [k.name[len(self.prefix):] for k in keys]
class S3Storage(FileStorage):
""":class:`depot.io.interfaces.FileStorage` implementation that stores files on S3.
All the files are stored inside a bucket named ``bucket`` on ``host`` which Depot
connects to using ``access_key_id`` and ``secret_access_key``.
Additional options include:
* ``host`` which can be used to specify a host different from Amazon
AWS S3 Storage
* ``policy`` which can be used to specify a canned ACL policy of either
``private`` or ``public-read``.
* ``encrypt_key`` which can be specified to use the server side
encryption feature.
* ``prefix`` parameter can be used to store all files under the
specified prefix. Use a prefix like **dirname/** (*note the trailing slash*)
to store in a subdirectory.
"""
def __init__(self, access_key_id, secret_access_key, bucket=None, host=None,
policy=None, encrypt_key=False, prefix=''):
policy = policy or CANNED_ACL_PUBLIC_READ
assert policy in [CANNED_ACL_PUBLIC_READ, CANNED_ACL_PRIVATE], (
"Key policy must be %s or %s" % (CANNED_ACL_PUBLIC_READ, CANNED_ACL_PRIVATE))
self._policy = policy or CANNED_ACL_PUBLIC_READ
self._encrypt_key = encrypt_key
if bucket is None:
bucket = 'filedepot-%s' % (access_key_id.lower(),)
kw = {}
if host is not None:
kw['host'] = host
self._conn = S3Connection(access_key_id, secret_access_key, **kw)
bucket = self._conn.lookup(bucket) or self._conn.create_bucket(bucket)
self._bucket_driver = BucketDriver(bucket, prefix)
def get(self, file_or_id):
fileid = self.fileid(file_or_id)
_check_file_id(fileid)
key = self._bucket_driver.get_key(fileid)
if key is None:
raise IOError('File %s not existing' % fileid)
return S3StoredFile(fileid, key)
def __save_file(self, key, content, filename, content_type=None):
key.set_metadata('content-type', content_type)
key.set_metadata('x-depot-filename', filename)
key.set_metadata('x-depot-modified', utils.timestamp())
key.set_metadata('Content-Disposition', 'inline; filename="%s"' % filename)
if hasattr(content, 'read'):
can_seek_and_tell = True
try:
pos = content.tell()
content.seek(pos)
except:
can_seek_and_tell = False
if can_seek_and_tell:
key.set_contents_from_file(content, policy=self._policy,
encrypt_key=self._encrypt_key)
else:
key.set_contents_from_string(content.read(), policy=self._policy,
encrypt_key=self._encrypt_key)
else:
if isinstance(content, unicode_text):
raise TypeError('Only bytes can be stored, not unicode')
key.set_contents_from_string(content, policy=self._policy,
encrypt_key=self._encrypt_key)
def create(self, content, filename=None, content_type=None):
content, filename, content_type = self.fileinfo(content, filename, content_type)
new_file_id = str(uuid.uuid1())
key = self._bucket_driver.new_key(new_file_id)
self.__save_file(key, content, filename or 'unknown', content_type)
return new_file_id
def replace(self, file_or_id, content, filename=None, content_type=None):
fileid = self.fileid(file_or_id)
_check_file_id(fileid)
content, filename, content_type = self.fileinfo(content, filename, content_type)
if filename is None:
f = self.get(fileid)
filename = f.filename
content_type = f.content_type
key = self._bucket_driver.get_key(fileid)
self.__save_file(key, content, filename or 'unknown', content_type)
return fileid
def delete(self, file_or_id):
fileid = self.fileid(file_or_id)
_check_file_id(fileid)
k = self._bucket_driver.get_key(fileid)
if k:
k.delete()
def exists(self, file_or_id):
fileid = self.fileid(file_or_id)
_check_file_id(fileid)
k = self._bucket_driver.get_key(fileid)
return k is not None
def list(self):
return self._bucket_driver.list_key_names()
def _check_file_id(file_id):
# Check that the given file id is valid; this also
# prevents unsafe paths.
try:
uuid.UUID('{%s}' % file_id)
except:
raise ValueError('Invalid file id %s' % file_id)
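# Illustrative usage (added for clarity; not part of the original module).
# A minimal sketch of the create()/get() round trip; the credentials, bucket
# and prefix are placeholders and a reachable S3 endpoint is assumed:
#
#     storage = S3Storage('ACCESS_KEY', 'SECRET_KEY',
#                         bucket='my-bucket', prefix='attachments/')
#     file_id = storage.create(b'hello world', filename='hello.txt',
#                              content_type='text/plain')
#     assert storage.get(file_id).read() == b'hello world'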
|
{
"content_hash": "7f3c3df258ffdc5c90ed111de07ecd98",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 103,
"avg_line_length": 34.898989898989896,
"alnum_prop": 0.5905933429811867,
"repo_name": "eprikazc/depot",
"id": "10ea97d9a1ebab2e1f1ec7873b09753ee489b45d",
"size": "6910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "depot/io/awss3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131147"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
class BeforeRestartHook(Hook):
def hook(self, env):
self.run_custom_hook('before-START')
if __name__ == "__main__":
BeforeRestartHook().execute()
|
{
"content_hash": "8734b9c27e7e52b94071d390951e3cc0",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 72,
"avg_line_length": 34.17857142857143,
"alnum_prop": 0.7701149425287356,
"repo_name": "alexryndin/ambari",
"id": "2731dd8821752e6386e12d8a30eb7663655eea1c",
"size": "957",
"binary": false,
"copies": "4",
"ref": "refs/heads/branch-adh-1.5",
"path": "ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-RESTART/scripts/hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "44884"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "786184"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "89958"
},
{
"name": "HTML",
"bytes": "2514774"
},
{
"name": "Java",
"bytes": "29565801"
},
{
"name": "JavaScript",
"bytes": "19033151"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "316489"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "17215686"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "33764"
},
{
"name": "SQLPL",
"bytes": "4277"
},
{
"name": "Shell",
"bytes": "886011"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
}
|
"""
Normalizes Smart Fantasy Baseball's registry
"""
import datetime
import logging
from .models import Row
def normalize_sfbb(row):
"""Normalizes a single row of SFBB data
Args:
row: `dict`-ified row from SFBB spreadsheet
Returns:
Formatted model (`Model`)
"""
log = logging.getLogger(__name__)
model = Row()
model.src = 'sfbb'
model.name_last = row['LASTNAME']
model.name_first = row['FIRSTNAME']
model.name_full = row['PLAYERNAME']
model.name_lfc = row['LASTCOMMAFIRST']
if row['BIRTHDATE']:
try:
birth_date = datetime.datetime.strptime(row['BIRTHDATE'],
'%m/%d/%Y')
model.birth_date = datetime.date(
birth_date.year,
birth_date.month,
birth_date.day)
except ValueError:
log.exception('Invalid birth date for SFBB: %s %s',
row['IDPLAYER'],
row['BIRTHDATE'])
model.bats = row['BATS']
model.throws = row['THROWS']
model.team = row['TEAM']
model.lg = row['LG']
model.pos = row['POS']
fg_id = row['IDFANGRAPHS']
if fg_id.startswith('sa'):
model.key_fangraphs_minors = fg_id
else:
model.key_fangraphs = fg_id
model.key_mlbam = row['MLBID']
model.key_cbs = row['CBSID']
model.key_retro = row['RETROID']
model.key_bbref = row['BREFID']
model.key_nfbc = row['NFBCID']
model.key_espn = row['ESPNID']
model.key_kffl = row['KFFLNAME']
model.key_davenport = row['DAVENPORTID']
model.key_bpro = row['BPID']
model.key_yahoo = row['YAHOOID']
model.key_rotowire = row['ROTOWIREID']
model.key_fanduel = row['FANDUELID']
model.key_ottoneu = row['OTTONEUID']
return model
|
{
"content_hash": "7b9598d45a3d69b8bb77d4eab16db691",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 69,
"avg_line_length": 26.314285714285713,
"alnum_prop": 0.5618892508143323,
"repo_name": "mattdennewitz/mlb-normalize-player-ids",
"id": "136b3908526929a23b4841901318a8afb759c44e",
"size": "1842",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "normalize_ids/sfbb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7887"
}
],
"symlink_target": ""
}
|
"""
Module for making various web calls. Primarily designed for webhooks and the
like, but also useful for basic http testing.
.. versionadded:: 2015.5.0
"""
import time
import salt.utils.http
from salt.exceptions import CommandExecutionError
def query(url, **kwargs):
"""
.. versionadded:: 2015.5.0
Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
.. autofunction:: salt.utils.http.query
raise_error : True
If ``False``, and if a connection cannot be made, the error will be
suppressed and the body of the return will simply be ``None``.
CLI Example:
.. code-block:: bash
salt '*' http.query http://somelink.com/
salt '*' http.query http://somelink.com/ method=POST \
params='{"key1": "val1", "key2": "val2"}'
salt '*' http.query http://somelink.com/ method=POST \
data='<xml>somecontent</xml>'
"""
opts = __opts__.copy()
if "opts" in kwargs:
opts.update(kwargs["opts"])
del kwargs["opts"]
try:
return salt.utils.http.query(url=url, opts=opts, **kwargs)
except Exception as exc: # pylint: disable=broad-except
raise CommandExecutionError(str(exc))
def wait_for_successful_query(url, wait_for=300, **kwargs):
"""
Query a resource until a successful response, and decode the return data
CLI Example:
.. code-block:: bash
salt '*' http.wait_for_successful_query http://somelink.com/ wait_for=160 request_interval=1
"""
starttime = time.time()
while True:
caught_exception = None
result = None
try:
result = query(url=url, **kwargs)
if not result.get("Error") and not result.get("error"):
return result
except Exception as exc: # pylint: disable=broad-except
caught_exception = exc
if time.time() > starttime + wait_for:
if not result and caught_exception:
# workaround pylint bug https://www.logilab.org/ticket/3207
raise caught_exception # pylint: disable=E0702
return result
elif "request_interval" in kwargs:
# Space requests out by delaying for an interval
time.sleep(kwargs["request_interval"])
def update_ca_bundle(target=None, source=None, merge_files=None):
"""
Update the local CA bundle file from a URL
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' http.update_ca_bundle
salt '*' http.update_ca_bundle target=/path/to/cacerts.pem
salt '*' http.update_ca_bundle source=https://example.com/cacerts.pem
If the ``target`` is not specified, it will be pulled from the ``ca_bundle``
configuration variable available to the minion. If it cannot be found there,
it will be placed at ``<<FILE_ROOTS>>/cacerts.pem``.
If the ``source`` is not specified, it will be pulled from the
``ca_bundle_url`` configuration variable available to the minion. If it cannot
be found, it will be downloaded from the cURL website, using an http (not
https) URL. USING THE DEFAULT URL SHOULD BE AVOIDED!
``merge_files`` may also be specified, which includes a string or list of
strings representing a file or files to be appended to the end of the CA
bundle, once it is downloaded.
CLI Example:
.. code-block:: bash
salt '*' http.update_ca_bundle merge_files=/path/to/mycert.pem
"""
if target is None:
target = __salt__["config.get"]("ca_bundle", None)
if source is None:
source = __salt__["config.get"]("ca_bundle_url", None)
return salt.utils.http.update_ca_bundle(target, source, __opts__, merge_files)
|
{
"content_hash": "58d0ceb4df6f20cfee959cbcde25394d",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 100,
"avg_line_length": 31.088709677419356,
"alnum_prop": 0.6337224383916991,
"repo_name": "saltstack/salt",
"id": "44464eaa11bf211b9a7e3c7eaf5a097dae240679",
"size": "3855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/modules/http.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
"""Script for the pyim-align command.
The align command is responsible for extracting genomic reads from the
sequencing data, aligning these reads to the reference genome and extracting
insertion sites from these alignments. The command provides access to several
distinct pipelines, which perform these tasks for different types
of sequencing data.
"""
import argparse
import logging
from pyim.align.aligners import AlignerCommand
logging.basicConfig(
format='[%(asctime)-15s] %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
def main():
"""Main function for pyim-align."""
args = parse_args()
args.command.run(args)
def parse_args():
"""Parses arguments for pyim-align."""
# Setup main parser.
parser = argparse.ArgumentParser(prog='pyim-align')
subparsers = parser.add_subparsers(dest='aligner')
subparsers.required = True
# Register pipelines.
commands = AlignerCommand.available_commands()
for name, command in commands.items():
cmd_parser = subparsers.add_parser(name)
command.configure(cmd_parser)
cmd_parser.set_defaults(command=command)
return parser.parse_args()
if __name__ == '__main__':
main()
|
{
"content_hash": "8b2883b1c7b4450d25e90d1a7e59ea22",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 77,
"avg_line_length": 25.520833333333332,
"alnum_prop": 0.7020408163265306,
"repo_name": "jrderuiter/pyim",
"id": "2aa2c2ed88a47705e0c972c8f81992f891f82b70",
"size": "1225",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/pyim/main/pyim_align.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2585"
},
{
"name": "Python",
"bytes": "97613"
},
{
"name": "Shell",
"bytes": "69"
}
],
"symlink_target": ""
}
|
import sublime, sublime_plugin;
import os;
try:
# ST3
from ..apis.core import Core
except (ImportError, ValueError):
# ST2
from apis.core import Core
class CreateLayoutCommand(sublime_plugin.TextCommand):
project_dir = None
def run(self, edit):
core = Core()
path = self.view.file_name();
self.project_dir = core.get_project_path(path)
if self.project_dir is not None:
self.view.window().show_input_panel('Enter layout filename.', '_custom_layout.html.erb', self.on_done, None, None)
else:
sublime.active_window().new_file()
def on_done(self, text):
file_path = os.path.join(self.project_dir, text)
f = open(file_path, 'w+')
f.close()
sublime.active_window().open_file(file_path)
|
{
"content_hash": "259c0cb63e5308857633d5c510b6d046",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 126,
"avg_line_length": 32.24,
"alnum_prop": 0.6215880893300249,
"repo_name": "CasperLaiTW/ERBAutocomplete",
"id": "573a5660c0ac4142641efdbf81ce79add7cd6172",
"size": "806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/commands/create_layout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11980"
}
],
"symlink_target": ""
}
|
"""Fichier contenant le contexte éditeur EdtNoms"""
from primaires.interpreteur.editeur import Editeur
class EdtNoms(Editeur):
"""Classe définissant le contexte éditeur 'noms'.
Ce contexte permet d'éditer les noms et états d'un état de bonhomme
de neige.
"""
def __init__(self, pere, objet=None, attribut=None):
"""Constructeur de l'éditeur"""
Editeur.__init__(self, pere, objet, attribut)
self.opts.echp_sp_cars = False
self.ajouter_option("n", self.opt_nom_singulier)
self.ajouter_option("e", self.opt_etat_singulier)
self.ajouter_option("s", self.opt_nom_pluriel)
self.ajouter_option("p", self.opt_etat_pluriel)
def accueil(self):
"""Message d'accueil"""
etat = self.objet
ret = "Options :\n"
ret += " - |cmd|/n <nom singulier avec déterminant>|ff| : |bc|"
ret += etat.nom_singulier + "|ff|\n"
ret += " - |cmd|/s <nom pluriel sans déterminant>|ff| : |bc|"
ret += etat.nom_pluriel + "|ff|\n"
ret += " - |cmd|/e <état singulier>|ff| : |bc|"
ret += etat.etat_singulier + "|ff|\n"
ret += " - |cmd|/p <état pluriel>|ff| : |bc|"
ret += etat.etat_pluriel + "|ff|\n"
return ret
def opt_nom_singulier(self, arguments):
"""Change le nom singulier de l'état"""
self.objet.nom_singulier = arguments
self.actualiser()
def opt_etat_singulier(self, arguments):
"""Change l'état singulier de l'état"""
self.objet.etat_singulier = arguments
self.actualiser()
def opt_nom_pluriel(self, arguments):
"""Change le nom pluriel de l'état"""
self.objet.nom_pluriel = arguments
self.actualiser()
def opt_etat_pluriel(self, arguments):
"""Change l'état pluriel de l'état"""
self.objet.etat_pluriel = arguments
self.actualiser()
|
{
"content_hash": "af6edfdbf7829434fb4785e97625bb8f",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 71,
"avg_line_length": 35.345454545454544,
"alnum_prop": 0.581275720164609,
"repo_name": "stormi/tsunami",
"id": "68129a133d50fa34da53a7a8d5f5c80e2f8f7e66",
"size": "3526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/salle/editeurs/sbedit/edt_noms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
from unittest import skipIf
from django.db import models
from django.test import TestCase
from django.test.utils import isolate_apps
from django.utils import six
from .models import Article, InternationalArticle
class SimpleTests(TestCase):
@skipIf(six.PY3, "tests a __str__ method returning unicode under Python 2")
def test_basic(self):
a = Article.objects.create(
headline=b'Parrot programs in Python',
pub_date=datetime.datetime(2005, 7, 28)
)
self.assertEqual(str(a), str('Parrot programs in Python'))
self.assertEqual(repr(a), str('<Article: Parrot programs in Python>'))
def test_international(self):
a = InternationalArticle.objects.create(
headline='Girl wins €12.500 in lottery',
pub_date=datetime.datetime(2005, 7, 28)
)
if six.PY3:
self.assertEqual(str(a), 'Girl wins €12.500 in lottery')
else:
# On Python 2, the default str() output will be the UTF-8 encoded
# output of __unicode__() -- or __str__() when the
# python_2_unicode_compatible decorator is used.
self.assertEqual(str(a), b'Girl wins \xe2\x82\xac12.500 in lottery')
@isolate_apps('str')
def test_defaults(self):
"""
The default implementation of __str__ and __repr__ should return
instances of str.
"""
class Default(models.Model):
pass
obj = Default()
# Explicit call to __str__/__repr__ to make sure str()/repr() don't
# coerce the returned value.
self.assertIsInstance(obj.__str__(), str)
self.assertIsInstance(obj.__repr__(), str)
self.assertEqual(str(obj), str('Default object'))
self.assertEqual(repr(obj), str('<Default: Default object>'))
|
{
"content_hash": "7349f0f648a78a406ac565118d3d1e38",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 80,
"avg_line_length": 36.62264150943396,
"alnum_prop": 0.6027820710973725,
"repo_name": "yephper/django",
"id": "917887e0be7ee0439961e5b2c7f43e85ce3ab5e5",
"size": "1970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/str/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
}
|
import datetime
import re
from django import forms
from django.core.exceptions import ValidationError
from django.forms.widgets import MultiWidget, NumberInput
__all__ = ("DayMonthYearWidget",)
RE_DATE = re.compile(r"(\d{4})-(\d\d?)-(\d\d?)$")
class DayMonthYearWidget(MultiWidget):
template_name = "leaflets/includes/dc_date_widget.html"
def __init__(self, attrs=None):
self.widgets = [
forms.NumberInput(attrs={"label": "Day", "size": 2}),
forms.NumberInput(attrs={"label": "Month", "size": 2}),
forms.NumberInput(attrs={"label": "Year", "size": 4}),
]
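# Note: the call below skips MultiWidget.__init__ (which expects a widgets
# argument) and runs Widget.__init__ instead; self.widgets is already set
# above.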
super(MultiWidget, self).__init__(attrs)
def decompress(self, value):
if not value:
return []
return value
class DCDateField(forms.MultiValueField):
widget = DayMonthYearWidget
def __init__(self, *args, **kwargs):
error_messages = {}
fields = (
forms.CharField(max_length=2),
forms.CharField(max_length=2),
forms.CharField(max_length=4),
)
super().__init__(
error_messages=error_messages,
fields=fields,
require_all_fields=True,
**kwargs
)
self.field_class = "form-date"
def compress(self, data_list):
if not data_list:
return
data_list = list(data_list)
data_list.reverse()
return datetime.datetime(*map(int, data_list))
def clean(self, *args, **kwargs):
try:
super().clean(*args, **kwargs)
return self.compress(*args)
except ValueError as e:
raise ValidationError(e)
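# Illustrative behaviour (added for clarity; not part of the original module).
# compress() receives the sub-field values as (day, month, year) strings,
# reverses them and builds a datetime (assumes Django is importable):
#
#     >>> DCDateField(required=False).compress(['25', '12', '2019'])
#     datetime.datetime(2019, 12, 25, 0, 0)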
|
{
"content_hash": "02def82822feb470a5dfc5923d17e389",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 67,
"avg_line_length": 26.714285714285715,
"alnum_prop": 0.5704099821746881,
"repo_name": "DemocracyClub/electionleaflets",
"id": "bb0ba49baa3c2ad14d4372587e8a19343b688c14",
"size": "1683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electionleaflets/apps/leaflets/fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "7910"
},
{
"name": "HTML",
"bytes": "92760"
},
{
"name": "JavaScript",
"bytes": "5712"
},
{
"name": "Makefile",
"bytes": "2940"
},
{
"name": "Python",
"bytes": "194406"
},
{
"name": "SCSS",
"bytes": "12241"
}
],
"symlink_target": ""
}
|
import se34euca
from se34euca.testcase.testcase_ip_address import testcase_ip_address
class IpAddress(se34euca.TestRunner):
# default test case, if no provided
testcase = "allocate_ip_address"
# class to use for running tests
testclass = testcase_ip_address
if __name__ == "__main__":
IpAddress().start_test()
|
{
"content_hash": "552897a57d993999e843950b14143b2d",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 69,
"avg_line_length": 23.928571428571427,
"alnum_prop": 0.7074626865671642,
"repo_name": "eucalyptus/se34euca",
"id": "9dcac277b848901b794688432359332384a6084f",
"size": "354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "se34euca/runtest_ip_address.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "50463"
},
{
"name": "Python",
"bytes": "282952"
}
],
"symlink_target": ""
}
|
import discord
import asyncio
import padshitpostlib
import padmathlib
import padphysicslib
import padloglib
import padgamelib
class Main:
bot = discord.Client()
shitpost = padshitpostlib.Shitpost
math = padmathlib.Mathematics
physics = padphysicslib.Physics
game = padgamelib.Game
logger = padloglib.Printer
def log_string(command, author, author_id, channel, server_name):
return command + ' command engaged by ' + str(author) + ' (ID: ' + str(author_id) + ') in #' + str(channel) + ' @ ' + str(server_name)
@bot.event
async def on_message(message):
try:
if message.content.startswith('$about'):
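# Note: Main defines no 'general' attribute, so this branch raises
# AttributeError at runtime; the broad except below reports the error
# to the channel.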
string = Main.general.about()
await Main.bot.send_message(message.channel, string)
Main.logger.say(Main.log_string('$about', message.author, message.author.id, message.channel, message.server))
if message.content.startswith('$checkem'):
string = Main.shitpost.check_em()
await Main.bot.send_message(message.channel, string)
Main.logger.say(Main.log_string('$checkem', message.author, message.author.id, message.channel, message.server))
if message.content.startswith('$daisy'):
string = Main.shitpost.daisy()
await Main.bot.send_message(message.channel, string)
Main.logger.say(Main.log_string('$daisy', message.author, message.author.id, message.channel, message.server))
if 'big guy' in message.content:
string = Main.shitpost.big_guy()
await Main.bot.send_message(message.channel, string)
Main.logger.say(Main.log_string('big guy', message.author, message.author.id, message.channel, message.server))
if message.content.startswith('garlic'):
string = Main.shitpost.garlic()
await Main.bot.send_message(message.channel, string)
Main.logger.say(Main.log_string('garlic', message.author, message.author.id, message.channel, message.server))
if message.content.startswith('anakin'):
string = Main.shitpost.anakin()
await Main.bot.send_message(message.channel, string)
Main.logger.say(Main.log_string('anakin', message.author, message.author.id, message.channel, message.server))
if message.content.startswith("who's your daddy?"):
string = Main.shitpost.daddy()
await Main.bot.send_message(message.channel, string)
Main.logger.say(Main.log_string("who's your daddy?", message.author, message.author.id, message.channel, message.server))
if message.content.startswith('weed'):
string = Main.shitpost.weed()
await Main.bot.send_message(message.channel, string)
Main.logger.say(Main.log_string('weed', message.author, message.author.id, message.channel, message.server))
if message.content.startswith('lol'):
string = Main.shitpost.lol()
await Main.bot.send_message(message.channel, string)
Main.logger.say(Main.log_string('lol', message.author, message.author.id, message.channel, message.server))
if message.content.startswith('$rand32') or '$random' in message.content:
string = Main.math.generate_32bit()
await Main.bot.send_message(message.channel, string)
Main.logger.say(Main.log_string('$rand32/$random', message.author, message.author.id, message.channel, message.server))
if message.content.startswith('$rand64'):
string = Main.math.generate_64bit()
await Main.bot.send_message(message.channel, string)
Main.logger.say(Main.log_string('$rand64', message.author, message.author.id, message.channel, message.server))
if message.content.startswith('$sin'):
number = message.content[len('$sin'):].strip()
string = Main.math.get_sin(number)
await Main.bot.send_message(message.channel, string)
Main.logger.say(Main.log_string('$sin', message.author, message.author.id, message.channel, message.server))
if message.content.startswith('$cos'):
number = message.content[len('$cos'):].strip()
string = Main.math.get_cos(number)
await Main.bot.send_message(message.channel, string)
Main.logger.say(Main.log_string('$cos', message.author, message.author.id, message.channel, message.server))
if message.content.startswith('$tan'):
number = message.content[len('$tan'):].strip()
string = Main.math.get_tan(number)
await Main.bot.send_message(message.channel, string)
Main.logger.say(Main.log_string('$tan', message.author, message.author.id, message.channel, message.server))
if message.content.startswith('$weight'):
number = message.content[len('$weight'):].strip()
string = str(Main.physics.get_weight(number))
await Main.bot.send_message(message.channel, string)
Main.logger.say(Main.log_string('$weight', message.author, message.author.id, message.channel, message.server))
except Exception as e:
Main.logger.say(str(e))
await Main.bot.send_message(message.channel, 'Whoops, something went wrong. Tell the bot owner to check the logs for more information!')
@bot.event
async def on_ready():
await Main.bot.change_presence(game=discord.Game(name='PadmeBot v2.1 alpha test'))
Main.logger.say('PadmeBot is ready!')
def main():
Main.logger.say('Starting PadmeBot.....')
token_file = open('cred.txt', 'r')
cred = token_file.read()
token_file.close()
try:
Main.bot.run(str(cred))
cred = ''
except discord.errors.LoginFailure:
error_string = 'Login failed. Your token is either invalid or empty. Please check cred.txt if your token is valid or not empty'
Main.logger.say(error_string)
quit()
Main.main()
|
{
"content_hash": "2647590822ba10f919a8dad4d8a12bf1",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 148,
"avg_line_length": 50,
"alnum_prop": 0.6182539682539683,
"repo_name": "georgyorgy1/padmebot_v2",
"id": "8f5b15661e76fc43f19b4fc28c603e993d57e5ec",
"size": "6395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12064"
}
],
"symlink_target": ""
}
|
"""Remove resource record."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@click.argument('record_id')
@environment.pass_env
def cli(env, record_id):
"""Remove resource record."""
manager = SoftLayer.DNSManager(env.client)
if not (env.skip_confirmations or formatting.no_going_back('yes')):
raise exceptions.CLIAbort("Aborted.")
manager.delete_record(record_id)
|
{
"content_hash": "0c7d21c008a977ddf01d3f22fa1fc8a3",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 25.73913043478261,
"alnum_prop": 0.7415540540540541,
"repo_name": "softlayer/softlayer-python",
"id": "5d538d55ccc24f9a2e9ede2466a1ed2f3003ddbf",
"size": "592",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "SoftLayer/CLI/dns/record_remove.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "854"
},
{
"name": "Makefile",
"bytes": "7458"
},
{
"name": "Python",
"bytes": "2657752"
}
],
"symlink_target": ""
}
|
"""
pyexcel_io.database.common
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Common classes shared among database importers and exporters
:copyright: (c) 2014-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
from pyexcel_io.book import BookReader
class DbExporter(BookReader):
""" Transcode the book reader interface to db interface """
def open(self, file_name, **keywords):
self.export_tables(self, file_name, **keywords)
def open_stream(self, file_stream, **keywords):
self.export_tables(self, file_stream, **keywords)
def open_content(self, file_content, **keywords):
self.export_tables(file_content, **keywords)
def export_tables(self, exporter, **keywords):
""" read database tables """
raise NotImplementedError("Please implement this method")
class DjangoModelExportAdapter(object):
""" django export parameter holder """
def __init__(self, model, export_columns=None):
self.model = model
self.export_columns = export_columns
@property
def name(self):
""" get database table name """
return self.get_name()
def get_name(self):
""" get database table name """
return self.model._meta.model_name
class DjangoModelImportAdapter(DjangoModelExportAdapter):
""" parameter holder for django data import """
class InOutParameter(object):
""" local class to manipulate variable io """
def __init__(self):
self.output = None
self.input = None
def __init__(self, model):
DjangoModelExportAdapter.__init__(self, model)
self.__column_names = self.InOutParameter()
self.__column_name_mapping_dict = self.InOutParameter()
self.__row_initializer = self.InOutParameter()
self._process_parameters()
@property
def row_initializer(self):
""" contructor for a database table entry """
return self.__row_initializer.output
@property
def column_names(self):
""" the desginated database column names """
return self.__column_names.output
@property
def column_name_mapping_dict(self):
""" if not the same, a mapping dictionary is looked up"""
return self.__column_name_mapping_dict.output
@row_initializer.setter
def row_initializer(self, a_function):
""" set the contructor """
self.__row_initializer.input = a_function
self._process_parameters()
@column_names.setter
def column_names(self, column_names):
""" set the column names """
self.__column_names.input = column_names
self._process_parameters()
@column_name_mapping_dict.setter
def column_name_mapping_dict(self, mapping_dict):
""" set the mapping dict """
self.__column_name_mapping_dict.input = mapping_dict
self._process_parameters()
def _process_parameters(self):
if self.__row_initializer.input is None:
self.__row_initializer.output = None
else:
self.__row_initializer.output = self.__row_initializer.input
if isinstance(self.__column_name_mapping_dict.input, list):
self.__column_names.output = self.__column_name_mapping_dict.input
self.__column_name_mapping_dict.output = None
elif isinstance(self.__column_name_mapping_dict.input, dict):
if self.__column_names.input:
self.__column_names.output = [
self.__column_name_mapping_dict.input[name]
for name in self.__column_names.input]
self.__column_name_mapping_dict.output = None
if self.__column_names.output is None:
self.__column_names.output = self.__column_names.input
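# Illustrative behaviour (added for clarity; not part of the original module).
# SomeModel stands in for any Django model class; with a mapping dict the
# incoming column names are translated, while a plain list simply replaces
# them:
#
#     adapter = DjangoModelImportAdapter(SomeModel)
#     adapter.column_names = ['Name', 'Age']
#     adapter.column_name_mapping_dict = {'Name': 'name', 'Age': 'age'}
#     assert adapter.column_names == ['name', 'age']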
class DjangoModelExporter(object):
""" public interface for django model export """
def __init__(self):
self.adapters = []
def append(self, import_adapter):
""" store model parameter for more than one model """
self.adapters.append(import_adapter)
class DjangoModelImporter(object):
""" public interface for django model import """
def __init__(self):
self.__adapters = {}
def append(self, import_adapter):
""" store model parameter for more than one model """
self.__adapters[import_adapter.get_name()] = import_adapter
def get(self, name):
""" get a parameter out """
return self.__adapters.get(name, None)
class SQLTableExportAdapter(DjangoModelExportAdapter):
""" parameter holder for sql table data export """
def __init__(self, model, export_columns=None):
DjangoModelExportAdapter.__init__(self, model, export_columns)
self.table = model
def get_name(self):
return getattr(self.table, '__tablename__', None)
class SQLTableImportAdapter(DjangoModelImportAdapter):
""" parameter holder for sqlalchemy table import """
def __init__(self, model):
DjangoModelImportAdapter.__init__(self, model)
self.table = model
def get_name(self):
return getattr(self.table, '__tablename__', None)
class SQLTableExporter(DjangoModelExporter):
""" public interface for sql table export """
def __init__(self, session):
DjangoModelExporter.__init__(self)
self.session = session
class SQLTableImporter(DjangoModelImporter):
""" public interface to do data import via sqlalchemy """
def __init__(self, session):
DjangoModelImporter.__init__(self)
self.session = session
|
{
"content_hash": "ee4d06fa729bd59b22169ebea23b6222",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 78,
"avg_line_length": 33.704819277108435,
"alnum_prop": 0.6328865058087578,
"repo_name": "caspartse/QQ-Groups-Spider",
"id": "afe0c39a94a4076439161215df8f32bfac5d1be5",
"size": "5595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor/pyexcel_io/database/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "157970"
},
{
"name": "Python",
"bytes": "10416"
},
{
"name": "Smarty",
"bytes": "9490"
}
],
"symlink_target": ""
}
|
import json
blob = json.load(open('kylewm.com.json'))
from config import Configuration
Configuration.SQLALCHEMY_DATABASE_URI = 'postgres:///redwind'
from redwind import db
from redwind.importer import *
db.drop_all()
db.create_all()
import_all(blob)
|
{
"content_hash": "d0033b745dd0b2a947e64c6868cf5fe3",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 61,
"avg_line_length": 25.1,
"alnum_prop": 0.7729083665338645,
"repo_name": "thedod/redwind",
"id": "b2fab19d0301823e116d02d5add266780998ce68",
"size": "251",
"binary": false,
"copies": "3",
"ref": "refs/heads/deployment",
"path": "scripts/doimport.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "18996"
},
{
"name": "JavaScript",
"bytes": "40212"
},
{
"name": "Python",
"bytes": "178084"
}
],
"symlink_target": ""
}
|
DEBUG = True
ROOT_URLCONF = 'tests.urls'
SECRET_KEY = 'Guard of the Citadel indeed.'
|
{
"content_hash": "b9fdace47deab3d85a5726fb2fd9f62b",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 43,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.7176470588235294,
"repo_name": "Visgean/urljects",
"id": "31b2a11730ec191a04e9cc6d85c556881aa83bdf",
"size": "144",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1671"
},
{
"name": "Python",
"bytes": "17170"
}
],
"symlink_target": ""
}
|
"""Parse CLI text products
The CLI report has lots of good data that is hard to find in other products,
so we take what data we find in this product and overwrite the database
storage of what we got from the automated observations
"""
# Local
from pywwa.workflows.cli_parser import main
if __name__ == "__main__":
# Do Stuff
main()
|
{
"content_hash": "a0a9048177e3137d0b7f3576953c46ff",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 76,
"avg_line_length": 26.384615384615383,
"alnum_prop": 0.7201166180758017,
"repo_name": "akrherz/pyWWA",
"id": "133515f703a20d386b810abe6526b37f61f35f38",
"size": "343",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parsers/cli_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "207659"
},
{
"name": "Shell",
"bytes": "4472"
}
],
"symlink_target": ""
}
|
def parse_nick_modes(mode_string):
"""Parse a nick mode string.
The function returns a list of lists with three members: sign,
mode and argument. The sign is "+" or "-". The argument is
always None.
Example:
>>> parse_nick_modes("+ab-c")
[['+', 'a', None], ['+', 'b', None], ['-', 'c', None]]
"""
return _parse_modes(mode_string, "")
def parse_channel_modes(mode_string):
"""Parse a channel mode string.
The function returns a list of lists with three members: sign,
mode and argument. The sign is "+" or "-". The argument is
None if mode isn't one of "b", "k", "l", "v", "o", "h", or "q".
Example:
>>> parse_channel_modes("+ab-c foo")
[['+', 'a', None], ['+', 'b', 'foo'], ['-', 'c', None]]
"""
return _parse_modes(mode_string, "bklvohq")
def _parse_modes(mode_string, unary_modes=""):
"""
Parse the mode_string and return a list of triples.
If no string is supplied return an empty list.
>>> _parse_modes('')
[]
If no sign is supplied, return an empty list.
>>> _parse_modes('ab')
[]
Discard unused args.
>>> _parse_modes('+a foo bar baz')
[['+', 'a', None]]
    Return None for unary args when not provided.
>>> _parse_modes('+abc foo', unary_modes='abc')
[['+', 'a', 'foo'], ['+', 'b', None], ['+', 'c', None]]
This function never throws an error:
>>> import random
>>> import six
>>> unichr = chr if six.PY3 else unichr
>>> def random_text(min_len = 3, max_len = 80):
... len = random.randint(min_len, max_len)
... chars_to_choose = [unichr(x) for x in range(0,1024)]
... chars = (random.choice(chars_to_choose) for x in range(len))
... return ''.join(chars)
>>> def random_texts(min_len = 3, max_len = 80):
... while True:
... yield random_text(min_len, max_len)
>>> import itertools
>>> texts = itertools.islice(random_texts(), 1000)
>>> set(type(_parse_modes(text)) for text in texts) == set([list])
True
"""
# mode_string must be non-empty and begin with a sign
if not mode_string or not mode_string[0] in '+-':
return []
modes = []
parts = mode_string.split()
mode_part, args = parts[0], parts[1:]
for ch in mode_part:
if ch in "+-":
sign = ch
continue
arg = args.pop(0) if ch in unary_modes and args else None
modes.append([sign, ch, arg])
return modes
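# Illustrative usage sketch, not part of the upstream module: the sign carries across
# modes and unary modes consume arguments left to right, so a unary mode with no
# argument left ("-k" below) gets None.
if __name__ == "__main__":
    example = parse_channel_modes("+ov-k alice secret")
    assert example == [['+', 'o', 'alice'], ['+', 'v', 'secret'], ['-', 'k', None]]
    print(example)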
|
{
"content_hash": "e24f7995b41f5107a3fa901e24914e75",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 72,
"avg_line_length": 29.642857142857142,
"alnum_prop": 0.5522088353413654,
"repo_name": "tcoppi/scrappy",
"id": "09470e26607e784600f76a1829b5ba67d1b5396f",
"size": "2490",
"binary": false,
"copies": "21",
"ref": "refs/heads/master",
"path": "irclib/modes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "241000"
},
{
"name": "Shell",
"bytes": "31"
}
],
"symlink_target": ""
}
|
"""
IPython extension: autoreload modules before executing the next line
Try::
%autoreload?
for documentation.
"""
# Pauli Virtanen <pav@iki.fi>, 2008.
# Thomas Heller, 2000.
#
# This IPython module is written by Pauli Virtanen, based on the autoreload
# code by Thomas Heller.
#------------------------------------------------------------------------------
# Autoreload functionality
#------------------------------------------------------------------------------
import time, os, threading, sys, types, imp, inspect, traceback, atexit
import weakref
def _get_compiled_ext():
"""Official way to get the extension of compiled files (.pyc or .pyo)"""
for ext, mode, typ in imp.get_suffixes():
if typ == imp.PY_COMPILED:
return ext
PY_COMPILED_EXT = _get_compiled_ext()
class ModuleReloader(object):
failed = {}
"""Modules that failed to reload: {module: mtime-on-failed-reload, ...}"""
modules = {}
"""Modules specially marked as autoreloadable."""
skip_modules = {}
"""Modules specially marked as not autoreloadable."""
check_all = True
"""Autoreload all modules, not just those listed in 'modules'"""
old_objects = {}
"""(module-name, name) -> weakref, for replacing old code objects"""
def check(self, check_all=False):
"""Check whether some modules need to be reloaded."""
if check_all or self.check_all:
modules = sys.modules.keys()
else:
modules = self.modules.keys()
for modname in modules:
m = sys.modules.get(modname, None)
if modname in self.skip_modules:
continue
if not hasattr(m, '__file__'):
continue
if m.__name__ == '__main__':
# we cannot reload(__main__)
continue
filename = m.__file__
dirname = os.path.dirname(filename)
path, ext = os.path.splitext(filename)
if ext.lower() == '.py':
ext = PY_COMPILED_EXT
filename = os.path.join(dirname, path + PY_COMPILED_EXT)
if ext != PY_COMPILED_EXT:
continue
try:
pymtime = os.stat(filename[:-1]).st_mtime
if pymtime <= os.stat(filename).st_mtime:
continue
if self.failed.get(filename[:-1], None) == pymtime:
continue
except OSError:
continue
try:
superreload(m, reload, self.old_objects)
if filename[:-1] in self.failed:
del self.failed[filename[:-1]]
except:
print >> sys.stderr, "[autoreload of %s failed: %s]" % (
modname, traceback.format_exc(1))
self.failed[filename[:-1]] = pymtime
#------------------------------------------------------------------------------
# superreload
#------------------------------------------------------------------------------
def update_function(old, new):
"""Upgrade the code object of a function"""
for name in ['func_code', 'func_defaults', 'func_doc',
'func_closure', 'func_globals', 'func_dict']:
try:
setattr(old, name, getattr(new, name))
except (AttributeError, TypeError):
pass
def update_class(old, new):
"""Replace stuff in the __dict__ of a class, and upgrade
method code objects"""
for key in old.__dict__.keys():
old_obj = getattr(old, key)
try:
new_obj = getattr(new, key)
except AttributeError:
# obsolete attribute: remove it
try:
delattr(old, key)
except (AttributeError, TypeError):
pass
continue
if update_generic(old_obj, new_obj): continue
try:
setattr(old, key, getattr(new, key))
except (AttributeError, TypeError):
pass # skip non-writable attributes
def update_property(old, new):
"""Replace get/set/del functions of a property"""
update_generic(old.fdel, new.fdel)
update_generic(old.fget, new.fget)
update_generic(old.fset, new.fset)
def isinstance2(a, b, typ):
return isinstance(a, typ) and isinstance(b, typ)
UPDATE_RULES = [
(lambda a, b: isinstance2(a, b, types.ClassType),
update_class),
(lambda a, b: isinstance2(a, b, types.TypeType),
update_class),
(lambda a, b: isinstance2(a, b, types.FunctionType),
update_function),
(lambda a, b: isinstance2(a, b, property),
update_property),
(lambda a, b: isinstance2(a, b, types.MethodType),
lambda a, b: update_function(a.im_func, b.im_func)),
]
def update_generic(a, b):
for type_check, update in UPDATE_RULES:
if type_check(a, b):
update(a, b)
return True
return False
class StrongRef(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
return self.obj
def superreload(module, reload=reload, old_objects={}):
"""Enhanced version of the builtin reload function.
superreload remembers objects previously in the module, and
- upgrades the class dictionary of every old class in the module
- upgrades the code object of every old function and method
- clears the module's namespace before reloading
"""
# collect old objects in the module
for name, obj in module.__dict__.items():
if not hasattr(obj, '__module__') or obj.__module__ != module.__name__:
continue
key = (module.__name__, name)
try:
old_objects.setdefault(key, []).append(weakref.ref(obj))
except TypeError:
# weakref doesn't work for all types;
# create strong references for 'important' cases
if isinstance(obj, types.ClassType):
old_objects.setdefault(key, []).append(StrongRef(obj))
# reload module
try:
# clear namespace first from old cruft
old_name = module.__name__
module.__dict__.clear()
module.__dict__['__name__'] = old_name
except (TypeError, AttributeError, KeyError):
pass
module = reload(module)
# iterate over all objects and update functions & classes
for name, new_obj in module.__dict__.items():
key = (module.__name__, name)
if key not in old_objects: continue
new_refs = []
for old_ref in old_objects[key]:
old_obj = old_ref()
if old_obj is None: continue
new_refs.append(old_ref)
update_generic(old_obj, new_obj)
if new_refs:
old_objects[key] = new_refs
else:
del old_objects[key]
return module
reloader = ModuleReloader()
#------------------------------------------------------------------------------
# IPython connectivity
#------------------------------------------------------------------------------
import IPython.ipapi
ip = IPython.ipapi.get()
autoreload_enabled = False
def runcode_hook(self):
if not autoreload_enabled:
raise IPython.ipapi.TryNext
try:
reloader.check()
except:
pass
def enable_autoreload():
global autoreload_enabled
autoreload_enabled = True
def disable_autoreload():
global autoreload_enabled
autoreload_enabled = False
def autoreload_f(self, parameter_s=''):
r""" %autoreload => Reload modules automatically
%autoreload
Reload all modules (except those excluded by %aimport) automatically now.
%autoreload 0
Disable automatic reloading.
%autoreload 1
Reload all modules imported with %aimport every time before executing
the Python code typed.
%autoreload 2
Reload all modules (except those excluded by %aimport) every time
before executing the Python code typed.
Reloading Python modules in a reliable way is in general
difficult, and unexpected things may occur. %autoreload tries to
work around common pitfalls by replacing function code objects and
parts of classes previously in the module with new versions. This
    makes the following things work:
- Functions and classes imported via 'from xxx import foo' are upgraded
to new versions when 'xxx' is reloaded.
- Methods and properties of classes are upgraded on reload, so that
calling 'c.foo()' on an object 'c' created before the reload causes
the new code for 'foo' to be executed.
Some of the known remaining caveats are:
- Replacing code objects does not always succeed: changing a @property
in a class to an ordinary method or a method to a member variable
can cause problems (but in old objects only).
    - Functions that are removed (e.g. via monkey-patching) from a module
      before it is reloaded are not upgraded.
- C extension modules cannot be reloaded, and so cannot be
autoreloaded.
"""
if parameter_s == '':
reloader.check(True)
elif parameter_s == '0':
disable_autoreload()
elif parameter_s == '1':
reloader.check_all = False
enable_autoreload()
elif parameter_s == '2':
reloader.check_all = True
enable_autoreload()
def aimport_f(self, parameter_s=''):
"""%aimport => Import modules for automatic reloading.
%aimport
List modules to automatically import and not to import.
%aimport foo
Import module 'foo' and mark it to be autoreloaded for %autoreload 1
%aimport -foo
Mark module 'foo' to not be autoreloaded for %autoreload 1
"""
modname = parameter_s
if not modname:
to_reload = reloader.modules.keys()
to_reload.sort()
to_skip = reloader.skip_modules.keys()
to_skip.sort()
if reloader.check_all:
print "Modules to reload:\nall-expect-skipped"
else:
print "Modules to reload:\n%s" % ' '.join(to_reload)
print "\nModules to skip:\n%s" % ' '.join(to_skip)
elif modname.startswith('-'):
modname = modname[1:]
try: del reloader.modules[modname]
except KeyError: pass
reloader.skip_modules[modname] = True
else:
try: del reloader.skip_modules[modname]
except KeyError: pass
reloader.modules[modname] = True
# Inject module to user namespace; handle also submodules properly
__import__(modname)
basename = modname.split('.')[0]
mod = sys.modules[basename]
ip.to_user_ns({basename: mod})
def init():
ip.expose_magic('autoreload', autoreload_f)
ip.expose_magic('aimport', aimport_f)
ip.set_hook('pre_runcode_hook', runcode_hook)
init()
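# Usage sketch (illustrative only, not part of the upstream extension): once this module
# has been imported in an IPython 0.10 session (e.g. from ipy_user_conf.py), init() has
# registered the magics, and typical usage looks like:
#
#     %autoreload 2      # reload every non-skipped module before each executed line
#     %aimport mymodule  # mark 'mymodule' for selective reloading with %autoreload 1
#     %aimport -numpy    # never try to autoreload 'numpy'
#
# The module names above are placeholders.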
|
{
"content_hash": "9d2905a9c5b98f0bf90f25a1c306c931",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 79,
"avg_line_length": 30.87106017191977,
"alnum_prop": 0.577872656395025,
"repo_name": "yongshengwang/hue",
"id": "230f57531746c55f0c34ac421a3b6abdcb3de7a9",
"size": "10774",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "build/env/lib/python2.7/site-packages/ipython-0.10-py2.7.egg/IPython/Extensions/ipy_autoreload.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13685"
},
{
"name": "C",
"bytes": "2479183"
},
{
"name": "C++",
"bytes": "177090"
},
{
"name": "CSS",
"bytes": "1133541"
},
{
"name": "Emacs Lisp",
"bytes": "12145"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Groff",
"bytes": "28547"
},
{
"name": "HTML",
"bytes": "26230478"
},
{
"name": "Java",
"bytes": "133906"
},
{
"name": "JavaScript",
"bytes": "9757355"
},
{
"name": "Makefile",
"bytes": "94066"
},
{
"name": "Mako",
"bytes": "2185828"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "88056623"
},
{
"name": "Scala",
"bytes": "191428"
},
{
"name": "Shell",
"bytes": "59514"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "Thrift",
"bytes": "101931"
},
{
"name": "VimL",
"bytes": "1530"
},
{
"name": "XSLT",
"bytes": "357625"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import platform
from flask import flash, jsonify, redirect, request
from requests.exceptions import HTTPError, RequestException, Timeout
from werkzeug.urls import url_join
import indico
from indico.core.config import config
from indico.core.db.sqlalchemy.util.queries import get_postgres_version
from indico.modules.admin import RHAdminBase
from indico.modules.cephalopod import cephalopod_settings
from indico.modules.cephalopod.forms import CephalopodForm
from indico.modules.cephalopod.util import register_instance, sync_instance, unregister_instance
from indico.modules.cephalopod.views import WPCephalopod
from indico.modules.core.settings import core_settings
from indico.util.i18n import _
from indico.util.network import is_private_url
from indico.util.system import get_os
from indico.web.flask.util import url_for
from indico.web.forms.base import FormDefaults
from indico.web.rh import RH
class RHCephalopodBase(RHAdminBase):
pass
class RHCephalopod(RHCephalopodBase):
def _process(self):
form = CephalopodForm(obj=FormDefaults(**cephalopod_settings.get_all()))
if form.validate_on_submit():
return self._process_form(form)
hub_url = url_join(config.COMMUNITY_HUB_URL, 'api/instance/{}'.format(cephalopod_settings.get('uuid')))
cephalopod_settings.set('show_migration_message', False)
return WPCephalopod.render_template('cephalopod.html', 'cephalopod',
affiliation=core_settings.get('site_organization'),
enabled=cephalopod_settings.get('joined'),
form=form,
indico_version=indico.__version__,
instance_url=config.BASE_URL,
language=config.DEFAULT_LOCALE,
operating_system=get_os(),
postgres_version=get_postgres_version(),
python_version=platform.python_version(),
hub_url=hub_url,
show_local_warning=(config.DEBUG or is_private_url(request.url_root)))
def _process_form(self, form):
name = form.contact_name.data
email = form.contact_email.data
enabled = form.joined.data
uuid = cephalopod_settings.get('uuid')
try:
if not enabled:
unregister_instance()
elif enabled and uuid:
sync_instance(name, email)
elif enabled and not uuid:
register_instance(name, email)
except HTTPError as err:
flash(_("Operation failed, the community hub returned: {err.message}").format(err=err), 'error')
except Timeout:
flash(_("The operation timed-out. Please try again in a while."), 'error')
except RequestException as err:
flash(_("Unexpected exception while contacting the Community Hub: {err.message}").format(err=err))
return redirect(url_for('.index'))
class RHCephalopodSync(RHCephalopodBase):
def _process(self):
if not cephalopod_settings.get('joined'):
flash(_("Synchronization is not possible if you don't join the community first."),
'error')
else:
contact_name = cephalopod_settings.get('contact_name')
contact_email = cephalopod_settings.get('contact_email')
try:
sync_instance(contact_name, contact_email)
except HTTPError as err:
flash(_("Synchronization failed, the community hub returned: {err.message}").format(err=err),
'error')
except Timeout:
flash(_("Synchronization timed-out. Please try again in a while."), 'error')
except RequestException as err:
flash(_("Unexpected exception while contacting the Community Hub: {err.message}").format(err=err))
return redirect(url_for('.index'))
class RHSystemInfo(RH):
def _process(self):
stats = {'python_version': platform.python_version(),
'indico_version': indico.__version__,
'operating_system': get_os(),
'postgres_version': get_postgres_version(),
'language': config.DEFAULT_LOCALE,
'debug': config.DEBUG}
return jsonify(stats)
|
{
"content_hash": "ac749ea36a4027318f84015bd457d59b",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 114,
"avg_line_length": 45.68316831683168,
"alnum_prop": 0.6025140875596012,
"repo_name": "OmeGak/indico",
"id": "eb72b4cf180504f7c84808b91678d53a5735b71c",
"size": "4828",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/modules/cephalopod/controllers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "547418"
},
{
"name": "HTML",
"bytes": "1366687"
},
{
"name": "JavaScript",
"bytes": "1678182"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4488419"
},
{
"name": "Shell",
"bytes": "2724"
},
{
"name": "TeX",
"bytes": "23051"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
description="Easy peasy wrapper for HipChat's v1 API",
name='python-simple-hipchat',
url='https://github.com/kurttheviking/python-simple-hipchat',
version='0.3.3',
packages=['hipchat'],
author='Kurt Ericson',
author_email='kurttheviking@outlook.com',
license='MIT',
long_description=open('README.rst').read(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
],
)
|
{
"content_hash": "38b8274d7afb4ea74e4ae0dca5fd7368",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 70,
"avg_line_length": 34.25,
"alnum_prop": 0.6253041362530414,
"repo_name": "claudyus/python-simple-hipchat",
"id": "7684a979e3d2b93d9d0f99981a0755a5de4a7ae7",
"size": "822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4116"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class DjangoCloudbridgeConfig(AppConfig):
name = 'djcloudbridge'
def ready(self):
# Connect up app signals
import djcloudbridge.signals # noqa
|
{
"content_hash": "ea69a495b4fcee247c54720b865f3be6",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 44,
"avg_line_length": 22.77777777777778,
"alnum_prop": 0.7024390243902439,
"repo_name": "CloudVE/djcloudbridge",
"id": "f86142f8c6e7e8e06b73362acd2e7c7750ac6866",
"size": "225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djcloudbridge/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "661"
},
{
"name": "Makefile",
"bytes": "1582"
},
{
"name": "Python",
"bytes": "138171"
}
],
"symlink_target": ""
}
|
__author__ = 'francis'
def permute(xs):
"""Returns a generator for the permutations of elements in the sequence
:param xs: a sequence of elements
"""
if len(xs) == 1:
yield xs
else:
for i in xrange(0, len(xs)):
for p in permute(xs[0:i] + xs[i + 1:]):
yield [xs[i]] + p
def longest_inc_seq(xs):
"""Finds the longest increasing sequences in the given sequence
:param xs: a sortable sequence of elements
"""
seq = [] # all increasing sequences
indices = [] # indices of longest increasing sequences
size = 0 # current longest size
for i in xrange(0, len(xs)):
for j in xrange(0, len(seq)):
if xs[i] > seq[j][-1]:
t = seq[j] + [xs[i]]
if len(t) > size:
indices = [len(seq)]
size = len(t)
elif len(t) == size:
indices.append(len(seq))
seq.append(t)
seq.append([xs[i]])
return [seq[k] for k in indices]
def longest_common_seq(first, second):
"""Find the longest common sequence of the given sequences
:param first: the first sequence
:param second: the second sequence
"""
res = []
for i in range(0, len(first)):
for j in range(0, len(second)):
if second[j] == first[i]:
t = [first[i]] + longest_common_seq(first[i + 1:], second[j + 1:])
if len(t) > len(res):
res = t
return res
def fib(n):
"""Computes the fibonacci number for the given term
:param n: the term of the fibonacci sequence
"""
if not n:
return 0
previous = 0
current = 1
while n - 1:
current, previous = (previous + current), current
n -= 1
return current
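# A small self-check, not part of the upstream module; the expected values below were
# worked out by hand for these tiny inputs.
if __name__ == '__main__':
    assert sorted(permute([1, 2, 3])) == [[1, 2, 3], [1, 3, 2], [2, 1, 3],
                                          [2, 3, 1], [3, 1, 2], [3, 2, 1]]
    assert longest_inc_seq([1, 3, 2, 4]) == [[1, 3, 4], [1, 2, 4]]
    assert longest_common_seq('banana', 'ananas') == ['a', 'n', 'a', 'n', 'a']
    assert [fib(n) for n in range(7)] == [0, 1, 1, 2, 3, 5, 8]
    print('all examples passed')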
|
{
"content_hash": "c20ab6e7fb8285f5f8729a9ec5e4fdf1",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 82,
"avg_line_length": 28.59375,
"alnum_prop": 0.5229508196721312,
"repo_name": "kofrasa/pyutils",
"id": "e3e6b643898d67419ebe2e8dc4016d18ea858896",
"size": "1875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyutils/algorithms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2641"
}
],
"symlink_target": ""
}
|
import os
from google.appengine.ext.webapp import template
from base_controller import CacheableHandler
# Note, broken out into its own class for future extensibility -PJL 04272015
class EventWizardHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "event_wizard"
def __init__(self, *args, **kw):
super(EventWizardHandler, self).__init__(*args, **kw)
self.cache_expiration = 60 * 60
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/eventwizard.html")
return template.render(path, self.template_values)
|
{
"content_hash": "24dc54456790b5fa6ba92a15f3ae41ad",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 87,
"avg_line_length": 34,
"alnum_prop": 0.6879084967320261,
"repo_name": "bvisness/the-blue-alliance",
"id": "580ead88cf9da677b18030cc7c42b7b2665c67b2",
"size": "612",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "controllers/event_wizard_controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "101"
},
{
"name": "CSS",
"bytes": "372199"
},
{
"name": "HTML",
"bytes": "5376975"
},
{
"name": "JavaScript",
"bytes": "267581"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Python",
"bytes": "1597373"
},
{
"name": "Ruby",
"bytes": "4677"
},
{
"name": "Shell",
"bytes": "413"
}
],
"symlink_target": ""
}
|
"""Integration with the Rachio Iro sprinkler system controller."""
from abc import abstractmethod
from datetime import timedelta
import logging
from homeassistant.components.switch import SwitchDevice
from homeassistant.helpers.dispatcher import dispatcher_connect
from . import (
CONF_MANUAL_RUN_MINS, DOMAIN as DOMAIN_RACHIO, KEY_DEVICE_ID, KEY_ENABLED,
KEY_ID, KEY_NAME, KEY_ON, KEY_SUBTYPE, KEY_SUMMARY, KEY_ZONE_ID,
KEY_ZONE_NUMBER, SIGNAL_RACHIO_CONTROLLER_UPDATE,
SIGNAL_RACHIO_ZONE_UPDATE, SUBTYPE_SLEEP_MODE_OFF, SUBTYPE_SLEEP_MODE_ON,
SUBTYPE_ZONE_COMPLETED, SUBTYPE_ZONE_STARTED, SUBTYPE_ZONE_STOPPED)
_LOGGER = logging.getLogger(__name__)
ATTR_ZONE_SUMMARY = 'Summary'
ATTR_ZONE_NUMBER = 'Zone number'
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Rachio switches."""
manual_run_time = timedelta(minutes=hass.data[DOMAIN_RACHIO].config.get(
CONF_MANUAL_RUN_MINS))
_LOGGER.info("Rachio run time is %s", str(manual_run_time))
# Add all zones from all controllers as switches
devices = []
for controller in hass.data[DOMAIN_RACHIO].controllers:
devices.append(RachioStandbySwitch(hass, controller))
for zone in controller.list_zones():
devices.append(RachioZone(hass, controller, zone, manual_run_time))
add_entities(devices)
_LOGGER.info("%d Rachio switch(es) added", len(devices))
class RachioSwitch(SwitchDevice):
"""Represent a Rachio state that can be toggled."""
def __init__(self, controller, poll=True):
"""Initialize a new Rachio switch."""
self._controller = controller
if poll:
self._state = self._poll_update()
else:
self._state = None
@property
def should_poll(self) -> bool:
"""Declare that this entity pushes its state to HA."""
return False
@property
def name(self) -> str:
"""Get a name for this switch."""
return "Switch on {}".format(self._controller.name)
@property
def is_on(self) -> bool:
"""Return whether the switch is currently on."""
return self._state
@abstractmethod
def _poll_update(self, data=None) -> bool:
"""Poll the API."""
pass
def _handle_any_update(self, *args, **kwargs) -> None:
"""Determine whether an update event applies to this device."""
if args[0][KEY_DEVICE_ID] != self._controller.controller_id:
# For another device
return
# For this device
self._handle_update(args, kwargs)
@abstractmethod
def _handle_update(self, *args, **kwargs) -> None:
"""Handle incoming webhook data."""
pass
class RachioStandbySwitch(RachioSwitch):
"""Representation of a standby status/button."""
def __init__(self, hass, controller):
"""Instantiate a new Rachio standby mode switch."""
dispatcher_connect(hass, SIGNAL_RACHIO_CONTROLLER_UPDATE,
self._handle_any_update)
super().__init__(controller, poll=False)
self._poll_update(controller.init_data)
@property
def name(self) -> str:
"""Return the name of the standby switch."""
return "{} in standby mode".format(self._controller.name)
@property
def unique_id(self) -> str:
"""Return a unique id by combinining controller id and purpose."""
return "{}-standby".format(self._controller.controller_id)
@property
def icon(self) -> str:
"""Return an icon for the standby switch."""
return "mdi:power"
def _poll_update(self, data=None) -> bool:
"""Request the state from the API."""
if data is None:
data = self._controller.rachio.device.get(
self._controller.controller_id)[1]
return not data[KEY_ON]
def _handle_update(self, *args, **kwargs) -> None:
"""Update the state using webhook data."""
if args[0][KEY_SUBTYPE] == SUBTYPE_SLEEP_MODE_ON:
self._state = True
elif args[0][KEY_SUBTYPE] == SUBTYPE_SLEEP_MODE_OFF:
self._state = False
self.schedule_update_ha_state()
def turn_on(self, **kwargs) -> None:
"""Put the controller in standby mode."""
self._controller.rachio.device.off(self._controller.controller_id)
def turn_off(self, **kwargs) -> None:
"""Resume controller functionality."""
self._controller.rachio.device.on(self._controller.controller_id)
class RachioZone(RachioSwitch):
"""Representation of one zone of sprinklers connected to the Rachio Iro."""
def __init__(self, hass, controller, data, manual_run_time):
"""Initialize a new Rachio Zone."""
self._id = data[KEY_ID]
self._zone_name = data[KEY_NAME]
self._zone_number = data[KEY_ZONE_NUMBER]
self._zone_enabled = data[KEY_ENABLED]
self._manual_run_time = manual_run_time
self._summary = str()
super().__init__(controller)
# Listen for all zone updates
dispatcher_connect(hass, SIGNAL_RACHIO_ZONE_UPDATE,
self._handle_update)
def __str__(self):
"""Display the zone as a string."""
return 'Rachio Zone "{}" on {}'.format(self.name,
str(self._controller))
@property
def zone_id(self) -> str:
"""How the Rachio API refers to the zone."""
return self._id
@property
def name(self) -> str:
"""Return the friendly name of the zone."""
return self._zone_name
@property
def unique_id(self) -> str:
"""Return a unique id by combinining controller id and zone number."""
return "{}-zone-{}".format(self._controller.controller_id,
self.zone_id)
@property
def icon(self) -> str:
"""Return the icon to display."""
return "mdi:water"
@property
def zone_is_enabled(self) -> bool:
"""Return whether the zone is allowed to run."""
return self._zone_enabled
@property
def state_attributes(self) -> dict:
"""Return the optional state attributes."""
return {
ATTR_ZONE_NUMBER: self._zone_number,
ATTR_ZONE_SUMMARY: self._summary,
}
def turn_on(self, **kwargs) -> None:
"""Start watering this zone."""
# Stop other zones first
self.turn_off()
# Start this zone
self._controller.rachio.zone.start(self.zone_id,
self._manual_run_time.seconds)
_LOGGER.debug("Watering %s on %s", self.name, self._controller.name)
def turn_off(self, **kwargs) -> None:
"""Stop watering all zones."""
self._controller.stop_watering()
def _poll_update(self, data=None) -> bool:
"""Poll the API to check whether the zone is running."""
schedule = self._controller.current_schedule
return self.zone_id == schedule.get(KEY_ZONE_ID)
def _handle_update(self, *args, **kwargs) -> None:
"""Handle incoming webhook zone data."""
if args[0][KEY_ZONE_ID] != self.zone_id:
return
self._summary = kwargs.get(KEY_SUMMARY, str())
if args[0][KEY_SUBTYPE] == SUBTYPE_ZONE_STARTED:
self._state = True
elif args[0][KEY_SUBTYPE] in [SUBTYPE_ZONE_STOPPED,
SUBTYPE_ZONE_COMPLETED]:
self._state = False
self.schedule_update_ha_state()
|
{
"content_hash": "1cc2c2de8ab88fe79187813fe59c382e",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 79,
"avg_line_length": 33.60176991150443,
"alnum_prop": 0.6027126678957071,
"repo_name": "jnewland/home-assistant",
"id": "1b650d7281a94d9bd95b7bec8e3bd20772223449",
"size": "7594",
"binary": false,
"copies": "7",
"ref": "refs/heads/ci",
"path": "homeassistant/components/rachio/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15240512"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17862"
}
],
"symlink_target": ""
}
|
import datetime
from xml.etree.ElementTree import fromstring
class Objectify(object):
    def __init__(self,tree,parent=None):
        self._parent = parent
        if isinstance(tree,str):
            self._tree = fromstring(tree)
        else:
            self._tree = tree
        # recurse so that every child element is objectified as well
        self._children = [Objectify(child, self) for child in self._tree]
#assigning attributes to the parent
if parent is not None:
#making the tags more pythonic - don't hate me!
tag = self._tree.tag
tag = tag.replace('-','_')
#getting the tags value
value = self._tree.text
#known type conversion
if 'type' in self._tree.attrib and value is not None:
kind = self._tree.attrib['type']
if kind == 'integer':
value = int(value)
elif kind == 'float':
value = float(value)
elif kind == 'boolean':
value = bool(value)
elif kind == 'date':
year, month, day = value.split('-')
value = datetime.datetime(int(year),int(month),int(day))
            #apply it to its parent
setattr(self._parent,tag,value)
def __repr__(self):
return self._tree.tag
def __iter__(self):
return self._children.__iter__()
def __getitem__(self,index):
try:
return self._children[index]
        except (IndexError, TypeError):
            # non-integer keys fall back to the attributes assigned in __init__
return getattr(self,index)
def get_children(self):
return self._children
children = property(get_children)
data = property(get_children)
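# A minimal usage sketch, illustrative and not part of the original file: child elements
# become attributes on their parent object, with type conversion driven by the "type"
# attribute.
if __name__ == '__main__':
    doc = Objectify('<person><first-name>Ada</first-name>'
                    '<age type="integer">30</age></person>')
    assert doc.first_name == 'Ada'   # '-' in the tag is rewritten to '_'
    assert doc.age == 30             # type="integer" triggers int() conversion
    assert [repr(child) for child in doc] == ['first-name', 'age']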
|
{
"content_hash": "e5cbc1b1dcf3d1e6a7bbe0b1a0c60abd",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 81,
"avg_line_length": 31.727272727272727,
"alnum_prop": 0.4991404011461318,
"repo_name": "erdosmiller/pybase",
"id": "9321fed2d8448f1546d7f986660b8246d4545b61",
"size": "1745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybase/objectify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15491"
}
],
"symlink_target": ""
}
|
__author__ = "Ryan Faulkner and Aaron Halfaker"
__date__ = "October 3rd, 2012"
__license__ = "GPL (version 2 or later)"
from user_metrics.config import logging
from collections import namedtuple
import user_metric as um
from user_metrics.metrics import query_mod
from user_metrics.etl.aggregator import weighted_rate, decorator_builder
class Blocks(um.UserMetric):
"""
    Adapted from Aaron Halfaker's implementation -- uses the logging table
to count blocks. This is a user quality metric used to assess whether
a user went on to do damage.
As a UserMetric type this class utilizes the process() function
attribute to produce an internal list of metrics by user handle
(typically ID but user names may also be specified). The execution of
process() produces a nested list that stores in each element:
* User ID
* Block count
* Date of first block
* Date of Last block
* Date of ban
The process method for this metric breaks conformity with other
metrics in that it expects usernames, and not user IDs, by default
(see example below).
Example: ::
>>> import user_metrics.metrics.blocks as b
>>> block_obj = b.Blocks(date_start='2011-01-01 00:00:00')
        >>> for r in block_obj.process(['11174885', '15132776']).__iter__():
        ...     print r
['15132776', 1L, '20110809143215', '20110809143215', -1]
['11174885', 2L, '20110830010835', '20120526192657', -1]
"""
# Structure that defines parameters for Blocks class
_param_types = \
{
'init': {},
'process': {}
}
# Define the metrics data model meta
_data_model_meta = \
{
'id_fields': [0],
'date_fields': [2, 3],
'float_fields': [],
'integer_fields': [1, 4],
'boolean_fields': [],
}
_agg_indices = \
{
'list_sum_indices': _data_model_meta['integer_fields'] +
_data_model_meta['float_fields'],
}
@um.pre_metrics_init
def __init__(self, **kwargs):
super(Blocks, self).__init__(**kwargs)
@staticmethod
def header():
return ['user_id',
'block_count',
'block_first',
'block_last',
'ban']
@um.UserMetric.pre_process_metric_call
def process(self, users, **kwargs):
"""
Process method for the "blocks" metric. Computes a list of
block and ban events for users.
Parameters:
- **user_handle** - List. List of user IDs.
- **is_id** - Boolean. Defaults to False.
Return:
- UserMetric::Blocks (self).
"""
rowValues = {}
for i in xrange(len(users)):
rowValues[users[i]] = {'block_count': 0, 'block_first': -1,
'block_last': -1, 'ban': -1}
# Data calls
user_map = query_mod.blocks_user_map_query(users, self.project)
query_args = namedtuple('QueryArgs', 'date_start')(self.datetime_start)
results = query_mod.blocks_user_query(users, self.project,
query_args)
# Process rows - extract block and ban events
for row in results:
userid = str(user_map[row[0]])
type = row[1]
count = row[2]
first = row[3]
last = row[4]
if type == "block":
rowValues[userid]['block_count'] = count
rowValues[userid]['block_first'] = first
rowValues[userid]['block_last'] = last
elif type == "ban":
rowValues[userid][type] = first
self._results = [[user, rowValues.get(user)['block_count'],
rowValues.get(user)['block_first'],
rowValues.get(user)['block_last'],
rowValues.get(user)['ban']]
for user in rowValues.keys()]
return self
# ==========================
# DEFINE METRIC AGGREGATORS
# ==========================
# Build "rate" decorator
block_rate_agg = weighted_rate
block_rate_agg = decorator_builder(Blocks.header())(block_rate_agg)
setattr(block_rate_agg, um.METRIC_AGG_METHOD_FLAG, True)
setattr(block_rate_agg, um.METRIC_AGG_METHOD_NAME, 'b_rate_agg')
setattr(block_rate_agg, um.METRIC_AGG_METHOD_HEAD, ['total_users',
'total_weight',
'rate'])
setattr(block_rate_agg, um.METRIC_AGG_METHOD_KWARGS, {
'val_idx': 1,
})
if __name__ == "__main__":
for r in Blocks().process(['11174885', '15132776']):
print r
|
{
"content_hash": "1cb4ed7e1eb5dc33fe975319581791d4",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 79,
"avg_line_length": 32.39473684210526,
"alnum_prop": 0.5259951259138912,
"repo_name": "rfaulkner/wikipedia_user_metrics",
"id": "7bb7e22efbf314de54ab860b843111cab85bd3fe",
"size": "4925",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "user_metrics/metrics/blocks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "58528"
},
{
"name": "Python",
"bytes": "320531"
},
{
"name": "Shell",
"bytes": "462"
}
],
"symlink_target": ""
}
|
import datetime
from urlparse import urlparse
from utils import log as logging
from django.shortcuts import get_object_or_404, render_to_response
from django.views.decorators.http import condition
from django.http import HttpResponseForbidden, HttpResponseRedirect, HttpResponse, Http404
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
# from django.db import IntegrityError
from apps.rss_feeds.models import Feed, merge_feeds
from apps.rss_feeds.models import MFetchHistory
from apps.rss_feeds.models import MFeedIcon
from apps.push.models import PushSubscription
from apps.analyzer.models import get_classifiers_for_user
from apps.reader.models import UserSubscription
from apps.rss_feeds.models import MStory
from utils.user_functions import ajax_login_required
from utils import json_functions as json, feedfinder2 as feedfinder
from utils.feed_functions import relative_timeuntil, relative_timesince
from utils.user_functions import get_user
from utils.view_functions import get_argument_or_404
from utils.view_functions import required_params
from utils.view_functions import is_true
from vendor.timezones.utilities import localtime_for_timezone
from utils.ratelimit import ratelimit
IGNORE_AUTOCOMPLETE = [
"facebook.com/feeds/notifications.php",
"inbox",
"secret",
"password",
"latitude",
]
@ajax_login_required
@json.json_view
def search_feed(request):
address = request.REQUEST.get('address')
offset = int(request.REQUEST.get('offset', 0))
if not address:
return dict(code=-1, message="Please provide a URL/address.")
logging.user(request.user, "~FBFinding feed (search_feed): %s" % address)
ip = request.META.get('HTTP_X_FORWARDED_FOR', None) or request.META['REMOTE_ADDR']
logging.user(request.user, "~FBIP: %s" % ip)
aggressive = request.user.is_authenticated()
feed = Feed.get_feed_from_url(address, create=False, aggressive=aggressive, offset=offset)
if feed:
return feed.canonical()
else:
return dict(code=-1, message="No feed found matching that XML or website address.")
@json.json_view
def load_single_feed(request, feed_id):
user = get_user(request)
feed = get_object_or_404(Feed, pk=feed_id)
classifiers = get_classifiers_for_user(user, feed_id=feed.pk)
payload = feed.canonical(full=True)
payload['classifiers'] = classifiers
return payload
def feed_favicon_etag(request, feed_id):
try:
feed_icon = MFeedIcon.objects.get(feed_id=feed_id)
except MFeedIcon.DoesNotExist:
return
return feed_icon.color
@condition(etag_func=feed_favicon_etag)
def load_feed_favicon(request, feed_id):
not_found = False
try:
feed_icon = MFeedIcon.objects.get(feed_id=feed_id)
except MFeedIcon.DoesNotExist:
not_found = True
if not_found or not feed_icon.data:
return HttpResponseRedirect(settings.MEDIA_URL + 'img/icons/circular/world.png')
icon_data = feed_icon.data.decode('base64')
return HttpResponse(icon_data, mimetype='image/png')
@json.json_view
def feed_autocomplete(request):
query = request.GET.get('term') or request.GET.get('query')
version = int(request.GET.get('v', 1))
format = request.GET.get('format', 'autocomplete')
# user = get_user(request)
# if True or not user.profile.is_premium:
# return dict(code=-1, message="Overloaded, no autocomplete results.", feeds=[], term=query)
if not query:
return dict(code=-1, message="Specify a search 'term'.", feeds=[], term=query)
if '.' in query:
try:
parts = urlparse(query)
if not parts.hostname and not query.startswith('http'):
parts = urlparse('http://%s' % query)
if parts.hostname:
query = [parts.hostname]
query.extend([p for p in parts.path.split('/') if p])
query = ' '.join(query)
except:
logging.user(request, "~FGAdd search, could not parse url in ~FR%s" % query)
query_params = query.split(' ')
tries_left = 5
while len(query_params) and tries_left:
tries_left -= 1
feed_ids = Feed.autocomplete(' '.join(query_params))
if feed_ids:
break
else:
query_params = query_params[:-1]
feeds = list(set([Feed.get_by_id(feed_id) for feed_id in feed_ids]))
feeds = [feed for feed in feeds if feed and not feed.branch_from_feed]
feeds = [feed for feed in feeds if all([x not in feed.feed_address for x in IGNORE_AUTOCOMPLETE])]
if format == 'autocomplete':
feeds = [{
'id': feed.pk,
'value': feed.feed_address,
'label': feed.feed_title,
'tagline': feed.data and feed.data.feed_tagline,
'num_subscribers': feed.num_subscribers,
} for feed in feeds]
else:
feeds = [feed.canonical(full=True) for feed in feeds]
feeds = sorted(feeds, key=lambda f: -1 * f['num_subscribers'])
feed_ids = [f['id'] for f in feeds]
feed_icons = dict((icon.feed_id, icon) for icon in MFeedIcon.objects.filter(feed_id__in=feed_ids))
for feed in feeds:
if feed['id'] in feed_icons:
feed_icon = feed_icons[feed['id']]
if feed_icon.data:
feed['favicon_color'] = feed_icon.color
feed['favicon'] = feed_icon.data
logging.user(request, "~FGAdd Search: ~SB%s ~SN(%s matches)" % (query, len(feeds),))
if version > 1:
return {
'feeds': feeds,
'term': query,
}
else:
return feeds
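# Illustrative walkthrough, not part of the original view: for a query such as
# "example.com/blog/tech", urlparse() only yields a hostname once the "http://" prefix is
# added, after which the query is rewritten to "example.com blog tech"; the loop above
# then drops trailing terms one at a time ("example.com blog", then "example.com") until
# Feed.autocomplete() returns matches or five attempts are used up.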
@ratelimit(minutes=1, requests=30)
@json.json_view
def load_feed_statistics(request, feed_id):
user = get_user(request)
timezone = user.profile.timezone
stats = dict()
feed = get_object_or_404(Feed, pk=feed_id)
feed.update_all_statistics()
feed.set_next_scheduled_update(verbose=True, skip_scheduling=True)
feed.save_feed_story_history_statistics()
feed.save_classifier_counts()
# Dates of last and next update
stats['active'] = feed.active
stats['last_update'] = relative_timesince(feed.last_update)
stats['next_update'] = relative_timeuntil(feed.next_scheduled_update)
stats['push'] = feed.is_push
if feed.is_push:
try:
stats['push_expires'] = localtime_for_timezone(feed.push.lease_expires,
timezone).strftime("%Y-%m-%d %H:%M:%S")
except PushSubscription.DoesNotExist:
stats['push_expires'] = 'Missing push'
feed.is_push = False
feed.save()
# Minutes between updates
update_interval_minutes = feed.get_next_scheduled_update(force=True, verbose=False)
stats['update_interval_minutes'] = update_interval_minutes
original_active_premium_subscribers = feed.active_premium_subscribers
original_premium_subscribers = feed.premium_subscribers
feed.active_premium_subscribers = max(feed.active_premium_subscribers+1, 1)
feed.premium_subscribers += 1
premium_update_interval_minutes = feed.get_next_scheduled_update(force=True, verbose=False,
premium_speed=True)
feed.active_premium_subscribers = original_active_premium_subscribers
feed.premium_subscribers = original_premium_subscribers
stats['premium_update_interval_minutes'] = premium_update_interval_minutes
stats['errors_since_good'] = feed.errors_since_good
# Stories per month - average and month-by-month breakout
average_stories_per_month, story_count_history = feed.average_stories_per_month, feed.data.story_count_history
stats['average_stories_per_month'] = average_stories_per_month
story_count_history = story_count_history and json.decode(story_count_history)
if story_count_history and isinstance(story_count_history, dict):
stats['story_count_history'] = story_count_history['months']
stats['story_days_history'] = story_count_history['days']
stats['story_hours_history'] = story_count_history['hours']
else:
stats['story_count_history'] = story_count_history
# Rotate hours to match user's timezone offset
localoffset = timezone.utcoffset(datetime.datetime.utcnow())
hours_offset = int(localoffset.total_seconds() / 3600)
rotated_hours = {}
for hour, value in stats['story_hours_history'].items():
rotated_hours[str(int(hour)+hours_offset)] = value
stats['story_hours_history'] = rotated_hours
# Subscribers
stats['subscriber_count'] = feed.num_subscribers
stats['num_subscribers'] = feed.num_subscribers
stats['stories_last_month'] = feed.stories_last_month
stats['last_load_time'] = feed.last_load_time
stats['premium_subscribers'] = feed.premium_subscribers
stats['active_subscribers'] = feed.active_subscribers
stats['active_premium_subscribers'] = feed.active_premium_subscribers
# Classifier counts
stats['classifier_counts'] = json.decode(feed.data.feed_classifier_counts)
# Fetch histories
fetch_history = MFetchHistory.feed(feed_id, timezone=timezone)
stats['feed_fetch_history'] = fetch_history['feed_fetch_history']
stats['page_fetch_history'] = fetch_history['page_fetch_history']
stats['feed_push_history'] = fetch_history['push_history']
logging.user(request, "~FBStatistics: ~SB%s" % (feed))
return stats
@json.json_view
def load_feed_settings(request, feed_id):
stats = dict()
feed = get_object_or_404(Feed, pk=feed_id)
user = get_user(request)
timezone = user.profile.timezone
fetch_history = MFetchHistory.feed(feed_id, timezone=timezone)
stats['feed_fetch_history'] = fetch_history['feed_fetch_history']
stats['page_fetch_history'] = fetch_history['page_fetch_history']
stats['feed_push_history'] = fetch_history['push_history']
stats['duplicate_addresses'] = feed.duplicate_addresses.all()
return stats
@ratelimit(minutes=5, requests=30)
@json.json_view
def exception_retry(request):
user = get_user(request)
feed_id = get_argument_or_404(request, 'feed_id')
reset_fetch = json.decode(request.POST['reset_fetch'])
feed = Feed.get_by_id(feed_id)
original_feed = feed
if not feed:
raise Http404
feed.schedule_feed_fetch_immediately()
changed = False
if feed.has_page_exception:
changed = True
feed.has_page_exception = False
if feed.has_feed_exception:
changed = True
feed.has_feed_exception = False
if not feed.active:
changed = True
feed.active = True
if changed:
feed.save(update_fields=['has_page_exception', 'has_feed_exception', 'active'])
original_fetched_once = feed.fetched_once
if reset_fetch:
logging.user(request, "~FRRefreshing exception feed: ~SB%s" % (feed))
feed.fetched_once = False
else:
logging.user(request, "~FRForcing refreshing feed: ~SB%s" % (feed))
feed.fetched_once = True
if feed.fetched_once != original_fetched_once:
feed.save(update_fields=['fetched_once'])
feed = feed.update(force=True, compute_scores=False, verbose=True)
feed = Feed.get_by_id(feed.pk)
try:
usersub = UserSubscription.objects.get(user=user, feed=feed)
except UserSubscription.DoesNotExist:
usersubs = UserSubscription.objects.filter(user=user, feed=original_feed)
if usersubs:
usersub = usersubs[0]
usersub.switch_feed(feed, original_feed)
else:
return {'code': -1}
usersub.calculate_feed_scores(silent=False)
feeds = {feed.pk: usersub and usersub.canonical(full=True), feed_id: usersub.canonical(full=True)}
return {'code': 1, 'feeds': feeds}
@ajax_login_required
@json.json_view
def exception_change_feed_address(request):
feed_id = request.POST['feed_id']
feed = get_object_or_404(Feed, pk=feed_id)
original_feed = feed
feed_address = request.POST['feed_address']
timezone = request.user.profile.timezone
code = -1
if False and (feed.has_page_exception or feed.has_feed_exception):
# Fix broken feed
logging.user(request, "~FRFixing feed exception by address: %s - ~SB%s~SN to ~SB%s" % (feed, feed.feed_address, feed_address))
feed.has_feed_exception = False
feed.active = True
feed.fetched_once = False
feed.feed_address = feed_address
duplicate_feed = feed.schedule_feed_fetch_immediately()
code = 1
if duplicate_feed:
new_feed = Feed.objects.get(pk=duplicate_feed.pk)
feed = new_feed
new_feed.schedule_feed_fetch_immediately()
new_feed.has_feed_exception = False
new_feed.active = True
new_feed = new_feed.save()
if new_feed.pk != feed.pk:
merge_feeds(new_feed.pk, feed.pk)
else:
# Branch good feed
logging.user(request, "~FRBranching feed by address: ~SB%s~SN to ~SB%s" % (feed.feed_address, feed_address))
try:
feed = Feed.objects.get(hash_address_and_link=Feed.generate_hash_address_and_link(feed_address, feed.feed_link))
except Feed.DoesNotExist:
feed = Feed.objects.create(feed_address=feed_address, feed_link=feed.feed_link)
code = 1
if feed.pk != original_feed.pk:
try:
feed.branch_from_feed = original_feed.branch_from_feed or original_feed
except Feed.DoesNotExist:
feed.branch_from_feed = original_feed
feed.feed_address_locked = True
feed = feed.save()
feed = feed.update()
feed = Feed.get_by_id(feed.pk)
try:
usersub = UserSubscription.objects.get(user=request.user, feed=feed)
except UserSubscription.DoesNotExist:
usersubs = UserSubscription.objects.filter(user=request.user, feed=original_feed)
if usersubs:
usersub = usersubs[0]
usersub.switch_feed(feed, original_feed)
else:
fetch_history = MFetchHistory.feed(feed_id, timezone=timezone)
return {
'code': -1,
'feed_fetch_history': fetch_history['feed_fetch_history'],
'page_fetch_history': fetch_history['page_fetch_history'],
'push_history': fetch_history['push_history'],
}
usersub.calculate_feed_scores(silent=False)
feed.update_all_statistics()
classifiers = get_classifiers_for_user(usersub.user, feed_id=usersub.feed_id)
feeds = {
original_feed.pk: usersub and usersub.canonical(full=True, classifiers=classifiers),
}
if feed and feed.has_feed_exception:
code = -1
fetch_history = MFetchHistory.feed(feed_id, timezone=timezone)
return {
'code': code,
'feeds': feeds,
'new_feed_id': usersub.feed_id,
'feed_fetch_history': fetch_history['feed_fetch_history'],
'page_fetch_history': fetch_history['page_fetch_history'],
'push_history': fetch_history['push_history'],
}
@ajax_login_required
@json.json_view
def exception_change_feed_link(request):
feed_id = request.POST['feed_id']
feed = get_object_or_404(Feed, pk=feed_id)
original_feed = feed
feed_link = request.POST['feed_link']
timezone = request.user.profile.timezone
code = -1
if False and (feed.has_page_exception or feed.has_feed_exception):
# Fix broken feed
logging.user(request, "~FRFixing feed exception by link: ~SB%s~SN to ~SB%s" % (feed.feed_link, feed_link))
found_feed_urls = feedfinder.find_feeds(feed_link)
if len(found_feed_urls):
code = 1
feed.has_page_exception = False
feed.active = True
feed.fetched_once = False
feed.feed_link = feed_link
feed.feed_address = found_feed_urls[0]
duplicate_feed = feed.schedule_feed_fetch_immediately()
if duplicate_feed:
new_feed = Feed.objects.get(pk=duplicate_feed.pk)
feed = new_feed
new_feed.schedule_feed_fetch_immediately()
new_feed.has_page_exception = False
new_feed.active = True
new_feed.save()
else:
# Branch good feed
logging.user(request, "~FRBranching feed by link: ~SB%s~SN to ~SB%s" % (feed.feed_link, feed_link))
try:
feed = Feed.objects.get(hash_address_and_link=Feed.generate_hash_address_and_link(feed.feed_address, feed_link))
except Feed.DoesNotExist:
feed = Feed.objects.create(feed_address=feed.feed_address, feed_link=feed_link)
code = 1
if feed.pk != original_feed.pk:
try:
feed.branch_from_feed = original_feed.branch_from_feed or original_feed
except Feed.DoesNotExist:
feed.branch_from_feed = original_feed
feed.feed_link_locked = True
feed.save()
feed = feed.update()
feed = Feed.get_by_id(feed.pk)
try:
usersub = UserSubscription.objects.get(user=request.user, feed=feed)
except UserSubscription.DoesNotExist:
usersubs = UserSubscription.objects.filter(user=request.user, feed=original_feed)
if usersubs:
usersub = usersubs[0]
usersub.switch_feed(feed, original_feed)
else:
fetch_history = MFetchHistory.feed(feed_id, timezone=timezone)
return {
'code': -1,
'feed_fetch_history': fetch_history['feed_fetch_history'],
'page_fetch_history': fetch_history['page_fetch_history'],
'push_history': fetch_history['push_history'],
}
usersub.calculate_feed_scores(silent=False)
feed.update_all_statistics()
classifiers = get_classifiers_for_user(usersub.user, feed_id=usersub.feed_id)
if feed and feed.has_feed_exception:
code = -1
feeds = {
original_feed.pk: usersub.canonical(full=True, classifiers=classifiers),
}
fetch_history = MFetchHistory.feed(feed_id, timezone=timezone)
return {
'code': code,
'feeds': feeds,
'new_feed_id': usersub.feed_id,
'feed_fetch_history': fetch_history['feed_fetch_history'],
'page_fetch_history': fetch_history['page_fetch_history'],
'push_history': fetch_history['push_history'],
}
@login_required
def status(request):
if not request.user.is_staff:
logging.user(request, "~SKNON-STAFF VIEWING RSS FEEDS STATUS!")
assert False
return HttpResponseForbidden()
minutes = int(request.GET.get('minutes', 1))
now = datetime.datetime.now()
hour_ago = now - datetime.timedelta(minutes=minutes)
feeds = Feed.objects.filter(last_update__gte=hour_ago).order_by('-last_update')
return render_to_response('rss_feeds/status.xhtml', {
'feeds': feeds
}, context_instance=RequestContext(request))
@json.json_view
def original_text(request):
story_id = request.REQUEST.get('story_id')
feed_id = request.REQUEST.get('feed_id')
story_hash = request.REQUEST.get('story_hash', None)
force = request.REQUEST.get('force', False)
debug = request.REQUEST.get('debug', False)
if story_hash:
story, _ = MStory.find_story(story_hash=story_hash)
else:
story, _ = MStory.find_story(story_id=story_id, story_feed_id=feed_id)
if not story:
logging.user(request, "~FYFetching ~FGoriginal~FY story text: ~FRstory not found")
return {'code': -1, 'message': 'Story not found.', 'original_text': None, 'failed': True}
original_text = story.fetch_original_text(force=force, request=request, debug=debug)
return {
'feed_id': story.story_feed_id,
'story_hash': story.story_hash,
'story_id': story.story_guid,
'original_text': original_text,
'failed': not original_text or len(original_text) < 100,
}
@required_params('story_hash')
def original_story(request):
story_hash = request.REQUEST.get('story_hash')
force = request.REQUEST.get('force', False)
debug = request.REQUEST.get('debug', False)
story, _ = MStory.find_story(story_hash=story_hash)
if not story:
logging.user(request, "~FYFetching ~FGoriginal~FY story page: ~FRstory not found")
return {'code': -1, 'message': 'Story not found.', 'original_page': None, 'failed': True}
original_page = story.fetch_original_page(force=force, request=request, debug=debug)
return HttpResponse(original_page or "")
@required_params('story_hash')
@json.json_view
def story_changes(request):
story_hash = request.REQUEST.get('story_hash', None)
show_changes = is_true(request.REQUEST.get('show_changes', True))
story, _ = MStory.find_story(story_hash=story_hash)
if not story:
logging.user(request, "~FYFetching ~FGoriginal~FY story page: ~FRstory not found")
return {'code': -1, 'message': 'Story not found.', 'original_page': None, 'failed': True}
return {
'story': Feed.format_story(story, show_changes=show_changes)
}
|
{
"content_hash": "6635178e318ce84b47ce8b59d3c6bfb4",
"timestamp": "",
"source": "github",
"line_count": 548,
"max_line_length": 134,
"avg_line_length": 39.26459854014598,
"alnum_prop": 0.6417716224380722,
"repo_name": "dosiecki/NewsBlur",
"id": "b92d4de6a94209effdc43cca5f28ad37916d9d54",
"size": "21517",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/rss_feeds/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "454"
},
{
"name": "CSS",
"bytes": "718958"
},
{
"name": "CoffeeScript",
"bytes": "7914"
},
{
"name": "Dockerfile",
"bytes": "1331"
},
{
"name": "HTML",
"bytes": "491943"
},
{
"name": "Java",
"bytes": "925655"
},
{
"name": "JavaScript",
"bytes": "1677551"
},
{
"name": "Objective-C",
"bytes": "2505306"
},
{
"name": "Perl",
"bytes": "55598"
},
{
"name": "Python",
"bytes": "2729277"
},
{
"name": "R",
"bytes": "527"
},
{
"name": "Ruby",
"bytes": "870"
},
{
"name": "Shell",
"bytes": "40999"
},
{
"name": "Swift",
"bytes": "3508"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
from cryptography.hazmat.backends.multibackend import MultiBackend
from cryptography.hazmat.bindings.commoncrypto.binding import (
Binding as CommonCryptoBinding
)
from cryptography.hazmat.bindings.openssl.binding import (
Binding as OpenSSLBinding
)
_available_backends_list = None
def _available_backends():
global _available_backends_list
if _available_backends_list is None:
_available_backends_list = []
if CommonCryptoBinding.is_available():
from cryptography.hazmat.backends import commoncrypto
_available_backends_list.append(commoncrypto.backend)
if OpenSSLBinding.is_available():
from cryptography.hazmat.backends import openssl
_available_backends_list.append(openssl.backend)
return _available_backends_list
_default_backend = None
def default_backend():
global _default_backend
if _default_backend is None:
_default_backend = MultiBackend(_available_backends())
return _default_backend
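# Minimal usage sketch, not part of the upstream module; which backends end up inside the
# MultiBackend depends on the bindings available at runtime.
if __name__ == "__main__":
    backend = default_backend()
    # the result is memoized, so repeated calls hand back the same MultiBackend instance
    assert backend is default_backend()
    print(backend)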
|
{
"content_hash": "b6eb176f91cad9b7f72377a89789dd89",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 66,
"avg_line_length": 26.609756097560975,
"alnum_prop": 0.7268560953253895,
"repo_name": "Lukasa/cryptography",
"id": "ae78822c6a2c523fde9d47de02222584e2456727",
"size": "1637",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cryptography/hazmat/backends/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "4062"
},
{
"name": "Python",
"bytes": "532603"
},
{
"name": "Shell",
"bytes": "8682"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from staticfiles.storage import CachedFilesMixin
from storages.backends.s3boto import S3BotoStorage
class CachedStaticS3BotoStorage(CachedFilesMixin, S3BotoStorage):
def __init__(self, *args, **kwargs):
kwargs.update(getattr(settings, "STATICFILES_S3_OPTIONS", {}))
super(CachedStaticS3BotoStorage, self).__init__(*args, **kwargs)
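# Hedged configuration sketch, not part of the original file: STATICFILES_S3_OPTIONS is
# simply unpacked into the S3BotoStorage constructor above, so a settings.py might contain
# something like the following -- the option keys are illustrative and must match keyword
# arguments accepted by the installed django-storages version:
#
#     STATICFILES_STORAGE = "crateweb.storage.CachedStaticS3BotoStorage"
#     STATICFILES_S3_OPTIONS = {"bucket": "my-static-bucket"}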
|
{
"content_hash": "9b31d269b6dabcacf98135fcca290d50",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 42.888888888888886,
"alnum_prop": 0.7564766839378239,
"repo_name": "crateio/crate.io",
"id": "9cfd0eb034c48cef97df1dc3fae92012355d461c",
"size": "386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crateweb/storage.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "20466"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Weapon()
result.template = "object/weapon/melee/sword/shared_sword_lightsaber_anakin.iff"
result.attribute_template_id = 10
result.stfName("weapon_name","sword_lightsaber_anakin")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "472a8a343f161aca0b368fb50b5c4156",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 25,
"alnum_prop": 0.7107692307692308,
"repo_name": "anhstudios/swganh",
"id": "aa5eff79a610d1497fb65d2bd3baf888c814a914",
"size": "470",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/weapon/melee/sword/shared_sword_lightsaber_anakin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
from flask import current_app
description = """
Help information. Trigger: "help" [in private chat]. For example:
* help
"""
def format_desc(plugin, prefix=' '):
name = plugin.__name__.split('.')[-1]
desc = getattr(plugin, 'description', '').strip()
    # add the prefix to every line of the description
desc = ('\n' + prefix).join(desc.split('\n'))
return '{name}:\n{prefix}{desc}'.format(
name=name, prefix=prefix, desc=desc
)
def test(data):
return 'help' in data['message']
def handle(data):
app = current_app
plugin_modules = app.plugin_modules if app else []
docs = []
for plugin in plugin_modules:
docs.append(format_desc(plugin))
return '\n'.join(docs)
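# --- Hedged example (illustration only; not part of the original plugin) ---
# format_desc() indents every line of a plugin's `description` and labels it
# with the plugin's module name; `plugins.echo` below is a made-up module.
if __name__ == "__main__":  # pragma: no cover
    import types
    fake_plugin = types.ModuleType("plugins.echo")
    fake_plugin.description = "Echo messages.\nTrigger: echo <text>"
    print(format_desc(fake_plugin))
    # echo:
    #     Echo messages.
    #     Trigger: echo <text>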
|
{
"content_hash": "6c57a19a6b09bc9d7c57ba96ebad51e2",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 54,
"avg_line_length": 22.517241379310345,
"alnum_prop": 0.5972434915773354,
"repo_name": "STANAPO/slack_bot",
"id": "2b6c1020afb5886934365cc8f33dccb99bdc28ac",
"size": "712",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "slack_bot/plugins/help.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86040"
}
],
"symlink_target": ""
}
|
import uuid
from oslo.config import cfg
import webob
from cinder.api import extensions
from cinder.api.v2 import snapshot_metadata
from cinder.api.v2 import snapshots
import cinder.db
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder import test
from cinder.tests.api import fakes
CONF = cfg.CONF
def return_create_snapshot_metadata_max(context,
snapshot_id,
metadata,
delete):
return stub_max_snapshot_metadata()
def return_create_snapshot_metadata(context, snapshot_id, metadata, delete):
return stub_snapshot_metadata()
def return_create_snapshot_metadata_insensitive(context, snapshot_id,
metadata, delete):
return stub_snapshot_metadata_insensitive()
def return_new_snapshot_metadata(context, snapshot_id, metadata, delete):
return stub_new_snapshot_metadata()
def return_snapshot_metadata(context, snapshot_id):
if not isinstance(snapshot_id, str) or not len(snapshot_id) == 36:
msg = 'id %s must be a uuid in return snapshot metadata' % snapshot_id
raise Exception(msg)
return stub_snapshot_metadata()
def return_empty_snapshot_metadata(context, snapshot_id):
return {}
def return_empty_container_metadata(context, snapshot_id, metadata, delete):
return {}
def delete_snapshot_metadata(context, snapshot_id, key):
pass
def stub_snapshot_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
return metadata
def stub_snapshot_metadata_insensitive():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4",
}
return metadata
def stub_new_snapshot_metadata():
metadata = {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
}
return metadata
def stub_max_snapshot_metadata():
metadata = {"metadata": {}}
for num in range(CONF.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
def return_snapshot(context, snapshot_id):
return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'status': 'available',
'metadata': {}}
def return_volume(context, volume_id):
return {'id': 'fake-vol-id',
'size': 100,
'name': 'fake',
'host': 'fake-host',
'status': 'available',
'encryption_key_id': None,
'volume_type_id': None,
'migration_status': None,
'metadata': {},
'project_id': context.project_id}
def return_snapshot_nonexistent(context, snapshot_id):
raise exception.SnapshotNotFound('bogus test message')
def fake_update_snapshot_metadata(self, context, snapshot, diff):
pass
class SnapshotMetaDataTest(test.TestCase):
def setUp(self):
super(SnapshotMetaDataTest, self).setUp()
self.volume_api = cinder.volume.api.API()
fakes.stub_out_key_pair_funcs(self.stubs)
self.stubs.Set(cinder.db, 'volume_get', return_volume)
self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot)
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(self.volume_api, 'update_snapshot_metadata',
fake_update_snapshot_metadata)
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr)
self.controller = snapshot_metadata.Controller()
self.req_id = str(uuid.uuid4())
self.url = '/v2/fake/snapshots/%s/metadata' % self.req_id
snap = {"volume_size": 100,
"volume_id": "fake-vol-id",
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
"host": "fake-host",
"metadata": {}}
body = {"snapshot": snap}
req = fakes.HTTPRequest.blank('/v2/snapshots')
self.snapshot_controller.create(req, body)
def test_index(self):
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
def test_index_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
def test_index_no_data(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
def test_show(self):
req = fakes.HTTPRequest.blank(self.url + '/key2')
res_dict = self.controller.show(req, self.req_id, 'key2')
expected = {'meta': {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
def test_show_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key2')
def test_show_meta_not_found(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key6')
def test_delete(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_delete',
delete_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.req_id, 'key2')
self.assertEqual(200, res.status_int)
def test_delete_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key1')
def test_delete_meta_not_found(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key6')
def test_create(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(body, res_dict)
def test_create_with_keys_in_uppercase_and_lowercase(self):
        # if the keys differ only in case, the response should contain the
        # ones that the server actually stored
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata_insensitive)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"KEY1": "value1",
"key2": "value2",
"KEY2": "value2",
"key3": "value3",
"KEY4": "value4"}}
expected = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_create_empty_body(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, None)
def test_create_item_empty_key(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, body)
def test_create_item_key_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req, self.req_id, body)
def test_create_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, self.req_id, body)
def test_update_all(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_new_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_with_keys_in_uppercase_and_lowercase(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_create_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_new_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {
'metadata': {
'key10': 'value10',
'KEY10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_update_all_empty_container(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_empty_container_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': {}}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_malformed_container(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'meta': {}}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_malformed_data(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': ['asdf']}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {'metadata': {'key10': 'value10'}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body)
def test_update_item(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res_dict = self.controller.update(req, self.req_id, 'key1', body)
expected = {'meta': {'key1': 'value1'}}
self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(
'/v2/fake/snapshots/asdf/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_empty_body(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
None)
def test_update_item_empty_key(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, '', body)
def test_update_item_key_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, ("a" * 260), body)
def test_update_item_value_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, "key1", body)
def test_update_item_too_many_keys(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1", "key2": "value2"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'bad',
body)
def test_invalid_metadata_items_on_create(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
#test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
#test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
#test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, data)
|
{
"content_hash": "5ab01278d491132296b851d979beb2e8",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 78,
"avg_line_length": 38.14100185528757,
"alnum_prop": 0.5680027240003891,
"repo_name": "NeCTAR-RC/cinder",
"id": "32d9eb205b454dcd6fb103287814df69ce27d667",
"size": "21194",
"binary": false,
"copies": "1",
"ref": "refs/heads/nectar/icehouse",
"path": "cinder/tests/api/v2/test_snapshot_metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "9824"
},
{
"name": "Python",
"bytes": "6176241"
},
{
"name": "Shell",
"bytes": "15237"
}
],
"symlink_target": ""
}
|
import re
# TODO(crbug.com/1227140): Clean up when py2 is no longer supported.
try:
_STRING_TYPE = basestring
except NameError: # pragma: no cover
_STRING_TYPE = str
from recipe_engine import recipe_api
class DepsDiffException(Exception):
pass
class RevisionResolver(object):
"""Resolves the revision based on build properties."""
def resolve(self, properties): # pragma: no cover
raise NotImplementedError()
class RevisionFallbackChain(RevisionResolver):
"""Specify that a given project's sync revision follows the fallback
chain."""
def __init__(self, default=None):
self._default = default
def resolve(self, properties):
"""Resolves the revision via the revision fallback chain.
If the given revision was set using the revision_fallback_chain() function,
this function will follow the chain, looking at relevant build properties
until it finds one set or reaches the end of the chain and returns the
default. If the given revision was not set using revision_fallback_chain(),
this function just returns it as-is.
"""
return (properties.get('parent_got_revision') or
properties.get('orig_revision') or
properties.get('revision') or
self._default)
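# --- Hedged example (illustration only; not part of the original module) ---
# resolve() walks the property chain in order, e.g.:
#   RevisionFallbackChain('HEAD').resolve({'revision': 'deadbeef'})  -> 'deadbeef'
#   RevisionFallbackChain('HEAD').resolve({})                        -> 'HEAD'
#   RevisionFallbackChain('HEAD').resolve(
#       {'parent_got_revision': 'abc', 'revision': 'def'})           -> 'abc'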
def jsonish_to_python(spec, is_top=False):
"""Turns a json spec into a python parsable object.
  This exists because Gclient specs, while resembling json, are actually
  ingested using a python "eval()". Therefore a bit of plumbing is required
  to turn our newly constructed Gclient spec into a gclient-readable spec.
"""
ret = ''
if is_top: # We're the 'top' level, so treat this dict as a suite.
ret = '\n'.join(
'%s = %s' % (k, jsonish_to_python(spec[k])) for k in sorted(spec)
)
else:
if isinstance(spec, dict):
ret += '{'
ret += ', '.join(
"%s: %s" % (repr(str(k)), jsonish_to_python(spec[k]))
for k in sorted(spec)
)
ret += '}'
elif isinstance(spec, list):
ret += '['
ret += ', '.join(jsonish_to_python(x) for x in spec)
ret += ']'
elif isinstance(spec, _STRING_TYPE):
ret = repr(str(spec))
else:
ret = repr(spec)
return ret
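# --- Hedged example (illustration only; not part of the original module) ---
# With is_top=True the dict is rendered as a python suite; otherwise a literal:
#   jsonish_to_python({'solutions': [{'name': 'src'}]}, is_top=True)
#   # -> "solutions = [{'name': 'src'}]"
#   jsonish_to_python(['a', 1])
#   # -> "['a', 1]"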
class GclientApi(recipe_api.RecipeApi):
# Singleton object to indicate to checkout() that we should run a revert if
# we detect that we're on the tryserver.
RevertOnTryserver = object()
def __init__(self, **kwargs):
super(GclientApi, self).__init__(**kwargs)
self.USE_MIRROR = None
self._spec_alias = None
def __call__(self, name, cmd, infra_step=True, **kwargs):
"""Wrapper for easy calling of gclient steps."""
assert isinstance(cmd, (list, tuple))
prefix = 'gclient '
if self.spec_alias:
prefix = ('[spec: %s] ' % self.spec_alias) + prefix
env_suffixes = {'PATH': [self.repo_resource()]}
env = {}
if self.m.buildbucket.build.id != 0:
env['DEPOT_TOOLS_REPORT_BUILD'] = '%s/%s/%s/%s' % (
self.m.buildbucket.build.builder.project,
self.m.buildbucket.build.builder.bucket,
self.m.buildbucket.build.builder.builder,
self.m.buildbucket.build.id)
with self.m.context(env=env, env_suffixes=env_suffixes):
cmd = ['vpython3', '-u', self.repo_resource('gclient.py')] + cmd
return self.m.step(prefix + name,
cmd,
infra_step=infra_step,
**kwargs)
@property
def use_mirror(self):
"""Indicates if gclient will use mirrors in its configuration."""
if self.USE_MIRROR is None:
self.USE_MIRROR = self.m.properties.get('use_mirror', True)
return self.USE_MIRROR
@use_mirror.setter
def use_mirror(self, val): # pragma: no cover
self.USE_MIRROR = val
@property
def spec_alias(self):
"""Optional name for the current spec for step naming."""
return self._spec_alias
@spec_alias.setter
def spec_alias(self, name):
self._spec_alias = name
@spec_alias.deleter
def spec_alias(self):
self._spec_alias = None
def get_config_defaults(self):
return {
'USE_MIRROR': self.use_mirror,
'CACHE_DIR': self.m.path['cache'].join('git'),
}
@staticmethod
def config_to_pythonish(cfg):
return jsonish_to_python(cfg.as_jsonish(), True)
# TODO(machenbach): Remove this method when the old mapping is deprecated.
@staticmethod
def got_revision_reverse_mapping(cfg):
"""Returns the merged got_revision_reverse_mapping.
Returns (dict): A mapping from property name -> project name. It merges the
values of the deprecated got_revision_mapping and the new
got_revision_reverse_mapping.
"""
rev_map = cfg.got_revision_mapping.as_jsonish()
reverse_rev_map = cfg.got_revision_reverse_mapping.as_jsonish()
combined_length = len(rev_map) + len(reverse_rev_map)
reverse_rev_map.update({v: k for k, v in rev_map.items()})
# Make sure we never have duplicate values in the old map.
assert combined_length == len(reverse_rev_map)
return reverse_rev_map
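  # --- Hedged example (illustration only; not part of the original module) ---
  # With the hypothetical configs
  #   got_revision_mapping         = {'src': 'got_revision'}
  #   got_revision_reverse_mapping = {'got_v8_revision': 'src/v8'}
  # the merged result is:
  #   {'got_revision': 'src', 'got_v8_revision': 'src/v8'}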
def resolve_revision(self, revision):
if hasattr(revision, 'resolve'):
return revision.resolve(self.m.properties)
return revision
def sync(self, cfg, extra_sync_flags=None, **kwargs):
revisions = []
self.set_patch_repo_revision(gclient_config=cfg)
for i, s in enumerate(cfg.solutions):
if i == 0 and s.revision is None:
s.revision = RevisionFallbackChain()
if s.revision is not None and s.revision != '':
fixed_revision = self.resolve_revision(s.revision)
if fixed_revision:
revisions.extend(['--revision', '%s@%s' % (s.name, fixed_revision)])
for name, revision in sorted(cfg.revisions.items()):
fixed_revision = self.resolve_revision(revision)
if fixed_revision:
revisions.extend(['--revision', '%s@%s' % (name, fixed_revision)])
test_data_paths = set(
list(self.got_revision_reverse_mapping(cfg).values()) +
[s.name for s in cfg.solutions])
step_test_data = lambda: (
self.test_api.output_json(test_data_paths))
try:
# clean() isn't used because the gclient sync flags passed in checkout()
# do much the same thing, and they're more correct than doing a separate
# 'gclient revert' because it makes sure the other args are correct when
# a repo was deleted and needs to be re-cloned (notably
# --with_branch_heads), whereas 'revert' uses default args for clone
# operations.
#
# TODO(mmoss): To be like current official builders, this step could
# just delete the whole <slave_name>/build/ directory and start each
# build from scratch. That might be the least bad solution, at least
# until we have a reliable gclient method to produce a pristine working
# dir for git-based builds (e.g. maybe some combination of 'git
# reset/clean -fx' and removing the 'out' directory).
j = '-j2' if self.m.platform.is_win else '-j8'
args = ['sync', '--verbose', '--nohooks', j, '--reset', '--force',
'--upstream', '--no-nag-max', '--with_branch_heads',
'--with_tags']
args.extend(extra_sync_flags or [])
if cfg.delete_unversioned_trees:
args.append('--delete_unversioned_trees')
self('sync', args + revisions +
['--output-json', self.m.json.output()],
step_test_data=step_test_data,
**kwargs)
finally:
result = self.m.step.active_result
if result.json.output is not None:
solutions = result.json.output['solutions']
for propname, path in sorted(
self.got_revision_reverse_mapping(cfg).items()):
# gclient json paths always end with a slash
info = solutions.get(path + '/') or solutions.get(path)
if info:
result.presentation.properties[propname] = info['revision']
return result
def inject_parent_got_revision(self, gclient_config=None, override=False):
"""Match gclient config to build revisions obtained from build_properties.
Args:
gclient_config (gclient config object) - The config to manipulate. A value
of None manipulates the module's built-in config (self.c).
override (bool) - If True, will forcibly set revision and custom_vars
even if the config already contains values for them.
"""
cfg = gclient_config or self.c
for prop, custom_var in cfg.parent_got_revision_mapping.items():
val = str(self.m.properties.get(prop, ''))
# TODO(infra): Fix coverage.
if val: # pragma: no cover
# Special case for 'src', inject into solutions[0]
if custom_var is None:
# This is not covered because we are deprecating this feature and
# it is no longer used by the public recipes.
if cfg.solutions[0].revision is None or override: # pragma: no cover
cfg.solutions[0].revision = val
else:
if custom_var not in cfg.solutions[0].custom_vars or override:
cfg.solutions[0].custom_vars[custom_var] = val
def checkout(self, gclient_config=None, revert=RevertOnTryserver,
inject_parent_got_revision=True, extra_sync_flags=None,
**kwargs):
"""Return a step generator function for gclient checkouts."""
cfg = gclient_config or self.c
assert cfg.complete()
if revert is self.RevertOnTryserver:
revert = self.m.tryserver.is_tryserver
if inject_parent_got_revision:
self.inject_parent_got_revision(cfg, override=True)
self('setup', ['config', '--spec', self.config_to_pythonish(cfg)], **kwargs)
sync_step = None
try:
sync_step = self.sync(cfg, extra_sync_flags=extra_sync_flags, **kwargs)
cfg_cmds = [
('user.name', 'local_bot'),
('user.email', 'local_bot@example.com'),
]
for var, val in cfg_cmds:
name = 'recurse (git config %s)' % var
self(name, ['recurse', 'git', 'config', var, val], **kwargs)
finally:
cwd = self.m.context.cwd or self.m.path['start_dir']
if 'checkout' not in self.m.path:
self.m.path['checkout'] = cwd.join(
*cfg.solutions[0].name.split(self.m.path.sep))
return sync_step
def runhooks(self, args=None, name='runhooks', **kwargs):
args = args or []
assert isinstance(args, (list, tuple))
with self.m.context(cwd=(self.m.context.cwd or self.m.path['checkout'])):
return self(name, ['runhooks'] + list(args), infra_step=False, **kwargs)
def break_locks(self):
"""Remove all index.lock files. If a previous run of git crashed, bot was
reset, etc... we might end up with leftover index.lock files.
"""
self.m.python.inline(
'cleanup index.lock',
"""
from __future__ import print_function
import os, sys
build_path = sys.argv[1]
if os.path.exists(build_path):
for (path, dir, files) in os.walk(build_path):
for cur_file in files:
if cur_file.endswith('index.lock'):
path_to_file = os.path.join(path, cur_file)
print('deleting %s' % path_to_file)
os.remove(path_to_file)
""",
args=[self.m.path['start_dir']],
infra_step=True,
)
def get_gerrit_patch_root(self, gclient_config=None):
"""Returns local path to the repo where gerrit patch will be applied.
If there is no patch, returns None.
    If a patch is specified, but its repo is not found among the configured
    solutions or repo_path_map, returns the name of the first solution. This is
    done solely for backward compatibility with existing tests.
Please do not rely on this logic in new code.
Instead, properly map a repository to a local path using repo_path_map.
TODO(nodir): remove this. Update all recipe tests to specify a git_repo
matching the recipe.
"""
cfg = gclient_config or self.c
repo_url = self.m.tryserver.gerrit_change_repo_url
if not repo_url:
return None
root = self.get_repo_path(repo_url, gclient_config=cfg)
# This is wrong, but that's what a ton of recipe tests expect today
root = root or cfg.solutions[0].name
return root
def _canonicalize_repo_url(self, repo_url):
"""Attempts to make repo_url canonical. Supports Gitiles URL."""
return self.m.gitiles.canonicalize_repo_url(repo_url)
def get_repo_path(self, repo_url, gclient_config=None):
"""Returns local path to the repo checkout given its url.
    Consults cfg.repo_path_map and falls back to urls in configured solutions.
Returns None if not found.
"""
rel_path = self._get_repo_path(repo_url, gclient_config=gclient_config)
if rel_path:
return self.m.path.join(*rel_path.split('/'))
return None
def _get_repo_path(self, repo_url, gclient_config=None):
repo_url = self._canonicalize_repo_url(repo_url)
cfg = gclient_config or self.c
rel_path, _ = cfg.repo_path_map.get(repo_url, ('', ''))
if rel_path:
return rel_path
# repo_path_map keys may be non-canonical.
for key, (rel_path, _) in cfg.repo_path_map.items():
if self._canonicalize_repo_url(key) == repo_url:
return rel_path
for s in cfg.solutions:
if self._canonicalize_repo_url(s.url) == repo_url:
return s.name
return None
def set_patch_repo_revision(self, gclient_config=None):
"""Updates config revision corresponding to patched project.
Useful for bot_update only, as this is the only consumer of gclient's config
revision map. This doesn't overwrite the revision if it was already set.
"""
cfg = gclient_config or self.c
repo_url = self.m.tryserver.gerrit_change_repo_url
path, revision = cfg.repo_path_map.get(repo_url, (None, None))
if path and revision and path not in cfg.revisions:
cfg.revisions[path] = revision
def diff_deps(self, cwd):
with self.m.context(cwd=cwd):
step_result = self.m.git(
'-c',
'core.quotePath=false',
'checkout',
'HEAD~',
'--',
'DEPS',
name='checkout the previous DEPS',
stdout=self.m.raw_io.output()
)
try:
cfg = self.c
step_result = self(
'recursively git diff all DEPS',
['recurse', 'python', self.resource('diff_deps.py')],
stdout=self.m.raw_io.output_text(add_output_log=True),
)
paths = []
# gclient recurse prepends a number and a > to each line
# Let's take that out
for line in step_result.stdout.strip().splitlines():
if 'fatal: bad object' in line:
msg = "Couldn't checkout previous ref: %s" % line
step_result.presentation.logs['DepsDiffException'] = msg
raise self.DepsDiffException(msg)
        elif re.match(r'\d+>', line):
paths.append(line[line.index('>') + 1:])
# Normalize paths
if self.m.platform.is_win:
# Looks like "analyze" wants POSIX slashes even on Windows (since git
# uses that format even on Windows).
paths = [path.replace('\\', '/') for path in paths]
if len(paths) > 0:
return paths
else:
msg = 'Unexpected result: autoroll diff found 0 files changed'
step_result.presentation.logs['DepsDiffException'] = msg
raise self.DepsDiffException(msg)
finally:
self.m.git(
'-c',
'core.quotePath=false',
'checkout',
'HEAD',
'--',
'DEPS',
name="checkout the original DEPS")
@property
def DepsDiffException(self):
return DepsDiffException
|
{
"content_hash": "f3fabd3946e72c5aa8fa9db9fdcb7869",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 80,
"avg_line_length": 36.04109589041096,
"alnum_prop": 0.6284049157481313,
"repo_name": "CoherentLabs/depot_tools",
"id": "92b4ee2b6ae5d1fddb24e4cd4167bd2838c2aae7",
"size": "15949",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "recipes/recipe_modules/gclient/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "27896"
},
{
"name": "PowerShell",
"bytes": "5337"
},
{
"name": "Python",
"bytes": "2549026"
},
{
"name": "Roff",
"bytes": "5283"
},
{
"name": "Shell",
"bytes": "64165"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function, unicode_literals
__doc__ = """
Looks for all exit and entry anchors anywhere in the font, and disables curs feature generation.
"""
def processLayer(thisLayer):
foundExitOrEntry = False
for thisAnchor in thisLayer.anchors:
if thisAnchor.name in ("exit", "entry"):
thisAnchor.name = "#%s" % thisAnchor.name
foundExitOrEntry = True
return foundExitOrEntry
def processGlyph(thisGlyph):
layerCount = 0
for thisLayer in thisGlyph.layers:
if processLayer(thisLayer):
layerCount += 1
if layerCount:
print("%s: changed anchor names on %i layer%s" % (
thisGlyph.name,
layerCount,
"" if layerCount == 1 else "s",
))
return 1
return 0
# clears the Macro window log:
Glyphs.clearLog()
thisFont = Glyphs.font # frontmost font
print("Looking for exit/entry in %s:" % thisFont.familyName)
print(thisFont.filepath)
print("Scanning %i glyphs..." % len(thisFont.glyphs))
print()
glyphCount = 0
for thisGlyph in thisFont.glyphs:
# thisGlyph.beginUndo() # undo grouping causes crashes
glyphCount += processGlyph(thisGlyph)
	# thisGlyph.endUndo() # undo grouping causes crashes
reportMessage = "Hashtagged exit/entry anchors in %i glyph%s." % (
glyphCount,
"" if glyphCount == 1 else "s",
)
print("\n%s\nDone." % reportMessage)
Message(title="Exit/Entry Prefix Report", message="Font ‘%s’: %s Detailed report in Macro Window." % (thisFont.familyName, reportMessage), OKButton=None)
|
{
"content_hash": "102cf89aaba159cae70d56519374a60f",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 153,
"avg_line_length": 30.20408163265306,
"alnum_prop": 0.7236486486486486,
"repo_name": "mekkablue/Glyphs-Scripts",
"id": "e4df7b3c29336661c75ce6bc01f70b2006658445",
"size": "1567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Anchors/Prefix all exit:entry anchors with a hashtag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2142474"
}
],
"symlink_target": ""
}
|
import os
import sys
here = os.path.abspath(os.path.dirname(__file__))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join(here, '..')))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
autodoc_member_order = 'bysource'
autodoc_default_flags = ['show-inheritance']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'acme-python'
copyright = u'2015-2015, Let\'s Encrypt Project'
author = u'Let\'s Encrypt Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
'_build',
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# https://docs.readthedocs.io/en/stable/faq.html#i-want-to-use-the-read-the-docs-theme-locally
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'acme-pythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'acme-python.tex', u'acme-python Documentation',
u'Let\'s Encrypt Project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'acme-python', u'acme-python Documentation',
[author], 1),
('man/jws', 'jws', u'jws script documentation', [project], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'acme-python', u'acme-python Documentation',
author, 'acme-python', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'josepy': ('https://josepy.readthedocs.io/en/latest/', None),
}
|
{
"content_hash": "d76d7673b85335dd2b6c516470d93ba1",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 94,
"avg_line_length": 32.90268456375839,
"alnum_prop": 0.6994390617032127,
"repo_name": "stweil/letsencrypt",
"id": "d419326df926c57aaf6d8b3acc367b52e05eb78d",
"size": "10229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acme/docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "14147"
},
{
"name": "Augeas",
"bytes": "4997"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4270"
},
{
"name": "Python",
"bytes": "1355274"
},
{
"name": "Shell",
"bytes": "120566"
},
{
"name": "Standard ML",
"bytes": "256"
}
],
"symlink_target": ""
}
|
default_app_config = 'mymoney.apps.banktransactionschedulers.apps.BankTransactionSchedulerConfig'
|
{
"content_hash": "42833696fcae8b3259559f0a03308f01",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 97,
"avg_line_length": 98,
"alnum_prop": 0.8775510204081632,
"repo_name": "ychab/mymoney",
"id": "06906497c50641330e9e8c7e82dc6749a77a3932",
"size": "98",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mymoney/apps/banktransactionschedulers/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "494"
},
{
"name": "HTML",
"bytes": "68172"
},
{
"name": "JavaScript",
"bytes": "5995"
},
{
"name": "Python",
"bytes": "392344"
},
{
"name": "Shell",
"bytes": "874"
}
],
"symlink_target": ""
}
|
import os
import re
import json
import random
import tempfile
from django import forms
from django.urls import reverse
from django.http import HttpResponse, JsonResponse
from django.views.generic.base import View
from django.views.decorators.http import require_http_methods
from django.shortcuts import get_object_or_404
from django.db.models import Q
from django.db import transaction
from edge.forms import FragmentForm
from edge.models import Fragment, Genome, Operation
from edge.io import IO
from edge import import_gff
from edge.tasks import build_genome_blastdb, build_genome_fragment_indices
IS_RO_SERVER = os.getenv("RO_SERVER", False) == "True"
def genome_fasta_export(request, genome_id):
get_genome_or_404(genome_id)
io = IO(Genome.objects.get(pk=genome_id))
response = HttpResponse(content_type="text/plain")
response["Content-Disposition"] = 'attachment; filename="g%s.fa"' % genome_id
io.to_fasta_file(response)
return response
def genome_gff_export(request, genome_id):
get_genome_or_404(genome_id)
io = IO(Genome.objects.get(pk=genome_id))
response = HttpResponse(content_type="text/plain")
response["Content-Disposition"] = 'attachment; filename="g%s.gff"' % genome_id
io.to_gff_file(response)
return response
@require_http_methods(["POST"])
def genome_import(request):
res = {
"imported_genomes": [],
}
for name in request.FILES:
with tempfile.NamedTemporaryFile(mode="w+b", delete=False) as gff:
            for chunk in request.FILES.get(name).chunks():
                gff.write(chunk)
g = import_gff(name, gff.name)
os.unlink(gff.name)
blastdb_task = build_genome_blastdb.delay(g.id)
res["imported_genomes"].append(
{"id": g.id, "name": g.name, "blastdb_task_id": blastdb_task.id,}
)
return JsonResponse(res)
def schedule_building_blast_db(genome_id, countdown=None):
    # Schedule building the genome BLAST DB in the future, so that (a) our
    # transaction has a chance to commit, and (b) we avoid an immediate
    # follow-up operation building the DB on demand at the same time as this
    # delayed build.
countdown = 30 if countdown is None else countdown
build_genome_blastdb.apply_async((genome_id,), countdown=countdown)
def get_genome_or_404(pk):
return get_object_or_404(Genome, pk=pk)
def get_fragment_or_404(pk):
return get_object_or_404(Fragment, pk=pk)
class ViewBase(View):
def get(self, request, *args, **kwargs):
res = self.on_get(request, *args, **kwargs)
return HttpResponse(json.dumps(res), content_type="application/json")
def put(self, request, *args, **kwargs):
res, status = self.on_put(request, *args, **kwargs)
return HttpResponse(
json.dumps(res), status=status, content_type="application/json"
)
def post(self, request, *args, **kwargs):
res, status = self.on_post(request, *args, **kwargs)
return HttpResponse(
json.dumps(res), status=status, content_type="application/json"
)
class RequestParser(object):
def __init__(self):
self.__args = []
def add_argument(
self, name, field_type, required=False, default=None, location="get"
):
if type(field_type) not in (list, tuple):
if field_type == str:
field_type = [bytes, str]
else:
field_type = [field_type]
self.__args.append((name, field_type, required, default, location))
def parse_args(self, request):
json_payload = None
args = {}
for name, field_type, required, default, location in self.__args:
if location == "get":
d = request.GET
elif location == "post":
d = request.POST
else:
if json_payload is None:
json_payload = json.loads(request.body)
d = json_payload
if name not in d and required:
raise Exception('Missing required field "%s"' % (name,))
if name not in d:
args[name] = default
else:
v = d[name]
if type(v) not in field_type:
if int in field_type:
v = int(v)
elif float in field_type:
v = float(v)
elif v is None and not required:
pass
else:
raise Exception(
'Field "%s" should be of type "%s", got "%s"'
% (name, field_type, type(v))
)
args[name] = v
return args
fragment_parser = RequestParser()
fragment_parser.add_argument("name", field_type=str, required=True, location="json")
fragment_parser.add_argument("sequence", field_type=str, required=True, location="json")
fragment_parser.add_argument(
"circular", field_type=bool, default=False, location="json"
)
class FragmentView(ViewBase):
@staticmethod
def to_dict(fragment, compute_length=True):
length = fragment.est_length
if compute_length is True and fragment.has_location_index:
length = fragment.indexed_fragment().length
return dict(
id=fragment.id,
uri=reverse("fragment", kwargs=dict(fragment_id=fragment.id)),
name=fragment.name,
circular=fragment.circular,
parent_id=fragment.parent.id if fragment.parent else None,
length=length,
)
def on_get(self, request, fragment_id):
fragment = get_fragment_or_404(fragment_id)
return FragmentView.to_dict(fragment)
class FragmentSequenceView(ViewBase):
def on_get(self, request, fragment_id):
q_parser = RequestParser()
q_parser.add_argument("f", field_type=int, location="get")
q_parser.add_argument("l", field_type=int, location="get")
args = q_parser.parse_args(request)
f = args["f"]
ll = args["l"]
fragment = get_fragment_or_404(fragment_id)
s = fragment.indexed_fragment().get_sequence(bp_lo=f, bp_hi=ll)
if f is None:
f = 1
if ll is None:
ll = f + len(s) - 1
return {"sequence": s, "base_first": f, "base_last": ll}
class FragmentAnnotationsView(ViewBase):
@staticmethod
def to_dict(annotation, include_feature_sequence=False):
result = dict(
base_first=annotation.base_first,
base_last=annotation.base_last,
strand=annotation.feature.strand,
feature_base_first=annotation.feature_base_first,
feature_base_last=annotation.feature_base_last,
feature=dict(id=annotation.feature.id,
name=annotation.feature.name,
type=annotation.feature.type,
length=annotation.feature.length,
qualifiers=annotation.feature.qualifiers),
# below fields are for backwards compatibility only, repeated in
# the .feature dictionary
name=annotation.feature.name,
type=annotation.feature.type,
qualifiers=annotation.feature.qualifiers,
feature_full_length=annotation.feature.length
)
if include_feature_sequence:
result['feature']['sequence'] = annotation.feature.sequence
return result
def on_get(self, request, fragment_id):
q_parser = RequestParser()
q_parser.add_argument("f", field_type=int, location="get")
q_parser.add_argument("l", field_type=int, location="get")
q_parser.add_argument("m", field_type=int, location="get")
q_parser.add_argument(
"include_feature_sequence",
field_type=str,
default='false',
location="get"
)
args = q_parser.parse_args(request)
f = args["f"]
ll = args["l"]
m = args["m"]
include_feature_sequence = args["include_feature_sequence"].lower() == "true"
fragment = get_fragment_or_404(fragment_id)
annotations = fragment.indexed_fragment().annotations(bp_lo=f, bp_hi=ll)
if m is not None and len(annotations) > m:
to_return = []
while len(to_return) < m:
i = random.randint(0, len(annotations) - 1)
to_return.append(annotations[i])
new_a = annotations[0:i] + annotations[i + 1 :]
annotations = new_a
annotations = to_return
return [
FragmentAnnotationsView.to_dict(
annotation, include_feature_sequence=include_feature_sequence
) for annotation in annotations
]
@transaction.atomic()
def on_post(self, request, fragment_id):
annotation_parser = RequestParser()
annotation_parser.add_argument(
"base_first", field_type=int, required=True, location="json"
)
annotation_parser.add_argument(
"base_last", field_type=int, required=True, location="json"
)
annotation_parser.add_argument(
"name", field_type=str, required=True, location="json"
)
annotation_parser.add_argument(
"type", field_type=str, required=True, location="json"
)
annotation_parser.add_argument(
"strand", field_type=int, required=True, location="json"
)
annotation_parser.add_argument(
"qualifiers", field_type=dict, required=False, default=None, location="json"
)
args = annotation_parser.parse_args(request)
fragment = get_fragment_or_404(fragment_id)
fragment = fragment.indexed_fragment()
fragment.annotate(
first_base1=args["base_first"],
last_base1=args["base_last"],
name=args["name"],
type=args["type"],
strand=args["strand"],
qualifiers=args["qualifiers"],
)
return {}, 201
class FragmentAnnotateChunksView(ViewBase):
@transaction.atomic()
def on_post(self, request, fragment_id):
annotation_parser = RequestParser()
annotation_parser.add_argument(
"bases", field_type=list, required=True, location="json"
)
annotation_parser.add_argument(
"name", field_type=str, required=True, location="json"
)
annotation_parser.add_argument(
"type", field_type=str, required=True, location="json"
)
annotation_parser.add_argument(
"strand", field_type=int, required=True, location="json"
)
annotation_parser.add_argument(
"qualifiers", field_type=dict, required=False, default=None, location="json"
)
args = annotation_parser.parse_args(request)
fragment = get_fragment_or_404(fragment_id)
fragment = fragment.indexed_fragment()
fragment.annotate_chunks(
bases=args["bases"],
name=args["name"],
type=args["type"],
strand=args["strand"],
qualifiers=args["qualifiers"],
)
return {}, 201
class FragmentListView(ViewBase):
def on_get(self, request):
q_parser = RequestParser()
q_parser.add_argument("q", field_type=str, location="get")
q_parser.add_argument("s", field_type=int, location="get", default=0)
q_parser.add_argument("p", field_type=int, location="get", default=100)
args = q_parser.parse_args(request)
s = args["s"]
p = args["p"]
q = args["q"]
p = 200 if p > 200 else p
if q is not None and q.strip() != "":
fragments = Fragment.user_defined_fragments(Q(name__icontains=q), s, s + p)
else:
fragments = Fragment.user_defined_fragments(None, s, s + p)
return [FragmentView.to_dict(fragment) for fragment in fragments]
@transaction.atomic()
def on_post(self, request):
args = fragment_parser.parse_args(request)
fragment = Fragment.create_with_sequence(
name=args["name"], sequence=args["sequence"], circular=args["circular"]
)
return FragmentView.to_dict(fragment), 201
class GenomeView(ViewBase):
@staticmethod
def op_to_dict(genome, op):
choices = Operation._meta.get_field("type").choices
type_str = [t[1] for t in choices if t[0] == op.type]
if len(type_str) > 0:
type_str = type_str[0]
else:
type_str = ""
annotations = {}
if genome.has_location_index:
genome = genome.indexed_genome()
for feature in op.feature_set.all():
fragment_annotations = genome.find_annotation_by_feature(feature)
            for fragment_id, feature_annotations in fragment_annotations.items():
                if fragment_id not in annotations:
                    annotations[fragment_id] = []
                annotations[fragment_id].extend(feature_annotations)
annotation_list = []
for fragment_id in annotations:
f = genome.fragments.filter(id=fragment_id)[0]
v = annotations[fragment_id]
v = [FragmentAnnotationsView.to_dict(x) for x in v]
for a in v:
a["fragment_id"] = fragment_id
a["fragment_name"] = f.name
annotation_list.append(a)
d = dict(
type=type_str, params=json.loads(op.params), annotations=annotation_list
)
return d
@staticmethod
def to_dict(
genome, compute_length=True, include_fragments=True, include_operations=True
):
operations = []
if include_operations:
for op in genome.operation_set.order_by("id"):
d = GenomeView.op_to_dict(genome, op)
operations.append(d)
fragments = None
if include_fragments:
fragments = []
for f in genome.fragments.all():
d = FragmentView.to_dict(f, compute_length=compute_length)
fragments.append(d)
d = dict(
id=genome.id,
uri=reverse("genome", kwargs=dict(genome_id=genome.id)),
name=genome.name,
notes=genome.notes,
parent_id=genome.parent_id,
parent_name=genome.parent.name if genome.parent is not None else "",
fragments=fragments,
)
if len(operations):
d["operations"] = operations
return d
def on_get(self, request, genome_id):
genome = get_genome_or_404(genome_id)
return GenomeView.to_dict(genome)
class GenomeAnnotationsView(ViewBase):
def on_get(self, request, genome_id):
genome = get_genome_or_404(genome_id)
q_parser = RequestParser()
q_parser.add_argument("q", field_type=str, required=True)
q_parser.add_argument("field", field_type=str, required=False, default=None)
args = q_parser.parse_args(request)
field = args["field"]
if not genome.has_location_index:
build_genome_fragment_indices.delay(genome.id)
return dict(
error="Missing genome indices, building. Please check back later."
)
res = []
if field is None:
fragment_annotations = genome.indexed_genome().find_annotation_by_name(
args["q"]
)
else:
fragment_annotations = genome.indexed_genome().find_annotation_by_qualifier(
args["q"], fields=[field]
)
for fragment_id in fragment_annotations:
fragment = get_fragment_or_404(fragment_id)
annotations = fragment_annotations[fragment_id]
d = FragmentView.to_dict(fragment)
a = [FragmentAnnotationsView.to_dict(x) for x in annotations]
res.append((d, a))
return res
class GenomeFragmentListView(ViewBase):
@transaction.atomic()
def on_post(self, request, genome_id): # adding new fragment
args = fragment_parser.parse_args(request)
genome = get_genome_or_404(genome_id)
fragment = None
fragment = genome.add_fragment(
name=args["name"], sequence=args["sequence"], circular=args["circular"]
)
return FragmentView.to_dict(fragment), 201
class GenomeDeriveView(ViewBase):
@transaction.atomic()
def on_post(self, request, genome_id):
genome = get_genome_or_404(genome_id)
data = json.loads(request.body)
cleaned_data = []
for entry in data:
form = FragmentForm(data=entry)
if not form.is_valid():
raise forms.ValidationError(form.errors)
cleaned_data.append(form.cleaned_data)
child = genome.update()
for entry in cleaned_data:
child.add_fragment(
entry["name"], entry["sequence"], circular=entry["circular"]
)
return GenomeView.to_dict(child), 201
class GenomeListView(ViewBase):
def on_get(self, request):
if "f" in request.GET:
fragment_ids = []
for f in request.GET.getlist("f"):
try:
fragment_ids.append(int(f))
except ValueError:
return []
if len(fragment_ids) == 0:
return []
sql_joins = 5
q = Genome.objects.filter(genome_fragment__fragment_id=fragment_ids[0])
for i in range(1, sql_joins):
if i < len(fragment_ids):
q = q.filter(genome_fragment__fragment_id=fragment_ids[i])
candidates = list(q)
genomes = []
for g in candidates:
x = Genome.objects.raw(
"""
SELECT edge_genome.id,
GROUP_CONCAT(edge_genome_fragment.fragment_id) as fragment_id_list
FROM edge_genome
JOIN edge_genome_fragment ON edge_genome_fragment.genome_id = edge_genome.id
WHERE edge_genome.id = %s
""",
[g.id],
)
x = list(x)[0]
id_list = [int(n) for n in x.fragment_id_list.split(",")]
if set(id_list) == set(fragment_ids):
genomes.append(g)
else:
q_parser = RequestParser()
q_parser.add_argument("q", field_type=str, location="get")
q_parser.add_argument("s", field_type=int, location="get", default=0)
q_parser.add_argument("p", field_type=int, location="get", default=100)
args = q_parser.parse_args(request)
s = args["s"]
p = args["p"]
q = args["q"]
p = 200 if p > 200 else p
if q is not None and q.strip() != "":
where = Q(name__icontains=q)
try:
int(q) # See if q can be converted to an int
except BaseException:
pass
else:
where = where | Q(id=q)
genomes = (
Genome.objects.filter(active=True)
.filter(where)
.order_by("-id")[s : s + p]
)
else:
genomes = Genome.objects.filter(active=True).order_by("-id")[s : s + p]
return [
GenomeView.to_dict(
genome,
compute_length=False,
include_fragments=False,
include_operations=False,
)
for genome in genomes
]
@transaction.atomic()
def on_post(self, request):
genome_parser = RequestParser()
genome_parser.add_argument(
"name", field_type=str, required=True, location="json"
)
genome_parser.add_argument("notes", field_type=str, location="json")
args = genome_parser.parse_args(request)
genome = Genome.create(name=args["name"], notes=args["notes"])
return GenomeView.to_dict(genome), 201
class GenomeBlastView(ViewBase):
def on_post(self, request, genome_id):
from edge.blast import blast_genome, EDGE_BLAST_DEFAULT_WORD_SIZE
from edge.blastdb import check_and_build_genome_db
genome = get_genome_or_404(genome_id)
check_and_build_genome_db(genome)
parser = RequestParser()
parser.add_argument("query", field_type=str, required=True, location="json")
parser.add_argument("program", field_type=str, required=True, location="json")
parser.add_argument(
"word_size",
field_type=int,
required=False,
default=EDGE_BLAST_DEFAULT_WORD_SIZE,
location="json"
)
args = parser.parse_args(request)
results = blast_genome(genome, args["program"], args["query"], word_size=args['word_size'])
results = [r.to_dict() for r in results]
print(results)
return results, 200
class GenomePcrView(ViewBase):
def on_post(self, request, genome_id):
from edge.pcr import pcr_from_genome
from edge.blast import EDGE_BLAST_DEFAULT_WORD_SIZE
from edge.blastdb import check_and_build_genome_db
genome = get_genome_or_404(genome_id)
check_and_build_genome_db(genome)
parser = RequestParser()
parser.add_argument("primers", field_type=list, required=True, location="json")
parser.add_argument(
"include_feature_sequence",
field_type=bool,
required=False,
default=False,
location="json"
)
parser.add_argument(
"blast_word_size",
field_type=int,
required=False,
default=EDGE_BLAST_DEFAULT_WORD_SIZE,
location="json"
)
args = parser.parse_args(request)
primers = args["primers"]
include_feature_sequence = args["include_feature_sequence"]
if len(primers) != 2:
raise Exception("Expecting two primers, got %s" % (primers,))
(product, primer_a_results, primer_b_results, template_info) = pcr_from_genome(
genome, primers[0], primers[1], blast_word_size=args["blast_word_size"]
)
# Convert annotations in template_info to dictionary.
if template_info and "annotations" in template_info:
template_info["annotations"] = [
FragmentAnnotationsView.to_dict(
annotation,
include_feature_sequence=include_feature_sequence
)
for annotation in template_info["annotations"]
]
r = (
product,
[b.to_dict() for b in primer_a_results],
[b.to_dict() for b in primer_b_results],
template_info,
)
return r, 200
class GenomeOperationViewBase(ViewBase):
def on_post(self, request, genome_id):
from edge.blastdb import check_and_build_genome_db
genome = get_genome_or_404(genome_id)
if IS_RO_SERVER and (not genome.has_location_index or not genome.blastdb):
return [], 409 # 409 is "Conflict" - caller is expected to retry request on RW server
check_and_build_genome_db(genome)
# always require a 'create' argument
parser = RequestParser()
parser.add_argument("create", field_type=bool, required=True, location="json")
args = parser.parse_args(request)
create = args["create"]
errors = []
parsed = self.parse_arguments(request, errors)
if parsed is None:
return dict(errors=" ".join(errors)), 400
args, op_class = parsed
if create is False:
r = op_class.check(genome, **args)
if r is None:
return [], 200
return [x.to_dict() for x in r], 200
else:
child = None
status_code = 400
with transaction.atomic():
op = op_class.get_operation(**args)
# find an existing child genome with the same operation
for existing_child in genome.children.all():
if (
existing_child.operation_set.count() == 1
and existing_child.operation_set.all()[0].type == op.type
and existing_child.operation_set.all()[0].params == op.params
):
child = existing_child
if child is None:
child = op_class.perform(genome, **args)
if child:
print(
f"Generated child genome {child.id} from parent genome {genome_id}"
)
status_code = 201
else: # found existing child, update genome name and set to active
if "genome_name" in args:
child.name = args["genome_name"]
child.active = True
child.save()
status_code = 200
if child is None:
return None, 400
else:
# scheduling tasks outside of transaction block
schedule_building_blast_db(child.id)
return GenomeView.to_dict(child), status_code
class GenomeCrisprDSBView(GenomeOperationViewBase):
def parse_arguments(self, request, errors):
from edge.crispr import CrisprOp
parser = RequestParser()
parser.add_argument(
"guide", field_type=str, required=True, default=None, location="json"
)
parser.add_argument(
"pam", field_type=str, required=True, default=None, location="json"
)
parser.add_argument(
"genome_name", field_type=str, required=False, default=None, location="json"
)
parser.add_argument(
"notes", field_type=str, required=False, default=None, location="json"
)
args = parser.parse_args(request)
guide = args["guide"]
pam = args["pam"]
genome_name = args["genome_name"]
notes = args["notes"]
return (
dict(guide=guide, pam=pam, genome_name=genome_name, notes=notes),
CrisprOp,
)
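# Illustrative note (not part of the original module, hypothetical values): this view
# is dispatched through GenomeOperationViewBase.on_post, so a POST body combines the
# shared "create" flag with the fields parsed above, e.g.
#
#   {"create": false, "guide": "ACGTACGTACGTACGTACGT", "pam": "NGG",
#    "genome_name": "my-edited-genome", "notes": null}
#
# With "create" false the operation is only checked; with true a child genome is built.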
def validate_annotations(cassette, annotations):
"""
If the user supplied annotations for the donor sequence, we require the
donor DNA to contain no overhangs or backbone modifications, so the
annotations can be applied without ambiguity. This method returns True
if that is the case and every annotation has all the required fields.
"""
if (
re.match(r"^[A-Za-z]+$", cassette)
and len(
[
a
for a in annotations
if "base_first" not in a
or "base_last" not in a
or "name" not in a
or "type" not in a
or "strand" not in a
]
)
== 0
):
return True
# if there are no supplied annotations, we don't care what format the
# donor sequence is
return len(annotations) == 0
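# Illustrative sketch (not part of the original module): how validate_annotations
# treats a few hypothetical payloads. Wrapped in a helper so nothing runs at import time.
def _validate_annotations_examples():
    complete = [{"base_first": 1, "base_last": 9, "name": "geneX",
                 "type": "gene", "strand": 1}]
    # Letters-only cassette plus fully specified annotations: accepted.
    assert validate_annotations("ATGCATGCA", complete)
    # Any non-letter character (e.g. an overhang marker) rejects an annotated donor.
    assert not validate_annotations("ATG~CATGCA", complete)
    # Without annotations, the cassette format is not checked.
    assert validate_annotations("ATG~CATGCA", [])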
class GenomeRecombinationView(GenomeOperationViewBase):
DEFAULT_HA_LENGTH = 30
def parse_arguments(self, request, errors):
from edge.recombine import RecombineOp
parser = RequestParser()
parser.add_argument("cassette", field_type=str, required=True, location="json")
parser.add_argument(
"homology_arm_length", field_type=int, required=False, location="json"
)
parser.add_argument(
"genome_name", field_type=str, required=False, default=None, location="json"
)
parser.add_argument(
"cassette_name",
field_type=str,
required=False,
default=None,
location="json",
)
parser.add_argument(
"notes", field_type=str, required=False, default=None, location="json"
)
parser.add_argument(
"design_primers",
field_type=bool,
required=False,
default=False,
location="json",
)
parser.add_argument(
"primer3_opts",
field_type=dict,
required=False,
default=None,
location="json",
)
parser.add_argument(
"annotations",
field_type=list,
required=False,
default=None,
location="json",
)
args = parser.parse_args(request)
cassette = args["cassette"].strip()
homology_arm_length = args["homology_arm_length"]
genome_name = args["genome_name"]
cassette_name = args["cassette_name"]
notes = args["notes"]
design_primers = args["design_primers"]
primer3_opts = args["primer3_opts"]
annotations = args["annotations"]
if primer3_opts is None:
primer3_opts = {}
if annotations and (validate_annotations(cassette, annotations) is False):
errors.append(
"Annotations failed validation: \
please make sure donor sequence does not have overhangs \
and annotation array elements have all the required fields."
)
return None
if homology_arm_length is None:
homology_arm_length = GenomeRecombinationView.DEFAULT_HA_LENGTH
return (
dict(
cassette=cassette,
homology_arm_length=homology_arm_length,
genome_name=genome_name,
cassette_name=cassette_name,
notes=notes,
design_primers=design_primers,
primer3_opts=primer3_opts,
annotations=annotations,
),
RecombineOp,
)
class GenomeSSRView(GenomeOperationViewBase):
def parse_arguments(self, request, errors):
from edge.ssr.op import SSROp
parser = RequestParser()
parser.add_argument("donor", field_type=str, required=False, default=None, location="json")
parser.add_argument("is_donor_circular", field_type=bool, required=False, default=None,
location="json")
parser.add_argument("reaction", field_type=str, required=True, location="json")
parser.add_argument("genome_name", field_type=str, required=False, default=None,
location="json")
parser.add_argument("notes", field_type=str, required=False, default=None, location="json")
parser.add_argument(
"annotations",
field_type=list,
required=False,
default=None,
location="json",
)
args = parser.parse_args(request)
donor = args["donor"].strip() if args["donor"] else None
is_donor_circular = args["is_donor_circular"]
reaction = args["reaction"]
genome_name = args["genome_name"]
notes = args["notes"]
annotations = args["annotations"]
if annotations and (validate_annotations(donor, annotations) is False):
errors.append(
"Annotations failed validation: \
please make sure donor sequence does not have overhangs \
and annotation array elements have all the required fields."
)
return None
return (
dict(
donor=donor,
is_donor_circular=is_donor_circular,
reaction_name=reaction,
genome_name=genome_name,
notes=notes,
annotations=annotations,
),
SSROp,
)
class GenomeDiffView(ViewBase):
def on_get(self, request, child_genome_id, parent_genome_id):
child_genome = get_genome_or_404(child_genome_id)
child_genome = child_genome.indexed_genome()
try:
regions = child_genome.get_coordinate_diff_from_parent_genome(parent_genome_id)
return regions
except Exception as e:
return {'error': str(e)}
|
{
"content_hash": "8bb68b9cafdfaa5f33777549cb811575",
"timestamp": "",
"source": "github",
"line_count": 922,
"max_line_length": 99,
"avg_line_length": 35.399132321041215,
"alnum_prop": 0.5642808995649243,
"repo_name": "ginkgobioworks/edge",
"id": "f07226c26f62a6705d876d00fc80f469eed8190e",
"size": "32638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/edge/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2553"
},
{
"name": "Dockerfile",
"bytes": "1203"
},
{
"name": "HTML",
"bytes": "32885"
},
{
"name": "JavaScript",
"bytes": "27599"
},
{
"name": "Makefile",
"bytes": "3665"
},
{
"name": "Python",
"bytes": "826040"
},
{
"name": "Shell",
"bytes": "227"
}
],
"symlink_target": ""
}
|
"""Client for CKAN data repositories
Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
from databundles.run import get_runconfig
import ckanclient
import databundles.client.exceptions as Exceptions
import requests
import json
def get_client(rc=None, name=None):
from databundles.dbexceptions import ConfigurationError
if rc is None:
rc = get_runconfig()
if name is None:
name = 'default'
try:
catalog = rc.group('catalog')
cfg = rc.catalog.get(name)
url = cfg.url
key = cfg.key
except Exception as e:
raise ConfigurationError(("Failed to get configuration for catalog.{0}.url or "+
"catalog.{0}.key: {1}").format(name, e))
return Ckan(url, key)
class Ckan(object):
'''
Thin client for a CKAN repository's REST API.
'''
def __init__(self, url, key):
'''
Store the repository URL (appending API version 2 if missing) and the API key.
'''
import re
if not re.search("/\d$", url): # prefer version 2 of the api
url += '/2'
pass
# Instantiate the CKAN client.
self.url = url
self.key = key
@property
def auth_headers(self):
return {'Authorization': self.key,
'X-CKAN-API-Key': self.key,
'Content-Type': 'application/json; charset=utf-8'
}
def translate_name(self,name):
return name.lower().replace('.','_')
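# e.g. (hypothetical name): translate_name('Census.Gov-2010') -> 'census_gov-2010'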
def get_or_new_group(self,name):
url = self.url+'/rest/group/{name}'
try:
r = requests.get(url.format(name=name.lower()))
r.raise_for_status()
except requests.exceptions.HTTPError:
payload = {
'name': name.lower(),
'title': name,
'description': name
}
r = requests.post(self.url+'/rest/group',
headers = self.auth_headers,
data=json.dumps(payload))
try:
r.raise_for_status()
except Exception as e:
print r.content
raise
return r.json()
#
# Packages
#
def list_packages(self):
r = requests.get(self.url+'/rest/package')
r.raise_for_status()
return r.json()
def entity_from_bundle(self, name, bundle ):
props = bundle.config.group('properties')
if not name:
name = bundle.identity.name
import datetime
t = str(datetime.datetime.now())
return {
'title': (props.get('title',None)),
'name': name,
'author_email' : bundle.identity.creator,
'author': props.get('author',None),
'maintainer_email' : bundle.identity.creator,
'maintainer': props.get('maintainer',None),
'extras': {
'bundle/type' : 'bundle',
'bundle/source' : bundle.identity.source,
'bundle/dataset' : bundle.identity.dataset,
'bundle/subset' : bundle.identity.subset,
'bundle/variation' : bundle.identity.variation,
'bundle/revision' : bundle.identity.revision,
'bundle/id' : bundle.identity.id_,
'bundle/name' : bundle.identity.name
},
'version': bundle.identity.revision,
'homepage': props.get('homepage',None),
'url': props.get('url',None),
'notes': props.get('notes',None),
'tags': props.get('tags',None),
}
def merge_dict(self,old, new, recurse=True):
out = {}
old_extras = old.get('extras', {})
if len(old_extras): del old['extras']
new_extras = new.get('extras', {})
if len(new_extras): del new['extras']
# Copy over the new items
for k,v in new.items():
if v is None:
pass
else:
out[k] = v
# copy over the old items that don't already exist
for k,v in old.items():
if v is not None and not out.get(k,False):
out[k] = v
if recurse:
out['extras'] = self.merge_dict(old_extras, new_extras, False)
return out
def get_package(self, id_):
r = requests.get(self.url+'/rest/package/{id}'.format(id=id_),
headers = self.auth_headers)
try:
r.raise_for_status()
return r.json()
except Exception as e:
print "ERROR: "+r.content
raise e
def put_package(self, pe):
data = json.dumps(pe)
url = self.url+'/rest/package/{id}'.format(id=pe['id'])
r = requests.put(url, headers = self.auth_headers, data = data )
try:
r.raise_for_status()
return r.json()
except Exception as e:
print "ERROR: "+r.content
raise e
def update_or_new_bundle(self, bundle, type='bundle', name=None,
title=None, group_names=None,):
'''Create a new package for a bundle.'''
import datetime
if name is None:
name = self.translate_name(bundle.identity.name)
else:
name = self.translate_name(name)
if not group_names:
group_names = ['bundles']
groups = [self.get_or_new_group(group_name) for group_name in group_names]
try:
r = requests.get(self.url+'/rest/package/{name}'.format(name=name))
r.raise_for_status()
except requests.exceptions.HTTPError:
# Create minimal package, since we always update next.
payload = {'name': name}
r = requests.post(self.url+'/rest/package',
headers = self.auth_headers,
data=json.dumps(payload))
r.raise_for_status()
new_payload = self.entity_from_bundle(name, bundle)
payload = self.merge_dict(r.json(), new_payload)
if title is None:
title = bundle.config.about.title.format(
datetime=datetime.datetime.now().isoformat('T'),
date=datetime.date.today().isoformat()
)
description = bundle.config.about.get('description','').format(
datetime=datetime.datetime.now().isoformat('T'),
date=datetime.date.today().isoformat()
)
payload['notes'] = description
payload['title'] = title
payload['groups'] = [group['id'] for group in groups]
payload['license_id'] = bundle.config.about.get('license','other')
r = requests.post(self.url+'/rest/package/{name}'.format(name=name),
headers = self.auth_headers,
data=json.dumps(payload))
try:
r.raise_for_status()
return r.json()
except Exception as e:
print r.content
raise e
def update_or_new_bundle_extract(self, bundle, name=None, **kwargs):
if name is None:
name = self.translate_name(bundle.identity.name+'-extract')
else:
name = self.translate_name(name)
group_names = kwargs.get('group_names',[])
group_names.append('extracts')
for group_name in bundle.config.group('about').get('groups',[]):
group_names.append(group_name)
return self.update_or_new_bundle(bundle, name=name, group_names=group_names, **kwargs)
def delete_package(self, id_):
url = self.url+'/rest/package/{id}'.format(id=id_)
r = requests.delete(url,headers = self.auth_headers)
r.raise_for_status()
return
def upload_file(self,file_path, name=None):
"""Upload a file to the repository and return the URL, or an exception on
errors"""
from datetime import datetime
import os
import urlparse
import re
# see ckan/public/application.js:makeUploadKey for why the file_key
# is derived this way.
ts = datetime.isoformat(datetime.now()).replace(':','').split('.')[0]
if name is None:
name = os.path.basename(file_path)
norm_name = name.replace(' ', '-')
file_key = os.path.join(ts, norm_name)
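# e.g. (hypothetical): file_key == '2013-01-01T120000/annual-report.csv'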
# Inexplicably, this URL can't have the version number
url = re.sub('\/\d$','', self.url)+'/storage/auth/form/{}'.format(file_key.strip('/'))
r = requests.get(url,headers = self.auth_headers)
url_path = r.json()['action']
files = [('file', os.path.basename(file_key), open(file_path).read())]
fields = [('key', file_key)]
content_type, body = self._encode_multipart_formdata(fields, files)
headers= self.auth_headers
headers['Content-Type'] = content_type
headers['Content-Length'] = str(len(body))
# And this one not only doesn't have the api version, it also doesn't have
# 'api'
netloc = urlparse.urlparse(self.url).netloc
url = 'http://'+ netloc+ url_path
r = requests.post(url,headers = headers,data=body)
try:
r.raise_for_status()
except:
print 'ERROR for url: {}'.format(url)
print r.content
raise
return '%s/storage/f/%s' % (re.sub('/api\/\d$','', self.url), file_key)
def md5_for_file(self, file_, block_size=2**20):
'''Compute the MD5 for a file without taking up too much memory'''
import hashlib
md5 = hashlib.md5()
with open(file_, 'r') as f:
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
def add_file_resource(self, pe, file_path, name,
resource_type = 'data',
content_type=None,
**kwargs):
import os
import mimetypes
server_url = self.upload_file(file_path, name=name) #@UnusedVariable
md5 = self.md5_for_file(file_path)
st = os.stat(file_path)
package_url = self.url+'/rest/package/{id}'.format(id=pe['id'])
# Fetch the pe again, in case the one passed in was incomplete.
r = requests.get(package_url,headers = self.auth_headers)
r.raise_for_status()
pe2 = r.json()
if content_type is None:
content_type = mimetypes.guess_type(file_path)[0] or 'application/octet-stream'
resource = dict(name=name,
mimetype=content_type,
hash=md5,
size=st.st_size,
url=server_url)
for k,v in kwargs.items():
if v is not None:
resource[k] = v
pe2['resources'].append(resource)
r = requests.put(package_url,
headers = self.auth_headers,
data=json.dumps(pe2))
r.raise_for_status()
return r.json()
def add_url_resource(self, pe, url, name, **kwargs):
import os
import mimetypes
r = requests.head(url)
size = r.headers.get('content-length',None)
content_type = r.headers.get('content-type',None)
# Fetch the pe again, in case the one passed in was incomplete.
package_url = self.url+'/rest/package/{id}'.format(id=pe['id'])
r = requests.get(package_url,headers = self.auth_headers)
r.raise_for_status()
pe2 = r.json()
resource = dict(name=name,
mimetype=content_type,
size=size,
url=url)
for k,v in kwargs.items():
if v is not None:
resource[k] = v
pe2['resources'].append(resource)
r = requests.put(package_url,
headers = self.auth_headers,
data=json.dumps(pe2))
r.raise_for_status()
return r.json()
def submit_bundle(self):
pass
def submit_partition(self, bunde_ref):
pass
def _encode_multipart_formdata(self, fields, files):
'''Encode fields and files to be posted as multipart/form-data.
Taken from
http://code.activestate.com/recipes/146306-http-client-to-post-using-multipartform-data/
:param fields: a sequence of (name, value) tuples for the regular
form fields to be encoded
:param files: a sequence of (name, filename, value) tuples for the data
to be uploaded as files
:returns: (content_type, body) ready for httplib.HTTP instance
'''
import mimetypes
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
content_type = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
L.append('Content-Type: %s' % content_type)
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
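# Illustrative sketch (not part of the original module): the multipart body layout
# produced by _encode_multipart_formdata for one form field and one file. The URL,
# key and file contents below are placeholders; no network request is made.
def _multipart_formdata_example():
    client = Ckan('http://ckan.example.org/api/2', 'not-a-real-key')
    content_type, body = client._encode_multipart_formdata(
        fields=[('key', '2013-01-01T000000/report.csv')],
        files=[('file', 'report.csv', 'a,b\r\n1,2\r\n')])
    assert content_type.startswith('multipart/form-data; boundary=')
    assert 'Content-Disposition: form-data; name="key"' in body
    assert 'filename="report.csv"' in body
    return content_type, body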
|
{
"content_hash": "b7dd300d778495d6ef6211f61277b273",
"timestamp": "",
"source": "github",
"line_count": 452,
"max_line_length": 98,
"avg_line_length": 31.758849557522122,
"alnum_prop": 0.5118773946360153,
"repo_name": "treyhunner/databundles",
"id": "71a3bf393a45c4e0693cdd0b8481a72a191d3c92",
"size": "14355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "databundles/client/ckan.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "631408"
},
{
"name": "Racket",
"bytes": "295"
},
{
"name": "Shell",
"bytes": "8872"
}
],
"symlink_target": ""
}
|
"""Some tools for running tests based on MongoDB server version."""
class Version(tuple):
def __new__(cls, *version):
padded_version = cls._padded(version, 4)
return super(Version, cls).__new__(cls, tuple(padded_version))
@classmethod
def _padded(cls, iter, length, padding=0):
l = list(iter)
if len(l) < length:
for _ in range(length - len(l)):
l.append(padding)
return l
@classmethod
def from_string(cls, version_string):
mod = 0
bump_patch_level = False
if version_string.endswith("+"):
version_string = version_string[0:-1]
mod = 1
elif version_string.endswith("-pre-"):
version_string = version_string[0:-5]
mod = -1
elif version_string.endswith("-"):
version_string = version_string[0:-1]
mod = -1
# Deal with '-rcX' substrings
if '-rc' in version_string:
version_string = version_string[0:version_string.find('-rc')]
mod = -1
# Deal with git describe generated substrings
elif '-' in version_string:
version_string = version_string[0:version_string.find('-')]
mod = -1
bump_patch_level = True
version = [int(part) for part in version_string.split(".")]
version = cls._padded(version, 3)
# Make from_string and from_version_array agree. For example:
# MongoDB Enterprise > db.runCommand('buildInfo').versionArray
# [ 3, 2, 1, -100 ]
# MongoDB Enterprise > db.runCommand('buildInfo').version
# 3.2.0-97-g1ef94fe
if bump_patch_level:
version[-1] += 1
version.append(mod)
return Version(*version)
@classmethod
def from_version_array(cls, version_array):
version = list(version_array)
if version[-1] < 0:
version[-1] = -1
version = cls._padded(version, 3)
return Version(*version)
def at_least(self, *other_version):
return self >= Version(*other_version)
def __str__(self):
return ".".join(map(str, self))
|
{
"content_hash": "067974b9926c8e0220b87603e6dedfe5",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 73,
"avg_line_length": 32.53731343283582,
"alnum_prop": 0.5573394495412844,
"repo_name": "ZeoAlliance/aiomongo",
"id": "ca081909f748c2c644cfae525c59bf5241bcef8c",
"size": "2759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "381630"
}
],
"symlink_target": ""
}
|
"""
Exceptions module.
"""
from __future__ import unicode_literals
class EventError(Exception):
"""Using to notify subscribed clients about event failure."""
pass
|
{
"content_hash": "16d888e72df665510454961623d2ce8e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 65,
"avg_line_length": 15.818181818181818,
"alnum_prop": 0.7011494252873564,
"repo_name": "ailove-dev/django-event",
"id": "3cef7263b35497b91aa845a2b987b6bb152afe20",
"size": "199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_event/publisher/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8367"
},
{
"name": "Python",
"bytes": "93832"
}
],
"symlink_target": ""
}
|
"""Base class for testing serializable datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.data.experimental.ops import iterator_ops as contrib_iterator_ops
from tensorflow.python.data.experimental.ops.optimization_options import OptimizationOptions
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util import nest
def remove_variants(get_next_op):
# TODO(b/72408568): Remove this once session.run can get
# variant tensors.
"""Remove variants from a nest structure, so sess.run will execute."""
def _remove_variant(x):
if isinstance(x, ops.Tensor) and x.dtype == dtypes.variant:
return ()
else:
return x
return nest.map_structure(_remove_variant, get_next_op)
class DatasetSerializationTestBase(test.TestCase):
"""Base class for testing serializable datasets."""
def tearDown(self):
self._delete_ckpt()
# TODO(b/72657739): Remove sparse_tensor argument, which is to test the
# (deprecated) saveable `SparseTensorSliceDataset`, once the API
# `from_sparse_tensor_slices()`and related tests are deleted.
def run_core_tests(self, ds_fn1, ds_fn2, num_outputs, sparse_tensors=False):
"""Runs the core tests.
Args:
ds_fn1: 0-argument function that returns a Dataset.
ds_fn2: 0-argument function that returns a Dataset different from
ds_fn1. If None, verify_restore_in_modified_graph test is not run.
num_outputs: Total number of outputs expected from this Dataset.
sparse_tensors: Whether dataset is built from SparseTensor(s).
Raises:
AssertionError if any test fails.
"""
# NOTE: We disable all default optimizations in serialization tests in order
# to test the actual dataset in question.
options = dataset_ops.Options()
options.experimental_optimization = OptimizationOptions()
options.experimental_optimization.apply_default_optimizations = False
def ds_fn1_no_opt():
return ds_fn1().with_options(options)
self.verify_unused_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_fully_used_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_exhausted_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_init_before_restore(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_multiple_breaks(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_reset_restored_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_restore_in_empty_graph(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
if ds_fn2:
def ds_fn2_no_opt():
return ds_fn2().with_options(options)
self.verify_restore_in_modified_graph(
ds_fn1_no_opt,
ds_fn2_no_opt,
num_outputs,
sparse_tensors=sparse_tensors)
def verify_unused_iterator(self,
ds_fn,
num_outputs,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that saving and restoring an unused iterator works.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn, [0],
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_fully_used_iterator(self, ds_fn, num_outputs,
sparse_tensors=False):
"""Verifies that saving and restoring a fully used iterator works.
Note that this only checks saving and restoring an iterator from which
`num_outputs` items have been produced but does not check for an
exhausted iterator, i.e., one from which an OutOfRange error has been
returned.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if test fails.
"""
self.verify_run_with_breaks(
ds_fn, [num_outputs], num_outputs, sparse_tensors=sparse_tensors)
def verify_exhausted_iterator(self, ds_fn, num_outputs, sparse_tensors=False):
"""Verifies that saving and restoring an exhausted iterator works.
An exhausted iterator is one which has returned an OutOfRange error.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if any test fails.
"""
self.gen_outputs(
ds_fn, [],
num_outputs,
verify_exhausted=True,
sparse_tensors=sparse_tensors)
actual = self.gen_outputs(
ds_fn, [],
0,
ckpt_saved=True,
verify_exhausted=True,
sparse_tensors=sparse_tensors)
self.assertEqual(len(actual), 0)
def verify_init_before_restore(self,
ds_fn,
num_outputs,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that restoring into an already initialized iterator works.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn,
self.gen_break_points(num_outputs),
num_outputs,
init_before_restore=True,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_multiple_breaks(self,
ds_fn,
num_outputs,
num_breaks=10,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to save/restore at multiple break points.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
num_breaks: The number of break points. These are uniformly spread in
[0, num_outputs] both inclusive.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn,
self.gen_break_points(num_outputs, num_breaks),
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_reset_restored_iterator(self,
ds_fn,
num_outputs,
break_point=None,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to re-initialize a restored iterator.
This is useful when restoring a training checkpoint during validation.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
# Collect ground truth containing all outputs.
expected = self.gen_outputs(
ds_fn, [],
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
# Skip some items and save checkpoint.
self.gen_outputs(
ds_fn, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
actual = []
# Restore from checkpoint and then run init_op.
with ops.Graph().as_default() as g:
saver = self._import_meta_graph()
init_op, get_next_op = self._get_iterator_ops_from_collection(
ds_fn, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._restore(saver, sess)
self._initialize(init_op, sess)
for _ in range(num_outputs):
actual.append(sess.run(get_next_op))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
self.match(expected, actual)
def verify_restore_in_modified_graph(self,
ds_fn1,
ds_fn2,
num_outputs,
break_point=None,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to restore an iterator in a modified graph.
Builds an input pipeline using ds_fn1, runs it for `break_point` steps
and saves a checkpoint. Then builds a new graph using ds_fn2, restores
the checkpoint from ds_fn1 and verifies that the restore is successful.
Args:
ds_fn1: See `run_core_tests`.
ds_fn2: See `run_core_tests`.
num_outputs: See `run_core_tests`.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
# Skip `break_point` items and store the remaining produced from ds_fn1
# in `expected`.
self.gen_outputs(
ds_fn1, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
expected = self.gen_outputs(
ds_fn1, [],
num_outputs - break_point,
ckpt_saved=True,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
# Generate `break_point` items from ds_fn1 and save checkpoint.
self.gen_outputs(
ds_fn1, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
actual = []
# Build graph for ds_fn2 but load checkpoint for ds_fn1.
with ops.Graph().as_default() as g:
_, get_next_op, saver = self._build_graph(
ds_fn2, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._restore(saver, sess)
for _ in range(num_outputs - break_point):
actual.append(sess.run(get_next_op))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
self.match(expected, actual)
def verify_restore_in_empty_graph(self,
ds_fn,
num_outputs,
break_point=None,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to restore an iterator in an empty graph.
Builds an input pipeline using ds_fn, runs it for `break_point` steps
and saves a checkpoint. Then builds a new empty graph, restores
the checkpoint from ds_fn and verifies that the restore is successful.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
# Skip `break_point` items and store the remaining produced from ds_fn
# in `expected`.
self.gen_outputs(
ds_fn, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
expected = self.gen_outputs(
ds_fn, [],
num_outputs - break_point,
ckpt_saved=True,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
# Generate `break_point` items from ds_fn and save checkpoint.
self.gen_outputs(
ds_fn, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
actual = []
# Build an empty graph but load checkpoint for ds_fn.
with ops.Graph().as_default() as g:
get_next_op, saver = self._build_empty_graph(
ds_fn, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._restore(saver, sess)
for _ in range(num_outputs - break_point):
actual.append(sess.run(get_next_op))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
self.match(expected, actual)
def verify_error_on_save(self,
ds_fn,
num_outputs,
error,
break_point=None,
sparse_tensors=False):
"""Attempts to save a non-saveable iterator.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
error: Declared error when trying to save iterator.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
with ops.Graph().as_default() as g:
init_op, get_next_op, saver = self._build_graph(
ds_fn, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._initialize(init_op, sess)
for _ in range(break_point):
sess.run(get_next_op)
with self.assertRaises(error):
self._save(sess, saver)
def verify_run_with_breaks(self,
ds_fn,
break_points,
num_outputs,
init_before_restore=False,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that ds_fn() produces the same outputs with and without breaks.
1. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
*without* stopping at break points.
2. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
with stopping at break points.
Deep matches outputs from 1 and 2.
Args:
ds_fn: See `gen_outputs`.
break_points: See `gen_outputs`.
num_outputs: See `gen_outputs`.
init_before_restore: See `gen_outputs`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
expected = self.gen_outputs(
ds_fn, [],
num_outputs,
init_before_restore=init_before_restore,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
actual = self.gen_outputs(
ds_fn,
break_points,
num_outputs,
init_before_restore=init_before_restore,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
self.match(expected, actual)
def gen_outputs(self,
ds_fn,
break_points,
num_outputs,
ckpt_saved=False,
init_before_restore=False,
sparse_tensors=False,
verify_exhausted=True,
save_checkpoint_at_end=True):
"""Generates elements from input dataset while stopping at break points.
Produces `num_outputs` outputs and saves the state of the iterator in the
Saver checkpoint.
Args:
ds_fn: 0-argument function that returns the dataset.
break_points: A list of integers. For each `break_point` in
`break_points`, we produce outputs till `break_point` number of items
have been produced and then checkpoint the state. The current graph
and session are destroyed and a new graph and session are used to
produce outputs till next checkpoint or till `num_outputs` elements
have been produced. `break_point` must be <= `num_outputs`.
num_outputs: The total number of outputs to produce from the iterator.
ckpt_saved: Whether a checkpoint already exists. If False, we build the
graph from ds_fn.
init_before_restore: Whether init should be called before saver.restore.
This is just so that we can verify that restoring an already initialized
iterator works.
sparse_tensors: Whether dataset is built from SparseTensor(s).
verify_exhausted: Whether to verify that the iterator has been exhausted
after producing `num_outputs` elements.
save_checkpoint_at_end: Whether to save a checkpoint after producing all
outputs. If False, checkpoints are saved each break point but not at the
end. Note that checkpoints overwrite each other so there is always only
a single checkpoint available. Defaults to True.
Returns:
A list of `num_outputs` items.
"""
outputs = []
def get_ops():
if ckpt_saved:
saver = self._import_meta_graph()
init_op, get_next_op = self._get_iterator_ops_from_collection(
ds_fn, sparse_tensors=sparse_tensors)
else:
init_op, get_next_op, saver = self._build_graph(
ds_fn, sparse_tensors=sparse_tensors)
return init_op, get_next_op, saver
for i in range(len(break_points) + 1):
with ops.Graph().as_default() as g:
init_op, get_next_op, saver = get_ops()
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
if ckpt_saved:
if init_before_restore:
self._initialize(init_op, sess)
self._restore(saver, sess)
else:
self._initialize(init_op, sess)
start = break_points[i - 1] if i > 0 else 0
end = break_points[i] if i < len(break_points) else num_outputs
num_iters = end - start
for _ in range(num_iters):
outputs.append(sess.run(get_next_op))
if i == len(break_points) and verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
if save_checkpoint_at_end or i < len(break_points):
self._save(sess, saver)
ckpt_saved = True
return outputs
def match(self, expected, actual):
"""Matches nested structures.
Recursively matches shape and values of `expected` and `actual`.
Handles scalars, numpy arrays and other python sequence containers
e.g. list, dict.
Args:
expected: Nested structure 1.
actual: Nested structure 2.
Raises:
AssertionError if matching fails.
"""
if isinstance(expected, np.ndarray):
expected = expected.tolist()
if isinstance(actual, np.ndarray):
actual = actual.tolist()
self.assertEqual(type(expected), type(actual))
if nest.is_sequence(expected):
self.assertEqual(len(expected), len(actual))
if isinstance(expected, dict):
for key1, key2 in zip(sorted(expected), sorted(actual)):
self.assertEqual(key1, key2)
self.match(expected[key1], actual[key2])
else:
for item1, item2 in zip(expected, actual):
self.match(item1, item2)
else:
self.assertEqual(expected, actual)
def does_not_match(self, expected, actual):
with self.assertRaises(AssertionError):
self.match(expected, actual)
def gen_break_points(self, num_outputs, num_samples=10):
"""Generates `num_samples` breaks points in [0, num_outputs]."""
return np.linspace(0, num_outputs, num_samples, dtype=int)
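# For example (hypothetical call): gen_break_points(20, 5) spaces the save/restore
# points evenly and returns array([ 0,  5, 10, 15, 20]).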
def _build_graph(self, ds_fn, sparse_tensors=False):
iterator = dataset_ops.make_initializable_iterator(ds_fn())
saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
init_op = iterator.initializer
if sparse_tensors:
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
else:
get_next = iterator.get_next()
self._add_iterator_ops_to_collection(init_op, get_next, ds_fn,
sparse_tensors)
saver = saver_lib.Saver(allow_empty=True)
return init_op, get_next, saver
def _build_empty_graph(self, ds_fn, sparse_tensors=False):
iterator = iterator_ops.Iterator.from_structure(
self._get_output_types(ds_fn),
output_shapes=self._get_output_shapes(ds_fn),
output_classes=self._get_output_classes(ds_fn))
saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
if sparse_tensors:
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
else:
get_next = iterator.get_next()
saver = saver_lib.Saver(allow_empty=True)
return get_next, saver
def _add_iterator_ops_to_collection(self,
init_op,
get_next,
ds_fn,
sparse_tensors=False):
ops.add_to_collection("iterator_ops", init_op)
# `get_next` may be a tuple e.g. in TensorSliceDataset. Since Collections
# do not support tuples we flatten the tensors and restore the shape in
# `_get_iterator_ops_from_collection`.
if sparse_tensors: # specific for deprecated `from_sparse_tensor_slices`.
ops.add_to_collection("iterator_ops", get_next.indices)
ops.add_to_collection("iterator_ops", get_next.values)
ops.add_to_collection("iterator_ops", get_next.dense_shape)
return
get_next_list = nest.flatten(get_next)
for i, output_class in enumerate(
nest.flatten(self._get_output_classes(ds_fn))):
if output_class is sparse_tensor.SparseTensor:
ops.add_to_collection("iterator_ops", get_next_list[i].indices)
ops.add_to_collection("iterator_ops", get_next_list[i].values)
ops.add_to_collection("iterator_ops", get_next_list[i].dense_shape)
else:
ops.add_to_collection("iterator_ops", get_next_list[i])
def _get_iterator_ops_from_collection(self, ds_fn, sparse_tensors=False):
all_ops = ops.get_collection("iterator_ops")
if sparse_tensors: # specific for deprecated `from_sparse_tensor_slices`.
init_op, indices, values, dense_shape = all_ops
return init_op, sparse_tensor.SparseTensor(indices, values, dense_shape)
get_next_list = []
i = 1
for output_class in nest.flatten(self._get_output_classes(ds_fn)):
if output_class is sparse_tensor.SparseTensor:
indices, values, dense_shape = all_ops[i:i + 3]
i += 3
get_next_list.append(
sparse_tensor.SparseTensor(indices, values, dense_shape))
else:
get_next_list.append(all_ops[i])
i += 1
return all_ops[0], nest.pack_sequence_as(
self._get_output_types(ds_fn), get_next_list)
def _get_output_types(self, ds_fn):
with ops.Graph().as_default():
return ds_fn().output_types
def _get_output_shapes(self, ds_fn):
with ops.Graph().as_default():
return ds_fn().output_shapes
def _get_output_classes(self, ds_fn):
with ops.Graph().as_default():
return ds_fn().output_classes
def _ckpt_path(self):
return os.path.join(self.get_temp_dir(), "iterator")
def _latest_ckpt(self):
return checkpoint_management.latest_checkpoint(self.get_temp_dir())
def _save(self, sess, saver):
saver.save(sess, self._ckpt_path())
def _restore(self, saver, sess):
sess.run(lookup_ops.tables_initializer())
saver.restore(sess, self._latest_ckpt())
def _initialize(self, init_op, sess):
sess.run(variables.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
sess.run(init_op)
def _import_meta_graph(self):
meta_file_path = self._ckpt_path() + ".meta"
return saver_lib.import_meta_graph(meta_file_path)
def _delete_ckpt(self):
# Remove all checkpoint files.
prefix = self._ckpt_path()
pattern = prefix + "*"
files = gfile.Glob(pattern)
for f in files:
  gfile.Remove(f)  # remove eagerly; a bare map() would be lazy (a no-op) under Python 3
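# Illustrative sketch (not part of the original file): how a concrete test case might
# plug a dataset factory into run_core_tests. The range dataset and output count are
# hypothetical stand-ins; passing None for ds_fn2 skips the modified-graph check.
class _RangeDatasetSerializationExample(DatasetSerializationTestBase):

  def _build_ds(self, stop):
    return dataset_ops.Dataset.range(stop)

  def testCore(self):
    num_outputs = 10
    self.run_core_tests(
        lambda: self._build_ds(num_outputs),  # ds_fn1: the pipeline under test
        None,                                 # ds_fn2=None: skip restore-in-modified-graph
        num_outputs)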
|
{
"content_hash": "c40d8a6a43c7ae71fd984ec7276c7628",
"timestamp": "",
"source": "github",
"line_count": 696,
"max_line_length": 92,
"avg_line_length": 36.57183908045977,
"alnum_prop": 0.6215133181425316,
"repo_name": "asimshankar/tensorflow",
"id": "bdbd8702b7f8d315a730c5cd2b000218ea5e19be",
"size": "26143",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/experimental/kernel_tests/serialization/dataset_serialization_test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "490070"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "52677142"
},
{
"name": "CMake",
"bytes": "207176"
},
{
"name": "Dockerfile",
"bytes": "39454"
},
{
"name": "Go",
"bytes": "1290930"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "890529"
},
{
"name": "Jupyter Notebook",
"bytes": "2618412"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "68402"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102518"
},
{
"name": "PHP",
"bytes": "5172"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "43038983"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "497659"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
"""
Tools to Unify Image Data Against Other BioVida APIs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Note: To use these tools, see ``biovida.unify_domains.unify_against_images()``
import numpy as np
import pandas as pd
from copy import deepcopy
from collections import defaultdict
# Import Interfaces
from biovida.genomics.disgenet_interface import DisgenetInterface
from biovida.diagnostics.disease_ont_interface import DiseaseOntInterface
from biovida.diagnostics.disease_symptoms_interface import DiseaseSymptomsInterface
# Support Tools
from biovida.support_tools.support_tools import tqdm, is_int, items_null
# Image Tools
from biovida.images._image_tools import try_fuzzywuzzy_import
# Open-i Specific Tools
from biovida.images._interface_support.openi.openi_support_tools import \
possible_openi_image_processing_cols
# ----------------------------------------------------------------------------------------------------------
# Interface Integration
# ----------------------------------------------------------------------------------------------------------
class _ImagesInterfaceIntegration(object):
"""
Tools to Unify BioVida Image Interfaces
"""
def __init__(self):
self._additional_columns = list()
def _add_additional_columns(self, db, list_of_columns, skip=None):
"""
Add ``self._additional_columns`` to ``db`` and ``list_of_columns``
:param list_of_columns: as present in ``_open_i_prep()`` or ``_cancer_image_prep()``.
:type list_of_columns: ``list``
:param db: as passed to ``_open_i_prep()`` or ``_cancer_image_prep()``.
:type db: ``Pandas DataFrame``
:param skip: items in ``self._additional_columns`` to refrain from adding to ``db``.
:type skip: ``list`` or ``None``
:return: see description.
:rtype: ``tuple``
"""
for c in self._additional_columns:
if skip is None or c not in skip:
list_of_columns.append(c)
if c not in db.columns:
db[c] = [None] * db.shape[0]
return db, list_of_columns
def _open_i_prep(self, db):
"""
A tool to clean and standardize a database from an ``OpeniInterface`` instance.
:param db: a database from an ``OpeniInterface`` instance.
:type db: ``Pandas DataFrame``
:return: a cleaned and standardized ``db``.
:rtype: ``Pandas DataFrame``
"""
# Deep copy the input to prevent mutating the original in memory.
db_cln = db.copy(deep=True)
# Column which provides a guess, based on the text, on which imaging modality created the image.
db_cln['modality_best_guess'] = db_cln.apply(
    lambda x: x['imaging_modality_from_text']
    if isinstance(x['imaging_modality_from_text'], str)
    else x['modality_full'],
    axis=1)
# Define columns to keep
openi_columns = ['abstract', 'article_type', 'image_id_short', 'image_caption',
'modality_best_guess', 'age', 'sex', 'diagnosis', 'query', 'pull_time']
openi_col_rename = {'diagnosis': 'disease'}
# Allow for cases where images have not been downloaded.
if 'cached_images_path' in db_cln.columns:
openi_columns.append('cached_images_path')
db_cln['cached_images_path'] = db_cln['cached_images_path'].map(
lambda x: tuple([x]) if not isinstance(x, tuple) else x, na_action='ignore')
if len(self._additional_columns):
db_cln, openi_columns = self._add_additional_columns(db_cln, openi_columns)
db_cln['article_type'] = db_cln['article_type'].replace({'encounter': 'case_report',
'image_id_short': 'image_id'})
# Define subsection based on `openi_columns`
openi_subsection = db_cln[openi_columns]
openi_subsection['source_api'] = ['openi'] * openi_subsection.shape[0]
return openi_subsection.rename(columns=openi_col_rename)
def _image_processing_prep(self, db):
"""
A tool to clean and standardize a database from an ``OpeniImageProcessing`` instance.
:param db: a database from an ``OpeniImageProcessing`` instance.
:type db: ``Pandas DataFrame``
:return: a cleaned and standardized ``db``.
:rtype: ``Pandas DataFrame``
"""
# Note: if the ``OpeniImageProcessing`` class is updated to handle
# instances other than ``OpeniInterface``, this approach will need to be changed.
return self._open_i_prep(db)
def _cancer_image_prep(self, db):
"""
A tool to clean and standardize a database from a ``CancerImageInterface`` instance.
:param db: a database from a ``CancerImageInterface`` instance.
:type db: ``Pandas DataFrame``
:return: a cleaned and standardized ``db``.
:rtype: ``Pandas DataFrame``
"""
# Define columns to keep
cancer_image_columns = ['series_instance_uid', 'series_description', 'modality_full', 'age',
'sex', 'article_type', 'cancer_type', 'query', 'pull_time']
# Column name changes (based on ``_open_i_prep()``).
cancer_image_col_rename = {'series_instance_uid': 'image_id',
'series_description': 'image_caption',
'cached_dicom_images_path': 'raw',
'modality_full': 'modality_best_guess',
'cancer_type': 'disease'}
# Allow for cases where images have not been downloaded,
# i.e., absent 'cached_images_path' and 'cached_dicom_images_path' columns.
if 'cached_images_path' in db.columns:
cancer_image_columns.append('cached_images_path')
if 'cached_dicom_images_path' in db.columns:
cancer_image_columns.append('cached_dicom_images_path')
cancer_image_col_rename['cached_dicom_images_path'] = 'source_images_path'
additional_columns_skip = 'source_images_path'
else:
additional_columns_skip = None
# Deep copy the input to prevent mutating the original in memory.
db_cln = db.copy(deep=True)
if len(self._additional_columns):
db_cln, cancer_image_columns = self._add_additional_columns(db=db_cln,
list_of_columns=cancer_image_columns,
skip=additional_columns_skip)
# Define subsection based on `cancer_image_columns`
cancer_image_subsection = db_cln[cancer_image_columns]
cancer_image_subsection['abstract'] = np.NaN
cancer_image_subsection['source_api'] = ['tcia'] * cancer_image_subsection.shape[0]
return cancer_image_subsection.rename(columns=cancer_image_col_rename)
@property
def _prep_class_dict(self):
"""
Return a dictionary which maps image interface classes to
the methods designed to handle them.
:return: a dictionary mapping class names to functions.
:rtype: ``dict``
"""
return {'OpeniInterface': self._open_i_prep,
'CancerImageInterface': self._cancer_image_prep,
'OpeniImageProcessing': self._image_processing_prep}
def integration(self, instances, db_to_extract):
"""
Standardize instances.
This method yields a single dataframe with the following columns:
- 'abstract'*
- 'image_id_short'
- 'image_caption'
- 'modality_best_guess'
- 'age'
- 'sex'
- 'disease'
- 'query'
- 'pull_time'
- 'harvest_success'
- 'files_path'
- 'source_api'
*NOTE: this column will be dropped after passing through ``_DiseaseSymptomsIntegration().integration()``.
:param instances: any one of ``OpeniInterface``, ``CancerImageInterface`` or ``OpeniImageProcessing``, or some
combination inside an iterable.
:type instances: ``list``, ``tuple``, ``OpeniInterface``, ``CancerImageInterface`` or ``OpeniImageProcessing``.
:param db_to_extract: the database to use. Must be one of: 'records_db', 'cache_records_db'.
:type db_to_extract: ``str``
        :return: the standardized instances combined into a single dataframe.
:rtype: ``Pandas DataFrame``
"""
instances_types = [type(i).__name__ for i in instances]
if 'OpeniImageProcessing' in instances_types:
self._additional_columns += possible_openi_image_processing_cols
if 'CancerImageInterface' in instances_types:
self._additional_columns += ['source_images_path']
frames = list()
for class_instance in instances:
interface_name = type(class_instance).__name__
func = self._prep_class_dict[interface_name]
if interface_name == 'OpeniImageProcessing':
database = getattr(class_instance, "image_dataframe")
else:
database = getattr(class_instance, db_to_extract)
if not isinstance(database, pd.DataFrame):
raise ValueError(
"The {0} instance's '{1}' database must be of type DataFrame,\nnot "
"'{2}'.".format(interface_name, db_to_extract, type(database).__name__))
frames.append(func(database))
self._additional_columns = list() # reset
combined_df = pd.concat(frames, ignore_index=True)
combined_df['disease'] = combined_df['disease'].map(
lambda x: x.lower() if isinstance(x, str) else x)
return combined_df.fillna(np.NaN)
# ----------------------------------------------------------------------------------------------------------
# Disease Ontology Integration
# ----------------------------------------------------------------------------------------------------------
class _DiseaseOntologyIntegration(object):
"""
Integration of Disease Ontology data.
    :param cache_path: location of the BioVida cache. If one does not exist in this location, one will be created.
                       Defaults to ``None`` (which will generate a cache in the home folder).
:type cache_path: ``str`` or ``None``
:param verbose: If ``True``, print notice when downloading database. Defaults to ``True``.
:type verbose: ``bool``
"""
@staticmethod
def _dis_ont_dict_gen(ontology_df):
"""
Convert the information obtained from ``DiseaseOntInterface().pull()`` into:
- a nested dictionary with ``ontology_df``'s 'name' column as the outer key (``ont_name_dict``).
Form: ``{'name': {'disease_family' ('is_a'): tuple or None,
'disease_synonym': tuple or None,
                          'disease_definition' ('def'): str or None},
...}``
- the keys of the nested dictionaries in ``ont_name_dict``
- a dictionary with 'disease_synonym' as keys and related names in a list:
Form ``{'disease_synonym': ['name', 'name', ...], ...}``
:param ontology_df: yield of ``DiseaseOntInterface().pull()``
:type ontology_df: ``Pandas DataFrame``
:return: see method description.
:rtype: ``tuple``
"""
ont_name_dict, ont_disease_synonym_dict = dict(), dict()
ont_name_dict_nest_keys = ('disease_family', 'disease_synonym', 'disease_definition')
def str_split(s, split_on='; '):
return tuple(s.split(split_on)) if isinstance(s, str) else s
# ToDo: change to iterrows().
for name, is_a, disease_synonym, defn in zip(
*[ontology_df[c] for c in ('name', 'is_a', 'synonym', 'def')]):
disease_synonym_split = str_split(disease_synonym)
if not items_null(name):
# Update `ont_name_dict`
ont_name_dict[name] = {'disease_family': str_split(is_a),
'disease_synonym': disease_synonym_split,
'disease_definition': defn}
# Update `ont_disease_synonym_dict`
if isinstance(disease_synonym_split, tuple):
for s in disease_synonym_split:
if s not in ont_disease_synonym_dict:
ont_disease_synonym_dict[s] = [name]
# Check a duplicate is not added under a given disease_synonym
elif name not in ont_disease_synonym_dict[s]:
ont_disease_synonym_dict[s] += [name]
return ont_name_dict, ont_name_dict_nest_keys, {k: sorted(v) for k, v in
ont_disease_synonym_dict.items()}
def __init__(self, cache_path=None, verbose=True):
self.verbose = verbose
# Load the database
ontology_df = DiseaseOntInterface(cache_path=cache_path, verbose=verbose).pull()
# Obtain dictionaries
self.ont_name_dict, self.ont_name_dict_nest_keys, self.ont_disease_synonym_dict = self._dis_ont_dict_gen(
ontology_df)
# Convert `ont_name_dict_nest_keys` to an empty dict.
self.empty_nest_dict = dict.fromkeys(self.ont_name_dict_nest_keys, np.NaN)
# Extract keys from the two dictionaries passed
self.ont_name_dict_keys = tuple(self.ont_name_dict.keys())
self.ont_disease_synonym_dict_keys = tuple(self.ont_disease_synonym_dict.keys())
def _disease_synonym_match(self, disease_synonym):
"""
Maps a disease synonym to an *actual* disease name.
        The 'disease_synonym' key of the dictionary which is returned will have `disease_synonym` removed
        and will have the disease names mapped to `disease_synonym` installed in its place.
Put another way, `ont_name_dict` gives the formal name. If we have a disease which is not in
this dictionary, we may find it in a list of synonyms associated with that disease.
:param disease_synonym: a disease synonym.
        :type disease_synonym: ``str``
        :return: data on the disease for which the input `disease_synonym` is a synonym.
:rtype: ``dict``
"""
# Mapping from disease_synonym to related diseases
ont_dis_names = self.ont_disease_synonym_dict[disease_synonym]
# Simply use the first disease name related to the disease_synonym.
        # Note: this *assumes* that it does not matter which 'name' is chosen from the list.
# If the disease ontology database is not consistent, this assumption is invalid.
disease_info = deepcopy(self.ont_name_dict[ont_dis_names[0]])
# Remove the synonym from the 'disease_synonym' key and add 'ont_dis_names'
if isinstance(disease_info['disease_synonym'], tuple):
disease_synonym_new = [i for i in disease_info['disease_synonym'] if
i != disease_synonym] + ont_dis_names
else:
            disease_synonym_new = list(ont_dis_names)
# Add to `disease_info` (and remove any possible duplicates)
disease_info['disease_synonym'] = tuple(sorted(set(disease_synonym_new)))
return disease_info
def _find_disease_info_raw(self, disease):
"""
Try to match the input (`disease`) to information in the Disease Ontology Database.
:param disease: a disease name.
        :type disease: ``str``
:return: information on the disease (see ``_dis_ont_dict_gen()``).
:rtype: ``dict`` or ``None``
"""
if not isinstance(disease, str):
return None
elif disease in self.ont_name_dict:
return self.ont_name_dict[disease]
elif disease in self.ont_disease_synonym_dict:
return self._disease_synonym_match(disease_synonym=disease)
else:
return None
def _find_disease_info(self, disease, fuzzy_threshold):
"""
Look up the family, synonyms and definition for a given ``disease``.
:param disease: a disease name.
        :type disease: ``str``
:param fuzzy_threshold: an integer on ``(0, 100]``.
        :type fuzzy_threshold: ``int``, ``bool``, ``None``
:return: disease information dictionary.
:rtype: ``dict``
"""
        # ToDo: add memoizing of fuzzy matches
# Try matching the string raw (i.e., 'as is').
raw_rslt = self._find_disease_info_raw(disease)
if isinstance(raw_rslt, dict):
return raw_rslt
if is_int(fuzzy_threshold):
process = try_fuzzywuzzy_import()
# Eject if fuzzy matching is disabled
if not is_int(fuzzy_threshold):
return self.empty_nest_dict
# Try using `ont_name_dict`
name_fuzzy_match, threshold = process.extractOne(disease, self.ont_name_dict_keys)
if threshold >= fuzzy_threshold:
return self.ont_name_dict[name_fuzzy_match]
# Try using `ont_disease_synonym_dict`
disease_synonym_fuzzy_match, threshold = process.extractOne(disease,
self.ont_disease_synonym_dict_keys)
if threshold >= fuzzy_threshold:
return self._disease_synonym_match(disease_synonym_fuzzy_match)
else:
return self.empty_nest_dict
def integration(self, data_frame, fuzzy_threshold=False):
"""
        Add the 'disease_family', 'disease_synonym' and 'disease_definition' columns to ``data_frame``
using Disease Ontology data.
:param data_frame: a dataframe which has been passed through ``_ImagesInterfaceIntegration().integration()``
:type data_frame: ``Pandas DataFrame``
:param fuzzy_threshold: an integer on ``(0, 100]``.
        :type fuzzy_threshold: ``int``, ``bool``, ``None``
:return: ``data_frame`` with the columns enumerated in the description.
:rtype: ``Pandas DataFrame``
"""
if fuzzy_threshold is True:
raise ValueError(
"`fuzzy_threshold` cannot be `True`. Please provide a specific integer on ``(0, 100]``.")
# Extract disease information using the Disease Ontology database
disease_ontology_data = [self._find_disease_info(i, fuzzy_threshold)
for i in tqdm(data_frame['disease'], desc='Disease Data',
disable=not self.verbose)]
# Convert `disease_ontology_data` to a dataframe
disease_ontology_addition = pd.DataFrame(disease_ontology_data)
# Add the columns in `disease_ontology_addition` to `data_frame`.
for c in self.ont_name_dict_nest_keys:
data_frame[c] = disease_ontology_addition[c]
return data_frame
# ----------------------------------------------------------------------------------------------------------
# Tools to Add Data by Matching Against Diseases and Disease Synonyms.
# ----------------------------------------------------------------------------------------------------------
def _disease_synonym_match_battery(disease, disease_synonyms, resource_dict, fuzzy_threshold):
"""
Try to match ``disease`` and ``disease_synonyms`` in ``resource_dict``
and return the corresponding nested dictionary (i.e., value).
:param disease: a disease name.
    :type disease: ``str``
:param disease_synonyms: synonyms for ``disease``
:type disease_synonyms: ``tuple``
:param resource_dict: a nested dictionary (see: ``_DiseaseOntologyIntegration()._dis_ont_dict_gen()``).
:type resource_dict: ``dict``
:param fuzzy_threshold: an integer on ``(0, 100]``.
    :type fuzzy_threshold: ``int``, ``bool``, ``None``
:return: the nested dictionary for a given key.
:rtype: ``dict`` or ``None``
"""
# Extract the keys
lookup_dict_keys = tuple(resource_dict.keys())
# Import process
if is_int(fuzzy_threshold):
process = try_fuzzywuzzy_import()
# Try disease 'as is'
if disease in resource_dict:
return resource_dict[disease]
# Search through disease_synonyms
if isinstance(disease_synonyms, tuple):
for s in disease_synonyms:
if s in resource_dict:
return resource_dict[s]
# Eject if fuzzy matching is disabled
if not is_int(fuzzy_threshold):
return np.NaN
# Try Fuzzy matching on `disease`
disease_fuzzy_match, threshold = process.extractOne(disease, lookup_dict_keys)
if threshold >= fuzzy_threshold:
return resource_dict[disease_fuzzy_match]
# Try Fuzzy matching on `disease_synonyms`
if not isinstance(disease_synonyms, tuple):
return np.NaN
else:
for s in disease_synonyms:
disease_synonym_fuzzy_match, threshold = process.extractOne(s, lookup_dict_keys)
if threshold >= fuzzy_threshold:
return resource_dict[disease_synonym_fuzzy_match]
else:
return np.NaN # capitulate
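# A small illustrative example of the matching cascade above (all values hypothetical):
#
#     resource_dict = {'hepatitis c': ('fatigue', 'nausea')}
#     _disease_synonym_match_battery(disease='hcv',
#                                    disease_synonyms=('hepatitis c',),
#                                    resource_dict=resource_dict,
#                                    fuzzy_threshold=None)
#     # -> ('fatigue', 'nausea'); found via the synonym pass, so fuzzy matching is never attempted.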
def _resource_integration(data_frame, resource_dict, fuzzy_threshold, new_column_name, verbose,
desc):
"""
Integrates information in ``resource_dict`` into ``data_frame`` as new column (``new_column_name``).
:param data_frame: a dataframe which has been passed through ``_DiseaseOntologyIntegration().integration()``
:type data_frame: ``Pandas DataFrame``
:param resource_dict: a nested dictionary (see: ``_DiseaseOntologyIntegration()._dis_ont_dict_gen()``).
:type resource_dict: ``dict``
:param fuzzy_threshold: an integer on ``(0, 100]``.
    :type fuzzy_threshold: ``int``, ``bool``, ``None``
:param new_column_name: the name of the column with the extracted information.
:type new_column_name: ``str``
:param verbose: If ``True``, print notice when downloading database.
:type verbose: ``bool``
:param desc: description to pass to ``tqdm``.
:type desc: ``str`` or ``None``
:return: ``data_frame`` with information extracted from ``resource_dict``
:rtype: ``Pandas DataFrame``
"""
if fuzzy_threshold is True:
raise ValueError(
"`fuzzy_threshold` cannot be `True`. Please specify a specific integer on ``(0, 100]``.")
missing_column_error_message = "`data_frame` must contain a '{0}' column.\n" \
"Call ``_DiseaseOntologyIntegration().disease_ont_integration()``"
if 'disease' not in data_frame.columns:
raise AttributeError(missing_column_error_message.format('disease'))
elif 'disease_synonym' not in data_frame.columns:
raise AttributeError(missing_column_error_message.format('disease_synonym'))
# Map gene-disease information onto the dataframe
matches = list()
for _, row in tqdm(data_frame.iterrows(), total=len(data_frame), desc=desc,
disable=not verbose):
match = _disease_synonym_match_battery(disease=row['disease'],
disease_synonyms=row['disease_synonym'],
resource_dict=resource_dict,
fuzzy_threshold=fuzzy_threshold)
matches.append(match)
    # Add the `matches` list to `data_frame`
data_frame[new_column_name] = matches
return data_frame
# ----------------------------------------------------------------------------------------------------------
# Disease Symptoms Interface (Symptomatology)
# ----------------------------------------------------------------------------------------------------------
class _DiseaseSymptomsIntegration(object):
"""
Integration of Disease Symptoms information.
    :param cache_path: location of the BioVida cache. If one does not exist in this location, one will be created.
                       Defaults to ``None`` (which will generate a cache in the home folder).
:type cache_path: ``str`` or ``None``
:param verbose: If ``True``, print notice when downloading database. Defaults to ``True``.
:type verbose: ``bool``
"""
@staticmethod
def _disease_symptom_dict_gen(dis_symp_db):
"""
Tool to create a dictionary mapping disease to symptoms.
:param dis_symp_db: yield of ``DiseaseSymptomsInterface().pull()``
:type dis_symp_db: ``Pandas DataFrame``
:return: a dictionary of the form ``{disease name: [symptom, symptom, symptom, ...], ...}``
:rtype: ``dict``
"""
d = defaultdict(set)
for disease, symptom in zip(dis_symp_db['common_disease_name'],
dis_symp_db['common_symptom_term']):
d[disease.lower()].add(symptom.lower())
return {k: tuple(sorted(v)) for k, v in d.items()}
def __init__(self, cache_path=None, verbose=True):
self.verbose = verbose
# Load the Disease Symptoms database
dis_symp_db = DiseaseSymptomsInterface(cache_path=cache_path, verbose=verbose).pull()
# Create a disease-symptoms mapping
self.disease_symptom_dict = self._disease_symptom_dict_gen(dis_symp_db)
def _mentioned_symptoms(self, data_frame):
"""
Match 'known_associated_symptoms' to the 'abstract' for the given row.
:param data_frame: ``updated_data_frame`` as evolved in ``_DiseaseSymptomsIntegration().integration()``.
:type data_frame: ``Pandas DataFrame``
:return: a series with tuples of 'known_associated_symptoms' found in 'abstract'.
:rtype: ``Pandas Series``
"""
# ToDo: consider using 'mesh' and 'problems' cols - would have to be added in ``_ImagesInterfaceIntegration()``.
def match_symptoms(row):
"""Find items in 'known_associated_symptoms' in 'abstract'."""
if isinstance(row['known_associated_symptoms'], (list, tuple)) and isinstance(
row['abstract'], str):
abstract_lower = row['abstract'].lower()
symptoms = [i for i in row['known_associated_symptoms'] if i in abstract_lower]
return tuple(symptoms) if len(symptoms) else np.NaN
else:
return np.NaN
return [match_symptoms(row) for _, row in tqdm(data_frame.iterrows(),
total=len(data_frame),
desc='Matching Symptoms',
disable=not self.verbose)]
def integration(self, data_frame, fuzzy_threshold=False):
"""
Adds a 'known_associated_symptoms' column to ``data_frame`` based on the Disease Symptoms database.
:param data_frame: a dataframe which has been passed through ``_DiseaseOntologyIntegration().integration()``.
:type data_frame: ``Pandas DataFrame``
:param fuzzy_threshold: an integer on ``(0, 100]``.
:type fuzzy_threshold: ``int``, ``bool``, ``None``
:return: ``data_frame`` with a 'known_associated_symptoms' column.
:rtype: ``Pandas DataFrame``
"""
        # Generate a 'known_associated_symptoms' column
updated_data_frame = _resource_integration(data_frame=data_frame,
resource_dict=self.disease_symptom_dict,
fuzzy_threshold=fuzzy_threshold,
new_column_name='known_associated_symptoms',
verbose=self.verbose,
desc='Symptoms')
# Find 'known_associated_symptoms' which individual patients presented with by scanning the abstract
updated_data_frame['mentioned_symptoms'] = self._mentioned_symptoms(updated_data_frame)
# Drop the 'abstract' column as it is no longer needed
del updated_data_frame['abstract']
return updated_data_frame
# ----------------------------------------------------------------------------------------------------------
# DisGeNET Integration
# ----------------------------------------------------------------------------------------------------------
class _DisgenetIntegration(object):
"""
Integration of DisGeNET information.
    :param cache_path: location of the BioVida cache. If one does not exist in this location, one will be created.
                       Defaults to ``None`` (which will generate a cache in the home folder).
:type cache_path: ``str`` or ``None``
:param verbose: If ``True``, print notice when downloading database. Defaults to ``True``.
:type verbose: ``bool``
"""
@staticmethod
def _disease_gene_dict_gen(disgenet_df):
"""
Generates a dictionary of the form: ``{disease name: (gene name, disgenet score), ...}``
        :param disgenet_df: yield of ``DisgenetInterface().pull('all')``.
:type disgenet_df: ``Pandas DataFrame``
:return: dictionary of the form ``{'disease_name': [('gene_name', disgenet score), ...], ...}``.
:rtype: ``dict``
"""
d = defaultdict(list)
cols = zip(*[disgenet_df[c] for c in ('disease_name', 'gene_name', 'score')])
for disease_name, gene_name, score in cols:
d[disease_name].append((gene_name, score))
return {k: tuple(sorted(v, key=lambda x: x[0])) for k, v in d.items()}
def __init__(self, cache_path=None, verbose=True):
self.verbose = verbose
# Load the database
disgenet_df = DisgenetInterface(cache_path=cache_path, verbose=verbose).pull('all')
# Extract the relevant information in `disgenet_df` to a dictionary.
self.disease_gene_dict = self._disease_gene_dict_gen(disgenet_df)
def integration(self, data_frame, fuzzy_threshold=False):
"""
Adds a series of genes known to be associated with the given disease to ``data_frame``.
:param data_frame: a dataframe which has been passed through ``_DiseaseOntologyIntegration().integration()``
:type data_frame: ``Pandas DataFrame``
:param fuzzy_threshold: an integer on ``(0, 100]``.
        :type fuzzy_threshold: ``int``, ``bool``, ``None``
:return: ``data_frame`` with a 'known_associated_genes' column.
:rtype: ``Pandas DataFrame``
"""
return _resource_integration(data_frame=data_frame,
resource_dict=self.disease_gene_dict,
fuzzy_threshold=fuzzy_threshold,
new_column_name='known_associated_genes',
verbose=self.verbose,
desc='Genomic')
# ----------------------------------------------------------------------------------------------------------
# Unify
# ----------------------------------------------------------------------------------------------------------
def images_unify(instances, db_to_extract='records_db', verbose=True, fuzzy_threshold=False):
"""
Unify Instances in the ``images`` subpackage against other BioVida APIs.
:param instances: See: ``biovida.unify_domains.unify_against_images()``
    :type instances: ``list``, ``tuple``, ``OpeniInterface``, ``OpeniImageProcessing`` or ``CancerImageInterface``
:param db_to_extract: the database to use. Must be one of: 'records_db', 'cache_records_db'.
Defaults to 'records_db'.
:type db_to_extract: ``str``
:param verbose: See: ``biovida.unify_domains.unify_against_images()``
    :type verbose: ``bool``
:param fuzzy_threshold: See: ``biovida.unify_domains.unify_against_images()``
    :type fuzzy_threshold: ``int``, ``bool``, ``None``
:return: See: ``biovida.unify_domains.unify_against_images()``
:rtype: ``Pandas DataFrame``
"""
instances = [instances] if not isinstance(instances, (list, tuple)) else instances
# Catch ``fuzzy_threshold=True`` and set to a reasonably high default.
if fuzzy_threshold is True:
fuzzy_threshold = 95
# Note: this doesn't consider cases where multiple biovida
# caches exist, each with different data, e.g., one has
# genetic data, another has Symptoms data. Thus, in rare
# cases, this may cause data to be downloaded unnecessarily.
cache_path = None # default
for i in instances:
if hasattr(i, '_cache_path'):
cache_path = getattr(i, '_cache_path')
break
# Combine Instances
combined_df = _ImagesInterfaceIntegration().integration(instances=instances,
db_to_extract=db_to_extract)
# Disease Ontology
combined_df = _DiseaseOntologyIntegration(cache_path, verbose).integration(combined_df,
fuzzy_threshold)
# Disease Symptoms
combined_df = _DiseaseSymptomsIntegration(cache_path, verbose).integration(combined_df,
fuzzy_threshold)
# Disgenet
combined_df = _DisgenetIntegration(cache_path, verbose).integration(combined_df,
fuzzy_threshold)
return combined_df
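# ----------------------------------------------------------------------------------------------------------
# Illustrative Usage (sketch)
# ----------------------------------------------------------------------------------------------------------
# A minimal sketch of how the unification pipeline above might be driven. The names `opi` and `cii`
# are hypothetical placeholders for an already-populated ``OpeniInterface`` and ``CancerImageInterface``;
# only ``images_unify()`` itself is defined in this module.
#
#     unified_df = images_unify(instances=[opi, cii],
#                               db_to_extract='records_db',
#                               verbose=True,
#                               fuzzy_threshold=95)
#     unified_df[['disease', 'known_associated_symptoms', 'known_associated_genes']].head()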
|
{
"content_hash": "e74e34a8e51d1b2937cd2ecb448dc31d",
"timestamp": "",
"source": "github",
"line_count": 771,
"max_line_length": 120,
"avg_line_length": 43.784695201037614,
"alnum_prop": 0.571271994786421,
"repo_name": "TariqAHassan/BioVida",
"id": "d0dbc64e4051e051028175b70b17e7b0c27e2e6c",
"size": "33775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "biovida/images/_unify_images_against_other_biovida_apis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "585874"
}
],
"symlink_target": ""
}
|
from os import path
from time import time
import warnings
from itertools import product
import yaml
import numpy as np
import pandas as pd
from mpi4py import MPI
import h5py
from pearce.mocks import cat_dict
from sys import argv
from sys import stdout
if __name__ == '__main__':
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
print rank, MPI.Get_processor_name()
stdout.flush()
|
{
"content_hash": "ed8fe248394ebce2a0e05a0bb6883e4a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 40,
"avg_line_length": 17.708333333333332,
"alnum_prop": 0.7129411764705882,
"repo_name": "mclaughlin6464/pearce",
"id": "c222d82b18646c5ea944fd8f7e942bcfeda66908",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/trainer/test_trainer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "56380623"
},
{
"name": "Python",
"bytes": "473976"
},
{
"name": "Shell",
"bytes": "452246"
}
],
"symlink_target": ""
}
|
import pytest
from threddsclient import *
def test_birdhouse_root():
xml = """
<catalog xmlns="http://www.unidata.ucar.edu/namespaces/thredds/InvCatalog/v1.0" xmlns:xlink="http://www.w3.org/1999/xlink" name="THREDDS Server Default Catalog" version="1.0.1">
<service name="all" serviceType="Compound" base="">
<service name="service4" serviceType="HTTPServer" base="/thredds/fileServer/" />
<service name="odap" serviceType="OPENDAP" base="/thredds/dodsC/" />
<service name="wcs" serviceType="WCS" base="/thredds/wcs/" />
<service name="wms" serviceType="WMS" base="/thredds/wms/" />
</service>
<catalogRef name="" ID="testDatasetScan" xlink:href="/thredds/catalog/test/catalog.xml" xlink:title="Test all files in a directory">
<metadata inherited="true">
<serviceName>all</serviceName>
</metadata>
<property name="DatasetScan" value="true" />
</catalogRef>
</catalog>
"""
cat = read_xml(xml, 'http://example.test/catalog.xml')
assert cat.name == "THREDDS Server Default Catalog"
assert len(cat.datasets) == 0
assert len(cat.references) == 1
assert len(cat.flat_datasets()) == 0
assert len(cat.flat_references()) == 1
assert len(cat.services) == 1
assert len(cat.services[0].services) == 4
assert len(cat.get_services('all')) == 4
def test_birdhouse_top():
xml = """
<catalog xmlns="http://www.unidata.ucar.edu/namespaces/thredds/InvCatalog/v1.0" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.0.1">
<service name="all" serviceType="Compound" base="">
<service name="service4" serviceType="HTTPServer" base="/thredds/fileServer/" />
<service name="odap" serviceType="OPENDAP" base="/thredds/dodsC/" />
<service name="wcs" serviceType="WCS" base="/thredds/wcs/" />
<service name="wms" serviceType="WMS" base="/thredds/wms/" />
</service>
<dataset name="Test all files in a directory" ID="testDatasetScan">
<metadata inherited="true">
<serviceName>all</serviceName>
</metadata>
<catalogRef xlink:href="malleefowl/catalog.xml" xlink:title="malleefowl" ID="testDatasetScan/malleefowl" name="" />
<catalogRef xlink:href="hummingbird/catalog.xml" xlink:title="hummingbird" ID="testDatasetScan/hummingbird" name="" />
<catalogRef xlink:href="flyingpigeon/catalog.xml" xlink:title="flyingpigeon" ID="testDatasetScan/flyingpigeon" name="" />
<catalogRef xlink:href="emu/catalog.xml" xlink:title="emu" ID="testDatasetScan/emu" name="" />
</dataset>
</catalog>
"""
cat = read_xml(xml, 'http://example.test/catalog.xml')
assert cat.name == "Test all files in a directory"
assert len(cat.datasets) == 1
assert len(cat.references) == 0
assert len(cat.flat_datasets()) == 0
assert len(cat.flat_references()) == 4
assert cat.flat_references()[0].name == "malleefowl"
|
{
"content_hash": "e8e01e34bebd114efde079836171f765",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 181,
"avg_line_length": 49.36666666666667,
"alnum_prop": 0.6512491559756921,
"repo_name": "ScottWales/threddsclient",
"id": "210b8a91b0f2a6ed2f753710254d401c808752c8",
"size": "2962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_birdhouse_sample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "33638"
}
],
"symlink_target": ""
}
|
from asynctest.mock import MagicMock, call
import pytest
from google.api_core.exceptions import FailedPrecondition
# All test coroutines will be treated as marked.
from google.cloud.pubsublite.cloudpubsub.internal.ack_set_tracker import AckSetTracker
from google.cloud.pubsublite.cloudpubsub.internal.ack_set_tracker_impl import (
AckSetTrackerImpl,
)
from google.cloud.pubsublite.internal.wire.committer import Committer
from google.cloud.pubsublite_v1 import Cursor
pytestmark = pytest.mark.asyncio
@pytest.fixture()
def committer():
committer = MagicMock(spec=Committer)
committer.__aenter__.return_value = committer
return committer
@pytest.fixture()
def tracker(committer):
return AckSetTrackerImpl(committer)
async def test_track_and_aggregate_acks(committer, tracker: AckSetTracker):
async with tracker:
committer.__aenter__.assert_called_once()
tracker.track(offset=1)
tracker.track(offset=3)
tracker.track(offset=5)
tracker.track(offset=7)
committer.commit.assert_has_calls([])
tracker.ack(offset=3)
committer.commit.assert_has_calls([])
tracker.ack(offset=1)
committer.commit.assert_has_calls([call(Cursor(offset=4))])
tracker.ack(offset=5)
committer.commit.assert_has_calls(
[call(Cursor(offset=4)), call(Cursor(offset=6))]
)
tracker.track(offset=8)
tracker.ack(offset=7)
committer.commit.assert_has_calls(
[call(Cursor(offset=4)), call(Cursor(offset=6)), call(Cursor(offset=8))]
)
committer.__aexit__.assert_called_once()
async def test_clear_and_commit(committer, tracker: AckSetTracker):
async with tracker:
committer.__aenter__.assert_called_once()
tracker.track(offset=3)
tracker.track(offset=5)
with pytest.raises(FailedPrecondition):
tracker.track(offset=1)
tracker.ack(offset=5)
committer.commit.assert_has_calls([])
await tracker.clear_and_commit()
committer.wait_until_empty.assert_called_once()
# After clearing, it should be possible to track earlier offsets.
tracker.track(offset=1)
tracker.ack(offset=1)
committer.commit.assert_has_calls([call(Cursor(offset=2))])
committer.__aexit__.assert_called_once()
|
{
"content_hash": "6dc6391fbd85f5def8f37b75f0c5d6c1",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 86,
"avg_line_length": 32.19178082191781,
"alnum_prop": 0.685531914893617,
"repo_name": "googleapis/python-pubsublite",
"id": "f859592fa4c856219a69e2b6c6292d7f08d2617c",
"size": "2925",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/pubsublite/cloudpubsub/internal/ack_set_tracker_impl_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1689513"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
}
|
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'numpydoc',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx_gallery.gen_gallery',
'sphinx.ext.autosummary'
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
sphinx_gallery_conf = {
# path to your examples scripts
'examples_dirs' : '../examples',
# path where to save gallery generated examples
'gallery_dirs' : 'auto_examples'}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mifs'
copyright = u'2016, G. Lemaitre, F. Nogueira, D. Oliveira, C. Aridas'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
__version__ = '0.2.0.dev0'
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mifsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'mifs.tex', u'mifs Documentation',
u'D. Homola', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mifs', u'mifs Documentation',
[u'D. Homola'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'mifs', u'mifs Documentation',
u'D. Homola', 'mifs', 'Parallelized Mutual Information based Feature Selection module.',
'Miscellaneous'),
]
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
app.connect('autodoc-process-docstring', generate_example_rst)
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
{
"content_hash": "3204b73ea0ed13016bd7e99215af7be0",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 93,
"avg_line_length": 31.536912751677853,
"alnum_prop": 0.6938710363907215,
"repo_name": "danielhomola/mifs",
"id": "e39fed3455e79a8b99683a9647f03112dcabbb38",
"size": "9815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "654"
},
{
"name": "Python",
"bytes": "24926"
}
],
"symlink_target": ""
}
|
shopping_list = []
def show_help():
# print instructions on how to use the app
print("What sould we pick up at the store, if you are not sure, please ring me?")
print ("""
Enter 'DONE to stop adding items
Enter 'HELP fot the help menu
Enter 'SHOW to see your current list""")
#print out the list
def show_list():
print("Here is your list:")
for item in shopping_list:
print(item)
def add_to_list(new_item):
# add new items
shopping_list.append(new_item)
print ("Added {}. list now has {} items.".format(new_item, len(shopping_list)))
def main():
show_help()
while True:
new_item = input("> ")
# be able to quit the app
if new_item == 'DONE':
break
elif new_item == 'HELP':
show_help()
continue
elif new_item == 'SHOW':
show_list()
continue
add_to_list(new_item)
#ask for new items
    show_list()
if __name__ == '__main__':
    main()
|
{
"content_hash": "57c5c40a97ebb250a1556517be7645e1",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 85,
"avg_line_length": 18.34,
"alnum_prop": 0.6030534351145038,
"repo_name": "BIMobject-Ben/test",
"id": "3532229e49df52a799965bf6afeecc97790c9921",
"size": "944",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "shopping_list_3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "28070"
}
],
"symlink_target": ""
}
|
import os
import unittest
from telemetry import story
from telemetry import page as page_module
from telemetry.value import improvement_direction
from telemetry.value import list_of_scalar_values
from telemetry.value import merge_values
from telemetry.value import scalar
class TestBase(unittest.TestCase):
def setUp(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(
page_module.Page('http://www.bar.com/', story_set, story_set.base_dir,
name='http://www.bar.com/'))
story_set.AddStory(
page_module.Page('http://www.baz.com/', story_set, story_set.base_dir,
name='http://www.baz.com/'))
story_set.AddStory(
page_module.Page('http://www.foo.com/', story_set, story_set.base_dir,
name='http://www.foo.com/'))
self.story_set = story_set
@property
def pages(self):
return self.story_set.stories
class MergeValueTest(TestBase):
def testDefaultKeyFuncWithTirLabel(self):
page0 = self.pages[0]
value = scalar.ScalarValue(
page0, 'x', 'units', 1,
improvement_direction=improvement_direction.UP,
tir_label='foo')
self.assertEquals(('x', 'foo'), merge_values.DefaultKeyFunc(value))
def testSamePageMergeBasic(self):
page0 = self.pages[0]
page1 = self.pages[1]
all_values = [scalar.ScalarValue(
page0, 'x', 'units', 1,
improvement_direction=improvement_direction.UP),
scalar.ScalarValue(
page1, 'x', 'units', 4,
improvement_direction=improvement_direction.UP),
scalar.ScalarValue(
page0, 'x', 'units', 2,
improvement_direction=improvement_direction.UP),
scalar.ScalarValue(
page1, 'x', 'units', 5,
improvement_direction=improvement_direction.UP)]
merged_values = merge_values.MergeLikeValuesFromSamePage(all_values)
# Sort the results so that their order is predictable for the subsequent
# assertions.
merged_values.sort(key=lambda x: x.page.url)
self.assertEquals(2, len(merged_values))
self.assertEquals((page0, 'x'),
(merged_values[0].page, merged_values[0].name))
self.assertEquals([1, 2], merged_values[0].values)
self.assertEquals((page1, 'x'),
(merged_values[1].page, merged_values[1].name))
self.assertEquals([4, 5], merged_values[1].values)
def testSamePageMergeNonstandardKeyFunc(self):
page0 = self.pages[0]
page1 = self.pages[1]
all_values = [scalar.ScalarValue(
page0, 'x', 'units', 1,
improvement_direction=improvement_direction.UP),
scalar.ScalarValue(
page1, 'x', 'units', 4,
improvement_direction=improvement_direction.UP),
scalar.ScalarValue(
page0, 'y', 'units', 2,
improvement_direction=improvement_direction.UP),
scalar.ScalarValue(
page1, 'y', 'units', 5,
improvement_direction=improvement_direction.UP)]
merged_values = merge_values.MergeLikeValuesFromSamePage(
all_values, key_func=lambda v: v.page.name)
# Sort the results so that their order is predictable for the subsequent
# assertions.
merged_values.sort(key=lambda x: x.page.url)
self.assertEquals(2, len(merged_values))
self.assertEquals([1, 2], merged_values[0].values)
self.assertEquals([4, 5], merged_values[1].values)
def testSamePageMergeOneValue(self):
page0 = self.pages[0]
all_values = [scalar.ScalarValue(
page0, 'x', 'units', 1,
improvement_direction=improvement_direction.DOWN)]
# Sort the results so that their order is predictable for the subsequent
# assertions.
merged_values = merge_values.MergeLikeValuesFromSamePage(all_values)
self.assertEquals(1, len(merged_values))
self.assertEquals(all_values[0].name, merged_values[0].name)
self.assertEquals(all_values[0].units, merged_values[0].units)
def testSamePageMergeWithInteractionRecord(self):
page0 = self.pages[0]
all_values = [scalar.ScalarValue(
page0, 'foo-x', 'units', 1, tir_label='foo',
improvement_direction=improvement_direction.UP),
scalar.ScalarValue(
page0, 'foo-x', 'units', 4, tir_label='foo',
improvement_direction=improvement_direction.UP)]
merged_values = merge_values.MergeLikeValuesFromSamePage(all_values)
self.assertEquals(1, len(merged_values))
self.assertEquals('foo', merged_values[0].tir_label)
def testSamePageMergeWithTwoInteractionRecords(self):
page0 = self.pages[0]
all_values = [scalar.ScalarValue(page0, 'x', 'units', 1, tir_label='foo'),
scalar.ScalarValue(page0, 'x', 'units', 4, tir_label='bar')]
merged_values = merge_values.MergeLikeValuesFromSamePage(all_values)
self.assertEquals(2, len(merged_values))
self.assertEquals('foo', merged_values[0].tir_label)
self.assertEquals('bar', merged_values[1].tir_label)
def testDifferentPageMergeBasic(self):
page0 = self.pages[0]
page1 = self.pages[1]
all_values = [scalar.ScalarValue(
page0, 'x', 'units', 1,
improvement_direction=improvement_direction.UP),
scalar.ScalarValue(
page1, 'x', 'units', 2,
improvement_direction=improvement_direction.UP),
scalar.ScalarValue(
page0, 'y', 'units', 10,
improvement_direction=improvement_direction.UP),
scalar.ScalarValue(
page1, 'y', 'units', 20,
improvement_direction=improvement_direction.UP)]
merged_values = merge_values.MergeLikeValuesFromDifferentPages(all_values)
merged_values.sort(key=lambda x: x.name)
self.assertEquals(2, len(merged_values))
self.assertEquals((None, 'x'),
(merged_values[0].page, merged_values[0].name))
self.assertEquals([1, 2], merged_values[0].values)
self.assertEquals((None, 'y'),
(merged_values[1].page, merged_values[1].name))
self.assertEquals([10, 20], merged_values[1].values)
def testDifferentPageMergeNonstandardKeyFunc(self):
page0 = self.pages[0]
page1 = self.pages[1]
all_values = [scalar.ScalarValue(
page0, 'x', 'units', 1,
improvement_direction=improvement_direction.UP),
scalar.ScalarValue(
page1, 'x', 'units', 2,
improvement_direction=improvement_direction.UP),
scalar.ScalarValue(
page0, 'y', 'units', 10,
improvement_direction=improvement_direction.UP),
scalar.ScalarValue(
page1, 'y', 'units', 20,
improvement_direction=improvement_direction.UP)]
merged_values = merge_values.MergeLikeValuesFromDifferentPages(
all_values, key_func=lambda v: True)
self.assertEquals(1, len(merged_values))
self.assertEquals([1, 2, 10, 20], merged_values[0].values)
def testDifferentPageMergeSingleValueStillMerges(self):
page0 = self.pages[0]
all_values = [scalar.ScalarValue(
page0, 'x', 'units', 1,
improvement_direction=improvement_direction.DOWN)]
# Sort the results so that their order is predictable for the subsequent
# assertions.
merged_values = merge_values.MergeLikeValuesFromDifferentPages(all_values)
self.assertEquals(1, len(merged_values))
self.assertEquals((None, 'x'),
(merged_values[0].page, merged_values[0].name))
self.assertTrue(
isinstance(merged_values[0], list_of_scalar_values.ListOfScalarValues))
self.assertEquals([1], merged_values[0].values)
def testDifferentPageMergeWithInteractionRecord(self):
page0 = self.pages[0]
page1 = self.pages[1]
v0 = scalar.ScalarValue(page0, 'x', 'units', 1, tir_label='foo')
v1 = scalar.ScalarValue(page0, 'y', 'units', 30, tir_label='bar')
v2 = scalar.ScalarValue(page1, 'x', 'units', 2, tir_label='foo')
v3 = scalar.ScalarValue(page1, 'y', 'units', 40, tir_label='baz')
all_values = [v0, v1, v2, v3]
merged_x = list_of_scalar_values.ListOfScalarValues(
None, 'x', 'units', [1, 2], tir_label='foo')
merged_y_bar = list_of_scalar_values.ListOfScalarValues(
None, 'y', 'units', [30], tir_label='bar')
merged_y_baz = list_of_scalar_values.ListOfScalarValues(
None, 'y', 'units', [40], tir_label='baz')
merged_values = merge_values.MergeLikeValuesFromDifferentPages(all_values)
merged_values.sort(key=lambda x: x.tir_label)
self.assertEquals([merged_y_bar, merged_y_baz, merged_x], merged_values)
|
{
"content_hash": "97daf616773364525437efc2bc3b16c1",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 79,
"avg_line_length": 40.07826086956522,
"alnum_prop": 0.6096767194619224,
"repo_name": "benschmaus/catapult",
"id": "7380fb4ca6eb91151d936790c02ca75b74b92fac",
"size": "9380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telemetry/telemetry/value/merge_values_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43486"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "58279"
},
{
"name": "HTML",
"bytes": "11801772"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6141932"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
}
|
# -*- coding: utf-8 -*-
"""@package duo3d
@brief:
@author: Mateusz Owczarek (mateusz.owczarek@dokt.p.lodz.pl)
@version: 0.2
@date: April, 2016
@copyright: 2016 (c) Mateusz Owczarek
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This work was supported by the European Union's
Horizon 2020 Research and Innovation Programme
under grant agreement No 643636 "Sound of Vision."
"""
import os
import ctypes as ct
__all__ = [
"CloseDUO", "EnumerateDUOResolutions", "FindOptimalBinning",
"GetDUOCalibrationPresent", "GetDUOCameraSwap", "GetDUODeviceName",
"GetDUOExposure", "GetDUOExposureMS", "GetDUOExtrinsics", "GetDUOFOV",
"GetDUOFirmwareBuild", "GetDUOFirmwareVersion", "GetDUOResolutionInfo",
"GetDUOFrameDimension", "GetDUOGain", "GetDUOHFlip", "GetDUOIMURange",
"GetDUOIntrinsics", "GetDUOLedPWM", "GetDUOSerialNumber",
"GetDUOStereoParameters", "GetDUOUndistort", "GetDUOVFlip",
"GetDUOLibVersion", "OpenDUO", "SetDUOCameraSwap", "SetDUOExposure",
"SetDUOExposureMS", "SetDUOGain", "SetDUOHFlip", "SetDUOIMURange",
"SetDUOIMURate", "SetDUOLedPWM", "SetDUOLedPWMSeq", "SetDUOResolutionInfo",
"SetDUOUndistort", "SetDUOVFlip", "StartDUO", "StopDUO",
"DUOFrame", "DUOFrameCallback", "DUOIMUSample", "DUOInstance",
"DUOLEDSeq", "DUOResolutionInfo", "PDUOFrame", "PDUOLEDSeq",
"PDUOResolutionInfo",
"DUO_ACCEL_16G", "DUO_ACCEL_2G", "DUO_ACCEL_4G", "DUO_ACCEL_8G",
"DUO_BIN_ANY", "DUO_BIN_HORIZONTAL2", "DUO_BIN_HORIZONTAL4",
"DUO_BIN_NONE", "DUO_BIN_VERTICAL2", "DUO_BIN_VERTICAL4",
"DUO_GYRO_1000", "DUO_GYRO_2000", "DUO_GYRO_250", "DUO_GYRO_500",
]
# Load shared library
if os.sys.platform.startswith( "win" ):
_duolib_filename = "DUOLib.dll"
elif os.sys.platform.startswith( "linux" ):
_duolib_filename = "libDUO.so"
elif os.sys.platform.startswith( "darwin" ):
_duolib_filename = "libDUO.dylib"
_duolib_filepath = os.path.abspath( os.path.join( os.path.dirname( __file__ ),
"..",
_duolib_filename ) )
if not os.path.isfile( _duolib_filepath ):
_duolib_dir = os.path.dirname( _duolib_filepath )
error_str = "You need to copy '%s' from DUOSDK into %s to make this package work"
raise ImportError( error_str % ( _duolib_filename, _duolib_dir ) )
_duolib = ct.cdll.LoadLibrary( _duolib_filepath )
# DUO instance
DUOInstance = ct.c_void_p
# DUO binning
DUO_BIN_ANY = -1
DUO_BIN_NONE = 0
DUO_BIN_HORIZONTAL2 = 1 # Horizontal binning by factor of 2
DUO_BIN_HORIZONTAL4 = 2 # Horizontal binning by factor of 4
DUO_BIN_VERTICAL2 = 4 # Vertical binning by factor of 2
DUO_BIN_VERTICAL4 = 8 # Vertical binning by factor of 4
class DUOResolutionInfo( ct.Structure ):
"""
DUO resolution info
"""
_fields_ = [
( "width", ct.c_int ),
( "height", ct.c_int ),
( "binning", ct.c_int ),
( "fps", ct.c_float ),
( "minFps", ct.c_float ),
( "maxFps", ct.c_float ),
]
PDUOResolutionInfo = ct.POINTER( DUOResolutionInfo )
class DUOIMUSample( ct.Structure ):
"""
DUO IMU data sample
"""
_fields_ = [
( "timeStamp", ct.c_uint32 ), # DUO IMU time stamp in 100us increments
( "tempData", ct.c_float ), # DUO temperature data in degrees Centigrade
( "accelData", ct.c_float * 3 ), # DUO accelerometer data (x,y,z) in g units
( "gyroData", ct.c_float * 3 ) # DUO gyroscope data (x,y,z) id degrees/s
]
DUO_MAX_IMU_SAMPLES = 100
class DUOFrame( ct.Structure ):
"""
DUOFrame structure holds the sensor data
that is passed to user via DUOFrameCallback function
"""
_fields_ = [
( "width", ct.c_uint32 ), # DUO frame width
( "height", ct.c_uint32 ), # DUO frame height
( "ledSeqTag", ct.c_uint8 ), # DUO frame LED tag
( "timeStamp", ct.c_uint32 ), # DUO frame time stamp in 100us increments
( "leftData", ct.POINTER( ct.c_uint8 ) ), # DUO left frame data
( "rightData", ct.POINTER( ct.c_uint8 ) ), # DUO right frame data
( "IMUPresent", ct.c_uint8 ), # True if IMU chip is present ( DUO MLX )
( "IMUSamples", ct.c_uint32 ), # Number of IMU data samples in this frame
( "IMUData", DUOIMUSample * DUO_MAX_IMU_SAMPLES ) # DUO IMU data samples
]
PDUOFrame = ct.POINTER( DUOFrame )
class DUOLEDSeq( ct.Structure ):
"""
DUO LED PWM
"""
_fields_ = [
( "ledPwmValue", ct.c_uint8 * 4 ) # LED PWM values are in percentage [0,100]
]
PDUOLEDSeq = ct.POINTER( DUOLEDSeq )
# DUO Accelerometer Range
DUO_ACCEL_2G = 0 # DUO Accelerometer full scale range +/- 2g
DUO_ACCEL_4G = 1 # DUO Accelerometer full scale range +/- 4g
DUO_ACCEL_8G = 2 # DUO Accelerometer full scale range +/- 8g
DUO_ACCEL_16G = 3 # DUO Accelerometer full scale range +/- 16g
# DUO Gyroscope Range
DUO_GYRO_250 = 0 # DUO Gyroscope full scale range 250 deg/s
DUO_GYRO_500 = 1 # DUO Gyroscope full scale range 500 deg/s
DUO_GYRO_1000 = 2 # DUO Gyroscope full scale range 1000 deg/s
DUO_GYRO_2000 = 3 # DUO Gyroscope full scale range 2000 deg/s
class DUO_INTR( ct.Structure ):
"""
"""
class INTR( ct.Structure ):
_pack_ = 1
_fields_ = [
( "k1", ct.c_double ), # Camera radial distortion coefficients
( "k2", ct.c_double ),
( "k3", ct.c_double ),
( "k4", ct.c_double ), # Camera radial distortion coefficients
( "k5", ct.c_double ),
( "k6", ct.c_double ),
( "p1", ct.c_double ), # Camera tangential distortion coefficients
( "p2", ct.c_double ),
( "fx", ct.c_double ), # Camera focal lengths in pixel units
( "fy", ct.c_double ),
( "cx", ct.c_double ), # Camera principal point
( "cy", ct.c_double ),
]
_pack_ = 1
_fields_ = [
( "width", ct.c_uint16 ),
( "height", ct.c_uint16 ),
( "left", INTR ),
( "right", INTR ),
]
class DUO_EXTR( ct.Structure ):
"""
"""
_pack_ = 1
_fields_ = [
( "rotation", ct.c_double * 9 ),
( "translation", ct.c_double * 3 )
]
class DUO_STEREO( ct.Structure ):
"""
"""
_pack_ = 1
_fields_ = [
( "M1", ct.c_double * 9 ), # 3x3 - Camera matrices
( "M2", ct.c_double * 9 ),
( "D1", ct.c_double * 8 ), # 1x8 - Camera distortion parameters
( "D2", ct.c_double * 8 ),
( "R", ct.c_double * 9 ), # 3x3 - Rotation between left and right camera
( "T", ct.c_double * 3 ), # 3x1 - Translation vector between left and right camera
( "R1", ct.c_double * 9 ), # 3x3 - Rectified rotation matrices
( "R2", ct.c_double * 9 ),
( "P1", ct.c_double * 12 ), # 3x4 - Rectified projection matrices
( "P2", ct.c_double * 12 ),
( "Q", ct.c_double * 16 ) # 4x4 - Disparity to depth mapping matrix
]
_duolib.GetDUOLibVersion.argtypes = None
_duolib.GetDUOLibVersion.restype = ct.c_char_p
def GetDUOLibVersion():
"""
"""
return _duolib.GetDUOLibVersion()
# DUO resolution enumeration
_duolib.EnumerateDUOResolutions.argtypes = [
PDUOResolutionInfo,
ct.c_int32,
ct.c_int32,
ct.c_int32,
ct.c_int32,
ct.c_float
]
_duolib.EnumerateDUOResolutions.restype = ct.c_int
def EnumerateDUOResolutions( resList, resListSize, width = -1, height = -1,
binning = DUO_BIN_ANY, fps = -1.0 ):
"""
Enumerates supported resolutions.
To enumerate resolution settings for specific resolution,
set width and height and optionally fps.
To enumerate all supported resolutions set width, height and fps all to -1.
    @note: There are a large number of resolution settings supported.
@param resList:
@param resListSize:
@param width:
@param height:
@param binning:
@param fps:
@return: number of resolutions found
"""
return _duolib.EnumerateDUOResolutions( ct.byref( resList ),
resListSize,
width,
height,
binning,
fps )
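# Illustrative sketch (assumes the DUO shared library loaded above and an attached device):
#
#     res_list = ( DUOResolutionInfo * 100 )()  # pre-allocated output buffer
#     n = EnumerateDUOResolutions( res_list, 100, 752, 480, DUO_BIN_NONE, -1.0 )
#     for r in res_list[:n]:
#         print( r.width, r.height, r.binning, r.fps )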
def FindOptimalBinning( width, height ):
"""
Finds optimal binning.
    This maximizes the sensor imaging area for a given resolution.
@note: Not a part of DUO API, just a helper function
@param width: width of the frame
@param height: height of the frame
@return: optimal binning parameters for given (width, height)
"""
binning = DUO_BIN_NONE
if width <= 752 / 2:
binning += DUO_BIN_HORIZONTAL2
if height <= 480 / 4:
binning += DUO_BIN_VERTICAL4
elif height <= 480 / 2:
binning += DUO_BIN_VERTICAL2
return binning
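# For example, a 320x120 target frame maximizes the imaging area with horizontal
# binning by 2 and vertical binning by 4:
#
#     FindOptimalBinning( 320, 120 )  # == DUO_BIN_HORIZONTAL2 + DUO_BIN_VERTICAL4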
# DUO device initialization
_duolib.OpenDUO.argtypes = [ ct.POINTER( DUOInstance ) ]
_duolib.OpenDUO.restype = ct.c_bool
def OpenDUO( duo ):
"""
    Opens the DUO device and initializes the passed DUOInstance handle pointer.
@param duo: DUOInstance handle pointer
@return: True on success
"""
return _duolib.OpenDUO( ct.byref( duo ) )
_duolib.CloseDUO.argtypes = [ DUOInstance ]
_duolib.CloseDUO.restype = ct.c_bool
def CloseDUO( duo ):
"""
Closes the DUO device.
@param duo: DUOInstance handle pointer
@return: True on success
"""
return _duolib.CloseDUO( duo )
# DUO frame callback function
# NOTE: This function is called in the context of the DUO capture thread.
# To prevent any dropped frames, this function must return as soon as possible.
DUOFrameCallback = ct.CFUNCTYPE( None, PDUOFrame, ct.c_void_p )
# DUO device capture control
_duolib.StartDUO.argtypes = [ DUOInstance,
DUOFrameCallback,
ct.c_void_p,
ct.c_bool ]
_duolib.StartDUO.restype = ct.c_bool
def StartDUO( duo, frameCallback = None, pUserData = None, masterMode = True ):
"""
Starts capturing frames.
@param duo: DUOInstance handle pointer
@param frameCallback: pointer to user defined DUOFrameCallback callback function
@param pUserData: any user data that needs to be passed to the callback function
@param masterMode:
@return: True on success
"""
callback = ( frameCallback if frameCallback is not None else DUOFrameCallback() )
return _duolib.StartDUO( duo, callback, pUserData, masterMode )
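# Example (hedged sketch): wrap a Python function with DUOFrameCallback and
# keep a reference to it for as long as capture runs, otherwise the ctypes
# callback object may be garbage collected while the capture thread still
# holds its address.
#
#     def _on_frame( pFrame, pUserData ):
#         pass  # runs on the DUO capture thread; return as quickly as possible
#
#     _frame_cb = DUOFrameCallback( _on_frame )   # keep this reference alive
#     StartDUO( duo, _frame_cb, None )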
_duolib.StopDUO.argtypes = [ DUOInstance ]
_duolib.StopDUO.restype = ct.c_bool
def StopDUO( duo ):
"""
Stops capturing frames.
@param duo: DUOInstance handle pointer
@return: True on success
"""
return _duolib.StopDUO( duo )
# Get DUO parameters
_duolib.GetDUODeviceName.argtypes = [ DUOInstance,
ct.c_char_p ]
_duolib.GetDUODeviceName.restype = ct.c_bool
def GetDUODeviceName( duo ):
"""
Returns DUO device name
"""
val = ct.create_string_buffer( 260 )
_duolib.GetDUODeviceName( duo, val )
return val.value
_duolib.GetDUOSerialNumber.argtypes = [ DUOInstance,
ct.c_char_p ]
_duolib.GetDUOSerialNumber.restype = ct.c_bool
def GetDUOSerialNumber( duo ):
"""
Returns DUO serial number
"""
val = ct.create_string_buffer( 260 )
_duolib.GetDUOSerialNumber( duo, val )
return val.value
_duolib.GetDUOFirmwareVersion.argtypes = [ DUOInstance,
ct.c_char_p ]
_duolib.GetDUOFirmwareVersion.restype = ct.c_bool
def GetDUOFirmwareVersion( duo ):
"""
Returns DUO firmware version
"""
val = ct.create_string_buffer( 260 )
_duolib.GetDUOFirmwareVersion( duo, val )
return val.value
_duolib.GetDUOFirmwareBuild.argtypes = [ DUOInstance,
ct.c_char_p ]
_duolib.GetDUOFirmwareBuild.restype = ct.c_bool
def GetDUOFirmwareBuild( duo ):
"""
Returns DUO firmware build information
"""
val = ct.create_string_buffer( 260 )
_duolib.GetDUOFirmwareBuild( duo, val )
return val.value
_duolib.GetDUOResolutionInfo.argtypes = [ DUOInstance,
PDUOResolutionInfo ]
_duolib.GetDUOResolutionInfo.restype = ct.c_bool
def GetDUOResolutionInfo( duo ):
    """
    Returns the currently selected DUO resolution info, see DUOResolutionInfo
    """
res_info = DUOResolutionInfo()
_duolib.GetDUOResolutionInfo( duo, ct.byref( res_info ) )
return res_info
_duolib.GetDUOFrameDimension.argtypes = [ DUOInstance,
ct.POINTER( ct.c_uint32 ),
ct.POINTER( ct.c_uint32 ) ]
_duolib.GetDUOFrameDimension.restype = ct.c_bool
def GetDUOFrameDimension( duo ):
"""
Returns DUO frame width and height
@return: tuple(width, height)
"""
w = ct.c_uint32()
h = ct.c_uint32()
_duolib.GetDUOFrameDimension( duo, ct.byref( w ), ct.byref( h ) )
return ( w.value, h.value )
_duolib.GetDUOExposure.argtypes = [ DUOInstance,
ct.POINTER( ct.c_double ) ]
_duolib.GetDUOExposure.restype = ct.c_bool
def GetDUOExposure( duo ):
"""
Returns DUO exposure value in percentage [0,100]
"""
val = ct.c_double()
_duolib.GetDUOExposure( duo, ct.byref( val ) )
return val.value
_duolib.GetDUOExposureMS.argtypes = [ DUOInstance,
ct.POINTER( ct.c_double ) ]
_duolib.GetDUOExposureMS.restype = ct.c_bool
def GetDUOExposureMS( duo ):
"""
Returns DUO exposure value in milliseconds
"""
val = ct.c_double()
_duolib.GetDUOExposureMS( duo, ct.byref( val ) )
return val.value
_duolib.GetDUOAutoExposure.argtypes = [ DUOInstance,
ct.POINTER( ct.c_bool ) ]
_duolib.GetDUOAutoExposure.restype = ct.c_bool
def GetDUOAutoExposure( duo ):
"""
Returns DUO auto exposure value
"""
val = ct.c_bool()
_duolib.GetDUOAutoExposure( duo, ct.byref( val ) )
return val.value
_duolib.GetDUOGain.argtypes = [ DUOInstance,
ct.POINTER( ct.c_double ) ]
_duolib.GetDUOGain.restype = ct.c_bool
def GetDUOGain( duo ):
"""
Returns DUO gain value in percentage [0,100]
"""
val = ct.c_double()
_duolib.GetDUOGain( duo, ct.byref( val ) )
return val.value
_duolib.GetDUOHFlip.argtypes = [ DUOInstance,
ct.POINTER( ct.c_bool ) ]
_duolib.GetDUOHFlip.restype = ct.c_bool
def GetDUOHFlip( duo ):
"""
Returns DUO horizontal image flip value
"""
val = ct.c_bool()
_duolib.GetDUOHFlip( duo, ct.byref( val ) )
return val.value
_duolib.GetDUOVFlip.argtypes = [ DUOInstance,
ct.POINTER( ct.c_bool ) ]
_duolib.GetDUOVFlip.restype = ct.c_bool
def GetDUOVFlip( duo ):
"""
Returns DUO vertical image flip value
"""
val = ct.c_bool()
_duolib.GetDUOVFlip( duo, ct.byref( val ) )
return val.value
_duolib.GetDUOCameraSwap.argtypes = [ DUOInstance,
ct.POINTER( ct.c_bool ) ]
_duolib.GetDUOCameraSwap.restype = ct.c_bool
def GetDUOCameraSwap( duo ):
"""
Returns DUO camera swap value
"""
val = ct.c_bool()
_duolib.GetDUOCameraSwap( duo, ct.byref( val ) )
return val.value
_duolib.GetDUOLedPWM.argtypes = [ DUOInstance,
ct.POINTER( ct.c_double ) ]
_duolib.GetDUOLedPWM.restype = ct.c_bool
def GetDUOLedPWM( duo ):
"""
Returns DUO LED brightness in percentage [0,100]
"""
val = ct.c_double()
_duolib.GetDUOLedPWM( duo, ct.byref( val ) )
return val.value
_duolib.GetDUOCalibrationPresent.argtypes = [ DUOInstance,
ct.POINTER( ct.c_bool ) ]
_duolib.GetDUOCalibrationPresent.restype = ct.c_bool
def GetDUOCalibrationPresent( duo ):
"""
Returns DUO calibration present status value
"""
val = ct.c_bool()
_duolib.GetDUOCalibrationPresent( duo, ct.byref( val ) )
return val.value
_duolib.GetDUOFOV.argtypes = [ DUOInstance,
ct.POINTER( ct.c_double ) ]
_duolib.GetDUOFOV.restype = ct.c_bool
def GetDUOFOV( duo ):
"""
Returns DUO field of view for currently selected resolution.
"""
val = ( ct.c_double * 4 )()
_duolib.GetDUOFOV( duo, val )
return tuple( val )
_duolib.GetDUORectifiedFOV.argtypes = [ DUOInstance,
ct.POINTER( ct.c_double ) ]
_duolib.GetDUORectifiedFOV.restype = ct.c_bool
def GetDUORectifiedFOV( duo ):
"""
Returns DUO rectified field of view for currently selected resolution.
"""
val = ( ct.c_double * 4 )()
_duolib.GetDUORectifiedFOV( duo, val )
return tuple( val )
_duolib.GetDUOUndistort.argtypes = [ DUOInstance,
ct.POINTER( ct.c_bool ) ]
_duolib.GetDUOUndistort.restype = ct.c_bool
def GetDUOUndistort( duo ):
"""
Returns DUO image undistort value
"""
val = ct.c_bool()
_duolib.GetDUOUndistort( duo, ct.byref( val ) )
return val.value
_duolib.GetDUOIntrinsics.argtypes = [ DUOInstance,
ct.POINTER( DUO_INTR ) ]
_duolib.GetDUOIntrinsics.restype = ct.c_bool
def GetDUOIntrinsics( duo ):
"""
Returns DUO camera intrinsics parameters, see DUO_INTR structure
"""
val = DUO_INTR()
_duolib.GetDUOIntrinsics( duo, ct.byref( val ) )
return val
_duolib.GetDUOExtrinsics.argtypes = [ DUOInstance,
ct.POINTER( DUO_EXTR ) ]
_duolib.GetDUOExtrinsics.restype = ct.c_bool
def GetDUOExtrinsics( duo ):
"""
Returns DUO camera extrinsics parameters, see DUO_EXTR structure
"""
val = DUO_EXTR()
_duolib.GetDUOExtrinsics( duo, ct.byref( val ) )
return val
_duolib.GetDUOStereoParameters.argtypes = [ DUOInstance,
ct.POINTER( DUO_STEREO ) ]
_duolib.GetDUOStereoParameters.restype = ct.c_bool
def GetDUOStereoParameters( duo ):
"""
Returns DUO camera stereo parameters, see DUO_STEREO structure
"""
    val = DUO_STEREO()
    _duolib.GetDUOStereoParameters( duo, ct.byref( val ) )
    return val
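# Example (sketch, assumes numpy is available; it is not required by this
# module): the flat ctypes arrays in DUO_STEREO can be reshaped into matrices
# for use with other libraries.
#
#     import numpy as np
#     st = GetDUOStereoParameters( duo )
#     M1 = np.array( list( st.M1 ) ).reshape( 3, 3 )   # left camera matrix
#     P1 = np.array( list( st.P1 ) ).reshape( 3, 4 )   # rectified projection
#     Q  = np.array( list( st.Q ) ).reshape( 4, 4 )    # disparity-to-depth map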
_duolib.GetDUOIMURange.argtypes = [ DUOInstance,
ct.POINTER( ct.c_int ),
ct.POINTER( ct.c_int ) ]
_duolib.GetDUOIMURange.restype = ct.c_bool
def GetDUOIMURange( duo ):
"""
Returns DUO currently selected IMU range
@return: tuple(accel, gyro)
"""
accel = ct.c_int()
gyro = ct.c_int()
_duolib.GetDUOIMURange( duo, ct.byref( accel ), ct.byref( gyro ) )
return ( accel.value, gyro.value )
# Set DUO parameters
_duolib.SetDUOResolutionInfo.argtypes = [ DUOInstance, DUOResolutionInfo ]
_duolib.SetDUOResolutionInfo.restype = ct.c_bool
def SetDUOResolutionInfo( duo, res_info ):
"""
    Sets the current resolution for the DUO.
The DUOResolutionInfo is obtained by calling EnumerateDUOResolutions
with desired image size, binning and frame rate.
@return: True on success
"""
return _duolib.SetDUOResolutionInfo( duo, res_info )
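# Example (sketch): apply a resolution obtained from EnumerateDUOResolutions
# to an opened device (duo is assumed to be an opened DUOInstance).
#
#     ri = DUOResolutionInfo()
#     if EnumerateDUOResolutions( ri, 1, 752, 480, DUO_BIN_NONE, 30.0 ) > 0:
#         SetDUOResolutionInfo( duo, ri )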
_duolib.SetDUOExposure.argtypes = [ DUOInstance, ct.c_double ]
_duolib.SetDUOExposure.restype = ct.c_bool
def SetDUOExposure( duo, val ):
"""
Sets DUO exposure value in percentage [0,100]
@return: True on success
"""
return _duolib.SetDUOExposure( duo, ct.c_double( val ) )
_duolib.SetDUOExposureMS.argtypes = [ DUOInstance, ct.c_double ]
_duolib.SetDUOExposureMS.restype = ct.c_bool
def SetDUOExposureMS( duo, val ):
"""
Sets DUO exposure value in milliseconds
@return: True on success
"""
return _duolib.SetDUOExposureMS( duo, ct.c_double( val ) )
_duolib.SetDUOAutoExposure.argtypes = [ DUOInstance, ct.c_bool ]
_duolib.SetDUOAutoExposure.restype = ct.c_bool
def SetDUOAutoExposure( duo, val ):
"""
Sets DUO auto exposure value, default: false.
The target exposure value is set using SetDUOExposure.
@return: True on success
"""
return _duolib.SetDUOAutoExposure( duo, ct.c_bool( val ) )
_duolib.SetDUOGain.argtypes = [ DUOInstance, ct.c_double ]
_duolib.SetDUOGain.restype = ct.c_bool
def SetDUOGain( duo, val ):
"""
Sets DUO gain value in percentage [0,100], default: 0
@return: True on success
"""
return _duolib.SetDUOGain( duo, ct.c_double( val ) )
_duolib.SetDUOHFlip.argtypes = [ DUOInstance, ct.c_bool ]
_duolib.SetDUOHFlip.restype = ct.c_bool
def SetDUOHFlip( duo, val ):
"""
Sets DUO horizontal image flip value, default: false
@return: True on success
"""
return _duolib.SetDUOHFlip( duo, ct.c_bool( val ) )
_duolib.SetDUOVFlip.argtypes = [ DUOInstance, ct.c_bool ]
_duolib.SetDUOVFlip.restype = ct.c_bool
def SetDUOVFlip( duo, val ):
"""
Sets DUO vertical image flip value, default: false
@return: True on success
"""
return _duolib.SetDUOVFlip( duo, ct.c_bool( val ) )
_duolib.SetDUOCameraSwap.argtypes = [ DUOInstance, ct.c_bool ]
_duolib.SetDUOCameraSwap.restype = ct.c_bool
def SetDUOCameraSwap( duo, val ):
"""
Sets DUO camera swap value, default: false
@return: True on success
"""
return _duolib.SetDUOCameraSwap( duo, ct.c_bool( val ) )
_duolib.SetDUOLedPWM.argtypes = [ DUOInstance, ct.c_double ]
_duolib.SetDUOLedPWM.restype = ct.c_bool
def SetDUOLedPWM( duo, val ):
"""
Sets DUO LED brightness in percentage [0,100], default: 0
@return: True on success
"""
return _duolib.SetDUOLedPWM( duo, ct.c_double( val ) )
_duolib.SetDUOLedPWMSeq.argtypes = [ DUOInstance, PDUOLEDSeq, ct.c_uint32 ]
_duolib.SetDUOLedPWMSeq.restype = ct.c_bool
def SetDUOLedPWMSeq( duo, val, size ):
"""
Sets DUO LED sequence, see DUOLEDSeq, default: none
@param val: DUOLEDSeq array
@return: True on success
"""
return _duolib.SetDUOLedPWMSeq( duo, val, ct.c_uint32( size ) )
_duolib.SetDUOUndistort.argtypes = [ DUOInstance, ct.c_bool ]
_duolib.SetDUOUndistort.restype = ct.c_bool
def SetDUOUndistort( duo, val ):
"""
Sets DUO image undistort value, default: false
@return: True on success
"""
return _duolib.SetDUOUndistort( duo, ct.c_bool( val ) )
_duolib.SetDUOIMURange.argtypes = [ DUOInstance, ct.c_int, ct.c_int ]
_duolib.SetDUOIMURange.restype = ct.c_bool
def SetDUOIMURange( duo, accel, gyro ):
"""
Sets DUO IMU range, default: DUO_ACCEL_2G, DUO_GYRO_250
@return: True on success
"""
return _duolib.SetDUOIMURange( duo, ct.c_int( accel ), ct.c_int( gyro ) )
_duolib.SetDUOIMURate.argtypes = [ DUOInstance, ct.c_double ]
_duolib.SetDUOIMURate.restype = ct.c_bool
def SetDUOIMURate( duo, rate ):
"""
Sets DUO IMU sampling rate [50,500] Hz, default: 100Hz.
@return: True on success
"""
return _duolib.SetDUOIMURate( duo, ct.c_double( rate ) )
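# Minimal end-to-end sketch (not part of the original module): open a device,
# configure it, capture for a short while and shut down. Parameter values are
# illustrative only and require an attached DUO device and library to run.
if __name__ == '__main__':
    import time

    duo = DUOInstance()
    if OpenDUO( duo ):
        # Basic device information
        print( GetDUODeviceName( duo ) )
        print( GetDUOSerialNumber( duo ) )

        # Select a supported resolution before starting capture
        ri = DUOResolutionInfo()
        if EnumerateDUOResolutions( ri, 1, 752, 480, DUO_BIN_NONE, 30.0 ) > 0:
            SetDUOResolutionInfo( duo, ri )

        SetDUOExposure( duo, 50.0 )   # percent
        SetDUOGain( duo, 0.0 )        # percent
        SetDUOLedPWM( duo, 20.0 )     # percent

        def _on_frame( pFrame, pUserData ):
            # Called on the DUO capture thread; keep this short
            pass

        cb = DUOFrameCallback( _on_frame )  # keep a reference while capturing
        if StartDUO( duo, cb, None ):
            time.sleep( 2.0 )
            StopDUO( duo )
        CloseDUO( duo )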