code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
# Copyright (C) 2014, Ugo Pozo
# 2014, Câmara Municipal de São Paulo
# filters.py - definições de filtros padrão para o Anubis.
# Este arquivo é parte do software Anubis.
# Anubis é um software livre: você pode redistribuí-lo e/ou
# modificá-lo sob os termos da Licença Pública Geral GNU (GNU General Public
# License), tal como é publicada pela Free Software Foundation, na
# versão 3 da licença, ou (sua decisão) qualquer versão posterior.
# Anubis é distribuído na esperança de que seja útil, mas SEM NENHUMA
# GARANTIA; nem mesmo a garantia implícita de VALOR COMERCIAL ou ADEQUAÇÃO PARA
# UM PROPÓSITO EM PARTICULAR. Veja a Licença Pública Geral GNU para mais
# detalhes.
# Você deve ter recebido uma cópia da Licença Pública Geral GNU junto com este
# programa. Se não, consulte <http://www.gnu.org/licenses/>.
import operator
from functools import reduce
from django import forms
from django.db.models.query import Q
from anubis.forms import FilterForm, RangeForm
from anubis.query import ProcedureQuerySet
class Filter:
    """Base class for Anubis search filters.

    A filter owns an ordered collection of Django form fields and knows
    how to apply user-supplied arguments to a queryset.  Subclasses must
    implement ``filter_queryset``.  Most mutators return ``self`` so
    filters can be built fluently.
    """

    # Form class used to validate and render this filter's fields.
    base_form = FilterForm

    def __init__(self, identifier):
        self.identifier = identifier
        # Human-readable label; defaults to the identifier until
        # describe() is called.
        self.description = identifier
        self.fields = {}        # {generated field key: form field}
        self.field_keys = []    # generated keys, in declaration order

    def filter_queryset(self, queryset, args):
        """Apply this filter to ``queryset``; implemented by subclasses."""
        raise NotImplementedError()

    def identify(self, identifier):
        """Set the identifier and return ``self`` (fluent API)."""
        self.identifier = identifier
        return self

    def describe(self, description):
        """Set the human-readable description and return ``self``."""
        self.description = description
        return self

    def validate(self, args):
        """Validate positional ``args`` against the bound form.

        Returns the cleaned values in field order, or raises ``ValueError``
        carrying the flattened list of field errors.
        """
        form = self.bound_form(args)

        if form.is_valid():
            return [form.cleaned_data[key] for key in self.field_keys]

        flattened = []
        for bound_field in form:
            if bound_field.errors:
                # Later fields' errors are prepended, matching the
                # original reduce(lambda e, a: a + e, ...) accumulation.
                flattened = bound_field.errors + flattened

        error = ValueError(flattened)
        error.name = lambda: "Erro de validação"
        raise error

    def field(self, field_name, field_label=None, field_cls=forms.CharField,
              **field_kwargs):
        """Register a form field for this filter and return ``self``.

        The first field inherits the filter's description as its label
        when no explicit label is given.
        """
        if field_label is None and not self.field_keys:
            field_label = self.description

        key = "{}_{:04d}_{}".format(self.identifier,
                                    len(self.field_keys) + 1, field_name)

        # Unlike Django's default, fields are optional unless explicitly
        # marked required -- more natural in a search context.
        field_kwargs.setdefault("required", False)

        self.fields[key] = field_cls(label=field_label, **field_kwargs)
        self.field_keys.append(key)

        return self

    def _add_fields_to_form(self, form):
        # Install this filter's fields on the given form instance.
        for key in self.field_keys:
            form.fields[key] = self.fields[key]

    def process_form(self, form):
        """Attach this filter's fields to ``form`` and return it."""
        self._add_fields_to_form(form)
        return form

    @property
    def form(self):
        """An unbound ``base_form`` instance carrying this filter's fields."""
        return self.process_form(self.base_form())

    def bound_form(self, args):
        """A form bound to positional ``args`` matched to fields in order."""
        data = {key: args[index]
                for index, key in enumerate(self.field_keys)}
        return self.process_form(self.base_form(data))
class ConversionFilter(Filter):
    """Wraps another filter and translates its results to another queryset.

    Subclasses provide ``source_queryset`` (where the wrapped filter
    runs) and ``convert_queryset`` (how its results map back onto the
    target queryset).
    """

    def __init__(self, base_filter):
        assert isinstance(base_filter, Filter)
        self.base_filter = base_filter
        # Mirror the wrapped filter's public attributes so this object
        # behaves as a drop-in replacement for it.
        self.identifier = base_filter.identifier
        self.description = base_filter.description
        self.fields = base_filter.fields
        self.field_keys = base_filter.field_keys

    def filter_queryset(self, queryset, args):
        """Run the wrapped filter on its source, convert, and intersect."""
        filtered_source = self.base_filter.filter_queryset(
            self.source_queryset(), args)
        return queryset & self.convert_queryset(filtered_source)

    def source_queryset(self):
        """Return the queryset the wrapped filter should run against."""
        raise NotImplementedError()

    def convert_queryset(self, base_queryset):
        """Translate the wrapped filter's results to the target queryset."""
        raise NotImplementedError()
class ProcedureFilter(Filter):
    """Filter backed by a database stored procedure of the same name."""

    def __init__(self, procedure_name):
        super().__init__(procedure_name)
        # The procedure to call; doubles as the filter identifier.
        self.procedure_name = procedure_name

    def filter_queryset(self, queryset, args):
        """Delegate filtering to the stored procedure with ``args``."""
        assert isinstance(queryset, ProcedureQuerySet)
        return queryset.procedure(self.procedure_name, *args)
class ChoiceProcedureFilter(Filter):
    """Filter whose first argument selects which stored procedure to run."""

    def __init__(self, identifier, choices=None):
        super().__init__(identifier)
        if choices is not None:
            self.choices(choices)

    def choices(self, choices):
        """Declare the selectable procedures and add the choice field."""
        self.procedure_choices = choices
        self.field(self.identifier, field_cls=forms.ChoiceField,
                   choices=self.procedure_choices)
        return self

    def filter_queryset(self, queryset, args):
        """Run the procedure chosen by ``args[0]`` with the remaining args."""
        assert isinstance(queryset, ProcedureQuerySet)
        chosen, *procedure_args = args
        # Refuse anything outside the declared choice set.
        assert chosen in dict(self.procedure_choices)
        return queryset.procedure(chosen, *procedure_args)
class RangeProcedureFilter(ProcedureFilter):
    """Procedure filter whose fields may pair up into (start, end) ranges."""

    base_form = RangeForm

    def __init__(self, procedure_name):
        super().__init__(procedure_name)
        self.range_fields = []   # completed (start, end) field-key pairs
        self.range_buffer = []   # pending range members awaiting a partner

    def field(self, field_name, field_label=None, field_cls=forms.CharField,
              range_field=False, **field_kwargs):
        """Register a field; ``range_field=True`` makes it half of a pair."""
        super().field(field_name, field_label, field_cls, **field_kwargs)

        if range_field:
            self._add_range_field(self.field_keys[-1])

        return self

    def _add_range_field(self, range_field):
        # Buffer range members; every second one closes a (start, end) pair.
        self.range_buffer.append(range_field)

        if len(self.range_buffer) >= 2:
            start_key, end_key = self.range_buffer[:2]
            self.range_fields.append((start_key, end_key))
            self.range_buffer = []

    def process_form(self, form):
        """Attach fields and expose the range pairs on the form."""
        processed = super().process_form(form)
        processed.range_fields = self.range_fields
        return processed
class QuerySetFilter(Filter):
    """Filter matching one model field against each argument.

    Individual argument predicates are combined with ``connector``
    (logical OR by default).  ``suffix`` appends a Django lookup such as
    ``icontains`` to the field name.
    """

    def __init__(self, field_name, suffix="", connector=operator.or_):
        self.field_name = field_name
        super().__init__(field_name)
        self.connector = connector
        self.suffix = suffix

    def filter_queryset(self, queryset, args):
        """Filter rows whose field matches any (by default) of ``args``."""
        lookup = self.field_name
        if self.suffix:
            lookup = "{}__{}".format(lookup, self.suffix)

        predicates = (Q(**{lookup: value}) for value in args)
        return queryset.filter(reduce(self.connector, predicates))
class MultiQuerySetFilter(Filter):
    """Filter matching several model fields positionally against ``args``.

    Field/argument predicates are combined with ``connector`` (logical
    AND by default); a ``None`` argument means "no constraint" for the
    corresponding field.
    """

    def __init__(self, identifier, *fields_names, connector=operator.and_):
        self.fields_names = fields_names
        super().__init__(identifier)
        self.connector = connector

    def filter_queryset(self, queryset, args):
        """Filter by every non-None argument, paired to fields in order."""
        combined = Q()
        for lookup, value in zip(self.fields_names, args):
            if value is not None:
                combined = self.connector(combined, Q(**{lookup: value}))
        return queryset.filter(combined)
class FullTextFilter(Filter):
    """Filter using PostgreSQL full-text search on a single column.

    All arguments are joined into one space-separated query string and
    matched with ``to_tsvector``/``plainto_tsquery``; both sides pass
    through ``unaccent``, so the PostgreSQL ``unaccent`` extension must
    be installed.
    """

    def __init__(self, field_name):
        # Column searched; doubles as the filter identifier.
        self.field_name = field_name
        super().__init__(field_name)

    def filter_queryset(self, queryset, args):
        """Restrict ``queryset`` to rows whose column matches ``args``."""
        arg = " ".join(list(args))
        table = queryset.model._meta.db_table
        # Table/column identifiers are interpolated (they come from model
        # metadata, not user input); the search term itself is passed as a
        # bound parameter (%s), so the driver escapes it.
        query_part = """
            to_tsvector(unaccent("{table}"."{field}")) @@
            plainto_tsquery(unaccent(%s)) \
        """.format(table=table, field=self.field_name)
        filtered = queryset.extra(where=[query_part], params=[arg])
        return filtered
class TrigramFilter(Filter):
    """Filter using PostgreSQL trigram similarity on a single column.

    Requires the ``pg_trgm`` extension.  Each argument produces one
    similarity condition; conditions are combined with ``connector``
    (logical OR by default).
    """

    def __init__(self, field_name, connector=operator.or_):
        self.field_name = field_name
        super().__init__(field_name)
        self.connector = connector

    def filter_queryset(self, queryset, args):
        """Restrict ``queryset`` to rows similar to any of ``args``."""
        table = queryset.model._meta.db_table
        # "%%" escapes to a literal "%", pg_trgm's similarity operator;
        # the compared value is passed as a bound parameter (%s).
        condition = "\"{table}\".\"{field}\" %% %s" \
            .format(table=table, field=self.field_name)

        remaining = list(args)
        filtered = queryset.extra(where=[condition],
                                  params=[remaining.pop(0)])
        for value in remaining:
            filtered = self.connector(
                filtered,
                queryset.extra(where=[condition], params=[value]))
        return filtered
|
cmspsgp31/anubis
|
anubis/filters.py
|
Python
|
gpl-3.0
| 8,354
|
# OSError
# Demonstration of OSError (Python 2 syntax: `print` statement).
# Probe the first ten file descriptors and print the terminal device each
# is attached to; descriptors not attached to a terminal make
# os.ttyname() raise OSError, as shown in the captured output below.
import os

for i in range(10):
    print i, os.ttyname(i)

'''
0 /dev/ttys000
1
Traceback (most recent call last):
  File "exceptions_OSError.py", line 15, in <module>
    print i, os.ttyname(i)
OSError: [Errno 25] Inappropriate ioctl for device
'''
|
lmokto/allexceptions
|
exceptions_OSError.py
|
Python
|
mit
| 265
|
# Copyright (C) 2013-2015 MetaMorph Software, Inc
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# for file in ../src/*/ComponentConfig.h; do /c/python27/python gen_GME_interpreter_wxi.py "$file"; done
import sys
import os
# Cheetah template for one WiX <Fragment> registering a GME interpreter
# DLL: its COM typelib/coclass and the Software\GME\Components registry
# entries.  Placeholders ($COMPONENT_NAME, $TYPELIB_UUID, ...) are filled
# from the #define values parsed out of a ComponentConfig.h (see __main__
# below).  `#set` is a Cheetah directive, not a Python comment.
_template = '''<?xml version="1.0" encoding="utf-8"?>
#set $backslash = '\\\\'
<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
<Fragment>
<DirectoryRef Id="INSTALLDIR_BIN" />
</Fragment>
<Fragment>
<ComponentGroup Id="${COMPONENT_NAME}">
<Component Id="${COMPONENT_NAME}.dll" Directory="INSTALLDIR_BIN">
<File Source="\$(var.InterpreterBin)$backslash${COMPONENT_NAME}.dll">
<TypeLib Id="$TYPELIB_UUID" Description="$TYPELIB_NAME" Language="0" MajorVersion="1" MinorVersion="0">
<Class Id="{$COCLASS_UUID}" Context="InprocServer32" Description="$COCLASS_PROGID">
<ProgId Id="$COCLASS_PROGID" Description="$COCLASS_PROGID" />
</Class>
</TypeLib>
</File>
<RegistryKey Root='HKLM' Key='Software\GME\Components$backslash${COCLASS_PROGID}'>
<RegistryValue Name='Description' Type='string' Value='$COMPONENT_NAME'/>
<RegistryValue Name='Icon' Type='string' Value=',IDI_COMPICON'/>
<RegistryValue Name='Paradigm' Type='string' Value='$PARADIGMS'/>
<RegistryValue Name='Tooltip' Type='string' Value='$TOOLTIP_TEXT'/>
## FIXME: 1==Interpreter. Can we support Addons?
<RegistryValue Name='Type' Type='integer' Value='1'/>
<RegistryKey Key='Associated'>
<RegistryValue Name='$PARADIGMS' Type='string' Value=''/>
</RegistryKey>
</RegistryKey>
</Component>
</ComponentGroup>
</Fragment>
</Wix>
'''
if __name__ == '__main__':
    # Make the bundled Cheetah importable when UDM_3RDPARTY_PATH points at
    # the third-party tree.  Bug fix: os.environ.has_key() is Python-2-only
    # (removed in Python 3); the `in` operator works on both.
    if "UDM_3RDPARTY_PATH" in os.environ:
        sys.path.append(os.path.join(os.environ["UDM_3RDPARTY_PATH"],
                                     r"Cheetah-2.4.4\build\lib.win32-2.6"))
    from Cheetah.Template import Template
    import re

    # Parse `#define NAME value` lines from the ComponentConfig.h given as
    # argv[1] into {name: value}; these feed the template's search list.
    with open(sys.argv[1], 'r') as config:
        lines = config.readlines()
    defines = {}
    for line in filter(lambda line: line.find('define') != -1, lines):
        match = re.match(r"^.define\s+(\w+)\s+\"?([\w.,/ \(\)-]+)\"?\s*$", line)
        if match:
            defines[match.groups()[0]] = match.groups()[1]
        else:
            # Lines mentioning "define" that don't fit the pattern are
            # reported but otherwise ignored.
            sys.stderr.write("Warning: nonmatching line " + line + "\n")

    # Render the WiX fragment into <COMPONENT_NAME>.wxi in the current
    # directory.
    with open(defines['COMPONENT_NAME'] + ".wxi", 'wb') as output:
        output.write(str(Template(_template, searchList=(defines,))))
|
pombredanne/metamorphosys-desktop
|
metamorphosys/META/deploy/gen_GME_interpreter_wxi.py
|
Python
|
mit
| 5,245
|
"""
A pure Python/numpy implementation of the Steihaug-Toint
truncated preconditioned conjugate gradient algorithm as described in
T. Steihaug, *The conjugate gradient method and trust regions in large scale
optimization*, SIAM Journal on Numerical Analysis **20** (3), pp. 626-637,
1983.
.. moduleauthor:: D. Orban <dominique.orban@gerad.ca>
"""
from nlp.tools.utils import to_boundary
from nlp.tools.exceptions import UserExitRequest
import numpy as np
from math import sqrt
import logging
__docformat__ = 'restructuredtext'
class TruncatedCG(object):
    """Steihaug-Toint truncated preconditioned conjugate gradient solver."""

    def __init__(self, qp, **kwargs):
        """
        Solve the quadratic trust-region subproblem

            minimize    g's + 1/2 s'Hs
            subject to  s's <= radius

        by means of the truncated conjugate gradient algorithm (aka the
        Steihaug-Toint algorithm). The notation `x'y` denotes the dot
        product of vectors `x` and `y`.

        :parameters:
            :qp: an instance of the :class:`QPModel` class.  The Hessian H
                 must be a symmetric linear operator of appropriate size,
                 but not necessarily positive definite.
            :logger_name: name of a logger object that can be used during
                 the iterations (default: 'nlp.trcg').

        Upon return from :meth:`solve`, the following attributes are set:

            :step: final step,
            :niter: number of iterations,
            :step_norm: Euclidian norm of the step,
            :dir: direction of infinite descent (if radius=None and
                  H is not positive definite),
            :onBoundary: set to True if trust-region boundary was hit,
            :infDescent: set to True if a direction of infinite descent
                  was found.

        The algorithm stops as soon as the preconditioned norm of the
        gradient falls under

            max( abstol, reltol * g0 )

        where g0 is the preconditioned norm of the initial gradient (or
        the Euclidian norm if no preconditioner is given), or as soon as
        the iterates cross the boundary of the trust region.
        """
        self.qp = qp
        self.n = qp.c.shape[0]      # problem dimension
        self.prefix = 'Pcg: '
        self.name = 'Truncated CG'
        self.status = '?'
        self.onBoundary = False
        self.step = None
        self.step_norm = 0.0
        self.niter = 0
        self.dir = None             # direction of infinite descent, if any
        self.qval = None            # quadratic model value at the step
        self.pHp = None             # last curvature p'Hp encountered

        # Setup the logger. Install a NullHandler if no output needed.
        logger_name = kwargs.get('logger_name', 'nlp.trcg')
        self.log = logging.getLogger(logger_name)
        self.log.addHandler(logging.NullHandler())
        self.log.propagate = False

        # Formats for display
        self.hd_fmt = ' %-5s %9s %8s'
        self.header = self.hd_fmt % ('Iter', '<r,g>', 'curv')
        self.fmt0 = ' %-5d %9.2e'
        self.fmt = self.fmt0 + ' %8.2e'
        return

    def post_iteration(self, *args, **kwargs):
        """
        Subclass and override this method to implement custom post-iteration
        actions. This method will be called at the end of each CG iteration.
        """
        pass

    def solve(self, **kwargs):
        """
        Solve the trust-region subproblem.

        :keywords:
            :s0: initial guess (default: [0,0,...,0]),
            :radius: the trust-region radius (default: None),
            :abstol: absolute stopping tolerance (default: 1.0e-8),
            :reltol: relative stopping tolerance (default: 1.0e-6),
            :maxiter: maximum number of iterations (default: 2n),
            :prec: a user-defined preconditioner.
        """
        radius = kwargs.get('radius', None)
        # Bug fix: the keyword was previously read as 'absol', so a
        # user-supplied 'abstol' (as documented) was silently ignored.
        abstol = kwargs.get('abstol', 1.0e-8)
        reltol = kwargs.get('reltol', 1.0e-6)
        maxiter = kwargs.get('maxiter', 2 * self.n)
        prec = kwargs.get('prec', lambda v: v)

        qp = self.qp
        n = qp.n
        H = qp.H

        # Initialization.  snorm2 holds the *squared* Euclidian norm of s
        # throughout (cf. radius * radius and np.dot(s, s) below).
        if 's0' in kwargs:
            s = kwargs['s0']
            # Bug fix: was np.linalg.norm(s), i.e. the plain norm, which
            # made the boundary steplength computation wrong for s0 != 0.
            snorm2 = np.dot(s, s)
        else:
            s = np.zeros(n)
            snorm2 = 0.0

        # Quadratic model value at the current iterate.
        self.qval = qp.obj(s)

        # Initial residual r = g + Hs, preconditioned residual y.
        r = qp.grad(s)
        y = prec(r)
        ry = np.dot(r, y)
        sqrtry = sqrt(ry)

        stop_tol = max(abstol, reltol * sqrtry)

        k = 0
        exitOptimal = sqrtry <= stop_tol
        # Consistency fix: use >= as at the bottom of the loop, so that
        # maxiter=0 performs no iterations.
        exitIter = k >= maxiter
        exitUser = False

        p = -y                      # initial search direction
        onBoundary = False
        infDescent = False

        self.log.info(self.header)
        self.log.info('-' * len(self.header))

        while not (exitOptimal or exitIter or exitUser) and \
                not onBoundary and not infDescent:

            k += 1
            Hp = H * p
            pHp = np.dot(p, Hp)

            self.log.info(self.fmt % (k, ry, pHp))

            # Compute steplength to the boundary.
            if radius is not None:
                sigma = to_boundary(s, p, radius, xx=snorm2)

            if pHp <= 0 and radius is None:
                # p is direction of singularity or negative curvature.
                self.status = 'infinite descent'
                snorm2 = 0
                self.dir = p
                self.pHp = pHp
                infDescent = True
                continue

            # Compute CG steplength.
            alpha = ry / pHp if pHp != 0 else np.inf

            if radius is not None and (pHp <= 0 or alpha > sigma):
                # p leads past the trust-region boundary. Move to the boundary.
                s += sigma * p
                snorm2 = radius * radius
                self.status = 'trust-region boundary active'
                onBoundary = True
                continue

            # Update the quadratic model value along the accepted step.
            self.qval += alpha * np.dot(r, p) + 0.5 * alpha**2 * pHp
            self.ds = alpha * p
            self.dr = alpha * Hp

            # Move to next iterate.
            s += self.ds
            r += self.dr
            y = prec(r)
            ry_next = np.dot(r, y)
            beta = ry_next / ry
            p *= beta
            p -= y                  # p = -y + beta * p
            ry = ry_next

            # Transfer useful quantities for post iteration.
            self.pHp = pHp
            self.r = r
            self.y = y
            self.p = p
            self.step = s
            self.step_norm2 = snorm2
            self.ry = ry
            self.alpha = alpha
            self.beta = beta

            sqrtry = sqrt(ry)
            snorm2 = np.dot(s, s)

            try:
                self.post_iteration()
            except UserExitRequest:
                self.status = 'usr'
                exitUser = True

            exitIter = k >= maxiter
            exitOptimal = sqrtry <= stop_tol

        # Output info about the last iteration.
        if k > 0:
            self.log.info(self.fmt % (k, ry, pHp))
        else:
            self.log.info(self.fmt0 % (k, ry))

        if k >= maxiter:
            self.status = 'max iter'
        elif not onBoundary and not infDescent and not exitUser:
            self.status = 'residual small'

        self.log.info(self.status)

        self.step = s
        self.niter = k
        self.step_norm = sqrt(snorm2)
        self.onBoundary = onBoundary
        self.infDescent = infDescent

        return
|
PythonOptimizers/NLP.py
|
nlp/optimize/pcg.py
|
Python
|
lgpl-3.0
| 7,395
|
import json
import urllib2
import urllib
import webbrowser
from alp.settings import Settings
from feedback import Feedback
# Fallback SABnzbd web-UI address used when no host has been configured.
_DEFAULTHOST = "http://localhost:8080"
def set_APIKey(key):
    # Persist the SABnzbd API key (whitespace-stripped) in Alfred settings.
    Settings().set(apikey=key.strip())
    print "API key changed!"
def get_APIKey():
    # Return the stored SABnzbd API key (falsy when not configured).
    return Settings().get("apikey")
def set_host(url):
    # Persist the SABnzbd host URL, normalized without a trailing slash.
    Settings().set(host=url.strip().rstrip("/"))
    print "Host URL changed!"
def get_host():
    # Stored host URL, falling back to the localhost default.
    return Settings().get("host", _DEFAULTHOST)
def url(mode):
    # Build the SABnzbd JSON API URL for the given API mode.
    # NOTE(review): when no API key is set this only prints a message and
    # implicitly returns None, which makes get_data() fail on
    # urllib2.Request(None) -- confirm whether that is intended.
    if get_APIKey():
        return get_host() + "/sabnzbd/api?mode=" + mode + "&output=json&apikey=" + get_APIKey()
    else:
        print "API key is not defined"
def get_data(mode):
    # Fetch and JSON-decode one API response.  On connection failure the
    # script exits after printing, so Alfred shows the message instead of
    # a traceback.
    req = urllib2.Request(url(mode))
    try:
        res = urllib2.urlopen(req)
    except urllib2.URLError:
        print "Can't connect to SABnzbd"
        raise SystemExit()
    return json.loads(res.read())
def open_browser():
    # Open the SABnzbd web UI in the default browser.
    webbrowser.open(get_host())
def get_jobs():
    # List the current download queue as Alfred feedback items
    # (printed to stdout, where Alfred reads the feedback XML).
    data = get_data("qstatus")
    fb = Feedback()
    if len(data['jobs']) > 0:
        for job in data['jobs']:
            filename = job['filename']
            mb_left = str(job['mbleft'])
            mb_total = str(job['mb'])
            time_left = job['timeleft']
            subtitle_text = mb_left + " / " + mb_total + " | Time left: " + time_left
            fb.add_item(filename, subtitle_text)
    else:
        fb.add_item("No current jobs")
    print fb
def get_history():
    # List completed/failed downloads as Alfred feedback items.
    data = get_data("history")['history']
    fb = Feedback()
    if len(data['slots']) > 0:
        for slot in data['slots']:
            name = slot['name']
            size = slot['size']
            status = slot['status']
            fail_message = slot['fail_message']
            subtitle_text = size + " | " + status + " | " + fail_message
            fb.add_item(name, subtitle_text)
    else:
        fb.add_item("History is empty")
    print fb
def clear_history():
    # Delete all history entries; report only when the API confirms.
    data = get_data("history&name=delete&value=all")
    if data['status']:
        print "History cleared"
def set_max_speed(value):
    # Set the download speed limit; `value` is passed through verbatim to
    # SABnzbd's speedlimit config option.
    data = get_data("config&name=speedlimit&value=" + value)
    if data['status']:
        print "Max download speed changed"
def toggle_queue():
    # Pause the queue if it is running, resume it if it is paused.
    queue_data = get_data("qstatus")
    if not queue_data['paused']:
        if get_data("pause")['status']:
            print "Queue paused"
    else:
        if get_data("resume")['status']:
            print "Queue resuming"
def add_nzb(url):
    # Queue an NZB by URL (percent-quoted so it survives inside the
    # query string).
    data = get_data("addurl&name=" + urllib.quote(url))
    if data['status']:
        print "NZB added!"
    else:
        print "NZB failed to be added"
def get_version():
    # Print the SABnzbd server version.
    data = get_data("version")
    print "Version: " + data['version']
def restart():
    # Ask the SABnzbd server to restart itself.
    get_data("restart")
    print "SABnzbd is restarting"
def shutdown():
    # Ask the SABnzbd server to shut down.
    get_data("shutdown")
    print "SABnzbd is shutting down"
|
Fogh/SABnzbd-Alfred
|
source/sabAlfred.py
|
Python
|
unlicense
| 2,871
|
"""Support for RFXtrx binary sensors."""
import logging
import voluptuous as vol
from homeassistant.components import rfxtrx
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA, PLATFORM_SCHEMA, BinarySensorDevice)
from homeassistant.const import (
CONF_COMMAND_OFF, CONF_COMMAND_ON, CONF_DEVICE_CLASS, CONF_NAME)
from homeassistant.helpers import config_validation as cv, event as evt
from homeassistant.util import dt as dt_util, slugify
from . import (
ATTR_NAME, CONF_AUTOMATIC_ADD, CONF_DATA_BITS, CONF_DEVICES,
CONF_FIRE_EVENT, CONF_OFF_DELAY)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_DEVICES, default={}): {
cv.string: vol.Schema({
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
vol.Optional(CONF_OFF_DELAY):
vol.Any(cv.time_period, cv.positive_timedelta),
vol.Optional(CONF_DATA_BITS): cv.positive_int,
vol.Optional(CONF_COMMAND_ON): cv.byte,
vol.Optional(CONF_COMMAND_OFF): cv.byte,
})
},
vol.Optional(CONF_AUTOMATIC_ADD, default=False): cv.boolean,
}, extra=vol.ALLOW_EXTRA)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Binary Sensor platform to RFXtrx."""
    import RFXtrx as rfxtrxmod

    sensors = []
    # Create an entity for every device declared in the configuration.
    for packet_id, entity in config[CONF_DEVICES].items():
        event = rfxtrx.get_rfx_object(packet_id)
        device_id = slugify(event.device.id_string.lower())

        # Skip devices already registered (e.g. by another platform run).
        if device_id in rfxtrx.RFX_DEVICES:
            continue

        if entity.get(CONF_DATA_BITS) is not None:
            _LOGGER.debug(
                "Masked device id: %s", rfxtrx.get_pt2262_deviceid(
                    device_id, entity.get(CONF_DATA_BITS)))

        _LOGGER.debug("Add %s rfxtrx.binary_sensor (class %s)",
                      entity[ATTR_NAME], entity.get(CONF_DEVICE_CLASS))

        device = RfxtrxBinarySensor(
            event, entity.get(CONF_NAME), entity.get(CONF_DEVICE_CLASS),
            entity[CONF_FIRE_EVENT], entity.get(CONF_OFF_DELAY),
            entity.get(CONF_DATA_BITS), entity.get(CONF_COMMAND_ON),
            entity.get(CONF_COMMAND_OFF))
        device.hass = hass
        sensors.append(device)
        rfxtrx.RFX_DEVICES[device_id] = device

    add_entities(sensors)

    def binary_sensor_update(event):
        """Call for control updates from the RFXtrx gateway."""
        if not isinstance(event, rfxtrxmod.ControlEvent):
            return

        device_id = slugify(event.device.id_string.lower())

        if device_id in rfxtrx.RFX_DEVICES:
            sensor = rfxtrx.RFX_DEVICES[device_id]
        else:
            # Unknown id; try to resolve a masked (PT2262) device.
            sensor = rfxtrx.get_pt2262_device(device_id)

        if sensor is None:
            # Add the entity if not exists and automatic_add is True
            if not config[CONF_AUTOMATIC_ADD]:
                return

            if event.device.packettype == 0x13:
                # Lighting4/PT2262 packet: log any plausible masked match
                # to help the user configure data_bits.
                poss_dev = rfxtrx.find_possible_pt2262_device(device_id)
                if poss_dev is not None:
                    poss_id = slugify(poss_dev.event.device.id_string.lower())
                    _LOGGER.debug(
                        "Found possible matching device ID: %s", poss_id)

            # Auto-added entities are named after the raw packet bytes.
            pkt_id = "".join("{0:02x}".format(x) for x in event.data)
            sensor = RfxtrxBinarySensor(event, pkt_id)
            sensor.hass = hass
            rfxtrx.RFX_DEVICES[device_id] = sensor
            add_entities([sensor])
            _LOGGER.info(
                "Added binary sensor %s (Device ID: %s Class: %s Sub: %s)",
                pkt_id, slugify(event.device.id_string.lower()),
                event.device.__class__.__name__, event.device.subtype)
        elif not isinstance(sensor, RfxtrxBinarySensor):
            # The id resolved to some other entity type; not ours to update.
            return
        else:
            _LOGGER.debug(
                "Binary sensor update (Device ID: %s Class: %s Sub: %s)",
                slugify(event.device.id_string.lower()),
                event.device.__class__.__name__, event.device.subtype)

        if sensor.is_lighting4:
            if sensor.data_bits is not None:
                # Extract the command bits from the masked device id.
                cmd = rfxtrx.get_pt2262_cmd(device_id, sensor.data_bits)
                sensor.apply_cmd(int(cmd, 16))
            else:
                sensor.update_state(True)
        else:
            rfxtrx.apply_received_command(event)

        # Schedule an automatic "off" when an off_delay is configured and
        # no timer is already pending.
        if (sensor.is_on and sensor.off_delay is not None and
                sensor.delay_listener is None):

            def off_delay_listener(now):
                """Switch device off after a delay."""
                sensor.delay_listener = None
                sensor.update_state(False)

            sensor.delay_listener = evt.track_point_in_time(
                hass, off_delay_listener, dt_util.utcnow() + sensor.off_delay)

    # Subscribe to main RFXtrx events
    if binary_sensor_update not in rfxtrx.RECEIVED_EVT_SUBSCRIBERS:
        rfxtrx.RECEIVED_EVT_SUBSCRIBERS.append(binary_sensor_update)
class RfxtrxBinarySensor(BinarySensorDevice):
    """A representation of a RFXtrx binary sensor."""

    def __init__(self, event, name, device_class=None,
                 should_fire=False, off_delay=None, data_bits=None,
                 cmd_on=None, cmd_off=None):
        """Initialize the RFXtrx sensor."""
        self.event = event
        self.delay_listener = None
        # Lighting4 (PT2262) packets carry the command inside the id.
        self.is_lighting4 = event.device.packettype == 0x13
        self._name = name
        self._device_class = device_class
        self._should_fire_event = should_fire
        self._off_delay = off_delay
        self._data_bits = data_bits
        self._cmd_on = cmd_on
        self._cmd_off = cmd_off
        self._state = False
        if data_bits is None:
            self._masked_id = None
        else:
            self._masked_id = rfxtrx.get_pt2262_deviceid(
                event.device.id_string.lower(), data_bits)

    @property
    def name(self):
        """Return the device name."""
        return self._name

    @property
    def masked_id(self):
        """Return the masked device id (isolated address bits)."""
        return self._masked_id

    @property
    def data_bits(self):
        """Return the number of data bits."""
        return self._data_bits

    @property
    def cmd_on(self):
        """Return the value of the 'On' command."""
        return self._cmd_on

    @property
    def cmd_off(self):
        """Return the value of the 'Off' command."""
        return self._cmd_off

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def should_fire_event(self):
        """Return is the device must fire event."""
        return self._should_fire_event

    @property
    def device_class(self):
        """Return the sensor class."""
        return self._device_class

    @property
    def off_delay(self):
        """Return the off_delay attribute value."""
        return self._off_delay

    @property
    def is_on(self):
        """Return true if the sensor state is True."""
        return self._state

    def apply_cmd(self, cmd):
        """Apply a command for updating the state."""
        if cmd == self._cmd_on:
            self.update_state(True)
        elif cmd == self._cmd_off:
            self.update_state(False)

    def update_state(self, state):
        """Update the state of the device."""
        self._state = state
        self.schedule_update_ha_state()
|
molobrakos/home-assistant
|
homeassistant/components/rfxtrx/binary_sensor.py
|
Python
|
apache-2.0
| 7,619
|
'''
@author: Youyk
'''
import os
import tempfile
import uuid
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
# Shared helpers and VM bookkeeping for the woodpecker test framework.
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
# Scratch file used while copying ssh keys during the test.
# NOTE(review): uuid.uuid1().get_hex() is not a stdlib UUID method --
# presumably provided by this (Python 2) environment; stdlib equivalent
# would be uuid.uuid1().hex.  Confirm before porting.
tmp_file = '/tmp/%s' % uuid.uuid1().get_hex()
def test():
    # End-to-end scenario: install zstack all-in-one on an Ubuntu 14.04
    # VM, verify the installation, upgrade it in place, and verify again.
    test_util.test_dsc('Create test vm to test zstack upgrade by -u on ubuntu system.')
    image_name = os.environ.get('imageName_i_u14')
    vm = test_stub.create_vlan_vm(image_name)
    test_obj_dict.add_vm(vm)
    vm.check()

    vm_inv = vm.get_vm()
    vm_ip = vm_inv.vmNics[0].ip
    target_file = '/root/zstack-all-in-one.tgz'
    test_stub.prepare_test_env(vm_inv, target_file)
    # Non-interactive ssh: skip host-key checks, since test VMs are
    # throwaway and their keys change on every run.
    ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
    test_stub.copy_id_dsa(vm_inv, ssh_cmd, tmp_file)
    test_stub.copy_id_dsa_pub(vm_inv)

    # Fresh install + check, then upgrade + re-check.
    test_stub.execute_all_install(ssh_cmd, target_file, tmp_file)
    test_stub.check_installation(ssh_cmd, tmp_file, vm_inv)
    test_stub.upgrade_zstack(ssh_cmd, target_file, tmp_file)
    test_stub.check_installation(ssh_cmd, tmp_file, vm_inv)

    os.system('rm -f %s' % tmp_file)
    vm.destroy()
    test_util.test_pass('ZStack upgrade Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
    # Best-effort teardown: drop the scratch file and destroy any VMs
    # recorded in test_obj_dict.
    os.system('rm -f %s' % tmp_file)
    test_lib.lib_error_cleanup(test_obj_dict)
|
quarkonics/zstack-woodpecker
|
integrationtest/vm/installation/upgrade/test_zs_upgd_on_ub14.py
|
Python
|
apache-2.0
| 1,495
|
"""
This plugin adds a test id (like #1) to each test name output. After
you've run once to generate test ids, you can re-run individual
tests by activating the plugin and passing the ids (with or
without the # prefix) instead of test names.
For example, if your normal test run looks like::
% nosetests -v
tests.test_a ... ok
tests.test_b ... ok
tests.test_c ... ok
When adding ``--with-id`` you'll see::
% nosetests -v --with-id
#1 tests.test_a ... ok
#2 tests.test_b ... ok
#3 tests.test_c ... ok
Then you can re-run individual tests by supplying just an id number::
% nosetests -v --with-id 2
#2 tests.test_b ... ok
You can also pass multiple id numbers::
% nosetests -v --with-id 2 3
#2 tests.test_b ... ok
#3 tests.test_c ... ok
Since most shells consider '#' a special character, you can leave it out when
specifying a test id.
Note that when run without the -v switch, no special output is displayed, but
the ids file is still written.
Looping over failed tests
-------------------------
This plugin also adds a mode that will direct the test runner to record
failed tests. Subsequent test runs will then run only the tests that failed
last time. Activate this mode with the ``--failed`` switch::
% nosetests -v --failed
#1 test.test_a ... ok
#2 test.test_b ... ERROR
#3 test.test_c ... FAILED
#4 test.test_d ... ok
On the second run, only tests #2 and #3 will run::
% nosetests -v --failed
#2 test.test_b ... ERROR
#3 test.test_c ... FAILED
As you correct errors and tests pass, they'll drop out of subsequent runs.
First::
% nosetests -v --failed
#2 test.test_b ... ok
#3 test.test_c ... FAILED
Second::
% nosetests -v --failed
#3 test.test_c ... FAILED
When all tests pass, the full set will run on the next invocation.
First::
% nosetests -v --failed
#3 test.test_c ... ok
Second::
% nosetests -v --failed
#1 test.test_a ... ok
#2 test.test_b ... ok
#3 test.test_c ... ok
#4 test.test_d ... ok
.. note ::
If you expect to use ``--failed`` regularly, it's a good idea to always run
run using the ``--with-id`` option. This will ensure that an id file is
always created, allowing you to add ``--failed`` to the command line as soon
as you have failing tests. Otherwise, your first run using ``--failed`` will
(perhaps surprisingly) run *all* tests, because there won't be an id file
containing the record of failed tests from your previous run.
"""
__test__ = False
import logging
import os
from nose.plugins import Plugin
from nose.util import src, set
try:
from cPickle import dump, load
except ImportError:
from pickle import dump, load
log = logging.getLogger(__name__)
class TestId(Plugin):
"""
Activate to add a test id (like #1) to each test name output. Activate
with --failed to rerun failing tests only.
"""
name = 'id'
idfile = None
collecting = True
loopOnFailed = False
    def options(self, parser, env):
        """Register commandline options.
        """
        # Registers --with-id via the base class, plus this plugin's own
        # --id-file and --failed switches.
        Plugin.options(self, parser, env)
        parser.add_option('--id-file', action='store', dest='testIdFile',
                          default='.noseids', metavar="FILE",
                          help="Store test ids found in test runs in this "
                          "file. Default is the file .noseids in the "
                          "working directory.")
        parser.add_option('--failed', action='store_true',
                          dest='failed', default=False,
                          help="Run the tests that failed in the last "
                          "test run.")
    def configure(self, options, conf):
        """Configure plugin.
        """
        Plugin.configure(self, options, conf)
        if options.failed:
            # --failed implies the plugin is active even without --with-id.
            self.enabled = True
            self.loopOnFailed = True
            log.debug("Looping on failed tests")
        # Resolve the id file relative to the working directory.
        self.idfile = os.path.expanduser(options.testIdFile)
        if not os.path.isabs(self.idfile):
            self.idfile = os.path.join(conf.workingDir, self.idfile)
        self.id = 1
        # Ids and tests are mirror images: ids are {id: test address} and
        # tests are {test address: id}
        self.ids = {}
        self.tests = {}
        self.failed = []
        self.source_names = []
        # used to track ids seen when tests is filled from
        # loaded ids file
        self._seen = {}
        # Only emit the #N prefixes when running verbosely.
        self._write_hashes = conf.verbosity >= 2
def finalize(self, result):
"""Save new ids file, if needed.
"""
if result.wasSuccessful():
self.failed = []
if self.collecting:
ids = dict(list(zip(list(self.tests.values()), list(self.tests.keys()))))
else:
ids = self.ids
fh = open(self.idfile, 'wb')
dump({'ids': ids,
'failed': self.failed,
'source_names': self.source_names}, fh)
fh.close()
log.debug('Saved test ids: %s, failed %s to %s',
ids, self.failed, self.idfile)
def loadTestsFromNames(self, names, module=None):
"""Translate ids in the list of requested names into their
test addresses, if they are found in my dict of tests.
"""
log.debug('ltfn %s %s', names, module)
try:
fh = open(self.idfile, 'rb')
data = load(fh)
if 'ids' in data:
self.ids = data['ids']
self.failed = data['failed']
self.source_names = data['source_names']
else:
# old ids field
self.ids = data
self.failed = []
self.source_names = names
if self.ids:
self.id = max(self.ids) + 1
self.tests = dict(list(zip(list(self.ids.values()), list(self.ids.keys()))))
else:
self.id = 1
log.debug(
'Loaded test ids %s tests %s failed %s sources %s from %s',
self.ids, self.tests, self.failed, self.source_names,
self.idfile)
fh.close()
except IOError:
log.debug('IO error reading %s', self.idfile)
if self.loopOnFailed and self.failed:
self.collecting = False
names = self.failed
self.failed = []
# I don't load any tests myself, only translate names like '#2'
# into the associated test addresses
translated = []
new_source = []
really_new = []
for name in names:
trans = self.tr(name)
if trans != name:
translated.append(trans)
else:
new_source.append(name)
# names that are not ids and that are not in the current
# list of source names go into the list for next time
if new_source:
new_set = set(new_source)
old_set = set(self.source_names)
log.debug("old: %s new: %s", old_set, new_set)
really_new = [s for s in new_source
if not s in old_set]
if really_new:
# remember new sources
self.source_names.extend(really_new)
if not translated:
# new set of source names, no translations
# means "run the requested tests"
names = new_source
else:
# no new names to translate and add to id set
self.collecting = False
log.debug("translated: %s new sources %s names %s",
translated, really_new, names)
return (None, translated + really_new or names)
def makeName(self, addr):
log.debug("Make name %s", addr)
filename, module, call = addr
if filename is not None:
head = src(filename)
else:
head = module
if call is not None:
return "%s:%s" % (head, call)
return head
def setOutputStream(self, stream):
"""Get handle on output stream so the plugin can print id #s
"""
self.stream = stream
def startTest(self, test):
"""Maybe output an id # before the test name.
Example output::
#1 test.test ... ok
#2 test.test_two ... ok
"""
adr = test.address()
log.debug('start test %s (%s)', adr, adr in self.tests)
if adr in self.tests:
if adr in self._seen:
self.write(' ')
else:
self.write('#%s ' % self.tests[adr])
self._seen[adr] = 1
return
self.tests[adr] = self.id
self.write('#%s ' % self.id)
self.id += 1
def afterTest(self, test):
# None means test never ran, False means failed/err
if test.passed is False:
try:
key = str(self.tests[test.address()])
except KeyError:
# never saw this test -- startTest didn't run
pass
else:
if key not in self.failed:
self.failed.append(key)
def tr(self, name):
log.debug("tr '%s'", name)
try:
key = int(name.replace('#', ''))
except ValueError:
return name
log.debug("Got key %s", key)
# I'm running tests mapped from the ids file,
# not collecting new ones
if key in self.ids:
return self.makeName(self.ids[key])
return name
def write(self, output):
if self._write_hashes:
self.stream.write(output)
|
ktan2020/legacy-automation
|
win/Lib/site-packages/nose-1.2.1-py2.7.egg/nose/plugins/testid.py
|
Python
|
mit
| 9,641
|
from itertools import islice

# Advent of Code 2016, day 3 part 2: triangles are specified by COLUMN,
# three rows at a time.  Count how many column-triples are valid triangles.
with open('day3_input.txt') as triangles_file:
    possible_triangles = 0
    # clever idiomatic use of zip from stackoverflow/python docs:
    # https://stackoverflow.com/questions/6890065/
    # zipping the same iterator three times yields 3-line groups
    for row_group in list(zip(*[iter(triangles_file)] * 3)):
        # transpose the 3x3 block: column i across the rows is one triangle
        columns = [[], [], []]
        for row in row_group:
            numbers = (int(token.strip()) for token in row.split())
            for index, side in enumerate(islice(numbers, 3)):
                columns[index].append(side)
        for candidate in columns:
            # valid iff the longest side is shorter than the other two combined
            longest = candidate.pop(candidate.index(max(candidate)))
            if longest < sum(candidate):
                possible_triangles += 1
    print(possible_triangles)
|
twrightsman/advent-of-code-2016
|
advent2016_day3_pt2.py
|
Python
|
unlicense
| 805
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools import matchers
from toscaparser.common import exception
from toscaparser.elements.property_definition import PropertyDef
from toscaparser.nodetemplate import NodeTemplate
from toscaparser.properties import Property
from toscaparser.tests.base import TestCase
from toscaparser.utils.gettextutils import _
from toscaparser.utils import yamlparser
class PropertyTest(TestCase):
    """Unit tests for toscaparser Property/PropertyDef parsing and
    validation: type checking, entry schemas, inheritance and error
    reporting.
    """
    def test_type(self):
        test_property_schema = {'type': 'string'}
        propertyInstance = Property('test_property', 'Hughes',
                                    test_property_schema)
        self.assertEqual('string', propertyInstance.type)
    def test_type_invalid(self):
        # 'Fish' is not a TOSCA type; validate() must reject it
        test_property_schema = {'type': 'Fish'}
        propertyInstance = Property('test_property', 'Hughes',
                                    test_property_schema)
        error = self.assertRaises(exception.InvalidTypeError,
                                  propertyInstance.validate)
        self.assertEqual(_('Type "Fish" is not a valid type.'), str(error))
    def test_list(self):
        test_property_schema = {'type': 'list'}
        propertyInstance = Property('test_property', ['a', 'b'],
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual(['a', 'b'], propertyInstance.value)
    def test_list_invalid(self):
        test_property_schema = {'type': 'list'}
        propertyInstance = Property('test_property', 'a',
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual(_('"a" is not a list.'), str(error))
    def test_list_entry_schema(self):
        # entry_schema constrains the type of each list element
        test_property_schema = {'type': 'list',
                                'entry_schema': {'type': 'string'}}
        propertyInstance = Property('test_property', ['a', 'b'],
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual(['a', 'b'], propertyInstance.value)
        schema_snippet = '''
        type: list
        entry_schema:
          type: string
          constraints:
            - min_length: 2
        '''
        test_property_schema = yamlparser.simple_parse(schema_snippet)
        propertyInstance = Property('test_property', ['ab', 'cd'],
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual(['ab', 'cd'], propertyInstance.value)
    def test_list_entry_schema_invalid(self):
        test_property_schema = {'type': 'list',
                                'entry_schema': {'type': 'integer'}}
        propertyInstance = Property('test_property', [1, 'b'],
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual(_('"b" is not an integer.'), str(error))
    def test_map(self):
        test_property_schema = {'type': 'map'}
        propertyInstance = Property('test_property', {'a': 'b'},
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual({'a': 'b'}, propertyInstance.value)
    def test_map_invalid(self):
        test_property_schema = {'type': 'map'}
        propertyInstance = Property('test_property', 12,
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual(_('"12" is not a map.'), str(error))
    def test_map_entry_schema(self):
        # entry_schema constrains the type of each map value
        test_property_schema = {'type': 'map',
                                'entry_schema': {'type': 'boolean'}}
        propertyInstance = Property('test_property',
                                    {'valid': True, 'required': True},
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual({'valid': True, 'required': True},
                         propertyInstance.value)
    def test_map_entry_schema_invalid(self):
        test_property_schema = {'type': 'map',
                                'entry_schema': {'type': 'boolean'}}
        propertyInstance = Property('test_property',
                                    {'valid': True, 'contact_name': 123},
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual(_('"123" is not a boolean.'), str(error))
    def test_boolean(self):
        # both the string 'true' and the bool True validate as boolean
        test_property_schema = {'type': 'boolean'}
        propertyInstance = Property('test_property', 'true',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        propertyInstance = Property('test_property', True,
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual(True, propertyInstance.value)
    def test_boolean_invalid(self):
        test_property_schema = {'type': 'boolean'}
        propertyInstance = Property('test_property', 12,
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual(_('"12" is not a boolean.'), str(error))
    def test_float(self):
        test_property_schema = {'type': 'float'}
        propertyInstance = Property('test_property', 0.1,
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual(0.1, propertyInstance.value)
    def test_float_invalid(self):
        test_property_schema = {'type': 'float'}
        propertyInstance = Property('test_property', 12,
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual(_('"12" is not a float.'), str(error))
    def test_timestamp(self):
        # several accepted timestamp notations; value is kept verbatim
        test_property_schema = {'type': 'timestamp'}
        # canonical timestamp
        propertyInstance = Property('test_property', '2015-04-01T02:59:43.1Z',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual("2015-04-01T02:59:43.1Z", propertyInstance.value)
        # iso8601 timestamp
        propertyInstance = Property('test_property',
                                    '2015-04-01t21:59:43.10-05:00',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual("2015-04-01t21:59:43.10-05:00",
                         propertyInstance.value)
        # space separated timestamp
        propertyInstance = Property('test_property',
                                    '2015-04-01 21:59:43.10 -5',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual("2015-04-01 21:59:43.10 -5", propertyInstance.value)
        # no time zone timestamp
        propertyInstance = Property('test_property', '2015-04-01 21:59:43.10',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual("2015-04-01 21:59:43.10", propertyInstance.value)
        # date (00:00:00Z)
        propertyInstance = Property('test_property', '2015-04-01',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual("2015-04-01", propertyInstance.value)
    def test_timestamp_invalid(self):
        test_property_schema = {'type': 'timestamp'}
        # invalid timestamp - day out of range
        value = '2015-04-115T02:59:43.1Z'
        propertyInstance = Property('test_property', value,
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        expected_message = (_('"%s" is not a valid timestamp.') % value)
        # only the prefix is checked; the parser appends its own details
        self.assertThat(str(error), matchers.StartsWith(expected_message))
    def test_required(self):
        # properties are required by default when 'required' is omitted
        test_property_schema = {'type': 'string'}
        propertyInstance = Property('test_property', 'Foo',
                                    test_property_schema)
        self.assertEqual(True, propertyInstance.required)
    def test_proprety_inheritance(self):
        # derived node type inherits properties from its parent type
        tosca_custom_def = '''
        tosca.nodes.SoftwareComponent.MySoftware:
          derived_from: SoftwareComponent
          properties:
            install_path:
              required: false
              type: string
              default: /opt/mysoftware
        '''
        tosca_node_template = '''
        node_templates:
          mysoftware_instance:
            type: tosca.nodes.SoftwareComponent.MySoftware
            properties:
              component_version: 3.1
        '''
        expected_properties = ['component_version',
                               'install_path']
        tpl = self._get_nodetemplate(tosca_node_template, tosca_custom_def)
        self.assertIsNone(tpl.validate())
        self.assertEqual(expected_properties,
                         sorted(tpl.get_properties().keys()))
    def test_missing_property_type(self):
        # 'typo' instead of 'type': schema must be rejected
        tpl_snippet = '''
        properties:
          prop:
            typo: tosca.mytesttype.Test
        '''
        schema = yamlparser.simple_parse(tpl_snippet)
        error = self.assertRaises(exception.InvalidSchemaError, PropertyDef,
                                  'prop', None, schema['properties']['prop'])
        self.assertEqual(_('Schema definition of "prop" must have a "type" '
                           'attribute.'), str(error))
    def test_invalid_required_value(self):
        tpl_snippet = '''
        properties:
          prop:
            type: tosca.mytesttype.Test
            required: dunno
        '''
        schema = yamlparser.simple_parse(tpl_snippet)
        error = self.assertRaises(exception.InvalidSchemaError, PropertyDef,
                                  'prop', None, schema['properties']['prop'])
        valid_values = ', '.join(PropertyDef.VALID_REQUIRED_VALUES)
        expected_message = (_('Schema definition of "prop" has "required" '
                              'attribute with invalid value "dunno". The '
                              'value must be one of "%s".') % valid_values)
        self.assertEqual(expected_message, str(error))
    def test_invalid_property_status(self):
        tpl_snippet = '''
        properties:
          prop:
            type: string
            status: unknown
        '''
        schema = yamlparser.simple_parse(tpl_snippet)
        error = self.assertRaises(exception.InvalidSchemaError, PropertyDef,
                                  'prop', None, schema['properties']['prop'])
        valid_values = ', '.join(PropertyDef.VALID_STATUS_VALUES)
        expected_message = (_('Schema definition of "prop" has "status" '
                              'attribute with invalid value "unknown". The '
                              'value must be one of "%s".') % valid_values)
        self.assertEqual(expected_message, str(error))
    def test_capability_proprety_inheritance(self):
        # capability properties are inherited through derived capability
        # types (example1) and through derived node types (example2)
        tosca_custom_def_example1 = '''
        tosca.capabilities.ScalableNew:
          derived_from: tosca.capabilities.Scalable
          properties:
            max_instances:
              type: integer
              default: 0
              required: no
        tosca.nodes.ComputeNew:
          derived_from: tosca.nodes.Compute
          capabilities:
            scalable:
              type: tosca.capabilities.ScalableNew
        '''
        tosca_node_template_example1 = '''
        node_templates:
          compute_instance:
            type: tosca.nodes.ComputeNew
            capabilities:
              scalable:
                properties:
                  min_instances: 1
        '''
        tosca_custom_def_example2 = '''
        tosca.nodes.ComputeNew:
          derived_from: tosca.nodes.Compute
          capabilities:
            new_cap:
              type: tosca.capabilities.Scalable
        '''
        tosca_node_template_example2 = '''
        node_templates:
          db_server:
            type: tosca.nodes.ComputeNew
            capabilities:
              host:
                properties:
                  num_cpus: 1
        '''
        tpl1 = self._get_nodetemplate(tosca_node_template_example1,
                                      tosca_custom_def_example1)
        self.assertIsNone(tpl1.validate())
        tpl2 = self._get_nodetemplate(tosca_node_template_example2,
                                      tosca_custom_def_example2)
        self.assertIsNone(tpl2.validate())
    def _get_nodetemplate(self, tpl_snippet,
                          custom_def_snippet=None):
        # helper: parse a YAML snippet into the first NodeTemplate it defines
        nodetemplates = yamlparser.\
            simple_parse(tpl_snippet)['node_templates']
        custom_def = []
        if custom_def_snippet:
            custom_def = yamlparser.simple_parse(custom_def_snippet)
        name = list(nodetemplates.keys())[0]
        tpl = NodeTemplate(name, nodetemplates, custom_def)
        return tpl
    def test_explicit_relationship_proprety(self):
        # properties set on an explicit relationship template are exposed
        # via the relationship's template object
        tosca_node_template = '''
        node_templates:
          client_node:
            type: tosca.nodes.Compute
            requirements:
              - local_storage:
                  node: my_storage
                  relationship:
                    type: AttachesTo
                    properties:
                      location: /mnt/disk
          my_storage:
            type: tosca.nodes.BlockStorage
            properties:
              size: 1 GB
        '''
        expected_properties = ['location']
        nodetemplates = yamlparser.\
            simple_parse(tosca_node_template)['node_templates']
        tpl = NodeTemplate('client_node', nodetemplates, [])
        self.assertIsNone(tpl.validate())
        rel_tpls = []
        for relationship, trgt in tpl.relationships.items():
            rel_tpls.extend(trgt.get_relationship_template())
        self.assertEqual(expected_properties,
                         sorted(rel_tpls[0].get_properties().keys()))
|
openstack/tosca-parser
|
toscaparser/tests/test_properties.py
|
Python
|
apache-2.0
| 15,137
|
from flask import Flask
from flask.views import MethodView
# WSGI application object for this example module.
app = Flask(__name__)
class HelloView(MethodView):
    """Class-based view answering GET and POST on the index route."""

    def get(self):
        return ("Hello World, with GET, "
                "from a class-based view!")

    def post(self):
        return ("Hello World, with POST, "
                "from a class-based view!")
# Register the class-based view on "/" for the two verbs it implements.
app.add_url_rule('/',
                 view_func=HelloView.as_view('hello'),
                 methods=["GET", "POST"])
|
rafaelmartins/flask-pybr9
|
pybr9/examples/ex3.py
|
Python
|
bsd-3-clause
| 415
|
class ParsedResource(object):
    """Base class for parsed resources as returned by parse.

    A parser for each supported format should return an instance of a
    subclass of this class.
    """

    @property
    def translations(self):
        """Return the VCSTranslation instances (or subclasses) that
        represent the translations contained in this resource.
        """
        raise NotImplementedError()

    def save(self, locale):
        """Write any changes made to the VCSTranslation objects from
        ``self.translations`` back to the originally parsed resource file.

        :param Locale locale:
            Locale of the file being saved.
        """
        raise NotImplementedError()
|
participedia/pontoon
|
pontoon/sync/formats/base.py
|
Python
|
bsd-3-clause
| 736
|
# Verilog source files of the dsi_core module (build-system manifest list;
# presumably consumed by hdlmake -- confirm against the build flow).
files = ["dphy_lane.v",
         "dphy_serdes.v",
         "dsi_core.v",
         "dsi_packer.v",
         "dsi_packet_assembler.v",
         "dsi_timing_gen.v",
         "dsi_utils.v"]
|
twlostow/dsi-shield
|
hdl/rtl/dsi_core/Manifest.py
|
Python
|
lgpl-3.0
| 186
|
import intrepyd as ip
from intrepyd.engine import EngineResult
import A7E_requirements
import time
import sys
# Property 3:
#
# If the system is in WpnDel modes BOC or SBOC,
# then NavUpd is in AflyUpd
#
#
# In formula:
#
# F := (WpnDel=BOC \/ WpnDel=SBOC) -> NavUpd=AflyUpd
#
#
# Reachability query: !F
#
# !F <->
# (WpnDel=BOC \/ WpnDel=SBOC) /\ NavUpd!=AflyUpd <->
# (WpnDel=BOC /\ NavUpd!=AflyUpd) \/ (WpnDel=SBOC /\ NavUpd!=AflyUpd)
def mk_negation_of_property_3(ctx, inst):
    # Build the reachability target !F for property 3 (see comment above):
    # (WpnDel=BOC /\ NavUpd!=AflyUpd) \/ (WpnDel=SBOC /\ NavUpd!=AflyUpd)
    navigationUpdateMode = inst.nets['A7E_requirements/NU/Mode']
    weaponDeliveryMode = inst.nets['A7E_requirements/WD/Mode']
    aflyUpdate = inst.nets['A7E_requirements/NU/AflyUpd']
    boc = inst.nets['A7E_requirements/WD/BOC']
    sboc = inst.nets['A7E_requirements/WD/SBOC']
    navAfly = ctx.mk_neq(navigationUpdateMode, aflyUpdate)
    wpnBoc = ctx.mk_eq(weaponDeliveryMode, boc)
    wpnSboc = ctx.mk_eq(weaponDeliveryMode, sboc)
    # NavUpd!=AflyUpd /\ WpnDel=BOC (1)
    target1 = ctx.mk_and(navAfly, wpnBoc)
    # NavUpd!=AflyUpd /\ WpnDel=SBOC (2)
    target2 = ctx.mk_and(navAfly, wpnSboc)
    # (1) \/ (2)
    target = ctx.mk_or(target1, target2)
    return target
# Converse of Property 3:
#
# If the system is in NavUpd mode AflyUpd,
# then WpnDel is in either BOC or SBOC
#
#
# In formula:
#
# CF := (WpnDel=BOC \/ WpnDel=SBOC) <- NavUpd=AflyUpd
#
#
# Reachability query: !CF
#
# !CF <->
# !(WpnDel=BOC \/ WpnDel=SBOC) /\ NavUpd=AflyUpd <->
# WpnDel!=BOC /\ WpnDel!=SBOC /\ NavUpd=AflyUpd
def mk_negation_of_converse_of_property_3(ctx, inst):
    # Build the reachability target !CF for the converse of property 3:
    # WpnDel!=BOC /\ WpnDel!=SBOC /\ NavUpd=AflyUpd
    navigationUpdateMode = inst.nets['A7E_requirements/NU/Mode']
    weaponDeliveryMode = inst.nets['A7E_requirements/WD/Mode']
    aflyUpdate = inst.nets['A7E_requirements/NU/AflyUpd']
    boc = inst.nets['A7E_requirements/WD/BOC']
    sboc = inst.nets['A7E_requirements/WD/SBOC']
    navAfly = ctx.mk_eq(navigationUpdateMode, aflyUpdate)
    wpnBoc = ctx.mk_neq(weaponDeliveryMode, boc)
    wpnSboc = ctx.mk_neq(weaponDeliveryMode, sboc)
    # NavUpd=AflyUpd /\ WpnDel!=BOC (1)
    tmp = ctx.mk_and(navAfly, wpnBoc)
    # (1) /\ WpnDel!=SBOC
    target = ctx.mk_and(tmp, wpnSboc)
    return target
if __name__ == "__main__":
    # Build the A7E requirements model, then check both negated targets
    # with backward reachability: the property (and its converse) hold
    # iff the targets are unreachable.
    startTime = time.time()
    ctx = ip.Context()
    inst = A7E_requirements.SimulinkCircuit(ctx, 'A7E')
    inst.mk_circuit()
    wdm = inst.nets['A7E_requirements/WD/Mode']
    num = inst.nets['A7E_requirements/NU/Mode']
    # time spent parsing/building the circuit only
    prsTime = time.time() - startTime
    negProp3 = mk_negation_of_property_3(ctx, inst)
    convnegProg3 = mk_negation_of_converse_of_property_3(ctx, inst)
    br = ctx.mk_backward_reach()
    br.add_target(negProp3)
    br.add_target(convnegProg3)
    result = br.reach_targets()
    print "Unreachable?", result == EngineResult.UNREACHABLE
    endTime = time.time() - startTime
    print 'Parse time:', prsTime
    print 'Total time:', endTime
|
formalmethods/intrepyd
|
examples/A7E_requirements/A7E_requirements_verification.py
|
Python
|
bsd-3-clause
| 2,986
|
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""MPEG audio stream information and tags."""
import struct
from mutagen import StreamInfo
from mutagen._util import MutagenError, enum, BitReader, BitReaderError, \
convert_error, intround, endswith
from mutagen.id3 import ID3FileType, delete
from mutagen.id3._util import BitPaddedInt
from ._util import XingHeader, XingHeaderError, VBRIHeader, VBRIHeaderError
# Public names exported on star-import; "MP3" was accidentally listed twice.
__all__ = ["MP3", "Open", "delete"]
class error(MutagenError):
    """Base class for all MP3-specific errors in this module."""
    pass
class HeaderNotFoundError(error):
    """Raised when no valid MPEG frame header can be located."""
    pass
class InvalidMPEGHeader(error):
    """Raised for a malformed MPEG header."""
    pass
@enum
class BitrateMode(object):
    """How the stream's bitrate was encoded (guessed from VBR headers)."""
    UNKNOWN = 0
    """Probably a CBR file, but not sure"""
    CBR = 1
    """Constant Bitrate"""
    VBR = 2
    """Variable Bitrate"""
    ABR = 3
    """Average Bitrate (a variant of VBR)"""
def _guess_xing_bitrate_mode(xing):
    """Derive a BitrateMode from a parsed Xing/Info header."""
    lame = xing.lame_header
    if lame:
        # LAME records its VBR method in the tag; map the known values.
        method = lame.vbr_method
        if method in (1, 8):
            return BitrateMode.CBR
        if method in (2, 9):
            return BitrateMode.ABR
        if method in (3, 4, 5, 6):
            return BitrateMode.VBR
        # everything else undefined, continue guessing
    # info tags get only written by lame for cbr files
    if xing.is_info:
        return BitrateMode.CBR
    # older lame and non-lame with some variant of vbr
    if xing.vbr_scale != -1 or xing.lame_version_desc:
        return BitrateMode.VBR
    return BitrateMode.UNKNOWN
# Mode values (0-3), as read from the channel-mode bits of a frame header;
# used by MPEGFrame.mode / MPEGInfo.mode.
STEREO, JOINTSTEREO, DUALCHANNEL, MONO = range(4)
class MPEGFrame(object):
    """One MPEG audio frame parsed from a file object.

    Reads the 4-byte frame header at the current position, derives the
    stream parameters from it and, for layer 3 frames, also looks for a
    Xing/LAME or VBRI header which overrides the computed length/bitrate.
    On return the file position is advanced to the next frame.
    """
    # Map (version, layer) tuples to bitrates.
    __BITRATE = {
        (1., 1): [0, 32, 64, 96, 128, 160, 192, 224,
                  256, 288, 320, 352, 384, 416, 448],
        (1., 2): [0, 32, 48, 56, 64, 80, 96, 112, 128,
                  160, 192, 224, 256, 320, 384],
        (1., 3): [0, 32, 40, 48, 56, 64, 80, 96, 112,
                  128, 160, 192, 224, 256, 320],
        (2., 1): [0, 32, 48, 56, 64, 80, 96, 112, 128,
                  144, 160, 176, 192, 224, 256],
        (2., 2): [0, 8, 16, 24, 32, 40, 48, 56, 64,
                  80, 96, 112, 128, 144, 160],
    }
    # MPEG-2 layer 3 shares the layer 2 table; MPEG-2.5 reuses MPEG-2 tables
    __BITRATE[(2, 3)] = __BITRATE[(2, 2)]
    for i in range(1, 4):
        __BITRATE[(2.5, i)] = __BITRATE[(2, i)]
    # Map version to sample rates.
    __RATES = {
        1: [44100, 48000, 32000],
        2: [22050, 24000, 16000],
        2.5: [11025, 12000, 8000]
    }
    # True while the frame has not been confirmed by a valid VBR header
    sketchy = False
    def __init__(self, fileobj):
        """Raises HeaderNotFoundError"""
        self.frame_offset = fileobj.tell()
        r = BitReader(fileobj)
        try:
            # 32-bit frame header: 11 sync bits then the field layout below
            if r.bits(11) != 0x7ff:
                raise HeaderNotFoundError("invalid sync")
            version = r.bits(2)
            layer = r.bits(2)
            protection = r.bits(1)
            bitrate = r.bits(4)
            sample_rate = r.bits(2)
            padding = r.bits(1)
            r.skip(1)  # private
            self.mode = r.bits(2)
            r.skip(6)
        except BitReaderError:
            raise HeaderNotFoundError("truncated header")
        assert r.get_position() == 32 and r.is_aligned()
        # try to be strict here to reduce the chance of a false positive
        if version == 1 or layer == 0 or sample_rate == 0x3 or \
                bitrate == 0xf or bitrate == 0:
            raise HeaderNotFoundError("invalid header")
        self.channels = 1 if self.mode == MONO else 2
        # decode the raw field values into their real meanings
        self.version = [2.5, None, 2, 1][version]
        self.layer = 4 - layer
        self.protected = not protection
        self.padding = bool(padding)
        self.bitrate = self.__BITRATE[(self.version, self.layer)][bitrate]
        self.bitrate *= 1000
        self.sample_rate = self.__RATES[self.version][sample_rate]
        if self.layer == 1:
            frame_size = 384
            slot = 4
        elif self.version >= 2 and self.layer == 3:
            frame_size = 576
            slot = 1
        else:
            frame_size = 1152
            slot = 1
        frame_length = (
            ((frame_size // 8 * self.bitrate) // self.sample_rate) +
            padding) * slot
        self.sketchy = True
        # Try to find/parse the Xing header, which trumps the above length
        # and bitrate calculation.
        if self.layer == 3:
            self._parse_vbr_header(fileobj, self.frame_offset, frame_size,
                                   frame_length)
        fileobj.seek(self.frame_offset + frame_length, 0)
    def _parse_vbr_header(self, fileobj, frame_offset, frame_size,
                          frame_length):
        """Does not raise"""
        # Xing
        xing_offset = XingHeader.get_offset(self)
        fileobj.seek(frame_offset + xing_offset, 0)
        try:
            xing = XingHeader(fileobj)
        except XingHeaderError:
            pass
        else:
            lame = xing.lame_header
            self.sketchy = False
            self.bitrate_mode = _guess_xing_bitrate_mode(xing)
            self.encoder_settings = xing.get_encoder_settings()
            if xing.frames != -1:
                samples = frame_size * xing.frames
                if xing.bytes != -1 and samples > 0:
                    # the first frame is only included in xing.bytes but
                    # not in xing.frames, skip it.
                    audio_bytes = max(0, xing.bytes - frame_length)
                    self.bitrate = intround((
                        audio_bytes * 8 * self.sample_rate) / float(samples))
                if lame is not None:
                    samples -= lame.encoder_delay_start
                    samples -= lame.encoder_padding_end
                if samples < 0:
                    # older lame versions wrote bogus delay/padding for short
                    # files with low bitrate
                    samples = 0
                self.length = float(samples) / self.sample_rate
            if xing.lame_version_desc:
                self.encoder_info = u"LAME %s" % xing.lame_version_desc
            if lame is not None:
                self.track_gain = lame.track_gain_adjustment
                self.track_peak = lame.track_peak
                self.album_gain = lame.album_gain_adjustment
            # a Xing header was found; never fall through to VBRI
            return
        # VBRI
        vbri_offset = VBRIHeader.get_offset(self)
        fileobj.seek(frame_offset + vbri_offset, 0)
        try:
            vbri = VBRIHeader(fileobj)
        except VBRIHeaderError:
            pass
        else:
            self.bitrate_mode = BitrateMode.VBR
            self.encoder_info = u"FhG"
            self.sketchy = False
            self.length = float(frame_size * vbri.frames) / self.sample_rate
            if self.length:
                self.bitrate = int((vbri.bytes * 8) / self.length)
def skip_id3(fileobj):
    """Advance *fileobj* past any leading ID3v2 tags (might raise IOError).

    WMP writes multiple stacked id3 tags, so keep skipping until a
    non-ID3 header is found.
    """
    while True:
        header = fileobj.read(10)
        try:
            magic, raw_size = struct.unpack('>3sxxx4s', header)
        except struct.error:
            magic, raw_size = b'', 0
        tag_size = BitPaddedInt(raw_size)
        if magic == b'ID3' and tag_size > 0:
            # skip over the tag body and look for another header
            fileobj.seek(tag_size, 1)
        else:
            # not an id3 tag: rewind what we read and stop
            fileobj.seek(-len(header), 1)
            break
def iter_sync(fileobj, max_read):
    """Iterate over a fileobj and yield on each mpeg sync.

    When yielding, the fileobj offset is right before the sync; the caller
    may reposition the file between iterations without affecting the scan.
    Might raise IOError.
    """
    consumed = 0
    chunk_size = 2
    prev_tail = b""

    def _is_sync_second(byte):
        # second sync byte has its top three bits set
        return ord(byte) & 0xe0 == 0xe0

    while consumed < max_read:
        chunk_start = fileobj.tell()
        chunk = fileobj.read(min(max_read - consumed, chunk_size))
        if not chunk:
            return
        consumed += len(chunk)
        # handle a sync pair split across the previous chunk boundary
        if prev_tail == b"\xff" and _is_sync_second(chunk[0:1]):
            fileobj.seek(chunk_start - 1, 0)
            yield
        # grow the read size geometrically to limit call overhead
        chunk_size *= 2
        prev_tail = chunk[-1:]
        search_from = 0
        while True:
            pos = chunk.find(b"\xff", search_from)
            # if not found, or found as the last byte, read more first
            if pos == -1 or pos == len(chunk) - 1:
                break
            if _is_sync_second(chunk[pos + 1:pos + 2]):
                fileobj.seek(chunk_start + pos, 0)
                yield
            search_from = pos + 1
        fileobj.seek(chunk_start + len(chunk), 0)
class MPEGInfo(StreamInfo):
    """MPEGInfo()
    MPEG audio stream information
    Parse information about an MPEG audio file. This also reads the
    Xing VBR header format.
    This code was implemented based on the format documentation at
    http://mpgedit.org/mpgedit/mpeg_format/mpeghdr.htm.
    Useful attributes:
    Attributes:
        length (`float`): audio length, in seconds
        channels (`int`): number of audio channels
        bitrate (`int`): audio bitrate, in bits per second.
            In case :attr:`bitrate_mode` is :attr:`BitrateMode.UNKNOWN` the
            bitrate is guessed based on the first frame.
        sample_rate (`int`): audio sample rate, in Hz
        encoder_info (`mutagen.text`): a string containing encoder name and
            possibly version. In case a lame tag is present this will start
            with ``"LAME "``, if unknown it is empty, otherwise the
            text format is undefined.
        encoder_settings (`mutagen.text`): a string containing a guess about
            the settings used for encoding. The format is undefined and
            depends on the encoder.
        bitrate_mode (`BitrateMode`): a :class:`BitrateMode`
        track_gain (`float` or `None`): replaygain track gain (89db) or None
        track_peak (`float` or `None`): replaygain track peak or None
        album_gain (`float` or `None`): replaygain album gain (89db) or None
    Useless attributes:
    Attributes:
        version (`float`): MPEG version (1, 2, 2.5)
        layer (`int`): 1, 2, or 3
        mode (`int`): One of STEREO, JOINTSTEREO, DUALCHANNEL, or MONO (0-3)
        protected (`bool`): whether or not the file is "protected"
        sketchy (`bool`): if true, the file may not be valid MPEG audio
    """
    # class-level defaults; overridden per-instance during parsing
    sketchy = False
    encoder_info = u""
    encoder_settings = u""
    bitrate_mode = BitrateMode.UNKNOWN
    track_gain = track_peak = album_gain = album_peak = None
    @convert_error(IOError, error)
    def __init__(self, fileobj, offset=None):
        """Parse MPEG stream information from a file-like object.
        If an offset argument is given, it is used to start looking
        for stream information and Xing headers; otherwise, ID3v2 tags
        will be skipped automatically. A correct offset can make
        loading files significantly faster.
        Raises HeaderNotFoundError, error
        """
        if offset is None:
            fileobj.seek(0, 0)
        else:
            fileobj.seek(offset, 0)
        # skip anyway, because wmp stacks multiple id3 tags
        skip_id3(fileobj)
        # find a sync in the first 1024K, give up after some invalid syncs
        max_read = 1024 * 1024
        max_syncs = 1500
        enough_frames = 4
        min_frames = 2
        self.sketchy = True
        frames = []
        first_frame = None
        for _ in iter_sync(fileobj, max_read):
            max_syncs -= 1
            if max_syncs <= 0:
                break
            # try to parse a short run of consecutive frames at this sync
            for _ in range(enough_frames):
                try:
                    frame = MPEGFrame(fileobj)
                except HeaderNotFoundError:
                    break
                frames.append(frame)
                if not frame.sketchy:
                    break
            # if we have min frames, save it in case this is all we get
            if len(frames) >= min_frames and first_frame is None:
                first_frame = frames[0]
            # if the last frame was a non-sketchy one (has a valid vbr header)
            # we use that
            if frames and not frames[-1].sketchy:
                first_frame = frames[-1]
                self.sketchy = False
                break
            # if we have enough valid frames, use the first
            if len(frames) >= enough_frames:
                first_frame = frames[0]
                self.sketchy = False
                break
            # otherwise start over with the next sync
            del frames[:]
        if first_frame is None:
            raise HeaderNotFoundError("can't sync to MPEG frame")
        assert first_frame
        self.length = -1
        # copy all attributes parsed from the chosen frame onto self,
        # but keep our own overall sketchy verdict
        sketchy = self.sketchy
        self.__dict__.update(first_frame.__dict__)
        self.sketchy = sketchy
        # no length, estimate based on file size
        if self.length == -1:
            fileobj.seek(0, 2)
            content_size = fileobj.tell() - first_frame.frame_offset
            self.length = 8 * content_size / float(self.bitrate)
    def pprint(self):
        # Return a one-line human-readable summary of the stream.
        info = str(self.bitrate_mode).split(".", 1)[-1]
        if self.bitrate_mode == BitrateMode.UNKNOWN:
            info = u"CBR?"
        if self.encoder_info:
            info += ", %s" % self.encoder_info
        if self.encoder_settings:
            info += ", %s" % self.encoder_settings
        s = u"MPEG %s layer %d, %d bps (%s), %s Hz, %d chn, %.2f seconds" % (
            self.version, self.layer, self.bitrate, info,
            self.sample_rate, self.channels, self.length)
        if self.sketchy:
            s += u" (sketchy)"
        return s
class MP3(ID3FileType):
    """MP3(filething)
    An MPEG audio (usually MPEG-1 Layer 3) file.
    Arguments:
        filething (filething)
    Attributes:
        info (`MPEGInfo`)
        tags (`mutagen.id3.ID3`)
    """
    _Info = MPEGInfo
    _mimes = ["audio/mpeg", "audio/mpg", "audio/x-mpeg"]
    @property
    def mime(self):
        # layer-specific mime types first, then the generic MPEG types
        l = self.info.layer
        return ["audio/mp%d" % l, "audio/x-mp%d" % l] + super(MP3, self).mime
    @staticmethod
    def score(filename, fileobj, header_data):
        # Detection heuristic: a leading ID3 header is a strong hint,
        # a known extension a weak one.
        filename = filename.lower()
        return (header_data.startswith(b"ID3") * 2 +
                endswith(filename, b".mp3") +
                endswith(filename, b".mp2") + endswith(filename, b".mpg") +
                endswith(filename, b".mpeg"))
# Module-level alias: mutagen modules conventionally export an Open name.
Open = MP3
class EasyMP3(MP3):
    """EasyMP3(filething)
    Like MP3, but uses EasyID3 for tags.
    Arguments:
        filething (filething)
    Attributes:
        info (`MPEGInfo`)
        tags (`mutagen.easyid3.EasyID3`)
    """
    # class-body import binds EasyID3 as this file type's tag class
    from mutagen.easyid3 import EasyID3 as ID3
    ID3 = ID3  # type: ignore
|
quodlibet/mutagen
|
mutagen/mp3/__init__.py
|
Python
|
gpl-2.0
| 14,895
|
# final.py
class FinalGrade(object):  # fixed: "Object" raised NameError at class creation
    """Compute a weighted final grade.

    The final grade counts the current grade at 85% and the projected
    (final exam) grade at 15%.
    """

    @staticmethod
    def init(current_grade, projected_grade):
        """Return the weighted final grade.

        Made a staticmethod: the original ``def init(current_grade, ...)``
        would have swallowed ``self`` as the first grade when called on an
        instance.

        :param current_grade: grade so far (weighted 85%)
        :param projected_grade: projected/exam grade (weighted 15%)
        :returns: the combined final grade as a float
        """
        current_grade = (current_grade * .85)
        projected_grade = (projected_grade * .15)
        return current_grade + projected_grade
|
AsyncNick/FinalGrade
|
python/finalgrade/final.py
|
Python
|
mit
| 209
|
# -*- coding: UTF-8 -*-
from gi.repository import Gtk, Pango
from pychess.System import uistuff
from pychess.System.prefix import addDataPrefix
from pychess.System.glock import *
from pychess.Utils.const import *
from pychess.Utils.repr import reprColor, reprPiece
from pychess.Utils.lutils.lsort import staticExchangeEvaluate
from pychess.Utils.lutils.lmove import FLAG, TCORD, FCORD, toSAN
from pychess.Utils.lutils.lmovegen import genCaptures
from pychess.Utils.lutils.leval import evalMaterial
from pychess.Utils.lutils import ldata
from pychess.Utils.lutils import strateval
# Metadata read by the side-panel loader to title and describe this panel.
__title__ = _("Comments")
__icon__ = addDataPrefix("glade/panel_comments.svg")
__desc__ = _("The comments panel will try to analyze and explain the moves played")
class Switch:
    """Context-manager flag: ``on`` is True while inside a ``with`` block."""

    def __init__(self):
        self.on = False

    def __enter__(self):
        self.on = True

    def __exit__(self, *exc_info):
        self.on = False
class Sidepanel:
    """Side panel that writes a natural-language comment for every move of
    the game and lets the user jump to a position by selecting a comment."""

    def __init__ (self):
        # tip text -> (score, ply) of the last time that tip was shown;
        # used by __chooseComment to avoid repeating the same advice.
        self.givenTips = {}

    def load (self, gmwidg):
        """Build the panel widgets, connect game/board signals, and return
        the scrolled window to be embedded by the panel manager."""
        self.gamemodel = gmwidg.board.view.model
        self.gmhandlers = [
            glock_connect(self.gamemodel, "game_changed", self.game_changed),
            glock_connect(self.gamemodel, "game_started", self.game_started),
            glock_connect(self.gamemodel, "moves_undoing", self.moves_undoing)
        ]
        widgets = Gtk.Builder()
        widgets.add_from_file(addDataPrefix("sidepanel/book.glade"))
        self.tv = widgets.get_object("treeview")
        scrollwin = widgets.get_object("scrolledwindow")
        scrollwin.unparent()
        # One text column; row N holds the comment for ply N + lowply.
        self.store = Gtk.ListStore(str)
        self.tv.set_model(self.store)
        self.tv.get_selection().set_mode(Gtk.SelectionMode.BROWSE)
        uistuff.appendAutowrapColumn(self.tv, "Comment", text=0)
        self.tv.get_selection().connect_after('changed', self.select_cursor_row)
        self.boardview = gmwidg.board.view
        self.boardview.connect("shown_changed", self.shown_changed)
        # Guards against feedback between programmatic selection changes
        # (addComment) and the 'changed' handler (select_cursor_row).
        self.frozen = Switch()
        return scrollwin

    def select_cursor_row (self, selection):
        """Show the board position belonging to the selected comment row."""
        iter = selection.get_selected()[1]
        if iter == None: return
        # Ignore selection changes this panel made itself (see addComment).
        if self.frozen.on: return
        row = self.tv.get_model().get_path(iter)[0]
        board = self.gamemodel.boards[row]
        self.boardview.setShownBoard(board)

    def shown_changed (self, boardview, shown):
        """Keep the selected comment row in sync with the shown board."""
        if not boardview.shownIsMainLine():
            return
        row = shown - self.gamemodel.lowply
        try:
            iter = self.store.get_iter(row)
            self.tv.get_selection().select_iter(iter)
            self.tv.scroll_to_cell(row)
        except ValueError:
            pass
            # deleted variations by moves_undoing

    def moves_undoing (self, game, moves):
        """Drop the trailing *moves* comment rows when moves are undone."""
        assert game.ply > 0, "Can't undo when ply <= 0"
        model = self.tv.get_model()
        for i in range(moves):
            model.remove(model.get_iter( (len(model)-1,) ))

    def game_started (self, model):
        self.game_changed(model)

    def game_changed (self, model):
        # Append one comment per ply not yet represented in the store.
        for ply in range(len(self.store)+model.lowply, model.ply+1):
            self.addComment(model, self.__chooseComment(model, ply))

    def addComment (self, model, comment):
        """Append *comment*; if the latest ply is shown, select the new row."""
        self.store.append([comment])

        # If latest ply is shown, we select the new latest
        iter = self.tv.get_selection().get_selected()[1]
        if iter:
            row = self.tv.get_model().get_path(iter)[0]
            if row < self.boardview.shown-1:
                return
        if self.boardview.shown >= model.ply:
            iter = self.store.get_iter(len(self.store)-1)
            # frozen stops select_cursor_row from reacting to this change.
            with self.frozen:
                self.tv.get_selection().select_iter(iter)

    def __chooseComment(self, model, ply):
        """Return a comment string describing the move leading to *ply*."""

        if ply == model.lowply:
            return _("Initial position")

        ########################################################################
        # Set up variables
        ########################################################################

        color = model.getBoardAtPly(ply-1).board.color
        # NOTE(review): the second argument is the previous board's .color --
        # confirm evalMaterial expects a colour here; also `s` is unused below.
        s, phase = evalMaterial (model.getBoardAtPly(ply).board,
                                 model.getBoardAtPly(ply-1).color)

        # * Final: Will be shown alone: "mates", "draws"
        # * Prefix: Will always be shown: "castles", "promotes"
        # * Attack: Will always be shown: "threaten", "preassures", "defendes"
        # * Moves (s): Will always be shown: "put into *"
        # * State: (s) Will always be shown: "new *"
        # * Simple: (s) Max one will be shown: "develops", "activity"
        # * Tip: (s) Will sometimes be shown: "pawn storm", "cramped position"

        ########################################################################
        # Call strategic evaluation functions
        ########################################################################

        def getMessages (prefix):
            # Collect results from every strateval.<prefix>_* function.
            messages = []
            for functionName in dir(strateval):
                if not functionName.startswith(prefix+"_"): continue
                function = getattr(strateval, functionName)
                messages.extend(function (model, ply, phase))
            return messages

        #move = model.moves[-1].move
        #print "----- %d - %s -----" % (model.ply/2, toSAN(oldboard, move))

        # ----------------------------------------------------------------------
        # Final
        # ----------------------------------------------------------------------
        messages = getMessages ("final")
        if messages:
            return "%s %s" % (reprColor[color], messages[0])
        # ---
        strings = []

        # ----------------------------------------------------------------------
        # Attacks
        # ----------------------------------------------------------------------
        messages = getMessages ("attack")
        for message in messages:
            strings.append("%s %s" % (reprColor[color], message))

        # ----------------------------------------------------------------------
        # Check for prefixes
        # ----------------------------------------------------------------------
        messages = getMessages ("prefix")
        if messages:
            prefix = messages[0]
        else: prefix = ""

        # ----------------------------------------------------------------------
        # Check for special move stuff. All of which accept prefixes
        # ----------------------------------------------------------------------
        for message in getMessages("offencive_moves") \
                + getMessages("defencive_moves"):
            if prefix:
                # The prefix is consumed by the first message it attaches to.
                strings.append("%s %s %s %s" %
                               (reprColor[color], prefix, _("and"), message))
                prefix = ""
            else:
                strings.append("%s %s" % (reprColor[color], message))

        # ----------------------------------------------------------------------
        # Simple
        # ----------------------------------------------------------------------
        # We only add simples if there hasn't been too much stuff to say
        if not strings:
            messages = getMessages ("simple")
            if messages:
                # Highest-scored simple message only.
                messages.sort(reverse=True)
                score, message = messages[0]
                if prefix:
                    strings.append("%s %s %s %s" %
                                   (reprColor[color], prefix, _("and"), message))
                    prefix = ""

        # ----------------------------------------------------------------------
        # Prefix fallback
        # ----------------------------------------------------------------------
        # There was nothing to apply the prefix to, so we just post it here
        # before the states and tips
        if prefix:
            strings.append("%s %s" % (reprColor[color], prefix))
            prefix = ""

        # ----------------------------------------------------------------------
        # State
        # ----------------------------------------------------------------------
        messages = getMessages("state")
        messages.sort(reverse=True)
        for score, message in messages:
            strings.append(message)

        # ----------------------------------------------------------------------
        # Tips
        # ----------------------------------------------------------------------
        tips = getMessages("tip")
        tips.sort(reverse=True)
        for (score, tip) in tips:
            # Suppress a tip that was recently shown unless it got much
            # more relevant (score grew by >30%) or 10+ plies have passed.
            if tip in self.givenTips:
                oldscore, oldply = self.givenTips[tip]
                if score < oldscore*1.3 or model.ply < oldply+10:
                    continue
            self.givenTips[tip] = (score, model.ply)
            strings.append(tip)
            break

        # ----------------------------------------------------------------------
        # Last solution
        # ----------------------------------------------------------------------
        if not strings:
            tcord = TCORD(model.getMoveAtPly(ply-1).move)
            piece = model.getBoardAtPly(ply).board.arBoard[tcord]
            # NOTE(review): reprCord is expected to come from one of the star
            # imports above -- verify it resolves.
            strings.append( _("%(color)s moves a %(piece)s to %(cord)s") % {
                'color': reprColor[color], 'piece': reprPiece[piece], 'cord': reprCord[tcord]})

        return ";\n".join(strings)
|
importsfromgooglecode/pychess
|
sidepanel/commentPanel.py
|
Python
|
gpl-3.0
| 9,492
|
import unittest
import random
import numpy as np
from nose.tools import assert_equal, assert_true, \
assert_false, assert_almost_equal, assert_raises
import networkx as nx
from sampler import quota_upperbound, UBSampler, RandomSampler, \
node_scores_from_tree, AdaptiveSampler, DeterministicSampler
def test_node_scores_from_tree():
    """node_scores_from_tree on a small unit-cost, unit-reward tree."""
    tree = nx.DiGraph()
    edge_list = [(0, 1), (1, 2), (1, 3), (2, 4), (2, 5),
                 (0, 6), (6, 7)]
    tree.add_edges_from(edge_list)
    for u, v in tree.edges_iter():
        tree[u][v]['c'] = 1
    for node in tree.nodes_iter():
        tree.node[node]['r'] = 1

    expected = {
        0: np.log(9) * 8 / 7.1,
        1: np.log(6) * 5 / 4.1,
        2: np.log(4) * 3 / 2.1,
        6: np.log(3) * 2 / 1.1,
    }
    assert_equal(expected, node_scores_from_tree(tree, 0))
class UpperboundTest(unittest.TestCase):
    """Tests quota_upperbound and the basic sampler classes on a tiny DAG."""

    def setUp(self):
        # Fixed seed keeps the RandomSampler-order assertion deterministic.
        random.seed(123456)
        g = nx.DiGraph()
        g.add_edges_from([
            (0, 1, {'c': 1}),
            (1, 2, {'c': 1}),
            (0, 2, {'c': 2}),
            (2, 3, {'c': 1}),
        ])
        self.g = g
        # Unit prize 'p' and increasing timestamps on every node.
        for t, n in enumerate(g.nodes_iter()):
            g.node[n]['p'] = 1
            g.node[n]['datetime'] = t

    def test_quota_upperbound_1(self):
        # Budget 2 reaches at most three nodes from the root.
        assert_equal(
            3,
            quota_upperbound(self.g, 0, B=2)
        )

    def test_quota_upperbound_2(self):
        # A very large budget covers the whole 4-node graph.
        assert_equal(
            4,
            quota_upperbound(self.g, 0, B=100)
        )

    def test_quota_upperbound_3(self):
        # Zero budget covers only the root itself.
        assert_equal(
            1,
            quota_upperbound(self.g, 0, B=0)
        )

    def test_ub_sampler(self):
        s = UBSampler(self.g, B=3, timespan_secs=3)
        assert_equal(range(3),
                     s.nodes_sorted_by_upperbound)
        for i in xrange(3):
            node, dag = s.take()
            assert_equal(i, node)
            assert_true(isinstance(dag, nx.DiGraph))
        # All candidate roots consumed after three takes.
        assert_equal(0,
                     len(s.nodes_sorted_by_upperbound))

    def test_random_sampler(self):
        s = RandomSampler(self.g, timespan_secs=3)
        # With the fixed seed the random order differs from sorted order.
        assert_false([s.take()[0] for i in xrange(4)] == range(4))
        assert_equal(0, len(s.nodes))

    def test_deterministic_sampler(self):
        s = DeterministicSampler(self.g, roots=[0, 1, 2], timespan_secs=3)
        # NOTE(review): assert_true(range(3), ...) always passes because the
        # second argument is a message -- probably assert_equal was intended.
        assert_true(range(3), [s.take()[0] for i in xrange(3)])
        # Taking beyond the supplied roots raises.
        assert_raises(IndexError, s.take)
class AdaptiveSamplerTest(unittest.TestCase):
    """Tests the explore/exploit behaviour of AdaptiveSampler on a small tree."""

    def setUp(self):
        random.seed(123456)
        tree = nx.DiGraph()
        tree.add_edges_from([
            (0, 1), (1, 2), (1, 3), (2, 4), (2, 5),
            (0, 6), (6, 7), (0, 8)
        ])
        self.assign_g_attrs(tree)
        self.tree = tree
        # Breadth-first timestamps: root at t=0, deeper nodes later.
        for t, nodes in enumerate([(0, ), (1, 6, 8), (2, 3, 7), (4, 5)]):
            for n in nodes:
                tree.node[n]['datetime'] = t

        self.s = AdaptiveSampler(self.tree, B=3,
                                 timespan_secs=1,
                                 node_score_func=lambda p, c: p**2 / c)

    def assign_g_attrs(self, tree):
        # Unit edge cost 'c' and unit node reward 'r' everywhere.
        for s, t in tree.edges_iter():
            tree[s][t]['c'] = 1
        for n in tree.nodes_iter():
            tree.node[n]['r'] = 1

    def test_sampler_init(self):
        assert_equal(
            # {0: 4, 1: 3, 2: 3, 6: 2},
            [0, 2, 1, 6],
            self.s.roots_sorted_by_upperbound
        )
        # Nothing covered yet, so the sampler always explores.
        assert_equal(
            1.0,
            self.s.explore_proba
        )
        assert_equal(4, self.s.n_nodes_to_cover)

    def test_update(self):
        result_tree = nx.DiGraph()
        result_tree.add_edges_from(
            [(0, 1), (0, 6), (1, 3)]
        )
        self.assign_g_attrs(result_tree)

        self.s.update(0, result_tree)
        # 2 of the 4 coverable nodes are now covered.
        assert_equal(
            0.5,
            self.s.explore_proba
        )
        assert_equal(
            set([0, 1]),
            self.s.covered_nodes
        )
        assert_equal(
            {1: 2 ** 2},
            self.s.node2score
        )

        # case: score of node 1 increases
        result_tree.add_edge(1, 2)
        self.assign_g_attrs(result_tree)
        self.s.update(0, result_tree)
        assert_equal(
            {1: 3 ** 2 / 2},
            self.s.node2score
        )

    def test_update_border_case(self):
        # Feeding the full tree covers everything; no exploration remains.
        self.s.update(0, self.tree)
        assert_equal(
            set([0, 1, 2, 6]),
            self.s.covered_nodes
        )
        assert_equal(0,
                     self.s.explore_proba)

    def test_explore_proba(self):
        assert_equal(1, self.s.explore_proba)
        result_tree = nx.DiGraph()
        result_tree.add_edges_from(
            [(0, 1), (0, 6), (1, 3)]
        )
        self.assign_g_attrs(result_tree)
        self.s.update(0, result_tree)
        assert_almost_equal(2 / 4., self.s.explore_proba)

    def test_take_via_explore(self):
        r, tree = self.s.take()
        assert_equal('explore',
                     self.s.random_action())
        assert_equal(0, r)
        assert_equal(
            sorted([(0, 1), (0, 6), (0, 8)]),
            sorted(tree.edges())
        )
        # on and on
        r, tree = self.s.take()
        assert_equal(2, r)
        r, tree = self.s.take()
        assert_equal(1, r)

    def test_take_via_exploit(self):
        # round 1
        self.s.update(0, self.tree)
        assert_equal('exploit',
                     self.s.random_action())
        r, tree = self.s.take()
        assert_equal(1, r)
        # round 2
        self.s.update(r, tree)
        # An exploited node is removed from the score table.
        assert_true(r not in self.s.node2score)
|
xiaohan2012/lst
|
test_sampler.py
|
Python
|
mit
| 5,635
|
# -*- coding: utf-8 -*-
"""
debug.py - Functions to aid in debugging
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
from __future__ import print_function
import sys, traceback, time, gc, re, types, weakref, inspect, os, cProfile, threading
from . import ptime
from numpy import ndarray
from .Qt import QtCore, QtGui
from .util.mutex import Mutex
from .util import cprint
__ftraceDepth = 0

def ftrace(func):
    """Decorator used for marking the beginning and end of function calls.
    Automatically indents nested calls.
    """
    def wrapper(*args, **kwargs):
        global __ftraceDepth
        indent = " " * __ftraceDepth
        print(indent + func.__name__ + " start")
        __ftraceDepth += 1
        try:
            result = func(*args, **kwargs)
        finally:
            # Restore the depth even when func raises.
            __ftraceDepth -= 1
        print(indent + func.__name__ + " done")
        return result
    return wrapper
class Tracer(object):
    """
    Prints every function enter/exit. Useful for debugging crashes / lockups.
    """
    def __init__(self):
        self.count = 0   # trace events seen since the stack was last empty
        self.stack = []  # formatted lines for frames currently on the stack

    def trace(self, frame, event, arg):
        """Trace callback installed via sys.settrace(); prints call/return."""
        self.count += 1
        # If it has been a long time since we saw the top of the stack,
        # print a reminder
        if self.count % 1000 == 0:
            print("----- current stack: -----")
            for line in self.stack:
                print(line)
        if event == 'call':
            line = " " * len(self.stack) + ">> " + self.frameInfo(frame)
            print(line)
            self.stack.append(line)
        elif event == 'return':
            self.stack.pop()
            line = " " * len(self.stack) + "<< " + self.frameInfo(frame)
            print(line)
            # Back at top level: reset the reminder counter.
            if len(self.stack) == 0:
                self.count = 0
        # Returning the callback keeps tracing enabled in nested scopes.
        return self.trace

    def stop(self):
        """Uninstall the global trace callback."""
        sys.settrace(None)

    def start(self):
        """Install this tracer as the global trace callback."""
        sys.settrace(self.trace)

    def frameInfo(self, fr):
        """Return a one-line description of frame *fr* and of its caller."""
        filename = fr.f_code.co_filename
        funcname = fr.f_code.co_name
        lineno = fr.f_lineno
        # NOTE(review): assumes frameInfo is always called exactly three
        # frames below the frame of interest -- confirm if call paths change.
        callfr = sys._getframe(3)
        callline = "%s %d" % (callfr.f_code.co_name, callfr.f_lineno)
        args, _, _, value_dict = inspect.getargvalues(fr)
        # Prefix the class name for bound methods (first argument 'self').
        if len(args) and args[0] == 'self':
            instance = value_dict.get('self', None)
            if instance is not None:
                cls = getattr(instance, '__class__', None)
                if cls is not None:
                    funcname = cls.__name__ + "." + funcname
        return "%s: %s %s: %s" % (callline, filename, lineno, funcname)
def warnOnException(func):
    """Decorator that catches exceptions raised by *func* and prints a stack
    trace instead of propagating them.

    Returns *func*'s return value on success, or None when an exception was
    caught.  (The original wrapper discarded the return value and its bare
    ``except:`` also swallowed KeyboardInterrupt/SystemExit; both fixed.)
    """
    def w(*args, **kwds):
        try:
            return func(*args, **kwds)
        except Exception:
            printExc('Ignored exception:')
    return w
def getExc(indent=4, prefix='| ', skip=1):
    """Return the current exception's traceback as a single indented,
    prefixed multi-line string."""
    lines = formatException(*sys.exc_info(), skip=skip)
    # Split each formatted entry into individual physical lines.
    flat = []
    for entry in lines:
        flat.extend(entry.strip('\n').split('\n'))
    pad = " " * indent
    return '\n'.join(pad + prefix + ln for ln in flat)
def printExc(msg='', indent=4, prefix='|'):
    """Print an error message followed by an indented exception backtrace
    (This function is intended to be called within except: blocks)"""
    exc = getExc(indent, prefix + ' ', skip=2)
    pad = " " * indent
    print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
    print(pad + prefix + '=' * 30 + '>>')
    print(exc)
    print(pad + prefix + '=' * 30 + '<<')
def printTrace(msg='', indent=4, prefix='|'):
    """Print an error message followed by an indented stack trace"""
    trace = backtrace(1)
    pad = " " * indent
    print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
    print(pad + prefix + '=' * 30 + '>>')
    for line in trace.split('\n'):
        print(pad + prefix + " " + line)
    print(pad + prefix + '=' * 30 + '<<')
def backtrace(skip=0):
    """Return the current call stack as a string, dropping the innermost
    *skip*+1 frames (this function's own frame is always excluded)."""
    frames = traceback.format_stack()
    return ''.join(frames[:-(skip + 1)])
def formatException(exctype, value, tb, skip=0):
    """Return a list of formatted exception strings.

    Similar to traceback.format_exception, but displays the entire stack trace
    rather than just the portion downstream of the point where the exception is
    caught. In particular, unhandled exceptions that occur during Qt signal
    handling do not usually show the portion of the stack that emitted the
    signal.
    """
    exc_lines = traceback.format_exception(exctype, value, tb)
    stack_lines = traceback.format_stack()[:-(skip + 1)]
    marker = [' --- exception caught here ---\n']
    return [exc_lines[0]] + stack_lines + marker + exc_lines[1:]
def printException(exctype, value, traceback):
    """Print an exception with its full traceback.

    Set `sys.excepthook = printException` to ensure that exceptions caught
    inside Qt signal handlers are printed with their full stack trace.
    """
    formatted = formatException(exctype, value, traceback, skip=1)
    print(''.join(formatted))
def listObjs(regex='Q', typ=None):
    """List all objects managed by python gc with class name matching regex.
    Finds 'Q...' classes by default."""
    if typ is not None:
        return [obj for obj in gc.get_objects() if isinstance(obj, typ)]
    # Compile once; matching semantics are identical to re.match(regex, ...).
    pattern = re.compile(regex)
    return [obj for obj in gc.get_objects() if pattern.match(type(obj).__name__)]
def findRefPath(startObj, endObj, maxLen=8, restart=True, seen={}, path=None, ignore=None):
    """Determine all paths of object references from startObj to endObj.

    Walks backwards from *endObj* through gc.get_referrers() until *startObj*
    is reached or *maxLen* hops are exhausted; matching paths are printed as
    they are found and returned as lists of objects.

    NOTE: *seen* deliberately uses a mutable default argument as a cache that
    persists across the recursive calls; it is cleared when restart=True.
    """
    refs = []
    if path is None:
        path = [endObj]
    if ignore is None:
        ignore = {}
    # Exclude this call's own bookkeeping objects from the search.
    ignore[id(sys._getframe())] = None
    ignore[id(path)] = None
    ignore[id(seen)] = None
    prefix = " "*(8-maxLen)
    #print prefix + str(map(type, path))
    prefix += " "
    if restart:
        #gc.collect()
        seen.clear()
    gc.collect()
    newRefs = [r for r in gc.get_referrers(endObj) if id(r) not in ignore]
    ignore[id(newRefs)] = None
    #fo = allFrameObjs()
    #newRefs = []
    #for r in gc.get_referrers(endObj):
        #try:
            #if r not in fo:
                #newRefs.append(r)
        #except:
            #newRefs.append(r)
    for r in newRefs:
        #print prefix+"->"+str(type(r))
        # Frames/functions/iterators reference nearly everything; skip them.
        if type(r).__name__ in ['frame', 'function', 'listiterator']:
            #print prefix+"  FRAME"
            continue
        try:
            # Skip referrers already on the current path (reference cycles).
            if any([r is x for x in path]):
                #print prefix+"  LOOP", objChainString([r]+path)
                continue
        except:
            print(r)
            print(path)
            raise
        if r is startObj:
            refs.append([r])
            print(refPathString([startObj]+path))
            continue
        if maxLen == 0:
            #print prefix+"  END:", objChainString([r]+path)
            continue
        ## See if we have already searched this node.
        ## If not, recurse.
        tree = None
        try:
            cache = seen[id(r)]
            # Only reuse a cached subtree computed with at least our depth.
            if cache[0] >= maxLen:
                tree = cache[1]
                for p in tree:
                    print(refPathString(p+path))
        except KeyError:
            pass
        ignore[id(tree)] = None
        if tree is None:
            tree = findRefPath(startObj, r, maxLen-1, restart=False, path=[r]+path, ignore=ignore)
            seen[id(r)] = [maxLen, tree]
        ## integrate any returned results
        if len(tree) == 0:
            #print prefix+"  EMPTY TREE"
            continue
        else:
            for p in tree:
                refs.append(p+[r])
        #seen[id(r)] = [maxLen, refs]
    return refs
def objString(obj):
    """Return a short but descriptive string for any object"""
    try:
        if type(obj) in [int, float]:
            return str(obj)
        if isinstance(obj, dict):
            keys = list(obj.keys())
            if len(obj) > 5:
                return "<dict {%s,...}>" % (",".join(keys[:5]))
            return "<dict {%s}>" % (",".join(keys))
        if isinstance(obj, str):
            # Long strings are truncated and quoted; short ones shown as-is.
            return '"%s..."' % obj[:50] if len(obj) > 50 else obj[:]
        if isinstance(obj, ndarray):
            return "<ndarray %s %s>" % (str(obj.dtype), str(obj.shape))
        if hasattr(obj, '__len__'):
            if len(obj) > 5:
                head = [type(o).__name__ for o in obj[:5]]
                return "<%s [%s,...]>" % (type(obj).__name__, ",".join(head))
            return "<%s [%s]>" % (type(obj).__name__,
                                  ",".join([type(o).__name__ for o in obj]))
        return "<%s %s>" % (type(obj).__name__, obj.__class__.__name__)
    except:
        # Objects that break on introspection fall back to their type.
        return str(type(obj))
def refPathString(chain):
    """Given a list of adjacent objects in a reference path, print the 'natural' path
    names (ie, attribute names, keys, and indexes) that follow from one object to the next ."""
    s = objString(chain[0])
    i = 0
    while i < len(chain)-1:
        #print " -> ", i
        i += 1
        o1 = chain[i-1]
        o2 = chain[i]
        cont = False
        # Case 1: o2 is an element of the list/tuple o1.
        if isinstance(o1, list) or isinstance(o1, tuple):
            if any([o2 is x for x in o1]):
                s += "[%d]" % o1.index(o2)
                continue
        #print "  not list"
        # Case 2: o2 is o1's instance __dict__; skip ahead to the attribute.
        if isinstance(o2, dict) and hasattr(o1, '__dict__') and o2 == o1.__dict__:
            i += 1
            if i >= len(chain):
                s += ".__dict__"
                continue
            o3 = chain[i]
            for k in o2:
                if o2[k] is o3:
                    s += '.%s' % k
                    cont = True
                    # NOTE(review): this 'continue' only resumes the inner key
                    # loop; the outer while-loop is advanced by the
                    # 'if cont: continue' check at the bottom. Verify intent.
                    continue
        #print "  not __dict__"
        # Case 3: o2 is a key or a value of the dict o1.
        if isinstance(o1, dict):
            try:
                if o2 in o1:
                    s += "[key:%s]" % objString(o2)
                    continue
            except TypeError:
                pass
            for k in o1:
                if o1[k] is o2:
                    s += "[%s]" % objString(k)
                    cont = True
                    # NOTE(review): same inner-loop 'continue' as above.
                    continue
        #print "  not dict"
        #for k in dir(o1):  ## Not safe to request attributes like this.
            #if getattr(o1, k) is o2:
                #s += ".%s" % k
                #cont = True
                #continue
        #print "  not attr"
        if cont:
            continue
        # No natural link between o1 and o2 could be determined.
        s += " ? "
        sys.stdout.flush()
    return s
def objectSize(obj, ignore=None, verbose=False, depth=0, recursive=False):
    """Guess how much memory an object is using.

    Uses sys.getsizeof plus, when recursive=True, the sizes of contained
    items and of attributes that are referenced from nowhere else.  *ignore*
    tracks objects already counted so shared objects are counted once.
    """
    # Bound/builtin methods and functions are never counted.
    ignoreTypes = ['MethodType', 'UnboundMethodType', 'BuiltinMethodType', 'FunctionType', 'BuiltinFunctionType']
    ignoreTypes = [getattr(types, key) for key in ignoreTypes if hasattr(types, key)]
    ignoreRegex = re.compile('(method-wrapper|Flag|ItemChange|Option|Mode)')

    if ignore is None:
        ignore = {}

    indent = ' '*depth

    # Use the object itself as the 'seen' key when hashable; otherwise a
    # type/id string stands in for it.
    try:
        hash(obj)
        hsh = obj
    except:
        hsh = "%s:%d" % (str(type(obj)), id(obj))

    if hsh in ignore:
        return 0
    ignore[hsh] = 1

    try:
        size = sys.getsizeof(obj)
    except TypeError:
        size = 0

    if isinstance(obj, ndarray):
        # getsizeof does not include the array's data buffer; add it.
        try:
            size += len(obj.data)
        except:
            pass

    if recursive:
        if type(obj) in [list, tuple]:
            if verbose:
                print(indent+"list:")
            for o in obj:
                s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
                if verbose:
                    print(indent+'  +', s)
                size += s
        elif isinstance(obj, dict):
            if verbose:
                print(indent+"list:")
            for k in obj:
                s = objectSize(obj[k], ignore=ignore, verbose=verbose, depth=depth+1)
                if verbose:
                    print(indent+'  +', k, s)
                size += s
        #elif isinstance(obj, QtCore.QObject):
            #try:
                #childs = obj.children()
                #if verbose:
                    #print indent+"Qt children:"
                #for ch in childs:
                    #s = objectSize(obj, ignore=ignore, verbose=verbose, depth=depth+1)
                    #size += s
                    #if verbose:
                        #print indent + '  +', ch.objectName(), s
            #except:
                #pass
        #if isinstance(obj, types.InstanceType):
        gc.collect()
        if verbose:
            print(indent+'attrs:')
        for k in dir(obj):
            if k in ['__dict__']:
                continue
            o = getattr(obj, k)
            if type(o) in ignoreTypes:
                continue
            strtyp = str(type(o))
            if ignoreRegex.search(strtyp):
                continue
            #if isinstance(o, types.ObjectType) and strtyp == "<type 'method-wrapper'>":
                #continue
            #if verbose:
                #print indent, k, '?'
            # Only count attributes whose sole (non-frame) referrer is obj.
            refs = [r for r in gc.get_referrers(o) if type(r) != types.FrameType]
            if len(refs) == 1:
                s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
                size += s
                if verbose:
                    print(indent + "  +", k, s)
            #else:
                #if verbose:
                    #print indent + '  -', k, len(refs)
    return size
class GarbageWatcher(object):
    """Holds weak references to objects under chosen names so you can later
    see (via ``check()``) which of them have been garbage collected.

    Example:
        gw = GarbageWatcher()
        gw['objName'] = obj
        gw['objName2'] = obj2
        gw.check()
    """

    def __init__(self):
        # Weak values: entries vanish once the watched object is collected.
        self.objs = weakref.WeakValueDictionary()
        self.allNames = []

    def add(self, obj, name):
        """Start watching *obj* under *name*."""
        self.objs[name] = obj
        self.allNames.append(name)

    def __setitem__(self, name, obj):
        self.add(obj, name)

    def check(self):
        """Print a list of all watched objects and whether they have been collected."""
        gc.collect()
        alive = []
        dead = self.allNames[:]
        # Names still present in the weak dict are alive; the rest are dead.
        for name in list(self.objs):
            alive.append(name)
            dead.remove(name)
        print("Deleted objects:", dead)
        print("Live objects:", alive)

    def __getitem__(self, item):
        return self.objs[item]
class Profiler(object):
    """Simple profiler allowing measurement of multiple time intervals.

    By default, profilers are disabled. To enable profiling, set the
    environment variable `PYQTGRAPHPROFILE` to a comma-separated list of
    fully-qualified names of profiled functions.

    Calling a profiler registers a message (defaulting to an increasing
    counter) that contains the time elapsed since the last call. When the
    profiler is about to be garbage-collected, the messages are passed to the
    outer profiler if one is running, or printed to stdout otherwise.

    If `delayed` is set to False, messages are immediately printed instead.

    Example:
        def function(...):
            profiler = Profiler()
            ... do stuff ...
            profiler('did stuff')
            ... do other stuff ...
            profiler('did other stuff')
            # profiler is garbage-collected and flushed at function end

    If this function is a method of class C, setting `PYQTGRAPHPROFILE` to
    "C.function" (without the module name) will enable this profiler.

    For regular functions, use the qualified name of the function, stripping
    only the initial "pyqtgraph." prefix from the module.
    """

    # Function names listed in the PYQTGRAPHPROFILE environment variable.
    _profilers = os.environ.get("PYQTGRAPHPROFILE", None)
    _profilers = _profilers.split(",") if _profilers is not None else []

    _depth = 0   # nesting level of currently-active profilers
    _msgs = []   # class-level buffer shared by nested profilers
    disable = False # set this flag to disable all or individual profilers at runtime

    class DisabledProfiler(object):
        # Inert stand-in returned when profiling is off; every call is a no-op.
        def __init__(self, *args, **kwds):
            pass
        def __call__(self, *args):
            pass
        def finish(self):
            pass
        def mark(self, msg=None):
            pass
    _disabledProfiler = DisabledProfiler()

    def __new__(cls, msg=None, disabled='env', delayed=True):
        """Optionally create a new profiler based on caller's qualname.
        """
        # Shared no-op singleton when profiling is globally disabled.
        if disabled is True or (disabled == 'env' and len(cls._profilers) == 0):
            return cls._disabledProfiler
        # determine the qualified name of the caller function
        caller_frame = sys._getframe(1)
        try:
            caller_object_type = type(caller_frame.f_locals["self"])
        except KeyError: # we are in a regular function
            qualifier = caller_frame.f_globals["__name__"].split(".", 1)[-1]
        else: # we are in a method
            qualifier = caller_object_type.__name__
        func_qualname = qualifier + "." + caller_frame.f_code.co_name
        if disabled == 'env' and func_qualname not in cls._profilers: # don't do anything
            return cls._disabledProfiler
        # create an actual profiling object
        cls._depth += 1
        obj = super(Profiler, cls).__new__(cls)
        obj._name = msg or func_qualname
        obj._delayed = delayed
        obj._markCount = 0
        obj._finished = False
        obj._firstTime = obj._lastTime = ptime.time()
        obj._newMsg("> Entering " + obj._name)
        return obj

    def __call__(self, msg=None):
        """Register or print a new message with timing information.
        """
        if self.disable:
            return
        if msg is None:
            msg = str(self._markCount)
        self._markCount += 1
        newTime = ptime.time()
        self._newMsg(" %s: %0.4f ms",
                     msg, (newTime - self._lastTime) * 1000)
        self._lastTime = newTime

    def mark(self, msg=None):
        # Alias for __call__.
        self(msg)

    def _newMsg(self, msg, *args):
        # Indent by nesting depth; buffer when delayed, else print directly.
        msg = " " * (self._depth - 1) + msg
        if self._delayed:
            self._msgs.append((msg, args))
        else:
            self.flush()
            print(msg % args)

    def __del__(self):
        self.finish()

    def finish(self, msg=None):
        """Add a final message; flush the message list if no parent profiler.
        """
        if self._finished or self.disable:
            return
        self._finished = True
        if msg is not None:
            self(msg)
        self._newMsg("< Exiting %s, total time: %0.4f ms",
                     self._name, (ptime.time() - self._firstTime) * 1000)
        type(self)._depth -= 1
        # Only the outermost profiler flushes the shared buffer.
        if self._depth < 1:
            self.flush()

    def flush(self):
        if self._msgs:
            print("\n".join([m[0]%m[1] for m in self._msgs]))
            # Reset the class-level buffer (it is shared by all instances).
            type(self)._msgs = []
def profile(code, name='profile_run', sort='cumulative', num=30):
    """Common-use wrapper around cProfile.

    Runs *code* (a string of Python source) under cProfile, writes the raw
    profile data to file *name*, prints the top *num* entries sorted by the
    pstats key *sort*, and returns the pstats.Stats object.
    """
    # Local import: `pstats` is not among this module's top-level imports,
    # so the original body raised NameError on pstats.Stats().
    import pstats
    cProfile.run(code, name)
    stats = pstats.Stats(name)
    stats.sort_stats(sort)
    stats.print_stats(num)
    return stats
#### Code for listing (nearly) all objects in the known universe
#### http://utcc.utoronto.ca/~cks/space/blog/python/GetAllObjects
# Recursively expand slist's objects
# into olist, using seen to track
# already processed objects.
def _getr(slist, olist, first=True):
i = 0
for e in slist:
oid = id(e)
typ = type(e)
if oid in olist or typ is int: ## or e in olist: ## since we're excluding all ints, there is no longer a need to check for olist keys
continue
olist[oid] = e
if first and (i%1000) == 0:
gc.collect()
tl = gc.get_referents(e)
if tl:
_getr(tl, olist, first=False)
i += 1
# The public function.
def get_all_objects():
    """Return a dict mapping id -> object for all live Python objects
    (excluding plain ints), not including the dict itself."""
    gc.collect()
    roots = gc.get_objects()
    found = {}
    _getr(roots, found)
    # Drop the bookkeeping objects created by this call itself.
    del found[id(found)]
    del found[id(roots)]
    del found[id(sys._getframe())]
    return found
def lookup(oid, objects=None):
    """Return an object given its ID, if it exists."""
    # Scanning all live objects is expensive; callers may pass a
    # pre-computed id->object mapping instead.
    objs = get_all_objects() if objects is None else objects
    return objs[oid]
class ObjTracker(object):
"""
Tracks all objects under the sun, reporting the changes between snapshots: what objects are created, deleted, and persistent.
This class is very useful for tracking memory leaks. The class goes to great (but not heroic) lengths to avoid tracking
its own internal objects.
Example:
ot = ObjTracker() # takes snapshot of currently existing objects
... do stuff ...
ot.diff() # prints lists of objects created and deleted since ot was initialized
... do stuff ...
ot.diff() # prints lists of objects created and deleted since last call to ot.diff()
# also prints list of items that were created since initialization AND have not been deleted yet
# (if done correctly, this list can tell you about objects that were leaked)
arrays = ot.findPersistent('ndarray') ## returns all objects matching 'ndarray' (string match, not instance checking)
## that were considered persistent when the last diff() was run
describeObj(arrays[0]) ## See if we can determine who has references to this array
"""
allObjs = {} ## keep track of all objects created and stored within class instances
allObjs[id(allObjs)] = None
def __init__(self):
self.startRefs = {} ## list of objects that exist when the tracker is initialized {oid: weakref}
## (If it is not possible to weakref the object, then the value is None)
self.startCount = {}
self.newRefs = {} ## list of objects that have been created since initialization
self.persistentRefs = {} ## list of objects considered 'persistent' when the last diff() was called
self.objTypes = {}
ObjTracker.allObjs[id(self)] = None
self.objs = [self.__dict__, self.startRefs, self.startCount, self.newRefs, self.persistentRefs, self.objTypes]
self.objs.append(self.objs)
for v in self.objs:
ObjTracker.allObjs[id(v)] = None
self.start()
def findNew(self, regex):
"""Return all objects matching regex that were considered 'new' when the last diff() was run."""
return self.findTypes(self.newRefs, regex)
def findPersistent(self, regex):
"""Return all objects matching regex that were considered 'persistent' when the last diff() was run."""
return self.findTypes(self.persistentRefs, regex)
def start(self):
"""
Remember the current set of objects as the comparison for all future calls to diff()
Called automatically on init, but can be called manually as well.
"""
refs, count, objs = self.collect()
for r in self.startRefs:
self.forgetRef(self.startRefs[r])
self.startRefs.clear()
self.startRefs.update(refs)
for r in refs:
self.rememberRef(r)
self.startCount.clear()
self.startCount.update(count)
#self.newRefs.clear()
#self.newRefs.update(refs)
def diff(self, **kargs):
"""
Compute all differences between the current object set and the reference set.
Print a set of reports for created, deleted, and persistent objects
"""
refs, count, objs = self.collect() ## refs contains the list of ALL objects
## Which refs have disappeared since call to start() (these are only displayed once, then forgotten.)
delRefs = {}
for i in list(self.startRefs.keys()):
if i not in refs:
delRefs[i] = self.startRefs[i]
del self.startRefs[i]
self.forgetRef(delRefs[i])
for i in list(self.newRefs.keys()):
if i not in refs:
delRefs[i] = self.newRefs[i]
del self.newRefs[i]
self.forgetRef(delRefs[i])
#print "deleted:", len(delRefs)
## Which refs have appeared since call to start() or diff()
persistentRefs = {} ## created since start(), but before last diff()
createRefs = {} ## created since last diff()
for o in refs:
if o not in self.startRefs:
if o not in self.newRefs:
createRefs[o] = refs[o] ## object has been created since last diff()
else:
persistentRefs[o] = refs[o] ## object has been created since start(), but before last diff() (persistent)
#print "new:", len(newRefs)
## self.newRefs holds the entire set of objects created since start()
for r in self.newRefs:
self.forgetRef(self.newRefs[r])
self.newRefs.clear()
self.newRefs.update(persistentRefs)
self.newRefs.update(createRefs)
for r in self.newRefs:
self.rememberRef(self.newRefs[r])
#print "created:", len(createRefs)
## self.persistentRefs holds all objects considered persistent.
self.persistentRefs.clear()
self.persistentRefs.update(persistentRefs)
print("----------- Count changes since start: ----------")
c1 = count.copy()
for k in self.startCount:
c1[k] = c1.get(k, 0) - self.startCount[k]
typs = list(c1.keys())
typs.sort(key=lambda a: c1[a])
for t in typs:
if c1[t] == 0:
continue
num = "%d" % c1[t]
print(" " + num + " "*(10-len(num)) + str(t))
print("----------- %d Deleted since last diff: ------------" % len(delRefs))
self.report(delRefs, objs, **kargs)
print("----------- %d Created since last diff: ------------" % len(createRefs))
self.report(createRefs, objs, **kargs)
print("----------- %d Created since start (persistent): ------------" % len(persistentRefs))
self.report(persistentRefs, objs, **kargs)
def __del__(self):
    """Tear down this tracker: drop all bookkeeping state and remove the
    tracker's own objects from the class-wide allObjs registry so they are
    counted again by other trackers."""
    self.startRefs.clear()
    self.startCount.clear()
    self.newRefs.clear()
    self.persistentRefs.clear()
    # Unregister the tracker itself, then every helper object it registered.
    del ObjTracker.allObjs[id(self)]
    for v in self.objs:
        del ObjTracker.allObjs[id(v)]
@classmethod
def isObjVar(cls, o):
    """Return True if *o* is a tracker instance or one of the tracker's own
    bookkeeping objects (registered in cls.allObjs)."""
    if type(o) is cls:
        return True
    return id(o) in cls.allObjs
def collect(self):
    """Scan the interpreter for all live objects.

    Returns a tuple (refs, count, objs):
      refs  -- dict mapping object id -> weakref to the object, or None for
               objects that do not support weak references
      count -- dict mapping type -> number of instances seen
      objs  -- the raw id -> object mapping from get_all_objects()

    The current frame, the tracker's own bookkeeping objects, and ignored
    types (int) are excluded.
    """
    print("Collecting list of all objects...")
    gc.collect()
    objs = get_all_objects()
    frame = sys._getframe()
    del objs[id(frame)]  ## ignore the current frame
    del objs[id(frame.f_code)]
    ignoreTypes = [int]
    refs = {}
    count = {}
    for k in objs:
        o = objs[k]
        typ = type(o)
        oid = id(o)
        if ObjTracker.isObjVar(o) or typ in ignoreTypes:
            continue
        try:
            ## Bug fix: the original referenced the undefined name 'obj'
            ## here, so the NameError was swallowed by the bare except and
            ## every entry in refs was silently None.
            ref = weakref.ref(o)
        except TypeError:
            ## not every type supports weak references
            ref = None
        refs[oid] = ref
        typStr = typeStr(o)
        self.objTypes[oid] = typStr
        ## the type string itself must not show up in future scans
        ObjTracker.allObjs[id(typStr)] = None
        count[typ] = count.get(typ, 0) + 1
    print("All objects: %d Tracked objects: %d" % (len(objs), len(refs)))
    return refs, count, objs
def forgetRef(self, ref):
    """Drop the bookkeeping entry for *ref* so the weakref object itself is
    counted again in future scans."""
    if ref is None:
        return
    del ObjTracker.allObjs[id(ref)]
def rememberRef(self, ref):
    """Record the address of the weakref object so it is not included in
    future object counts."""
    if ref is None:
        return
    ObjTracker.allObjs[id(ref)] = None
def lookup(self, oid, ref, objs=None):
    """Resolve an object id back to the object: prefer the weakref if it is
    still alive, otherwise fall back to the module-level lookup() search.
    Returns None when the object cannot be found."""
    if ref is not None and ref() is not None:
        return ref()
    try:
        return lookup(oid, objects=objs)  # module-level lookup(), not this method
    except:
        return None
def report(self, refs, allobjs=None, showIDs=False):
    """Print one line per type found in *refs*: instance count, cumulative
    size, and the type string (dead objects are tagged '[del]').  Rows are
    ordered by cumulative size; *showIDs* appends the object ids."""
    if allobjs is None:
        allobjs = get_all_objects()
    stats = {}   # type string -> [instance count, cumulative size]
    byType = {}  # type string -> list of object ids
    for oid, ref in refs.items():
        obj = self.lookup(oid, ref, allobjs)
        if obj is None:
            typ = "[del] " + self.objTypes[oid]
        else:
            typ = typeStr(obj)
        byType.setdefault(typ, []).append(oid)
        n, size = stats.get(typ, [0, 0])
        stats[typ] = [n + 1, size + objectSize(obj)]
    for typ in sorted(stats, key=lambda t: stats[t][1]):
        line = " %d\t%d\t%s" % (stats[typ][0], stats[typ][1], typ)
        if showIDs:
            line += "\t" + ",".join(map(str, byType[typ]))
        print(line)
def findTypes(self, refs, regex):
    """Return the objects in *refs* whose recorded type string matches
    *regex* (resolved via self.lookup, so entries may be None if dead).

    Fix: removed the unused local ``ids = {}`` that the original created
    and never read.
    """
    allObjs = get_all_objects()
    objs = []
    r = re.compile(regex)
    for k in refs:
        if r.search(self.objTypes[k]):
            objs.append(self.lookup(k, refs[k], allObjs))
    return objs
def describeObj(obj, depth=4, path=None, ignore=None):
    """
    Trace all reference paths backward, printing a list of different ways this object can be accessed.
    Attempts to answer the question "who has a reference to this object"

    depth  -- maximum length of a printed reference chain
    path   -- the chain built so far (obj is always the last element)
    ignore -- dict of object ids belonging to this function's own machinery;
              shared (mutated) across the recursion on purpose so temporary
              lists/frames are never reported as referrers
    """
    if path is None:
        path = [obj]
    if ignore is None:
        ignore = {}  ## holds IDs of objects used within the function.
    ignore[id(sys._getframe())] = None
    ignore[id(path)] = None
    gc.collect()
    refs = gc.get_referrers(obj)
    ignore[id(refs)] = None
    printed=False
    for ref in refs:
        if id(ref) in ignore:
            continue
        if id(ref) in list(map(id, path)):
            # referrer is already in the chain -> cycle; report and stop here
            print("Cyclic reference: " + refPathString([ref]+path))
            printed = True
            continue
        newPath = [ref]+path
        if len(newPath) >= depth:
            # chain long enough -- print it instead of recursing further
            refStr = refPathString(newPath)
            if '[_]' not in refStr:  ## ignore '_' references generated by the interactive shell
                print(refStr)
                printed = True
        else:
            describeObj(ref, depth, newPath, ignore)
            printed = True
    if not printed:
        print("Dead end: " + refPathString(path))
def typeStr(obj):
    """Create a more useful type string by making <instance> types report their class."""
    objType = type(obj)
    # types.InstanceType only exists on Python 2 (old-style class instances);
    # on Python 3 the getattr yields None and the branch is never taken.
    if objType == getattr(types, 'InstanceType', None):
        return "<instance of %s>" % obj.__class__.__name__
    return str(objType)
def searchRefs(obj, *args):
    """Pseudo-interactive function for tracing references backward.
    **Arguments:**
    obj: The initial object from which to start searching
    args: A set of string or int arguments.
    each integer selects one of obj's referrers to be the new 'obj'
    each string indicates an action to take on the current 'obj':
    t: print the types of obj's referrers
    l: print the lengths of obj's referrers (if they have __len__)
    i: print the IDs of obj's referrers
    o: print obj
    ro: return obj
    rr: return list of obj's referrers
    Examples::
    searchRefs(obj, 't')  ## Print types of all objects referring to obj
    searchRefs(obj, 't', 0, 't')  ## ..then select the first referrer and print the types of its referrers
    searchRefs(obj, 't', 0, 't', 'l')  ## ..also print lengths of the last set of referrers
    searchRefs(obj, 0, 1, 'ro')  ## Select index 0 from obj's referrer, then select index 1 from the next set of referrers, then return that object
    """
    # ids of this function's own frame and referrer lists, so they are
    # filtered out of every referrer listing.
    ignore = {id(sys._getframe()): None}
    gc.collect()
    refs = gc.get_referrers(obj)
    ignore[id(refs)] = None
    refs = [r for r in refs if id(r) not in ignore]
    for a in args:
        #fo = allFrameObjs()
        #refs = [r for r in refs if r not in fo]
        if type(a) is int:
            # integer argument: descend into the a-th referrer
            obj = refs[a]
            gc.collect()
            refs = gc.get_referrers(obj)
            ignore[id(refs)] = None
            refs = [r for r in refs if id(r) not in ignore]
        elif a == 't':
            print(list(map(typeStr, refs)))
        elif a == 'i':
            print(list(map(id, refs)))
        elif a == 'l':
            def slen(o):
                # length if available, else None (keeps the printed list aligned)
                if hasattr(o, '__len__'):
                    return len(o)
                else:
                    return None
            print(list(map(slen, refs)))
        elif a == 'o':
            print(obj)
        elif a == 'ro':
            return obj
        elif a == 'rr':
            return refs
def allFrameObjs():
    """Return list of frame objects in current stack. Useful if you want to
    ignore these objects in reference searches."""
    frames = []
    frame = sys._getframe()
    while frame is not None:
        frames.append(frame)
        frames.append(frame.f_code)
        #frames.append(frame.f_locals)
        #frames.append(frame.f_globals)
        #frames.append(frame.f_builtins)
        frame = frame.f_back
    return frames
def findObj(regex):
    """Return a list of objects whose typeStr matches regex"""
    pattern = re.compile(regex)
    matches = []
    for obj in get_all_objects().values():
        if pattern.search(typeStr(obj)):
            matches.append(obj)
    return matches
def listRedundantModules():
"""List modules that have been imported more than once via different paths."""
mods = {}
for name, mod in sys.modules.items():
if not hasattr(mod, '__file__'):
continue
mfile = os.path.abspath(mod.__file__)
if mfile[-1] == 'c':
mfile = mfile[:-1]
if mfile in mods:
print("module at %s has 2 names: %s, %s" % (mfile, name, mods[mfile]))
else:
mods[mfile] = name
def walkQObjectTree(obj, counts=None, verbose=False, depth=0):
    """
    Walk through a tree of QObjects, doing nothing to them.
    The purpose of this function is to find dead objects and generate a crash
    immediately rather than stumbling upon them later.
    Prints a count of the objects encountered, for fun. (or is it?)

    counts  -- accumulator dict (type string -> count) shared by the recursion
    verbose -- print an indented line per visited object
    Returns the counts dict.

    Fix: removed the dead 'report' flag, which the original set but never read.
    """
    if verbose:
        print(" "*depth + typeStr(obj))
    if counts is None:
        counts = {}
    typ = str(type(obj))
    counts[typ] = counts.get(typ, 0) + 1
    for child in obj.children():
        walkQObjectTree(child, counts, verbose, depth+1)
    return counts
## Cache of id -> description for QObjects already seen by qObjectReport.
QObjCache = {}
def qObjectReport(verbose=False):
    """Generate a report counting all QObjects and their types.

    Fix: the original declared ``global qObjCache`` -- a misspelling of
    ``QObjCache`` that never existed; the statement is dropped (the dict is
    only mutated in place, so no global declaration is needed).
    """
    count = {}
    for obj in findObj('PyQt'):
        if isinstance(obj, QtCore.QObject):
            oid = id(obj)
            if oid not in QObjCache:
                QObjCache[oid] = typeStr(obj) + " " + obj.objectName()
                try:
                    # best effort: not every QObject has a parent name / text()
                    QObjCache[oid] += " " + obj.parent().objectName()
                    QObjCache[oid] += " " + obj.text()
                except Exception:
                    pass
            print("check obj", oid, str(QObjCache[oid]))
            if obj.parent() is None:
                walkQObjectTree(obj, count, verbose)
    typs = list(count.keys())
    typs.sort()
    for t in typs:
        print(count[t], "\t", t)
class PrintDetector(object):
    """Find code locations that print to stdout.

    Installs itself as sys.stdout on construction; every write is forwarded
    to the real stdout together with a stack trace showing who printed.
    """
    def __init__(self):
        self.stdout = sys.stdout  # the stream being wrapped
        sys.stdout = self
    def remove(self):
        """Restore the original stdout."""
        sys.stdout = self.stdout
    def __del__(self):
        self.remove()
    def write(self, x):
        # Forward the text, then show where the print came from.
        self.stdout.write(x)
        traceback.print_stack()
    def flush(self):
        self.stdout.flush()
def listQThreads():
    """Prints Thread IDs (Qt's, not OS's) for all QThreads."""
    qthreads = [t for t in findObj('[Tt]hread') if isinstance(t, QtCore.QThread)]
    # sip moved into the PyQt5 package in newer releases; fall back to the
    # standalone module for older installs.
    try:
        from PyQt5 import sip
    except ImportError:
        import sip
    for thread in qthreads:
        print("--> ", thread)
        print(" Qt ID: 0x%x" % sip.unwrapinstance(thread))
def pretty(data, indent=''):
    """Format nested dict/list/tuple structures into a more human-readable string
    This function is a bit better than pprint for displaying OrderedDicts.
    """
    deeper = indent + "  "
    if isinstance(data, dict):
        lines = [indent + "{"]
        for key, value in data.items():
            lines.append(deeper + repr(key) + ": " + pretty(value, deeper).strip())
        lines.append(indent + "}")
        return "\n".join(lines) + "\n"
    if isinstance(data, (list, tuple)):
        flat = repr(data)
        if len(flat) < 40:
            # short sequences are shown inline via their repr
            return indent + flat
        open_ch, close_ch = ('[', ']') if isinstance(data, list) else ('(', ')')
        lines = [indent + open_ch]
        for pos, value in enumerate(data):
            lines.append(deeper + str(pos) + ": " + pretty(value, deeper).strip())
        lines.append(indent + close_ch)
        return "\n".join(lines) + "\n"
    return indent + repr(data)
class ThreadTrace(object):
    """
    Used to debug freezing by starting a new thread that reports on the
    location of other threads periodically.
    """
    def __init__(self, interval=10.0):
        self.interval = interval
        self.lock = Mutex()  # guards self._stop
        self._stop = False
        self.start()  # tracing begins immediately on construction
    def stop(self):
        """Ask the tracing thread to exit at its next wakeup."""
        with self.lock:
            self._stop = True
    def start(self, interval=None):
        """(Re)start the daemon tracing thread, optionally with a new interval."""
        if interval is not None:
            self.interval = interval
        self._stop = False
        self.thread = threading.Thread(target=self.run)
        self.thread.daemon = True
        self.thread.start()
    def run(self):
        # Every self.interval seconds, dump a stack trace for every thread
        # except this tracing thread itself.
        while True:
            with self.lock:
                if self._stop is True:
                    return
            print("\n============= THREAD FRAMES: ================")
            for id, frame in sys._current_frames().items():
                if id == threading.current_thread().ident:
                    continue
                # try to determine a thread name
                try:
                    # NOTE(review): threading._active maps ident -> Thread
                    # object, so 'name' is a Thread instance here (rendered
                    # via %s below), not a plain string -- confirm intent.
                    name = threading._active.get(id, None)
                except:
                    name = None
                if name is None:
                    try:
                        # QThread._names must be manually set by thread creators.
                        name = QtCore.QThread._names.get(id)
                    except:
                        name = None
                if name is None:
                    name = "???"
                print("<< thread %d \"%s\" >>" % (id, name))
                traceback.print_stack(frame)
            print("===============================================\n")
            time.sleep(self.interval)
class ThreadColor(object):
    """
    Wrapper on stdout/stderr that colors text by the current thread ID.
    *stream* must be 'stdout' or 'stderr'.
    """
    colors = {}    # class-level: thread -> color index, shared by all instances
    lock = Mutex() # class-level: serializes writes from all threads
    def __init__(self, stream):
        self.stream = getattr(sys, stream)  # the real underlying stream
        self.err = stream == 'stderr'
        setattr(sys, stream, self)          # install self as replacement
    def write(self, msg):
        with self.lock:
            cprint.cprint(self.stream, self.color(), msg, -1, stderr=self.err)
    def flush(self):
        with self.lock:
            self.stream.flush()
    def color(self):
        # Assign each thread one of 15 colors, round-robin, on first use.
        # NOTE(review): 'tid' is a Thread object (used only as a dict key),
        # not a numeric id -- works, but the name is misleading.
        tid = threading.current_thread()
        if tid not in self.colors:
            c = (len(self.colors) % 15) + 1
            self.colors[tid] = c
        return self.colors[tid]
def enableFaulthandler():
    """Enable faulthandler for all threads.
    If the faulthandler package is available, this function disables and then
    re-enables fault handling for all threads (this is necessary to ensure any
    new threads are handled correctly), and returns True.
    If faulthandler is not available, then returns False.
    """
    try:
        import faulthandler
    except ImportError:
        return False
    # necessary to disable first or else new threads may not be handled.
    faulthandler.disable()
    faulthandler.enable(all_threads=True)
    return True
|
SpaceGroupUCL/qgisSpaceSyntaxToolkit
|
esstoolkit/external/pyqtgraph/debug.py
|
Python
|
gpl-3.0
| 41,299
|
def hello_name(name):
    """Return the greeting 'Hello <name>!'."""
    return "Hello {}!".format(name)
|
ismk/Python-Examples
|
codingbat/hello_name.py
|
Python
|
mit
| 51
|
'''
modifier: 01
eqtime: 10
'''
def main():
    # Pychron extraction script: sniff an air pipette (x1) into the quad,
    # splitting through the NP-10 getters.  The DSL functions (info, gosub,
    # open, close, sleep) are injected by the pychron script runtime.
    info("Jan Air Sniff Pipette x1")
    gosub('jan:WaitForMiniboneAccess')
    gosub('jan:PrepareForAirShot')
    open(name="Q", description="Quad Inlet")
    close(name="T", description="Microbone to CO2 Laser")
    gosub('jan:EvacPipette2')
    gosub('common:FillPipette2')
    gosub('jan:PrepareForAirShotExpansion')
    gosub('common:SniffPipette2')
    # Expand the shot into the microbone and isolate the NP-10C getter.
    open(name="S", description="Microbone to Inlet Pipette")
    sleep(duration=2.0)
    #close(name="M", description="Microbone to Getter NP-10H")
    close(name="K", description="Microbone to Getter NP-10C")
    sleep(duration=2.0)
    # Pump through the turbo for 20 s, then close off and getter the gas.
    open(name="U", description="Microbone to Turbo")
    close(name="L", description="Microbone to Minibone")
    #close(name="T", description="Microbone to CO2 Laser")
    sleep(duration=20.0)
    close(name="U", description="Microbone to Turbo")
    close(name="M", description="Microbone to Getter NP-10H")
    sleep(duration=3.0)
    #open(name="M", description="Microbone to Getter NP-10H")
    open(name="K", description="Microbone to Getter NP-10C")
    sleep(duration=10.0)
    close(name="S", description="Microbone to Inlet Pipette")
|
USGSDenverPychron/pychron
|
docs/user_guide/operation/scripts/examples/argus/extraction/jan_sniffair_x1_split_with_getter_90fA.py
|
Python
|
apache-2.0
| 1,179
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import sys
from matplotlib import pyplot as plt
import numpy as np
import argparse
def getKaryotype(fname):
    """returns dictionary e.g.: {'chr13': 115169878, ... }

    Parses a Circos-format karyotype file: only lines starting with 'chr'
    are used; field 7 is the chromosome name, field 6 its length.

    Fix: the original leaked the open file handle; the file is now closed
    deterministically via a context manager.
    """
    hs = {}
    with open(fname) as fh:
        for line in fh:
            if line[:3] != 'chr':
                continue
            fields = line.strip().split()
            hs[fields[6]] = int(fields[5])
    return hs
def drawSignals(karyotype, chromosome, signalFiles, segments):
    # NOTE(review): Python 2 code (print statements, xrange) -- left as-is.
    # Plots each uint16 binary signal file along the chromosome axis;
    # optionally overlays segment start positions read from *segments*.
    for f in signalFiles:
        y = np.fromfile(f, dtype=np.uint16)
        if y.size > 1e6:
            # Downsample huge signals to roughly 1e6 points before plotting.
            every = int(y.size//1e6)
            print "[ \033[1;33mWARN\033[1;m ] Your data from {} are very big so they were probed every {:d} point so they could fit in circa 1 000 000 points. Sorry :(".format(f.name, every)
            y = y[0::every]
        # x axis spans the full chromosome length from the karyotype.
        x = np.linspace(0,karyotype[chromosome],len(y))
        print "Plotting {}".format(f.name)
        bla = plt.plot(x,y, '-', label=f.name)
        color = bla[-1].get_color()
        plt.fill_between(x,y, color = color, alpha=0.1)
    if segments:
        # Segment lines look like "<chromosome> <position> ..."; the matching
        # positions are drawn as yellow diamonds at y=0.
        tmp = [i.strip().split() for i in segments ]
        segByPrzemek = [int(i[1]) for i in tmp if i[0] == chromosome]
        plt.plot(segByPrzemek,[0 for i in xrange(len(segByPrzemek))],'d',color='yellow',linestyle="none" )
    plt.legend()
    plt.grid()
    plt.title("ChIA-PET signal for {}".format(chromosome))
    plt.show()
def main():
    """Parse command-line arguments and plot the requested ChIA-PET signal."""
    arg_parser = argparse.ArgumentParser(description='Script for plotting ChIA-PET signal')
    arg_parser.add_argument('karyotype', help='karyotype in Circos format')
    arg_parser.add_argument('chromosome', help='chromosome. Np chr22')
    arg_parser.add_argument('signalFiles', type=argparse.FileType('r'), nargs='+', help="filenames with signal saved in numpy np.uint16 binary format")
    arg_parser.add_argument('-s', '--segments', type=argparse.FileType('r'), help="name of file with segments")
    options = arg_parser.parse_args()
    karyotype = getKaryotype(options.karyotype)
    drawSignals(karyotype, options.chromosome, options.signalFiles, options.segments)
if __name__ == '__main__':
main()
|
ramidas/ChIA-PET_sigvis
|
drawSignal.py
|
Python
|
mit
| 2,104
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2009 Zuza Software Foundation
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import os
from virtaal.common import pan_app
class PluginUnsupported(Exception):
    """Exception type used to flag a plug-in as unsupported."""
    pass
class BasePlugin(object):
    """The base interface to be implemented by all plug-ins."""
    configure_func = None
    """A function that starts the plug-in's configuration, if available."""
    description = ''
    """A description about the plug-in's purpose."""
    display_name = ''
    """The plug-in's name, suitable for display."""
    version = 0
    """The plug-in's version number."""
    default_config = {}
    # INITIALIZERS #
    def __new__(cls, *args, **kwargs):
        """Create a new plug-in instance and check that it is valid.

        Rejects sub-classes that have not set display_name or a positive
        version number.
        """
        if not cls.display_name:
            raise Exception('No name specified')
        if cls.version <= 0:
            raise Exception('Invalid version number specified')
        return super(BasePlugin, cls).__new__(cls)
    def __init__(self):
        # The base class is abstract; sub-classes must provide __init__.
        raise NotImplementedError('This interface cannot be instantiated.')
    # METHODS #
    def destroy(self):
        """This method is called by C{PluginController.shutdown()} and should be
        implemented by all plug-ins that need to do clean-up."""
        pass
    def load_config(self):
        """Load plugin config from default location.

        Merges default_config with the values stored under this plug-in's
        section in plugins.ini.
        """
        # NOTE(review): relies on self.internal_name being provided by the
        # sub-class or plugin loader -- confirm against PluginController.
        self.config = {}
        self.config.update(self.default_config)
        config_file = os.path.join(pan_app.get_config_dir(), "plugins.ini")
        self.config.update(pan_app.load_config(config_file, self.internal_name))
    def save_config(self):
        """Save plugin config to default location."""
        config_file = os.path.join(pan_app.get_config_dir(), "plugins.ini")
        pan_app.save_config(config_file, self.config, self.internal_name)
|
unho/virtaal
|
virtaal/controllers/baseplugin.py
|
Python
|
gpl-2.0
| 2,488
|
from . import vertical_lift_shuttle
|
OCA/stock-logistics-warehouse
|
stock_vertical_lift_server_env/models/__init__.py
|
Python
|
agpl-3.0
| 36
|
import json
import logging
import tornado
from db.client import sanitise_data
from handlers.base import BaseHandler
logger = logging.getLogger(__name__)
class HomeHandler(BaseHandler):
    """Serves the site index page with sanitised 'home' page data."""
    @tornado.gen.coroutine
    def get(self):
        user = self.get_current_user()
        future = self.db.pages.find_one({'page': 'home'})
        page_data = yield future
        # Bug fix: the original mutated page_data (adding the xsrf token)
        # *before* checking whether the query returned anything, so a missing
        # document raised TypeError instead of the intended HTTPError, and
        # the 'if not page_data' branch was dead code.
        if not page_data:
            raise tornado.web.HTTPError(500, 'No page data available.')
        page_data['xsrf'] = self.xsrf_token.decode('utf-8')
        context = sanitise_data(page_data)
        if user:
            context['user'] = str(user, 'utf-8')
        self.render('index.html', context=json.dumps(context))
|
jwnwilson/noelwilson_2017
|
server/handlers/home.py
|
Python
|
mit
| 745
|
from .main import MyCli
import sql.parse
import sql.connection
import logging
_logger = logging.getLogger(__name__)
def load_ipython_extension(ipython):
    """Entry point for '%load_ext mycli.magic'.

    Ensures the ipython-sql extension is loaded first, then registers the
    %mycli line magic.
    """
    if not ipython.find_line_magic('sql'):
        ipython.run_line_magic('load_ext', 'sql')
    # Register our own magic.
    ipython.register_magic_function(mycli_line_magic, 'line', 'mycli')
def mycli_line_magic(line):
    """Implements the %mycli line magic: opens (or reuses) a MyCli session on
    the ipython-sql connection named by *line*, runs the interactive CLI, and
    replays the last successful non-mutating query through %%sql."""
    _logger.debug('mycli magic called: %r', line)
    parsed = sql.parse.parse(line, {})
    conn = sql.connection.Connection.get(parsed['connection'])
    try:
        # A corresponding mycli object already exists
        mycli = conn._mycli
        _logger.debug('Reusing existing mycli')
    except AttributeError:
        # First use of this connection: build a MyCli from the SQLAlchemy URL
        # and cache it on the connection object.
        mycli = MyCli()
        u = conn.session.engine.url
        _logger.debug('New mycli: %r', str(u))
        mycli.connect(u.database, u.host, u.username, u.port, u.password)
        conn._mycli = mycli
    # For convenience, print the connection alias
    print('Connected: {}'.format(conn.name))
    try:
        mycli.run_cli()
    except SystemExit:
        # run_cli exits via SystemExit when the user quits; swallow it so the
        # notebook session survives.
        pass
    if not mycli.query_history:
        return
    q = mycli.query_history[-1]
    if q.mutating:
        _logger.debug('Mutating query detected -- ignoring')
        return
    if q.successful:
        # get_ipython() is injected into the namespace by IPython at runtime.
        ipython = get_ipython()
        return ipython.run_cell_magic('sql', line, q.query)
|
danieljwest/mycli
|
mycli/magic.py
|
Python
|
bsd-3-clause
| 1,496
|
import unittest
import helper
class TestBuiltin(helper.TestCtypesBindingGenerator):
    """Checks that builtin C types are mapped to the expected ctypes names."""
    def test_size_t(self):
        # Generate bindings for common builtin typedefs and compare against
        # the expected ctypes output (va_list expands to __va_list_tag).
        self.run_test('''
#include <stdio.h>
size_t size;
ssize_t ssize;
wchar_t wchar;
wchar_t *wchar_p;
va_list va_list_v;
''', '''
class __va_list_tag(Structure):
pass
__va_list_tag._fields_ = [('gp_offset', c_uint),
('fp_offset', c_uint),
('overflow_arg_area', c_void_p),
('reg_save_area', c_void_p)]
size = c_size_t.in_dll(_lib, 'size')
ssize = c_ssize_t.in_dll(_lib, 'ssize')
wchar = c_wchar_t.in_dll(_lib, 'wchar')
wchar_p = c_wchar_p.in_dll(_lib, 'wchar_p')
va_list_v = (__va_list_tag * 1).in_dll(_lib, 'va_list_v')
''',
        # TODO(clchiou): Search include path.
        args=['-I/usr/local/lib/clang/3.4/include'])
    def test_unsupported_type(self):
        # 128-bit integers have no ctypes equivalent and must raise.
        with self.assertRaises(TypeError):
            self.run_test('''
__int128_t x = 1;
''', '''
''')
if __name__ == '__main__':
unittest.main()
|
anthrotype/ctypes-binding-generator
|
test/test_builtin.py
|
Python
|
gpl-3.0
| 1,057
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from typing import Optional

from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS Network Firewall"
prefix = "network-firewall"
class Action(BaseAction):
    """An IAM policy action in the network-firewall service namespace.

    Fix: the *action* parameter defaulted to None while annotated as plain
    ``str``; the annotation is now the explicit ``Optional[str]`` required
    by PEP 484 (implicit Optional is deprecated).
    """
    def __init__(self, action: Optional[str] = None) -> None:
        super().__init__(prefix, action)
class ARN(BaseARN):
    """An Amazon Resource Name scoped to the network-firewall service."""
    def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
        super().__init__(
            service=prefix,
            resource=resource,
            region=region,
            account=account,
        )
AssociateFirewallPolicy = Action("AssociateFirewallPolicy")
AssociateSubnets = Action("AssociateSubnets")
CreateFirewall = Action("CreateFirewall")
CreateFirewallPolicy = Action("CreateFirewallPolicy")
CreateRuleGroup = Action("CreateRuleGroup")
DeleteFirewall = Action("DeleteFirewall")
DeleteFirewallPolicy = Action("DeleteFirewallPolicy")
DeleteResourcePolicy = Action("DeleteResourcePolicy")
DeleteRuleGroup = Action("DeleteRuleGroup")
DescribeFirewall = Action("DescribeFirewall")
DescribeFirewallPolicy = Action("DescribeFirewallPolicy")
DescribeLoggingConfiguration = Action("DescribeLoggingConfiguration")
DescribeResourcePolicy = Action("DescribeResourcePolicy")
DescribeRuleGroup = Action("DescribeRuleGroup")
DescribeRuleGroupMetadata = Action("DescribeRuleGroupMetadata")
DisassociateSubnets = Action("DisassociateSubnets")
ListFirewallPolicies = Action("ListFirewallPolicies")
ListFirewalls = Action("ListFirewalls")
ListRuleGroups = Action("ListRuleGroups")
ListTagsForResource = Action("ListTagsForResource")
PutResourcePolicy = Action("PutResourcePolicy")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateFirewallDeleteProtection = Action("UpdateFirewallDeleteProtection")
UpdateFirewallDescription = Action("UpdateFirewallDescription")
UpdateFirewallPolicy = Action("UpdateFirewallPolicy")
UpdateFirewallPolicyChangeProtection = Action("UpdateFirewallPolicyChangeProtection")
UpdateLoggingConfiguration = Action("UpdateLoggingConfiguration")
UpdateRuleGroup = Action("UpdateRuleGroup")
UpdateSubnetChangeProtection = Action("UpdateSubnetChangeProtection")
|
cloudtools/awacs
|
awacs/network_firewall.py
|
Python
|
bsd-2-clause
| 2,200
|
def cross(environment, book, row, sheet_source, column_source, column_key):
    """
    Returns a single value from a column from a different dataset, matching by the key.
    """
    source_sheet = book.sheets[sheet_source]
    match = source_sheet.get(**{column_key: row[column_key]})
    return environment.copy(match[column_source])
def column(environment, book, sheet_name, sheet_source, column_source, column_key):
    """
    Returns an array of values from column from a different dataset, ordered as the key.
    """
    source_sheet = book.sheets[sheet_source]
    target_sheet = book.sheets[sheet_name]
    values = []
    for row in target_sheet.all():
        match = source_sheet.get(**{column_key: row[column_key]})
        values.append(match[column_source])
    return environment.copy(values)
|
databuild/databuild
|
databuild/functions/data.py
|
Python
|
bsd-3-clause
| 660
|
#!/usr/bin/python
import ctypes as c
import random
import ecdsa
import hashlib
import binascii
import os
import pytest
def bytes2num(s):
    """Interpret the byte string *s* as a big-endian unsigned integer."""
    value = 0
    for b in bytearray(s):
        value = (value << 8) | b
    return value
curves = {
'nist256p1': ecdsa.curves.NIST256p,
'secp256k1': ecdsa.curves.SECP256k1
}
class Point:
    """A test vector: an affine elliptic-curve point plus the name of the
    curve it lives on."""
    def __init__(self, name, x, y):
        self.curve = name  # curve name, a key into the 'curves' dict
        self.x = x
        self.y = y
points = [
Point('secp256k1', 0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798, 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8),
Point('secp256k1', 0x1, 0x4218f20ae6c646b363db68605822fb14264ca8d2587fdd6fbc750d587e76a7ee),
Point('secp256k1', 0x2, 0x66fbe727b2ba09e09f5a98d70a5efce8424c5fa425bbda1c511f860657b8535e),
Point('secp256k1', 0x1b,0x1adcea1cf831b0ad1653e769d1a229091d0cc68d4b0328691b9caacc76e37c90),
Point('nist256p1', 0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296, 0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5),
Point('nist256p1', 0x0, 0x66485c780e2f83d72433bd5d84a06bb6541c2af31dae871728bf856a174f93f4),
Point('nist256p1', 0x0, 0x99b7a386f1d07c29dbcc42a27b5f9449abe3d50de25178e8d7407a95e8b06c0b),
Point('nist256p1', 0xaf8bbdfe8cdd5577acbf345b543d28cf402f4e94d3865b97ea0787f2d3aa5d22,0x35802b8b376b995265918b078bc109c21a535176585c40f519aca52d6afc147c),
Point('nist256p1', 0x80000, 0x580610071f440f0dcc14a22e2d5d5afc1224c0cd11a3b4b51b8ecd2224ee1ce2)
]
random_iters = int(os.environ.get('ITERS', 1))
lib = c.cdll.LoadLibrary('./libtrezor-crypto.so')
lib.get_curve_by_name.restype = c.c_void_p
BIGNUM = c.c_uint32 * 9
class Random(random.Random):
    """random.Random extended with helpers for ctypes buffers and curve points."""
    def randbytes(self, n):
        """Return *n* random bytes as a ctypes c_uint8 array."""
        values = [self.randrange(0, 256) for _ in range(n)]
        return (c.c_uint8 * n)(*values)
    def randpoint(self, curve):
        """Return a uniformly random point on *curve*."""
        k = self.randrange(0, curve.order)
        return k * curve.generator
def int2bn(x, bn_type=BIGNUM):
    """Pack integer *x* into a ctypes bignum array of 30-bit limbs
    (least-significant limb first); the original value is stashed on ._int."""
    bn = bn_type()
    bn._int = x
    mask = (1 << 30) - 1
    for limb in range(len(bn)):
        bn[limb] = x & mask
        x >>= 30
    return bn
def bn2int(b):
    """Unpack a ctypes bignum array of 30-bit limbs back into a Python int."""
    total = 0
    for idx in reversed(range(len(b))):
        total = total * (1 << 30) + b[idx]
    return total
# Fixtures: 'r' yields a seeded deterministic Random (ITERS/SEED env vars
# control repetition); 'curve' yields each ecdsa curve object augmented with
# the matching native curve handle from libtrezor-crypto.
@pytest.fixture(params=range(random_iters))
def r(request):
    # Seed is the iteration index plus an optional SEED offset.
    seed = request.param
    return Random(seed + int(os.environ.get('SEED', 0)))
@pytest.fixture(params=list(sorted(curves)))
def curve(request):
    name = request.param
    curve_ptr = lib.get_curve_by_name(name)
    assert curve_ptr, 'curve {} not found'.format(name)
    curve_obj = curves[name]
    curve_obj.ptr = c.c_void_p(curve_ptr)  # native curve handle
    curve_obj.p = curve_obj.curve.p()  # shorthand for the field prime
    return curve_obj
@pytest.fixture(params=points)
def point(request):
    # Like 'curve', except curve_obj.p is set to a fixed well-known curve
    # point (from the 'points' test vectors) instead of the field prime.
    name = request.param.curve
    curve_ptr = lib.get_curve_by_name(name)
    assert curve_ptr, 'curve {} not found'.format(name)
    curve_obj = curves[name]
    curve_obj.ptr = c.c_void_p(curve_ptr)
    curve_obj.p = ecdsa.ellipticcurve.Point(curve_obj.curve, request.param.x, request.param.y)
    return curve_obj
# Each test cross-checks one native bignum primitive against the pure-Python
# ecdsa implementation on random inputs.
def test_inverse(curve, r):
    # bn_inverse vs ecdsa.numbertheory.inverse_mod
    x = r.randrange(1, curve.p)
    y = int2bn(x)
    lib.bn_inverse(y, int2bn(curve.p))
    y = bn2int(y)
    y_ = ecdsa.numbertheory.inverse_mod(x, curve.p)
    assert y == y_
def test_is_less(curve, r):
    # bn_is_less is checked in both argument orders
    x = r.randrange(0, curve.p)
    y = r.randrange(0, curve.p)
    x_ = int2bn(x)
    y_ = int2bn(y)
    res = lib.bn_is_less(x_, y_)
    assert res == (x < y)
    res = lib.bn_is_less(y_, x_)
    assert res == (y < x)
def test_is_equal(curve, r):
    x = r.randrange(0, curve.p)
    y = r.randrange(0, curve.p)
    x_ = int2bn(x)
    y_ = int2bn(y)
    assert lib.bn_is_equal(x_, y_) == (x == y)
    assert lib.bn_is_equal(x_, x_) == 1
    assert lib.bn_is_equal(y_, y_) == 1
def test_is_zero(curve, r):
    x = r.randrange(0, curve.p);
    assert lib.bn_is_zero(int2bn(x)) == (not x)
def test_simple_comparisons():
    # fixed edge cases around zero and one
    assert lib.bn_is_zero(int2bn(0)) == 1
    assert lib.bn_is_zero(int2bn(1)) == 0
    assert lib.bn_is_less(int2bn(0), int2bn(0)) == 0
    assert lib.bn_is_less(int2bn(1), int2bn(0)) == 0
    assert lib.bn_is_less(int2bn(0), int2bn(1)) == 1
    assert lib.bn_is_equal(int2bn(0), int2bn(0)) == 1
    assert lib.bn_is_equal(int2bn(1), int2bn(0)) == 0
    assert lib.bn_is_equal(int2bn(0), int2bn(1)) == 0
def test_mult_half(curve, r):
    # bn_mult_half computes x/2 mod p; the result may come back in [0, 2p).
    x = r.randrange(0, 2*curve.p)
    y = int2bn(x)
    lib.bn_mult_half(y, int2bn(curve.p))
    y = bn2int(y)
    if y >= curve.p:
        y -= curve.p
    half = ecdsa.numbertheory.inverse_mod(2, curve.p)
    assert y == (x * half) % curve.p
def test_subtractmod(curve, r):
    # bn_subtractmod returns x - y + 2p (no reduction), matching z_ below.
    x = r.randrange(0, 2 ** 256)
    y = r.randrange(0, 2 ** 256)
    z = int2bn(0)
    lib.bn_subtractmod(int2bn(x), int2bn(y), z, int2bn(curve.p))
    z = bn2int(z)
    z_ = x + 2*curve.p - y
    assert z == z_
def test_subtract2(r):
    # plain subtraction; operands ordered so the result is non-negative
    x = r.randrange(0, 2 ** 256)
    y = r.randrange(0, 2 ** 256)
    x, y = max(x, y), min(x, y)
    z = int2bn(0)
    lib.bn_subtract(int2bn(x), int2bn(y), z)
    z = bn2int(z)
    z_ = x - y
    assert z == z_
def test_add(curve, r):
    x = r.randrange(0, 2 ** 256)
    y = r.randrange(0, 2 ** 256)
    z_ = x + y
    z = int2bn(x)
    lib.bn_add(z, int2bn(y))
    z = bn2int(z)
    assert z == z_
def test_addmod(curve, r):
    # bn_addmod may leave the result in [0, 2p); normalize before comparing.
    x = r.randrange(0, 2 ** 256)
    y = r.randrange(0, 2 ** 256)
    z_ = (x + y) % curve.p
    z = int2bn(x)
    lib.bn_addmod(z, int2bn(y), int2bn(curve.p))
    z = bn2int(z)
    if z >= curve.p:
        z = z - curve.p
    assert z == z_
def test_multiply(curve, r):
    # bn_multiply: modular multiplication, result guaranteed < 2p.
    k = r.randrange(0, 2 * curve.p)
    x = r.randrange(0, 2 * curve.p)
    z = (k * x) % curve.p
    k = int2bn(k)
    z_ = int2bn(x)
    p_ = int2bn(curve.p)
    lib.bn_multiply(k, z_, p_)
    z_ = bn2int(z_)
    assert z_ < 2*curve.p
    if z_ >= curve.p:
        z_ = z_ - curve.p
    assert z_ == z
def test_multiply1(curve, r):
    # bn_multiply_long: full-width (unreduced) product into an 18-limb array.
    k = r.randrange(0, 2 * curve.p)
    x = r.randrange(0, 2 * curve.p)
    kx = k * x
    res = int2bn(0, bn_type=(c.c_uint32 * 18))
    lib.bn_multiply_long(int2bn(k), int2bn(x), res)
    res = bn2int(res)
    assert res == kx
def test_multiply2(curve, r):
    # bn_multiply_reduce: reduces an 18-limb value modulo the curve prime.
    x = int2bn(0)
    s = r.randrange(0, 2 ** 526)
    res = int2bn(s, bn_type=(c.c_uint32 * 18))
    prime = int2bn(curve.p)
    lib.bn_multiply_reduce(x, res, prime)
    x = bn2int(x) % curve.p
    x_ = s % curve.p
    assert x == x_
def test_fast_mod(curve, r):
    # bn_fast_mod only guarantees a result < 2p; normalize before comparing.
    x = r.randrange(0, 128*curve.p)
    y = int2bn(x)
    lib.bn_fast_mod(y, int2bn(curve.p))
    y = bn2int(y)
    assert y < 2*curve.p
    if y >= curve.p:
        y -= curve.p
    assert x % curve.p == y
def test_mod(curve, r):
    x = r.randrange(0, 2*curve.p)
    y = int2bn(x)
    lib.bn_mod(y, int2bn(curve.p))
    assert bn2int(y) == x % curve.p
def test_mod_specific(curve):
    # boundary values around 0, p and 2p
    p = curve.p
    for x in [0, 1, 2, p - 2, p - 1, p, p + 1, p + 2, 2*p - 2, 2*p - 1]:
        y = int2bn(x)
        lib.bn_mod(y, int2bn(curve.p))
        assert bn2int(y) == x % p
## ctypes layouts for affine (x, y) and jacobian (x, y, z) points.
## Fix: the four converters were lambda assignments (PEP 8 E731); they are
## now named functions with docstrings. Call sites are unchanged.
POINT = BIGNUM * 2
def to_POINT(p):
    """Convert an ecdsa affine point to a ctypes (x, y) bignum pair."""
    return POINT(int2bn(p.x()), int2bn(p.y()))
def from_POINT(p):
    """Convert a ctypes (x, y) bignum pair to a tuple of Python ints."""
    return (bn2int(p[0]), bn2int(p[1]))
JACOBIAN = BIGNUM * 3
def to_JACOBIAN(jp):
    """Convert an (x, y, z) int triple to a ctypes jacobian point."""
    return JACOBIAN(int2bn(jp[0]), int2bn(jp[1]), int2bn(jp[2]))
def from_JACOBIAN(p):
    """Convert a ctypes jacobian point to an (x, y, z) tuple of ints."""
    return (bn2int(p[0]), bn2int(p[1]), bn2int(p[2]))
def test_point_multiply(curve, r):
    # scalar multiplication vs ecdsa's k * p
    p = r.randpoint(curve)
    k = r.randrange(0, 2 ** 256)
    kp = k * p
    res = POINT(int2bn(0), int2bn(0))
    lib.point_multiply(curve.ptr, int2bn(k), to_POINT(p), res)
    res = from_POINT(res)
    assert res == (kp.x(), kp.y())
def test_point_add(curve, r):
    # point_add accumulates into its second argument
    p1 = r.randpoint(curve)
    p2 = r.randpoint(curve)
    #print '-' * 80
    q = p1 + p2
    q1 = to_POINT(p1)
    q2 = to_POINT(p2)
    lib.point_add(curve.ptr, q1, q2)
    q_ = from_POINT(q2)
    assert q_ == (q.x(), q.y())
def test_point_double(curve, r):
    # point_double works in place
    p = r.randpoint(curve)
    q = p.double()
    q_ = to_POINT(p)
    lib.point_double(curve.ptr, q_)
    q_ = from_POINT(q_)
    assert q_ == (q.x(), q.y())
def test_point_to_jacobian(curve, r):
    # round-trip affine -> jacobian -> affine; jacobian coords satisfy
    # jx = x*z^2, jy = y*z^3 (mod p)
    p = r.randpoint(curve)
    jp = JACOBIAN()
    lib.curve_to_jacobian(to_POINT(p), jp, int2bn(curve.p))
    jx, jy, jz = from_JACOBIAN(jp)
    assert jx % curve.p == (p.x() * jz ** 2) % curve.p
    assert jy % curve.p == (p.y() * jz ** 3) % curve.p
    q = POINT()
    lib.jacobian_to_curve(jp, q, int2bn(curve.p))
    q = from_POINT(q)
    assert q == (p.x(), p.y())
def test_cond_negate(curve, r):
    # conditional_negate: cond=0 leaves x, cond=-1 yields 2p - x
    x = r.randrange(0, curve.p)
    a = int2bn(x)
    lib.conditional_negate(0, a, int2bn(curve.p))
    assert bn2int(a) == x
    lib.conditional_negate(-1, a, int2bn(curve.p))
    assert bn2int(a) == 2*curve.p - x
def test_jacobian_add(curve, r):
    # mixed add: affine p1 + jacobian p2, checked against ecdsa's p1 + p2
    p1 = r.randpoint(curve)
    p2 = r.randpoint(curve)
    prime = int2bn(curve.p)
    q = POINT()
    jp2 = JACOBIAN()
    lib.curve_to_jacobian(to_POINT(p2), jp2, prime)
    lib.point_jacobian_add(to_POINT(p1), jp2, curve.ptr)
    lib.jacobian_to_curve(jp2, q, prime)
    q = from_POINT(q)
    p_ = p1 + p2
    assert (p_.x(), p_.y()) == q
def test_jacobian_add_double(curve, r):
    # degenerate add where both operands are the same point (doubling path)
    p1 = r.randpoint(curve)
    p2 = p1
    prime = int2bn(curve.p)
    q = POINT()
    jp2 = JACOBIAN()
    lib.curve_to_jacobian(to_POINT(p2), jp2, prime)
    lib.point_jacobian_add(to_POINT(p1), jp2, curve.ptr)
    lib.jacobian_to_curve(jp2, q, prime)
    q = from_POINT(q)
    p_ = p1 + p2
    assert (p_.x(), p_.y()) == q
def test_jacobian_double(curve, r):
    # jacobian doubling vs ecdsa's p.double()
    p = r.randpoint(curve)
    p2 = p.double()
    prime = int2bn(curve.p)
    q = POINT()
    jp = JACOBIAN()
    lib.curve_to_jacobian(to_POINT(p), jp, prime)
    lib.point_jacobian_double(jp, curve.ptr)
    lib.jacobian_to_curve(jp, q, prime)
    q = from_POINT(q)
    assert (p2.x(), p2.y()) == q
def sigdecode(sig, _):
return map(bytes2num, [sig[:32], sig[32:]])
def test_sign(curve, r):
priv = r.randbytes(32)
digest = r.randbytes(32)
sig = r.randbytes(64)
lib.ecdsa_sign_digest(curve.ptr, priv, digest, sig, c.c_void_p(0))
exp = bytes2num(priv)
sk = ecdsa.SigningKey.from_secret_exponent(exp, curve,
hashfunc=hashlib.sha256)
vk = sk.get_verifying_key()
sig_ref = sk.sign_digest_deterministic(digest, hashfunc=hashlib.sha256, sigencode=ecdsa.util.sigencode_string_canonize)
assert binascii.hexlify(sig) == binascii.hexlify(sig_ref)
assert vk.verify_digest(sig, digest, sigdecode)
def test_validate_pubkey(curve, r):
p = r.randpoint(curve)
assert lib.ecdsa_validate_pubkey(curve.ptr, to_POINT(p))
def test_validate_pubkey_direct(point):
    """A known-good point fixture validates against its own curve pointer."""
    converted = to_POINT(point.p)
    assert lib.ecdsa_validate_pubkey(point.ptr, converted)
|
JasonLee0524/trezor-crypto-master
|
test_curves.py
|
Python
|
mit
| 10,750
|
from library.frontend import Base
class Manager_Privacy(Base):
    '''
    Manage a user's confidential ("vault") data, stored encrypted in mongo
    and cached in redis. This feature is reserved for a later release and
    is not currently exposed in the UI.

    All methods follow the project convention of returning a
    (success_flag, payload_or_error_message) tuple.
    '''
    def get(self, username, vault_password=None, force=False):
        '''
        Fetch the user's privacy data.
        :parm
            username: user name
            vault_password: the user's vault password; when None the raw
                (still encrypted) cache/mongo result is returned unchanged
            force: when True, bypass the redis cache and read from mongo
        '''
        redis_key = self.rediskey_prefix + username + ':privacy'
        result = self.get_data(username, redis_key, self.privacy_mongocollect, force=force)
        if vault_password is None :
            # Caller did not ask for decryption - hand back the raw result.
            return result
        else :
            vault_result = self.verify_vaultpassword(username, vault_password)
            if not vault_result[0] :
                return vault_result
            if result[0] :
                try :
                    data = result[1][0]
                except :
                    # No stored record yet - treat as an empty data set.
                    return (True, {})
                if not isinstance(data, dict) :
                    return (True, {})
                try :
                    # Strip bookkeeping fields before decrypting the payload.
                    del data['username']
                    del data['add_time']
                except :
                    pass
                result = self.decryp_dict(username, vault_password, data, [])
                if not result[0] :
                    return result
                data_dict = result[1]
                return (True, data_dict)
            else :
                self.logger.error('获取用户' + username + '的机密数据失败,原因:' + result[1])
                return result
    def save_vault(self, username):
        '''
        Persist the (already encrypted) privacy data staged in the temporary
        redis key into mongo. Used after a vault password change.
        :parm
            username: user name
        '''
        temp_rediskey = self.rediskey_prefix + username + ':privacy:temp'
        result = self.redisclient.get(temp_rediskey, fmt='obj')
        if not result[0] :
            # BUG FIX: this previously read `self.temp_rediskey`, an
            # attribute that is never defined; the key computed above is the
            # local variable `temp_rediskey`.
            if not self.redisclient.haskey(temp_rediskey):
                self.logger.warn('没有任何用户' + username + '的机密数据写入数据库【上级任务:更改用户的vault密码】')
                return (True, '没有数据')
            self.logger.error('用户' + username + '的机密数据写入数据库失败【上级任务:更改用户的vault密码】,读取缓存信息失败,原因:' + result[1])
            return (False, '读取缓存失败' + result[1])
        data = result[1]
        if data == {}:
            self.logger.warn('没有任何用户' + username + '的机密数据写入数据库【上级任务:更改用户的vault密码】')
            return (True, '没有数据')
        update_dict = {
            'collect' : self.privacy_mongocollect,
            'data' : data
        }
        result = self.mongoclient.update({'username' : username}, update_dict, addtime=True)
        if result[0] :
            self.logger.info('用户' + username + '的机密数据写入数据库成功【上级任务:更改用户的vault密码】')
        else :
            self.logger.error('用户' + username + '的机密数据写入数据库失败【上级任务:更改用户的vault密码】,原因:' + result[1])
        return result
    def save(self, username, data, vault_password):
        '''
        Encrypt plaintext privacy data and write it to mongo, then refresh
        the redis cache from the stored copy.
        :parm
            username: user name
            data: plaintext privacy data; must be a dict
            vault_password: vault password used for encryption
        '''
        vault_result = self.verify_vaultpassword(username, vault_password)
        if not vault_result[0] :
            return vault_result
        if not isinstance(data, dict) :
            self.logger.error('用户' + username + '的机密数据写入数据库失败【来源于web页面更改机密数据】,原因:数据格式不正确,必须是字典格式')
            return (False, '数据格式不正确,必须是字典格式')
        result = self.encryp_dict(username, vault_password, data, [])
        if not result[0] :
            self.logger.error('用户' + username + '的机密数据写入数据库失败【来源于web页面更改机密数据】,原因:' + result[1])
            return result
        vault_dict = result[1]
        vault_dict['username'] = username
        condition_dict = {'username':username}
        update_dict = {
            'collect' : self.privacy_mongocollect,
            'data' : vault_dict
        }
        result = self.mongoclient.update(condition_dict, update_dict, addtime=True)
        # Refresh the cache from mongo so later reads see the new ciphertext.
        redis_key = self.rediskey_prefix + username + ':privacy'
        self.get_data(username, redis_key, self.privacy_mongocollect, force=True)
        if result[0] :
            self.logger.info('用户' + username + '的机密数据写入数据库成功【上级任务:来源于web页面更改机密数据】')
        else :
            self.logger.error('用户' + username + '的机密数据写入数据库失败【上级任务:来源于web页面更改机密数据】,原因:' + result[1])
        return result
    def change_vault_password(self, username, old_pwd, new_pwd):
        '''
        Re-encrypt the user's privacy data under a new vault password.
        The re-encrypted data is staged in a short-lived temporary redis key
        (written to mongo later by save_vault) and the main cache entry is
        invalidated.
        :parm
            username: user name
            old_pwd: old vault password
            new_pwd: new vault password
        '''
        result = self.mongoclient.find(self.privacy_mongocollect, condition_dict={'username' : username})
        if not result[0] :
            self.logger.error('用户' + username + '的机密数据更改vault密码失败,原因:查询数据错误')
            return (False, '查询数据错误')
        try :
            vault_dict = result[1][0]
        except :
            # Nothing stored for this user - nothing to re-encrypt.
            return (True, {})
        if not isinstance(vault_dict, dict) :
            self.logger.error('用户' + username + '的机密数据更改vault密码失败,原因:查询数据错误')
            return (False, '查询数据错误')
        try :
            del vault_dict['username']
            del vault_dict['add_time']
        except :
            pass
        result = self.change_vltpwd_dict(username, old_pwd, new_pwd, vault_dict, [])
        if not result[0] :
            return result
        new_vault_dict = result[1]
        new_vault_dict['username'] = username
        # Stage the re-encrypted data for 5 minutes and drop the stale cache.
        result = self.write_cache(self.rediskey_prefix + username + ':privacy:temp', new_vault_dict, expire=60 * 5)
        redis_key = self.rediskey_prefix + username + ':privacy'
        self.redisclient.delete(redis_key)
        if result[0] :
            self.logger.info('用户' + username + '的机密数据更改vault密码成功,并写入临时缓存中')
            return (True, '写缓存成功')
        else :
            self.logger.error('用户' + username + '的机密数据更改vault密码成功,但写入临时缓存失败,原因:' + result[1])
            return (False, '写缓存失败,' + result[1])
|
lykops/lykops
|
library/frontend/sysadmin/privacy.py
|
Python
|
apache-2.0
| 7,308
|
#!/bine/env python
#_*_ coding:utf-8 _*_
import signal
#define signal handler function
def myHnadler(signum,frame):
print"I received:",signum
#register signal.SIGTSTP's handler
signal.signal(signal.SIGTSTP, myHnadler)
signal.pause()
print "END of Signal Demo"
|
zhengjue/mytornado
|
process_sync/process_sigin.py
|
Python
|
gpl-3.0
| 268
|
#!/usr/bin/env python
import sys
import json
import urllib2
import argparse
from operator import itemgetter
from prettytable import PrettyTable
# Default AWE API endpoint; may be overridden by --awe_url in main().
AWE_URL = 'https://awe.mg-rast.org'
# Known AWE pipelines mapped to their ordered stage names. The list index of
# a stage corresponds to the task number encoded in AWE work identifiers
# ("<job>_<task#>"), which main() matches via enumerate().
MGP = {
    'mgrast-prod-4.0.3': [
        'qc_stats',
        'adapter trim',
        'preprocess',
        'dereplication',
        'screen',
        'rna detection',
        'rna clustering',
        'rna sims blat',
        'genecalling',
        'aa filtering',
        'aa clustering',
        'aa sims blat',
        'aa sims annotation',
        'rna sims annotation',
        'index sim seq',
        'md5 abundance',
        'lca abundance',
        'source abundance',
        'dark matter extraction',
        'abundance cassandra load',
        'done stage',
        'notify job completion'
    ],
    'inbox_action': [
        'step 1',
        'step 2',
        'step 3'
    ],
    'submission': [
        'step 1'
    ],
    'mgrast-submit-ebi': [
        'step 1',
        'step 2'
    ]
}
# Known AWE clientgroup names accepted by the "client" subcommand.
CGS = [
    'mgrast_dbload',
    'mgrast_single',
    'mgrast_multi'
]
def max_pipeline():
    """Return the stage count of the longest pipeline defined in MGP."""
    return max(len(stages) for stages in MGP.values())
def get_awe(url, token):
    """GET a JSON document from the AWE API and return its 'data' member."""
    headers = {'Accept': 'application/json', 'Authorization': 'mgrast '+token}
    request = urllib2.Request(url, headers=headers)
    response = urllib2.urlopen(request)
    body = json.loads(response.read())
    return body['data']
def client_status(c):
    """Map a client record's boolean flags to one status word.

    Precedence: busy > online > suspended; anything else is 'unknown'.
    """
    for flag in ('busy', 'online', 'suspended'):
        if c[flag]:
            return flag
    return 'unknown'
def job_error(e):
    """Return the most specific error text available from a job error record.

    Preference order: ERR-lines filtered out of 'apperror', then 'worknotes',
    then 'servernotes', else 'unknown'.
    """
    if e['apperror']:
        err_lines = [line for line in e['apperror'].split('\n') if 'ERR' in line]
        msg = "\n".join(err_lines)
        # Only use the filtered text if it contains something non-whitespace.
        if msg.strip():
            return msg
    if e['worknotes']:
        return e['worknotes']
    if e['servernotes']:
        return e['servernotes']
    return 'unknown'
def main(args):
    """Entry point: parse the subcommand and print the requested AWE status tables.

    Subcommands: info (static pipeline/clientgroup listing), client (per-client
    work view), pipeline (per-stage busy-client counts), suspend (suspended
    jobs). Returns a shell-style exit code (0 ok, 1 usage error).
    """
    global AWE_URL
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(title='subcommands', help='sub-command help', dest='commands')
    info_parser = subparsers.add_parser("info")
    client_parser = subparsers.add_parser("client")
    client_parser.add_argument("-a", "--awe_url", dest="awe_url", default=AWE_URL, help="AWE API url")
    client_parser.add_argument("-t", "--token", dest="token", default=None, help="User token")
    client_parser.add_argument("-c", "--clientgroup", dest="clientgroup", default=None, help="clientgroup to view")
    client_parser.add_argument("-p", "--pipeline", dest="pipeline", default='mgrast-prod-4.0.3', help="pipeline to view")
    pipeline_parser = subparsers.add_parser("pipeline")
    pipeline_parser.add_argument("-a", "--awe_url", dest="awe_url", default=AWE_URL, help="AWE API url")
    pipeline_parser.add_argument("-t", "--token", dest="token", default=None, help="User token")
    pipeline_parser.add_argument("-p", "--pipeline", dest="pipeline", default='mgrast-prod-4.0.3', help="pipeline to view")
    suspend_parser = subparsers.add_parser("suspend")
    suspend_parser.add_argument("-a", "--awe_url", dest="awe_url", default=AWE_URL, help="AWE API url")
    suspend_parser.add_argument("-t", "--token", dest="token", default=None, help="User token")
    suspend_parser.add_argument("-p", "--pipeline", dest="pipeline", default='mgrast-prod-4.0.3', help="pipeline to view")
    suspend_parser.add_argument("-s", "--stage", dest="stage", type=int, default=None, help="index of stage to view")
    try:
        # NOTE(review): this shadows the `args` parameter; argparse reads
        # sys.argv itself, so the parameter value is never used.
        args = parser.parse_args()
    except Exception as e:
        print "Error: %s"%(str(e))
        parser.print_help()
        return 1
    if not args.commands:
        print "No command provided"
        parser.print_help()
        return 1
    # "info" needs no token: print the static pipeline/clientgroup tables.
    if args.commands == "info":
        ptp = PrettyTable()
        ptp.add_column('task #', range(max_pipeline()))
        for k, v in MGP.iteritems():
            ptp.add_column("pipeline: "+k, v)
        ptp.align = "l"
        print ptp
        ptc = PrettyTable()
        ptc.add_column("clientgroups", CGS)
        ptc.align = "l"
        print ptc
        return 0
    # All remaining subcommands query the AWE API and require a token.
    if not args.token:
        print "Missing required --token"
        parser.print_help()
        return 1
    AWE_URL = args.awe_url
    stages = MGP[args.pipeline]
    # "client": list every client of one clientgroup with its current job/stage.
    if args.commands == "client":
        if args.clientgroup not in CGS:
            print "Invalid clientgroup"
            parser.print_help()
            return 1
        clients = get_awe(AWE_URL+'/client?group='+args.clientgroup, args.token)
        pt = PrettyTable(["name", "host", "status", "job", "stage"])
        seen = set()
        for i, s in enumerate(stages):
            for c in clients:
                if 'data' in c['current_work']:
                    for d in c['current_work']['data']:
                        # Work ids look like "<job>_<task#>"; match on task index.
                        parts = d.split('_')
                        if int(parts[1]) == i:
                            pt.add_row([c['name'], c['host_ip'], client_status(c), parts[0], s])
                            seen.add(c['name'])
        # Clients with no matching current work are listed without job/stage.
        for c in clients:
            if c['name'] not in seen:
                pt.add_row([c['name'], c['host_ip'], client_status(c), "", ""])
        pt.align = "l"
        print pt
    # "pipeline": count, per stage, the clients of each group working on it.
    if args.commands == "pipeline":
        clients = get_awe(AWE_URL+'/client', args.token)
        pt = PrettyTable(["task #", "stage name"]+CGS)
        for i, s in enumerate(stages):
            num = 0  # unused
            row = [i, s]+[0 for _ in range(len(CGS))]
            for c in clients:
                if (c['group'] in CGS) and ('data' in c['current_work']):
                    for d in c['current_work']['data']:
                        parts = d.split('_')
                        if int(parts[1]) == i:
                            row[CGS.index(c['group'])+2] += 1
            pt.add_row(row)
        pt.align = "l"
        print pt
    # "suspend": per-stage counts of suspended jobs, or (with --stage) the
    # individual suspended jobs of one stage with their error messages.
    if args.commands == "suspend":
        jobs = get_awe("%s/job?query&state=suspend&info.pipeline=%s&limit=0"%(AWE_URL, args.pipeline), args.token)
        if args.stage == None:
            pt = PrettyTable(["task #", "stage name", "suspended"])
            for i, s in enumerate(stages):
                num = 0  # unused
                row = [i, s, 0]
                for j in jobs:
                    if j['error'] and j['error']['taskfailed']:
                        parts = j['error']['taskfailed'].split('_')
                        if int(parts[1]) == i:
                            row[2] += 1
                pt.add_row(row)
            pt.align = "l"
            print pt
        else:
            pt = PrettyTable(["id", "job name", "mg ID", "error"])
            for j in jobs:
                if j['error'] and j['error']['taskfailed']:
                    parts = j['error']['taskfailed'].split('_')
                    if int(parts[1]) == args.stage:
                        pt.add_row([j['id'], j['info']['name'], j['info']['userattr']['id'], job_error(j['error'])])
            pt.align = "l"
            print pt
    return 0
# Script entry point. argv is passed through for the signature's sake, but
# main() re-parses sys.argv itself via argparse.
if __name__ == "__main__":
    sys.exit( main(sys.argv) )
|
teharrison/MG-RAST
|
src/MGRAST/bin/awe-debuger.py
|
Python
|
bsd-2-clause
| 7,160
|
import operator
from .plugin import SimStatePlugin
class SimStateCGC(SimStatePlugin):
    """
    This state plugin keeps track of CGC state.
    """
    #__slots__ = [ 'heap_location', 'max_str_symbolic_bytes' ]
    def __init__(self):
        SimStatePlugin.__init__(self)
        # Heap bookkeeping: base address handed to the CGC allocator and the
        # cap on total allocation size.
        self.allocation_base = 0xb8000000
        self.time = 0
        self.max_allocation = 0x10000000
        # CGC error codes
        self.EBADF = 1
        self.EFAULT = 2
        self.EINVAL = 3
        self.ENOMEM = 4
        self.ENOSYS = 5
        self.EPIPE = 6
        # other CGC constants
        self.FD_SETSIZE = 1024
        # Queues of input/output data, consumed head-first by the
        # peek_*/discard_* helpers below.
        self.input_size = 0
        self.input_strings = [ ]
        self.output_strings = [ ]
        # Freed (address, length) regions available for reuse by the heap.
        self.sinkholes = set()
        self.flag_bytes = None
    def peek_input(self):
        # Return the next queued input string without consuming it, or None.
        if len(self.input_strings) == 0: return None
        return self.input_strings[0]
    def discard_input(self, num_bytes):
        # Consume num_bytes from the head of the input queue; drop the head
        # entry once it is fully consumed.
        if len(self.input_strings) == 0: return
        self.input_strings[0] = self.input_strings[0][num_bytes:]
        if self.input_strings[0] == '':
            self.input_strings.pop(0)
    def peek_output(self):
        # Return the next queued output string without consuming it, or None.
        if len(self.output_strings) == 0: return None
        return self.output_strings[0]
    def discard_output(self, num_bytes):
        # Consume num_bytes from the head of the output queue; drop the head
        # entry once it is fully consumed.
        if len(self.output_strings) == 0: return
        self.output_strings[0] = self.output_strings[0][num_bytes:]
        if self.output_strings[0] == '':
            self.output_strings.pop(0)
    def addr_invalid(self, a):
        # An address is invalid iff the solver cannot satisfy `a != 0`,
        # i.e. the address must be zero.
        return not self.state.se.solution(a != 0, True)
    def copy(self):
        # Copy for state forking; mutable containers are duplicated so the
        # copies do not alias. Constants re-set by __init__ are not re-copied.
        c = SimStateCGC()
        c.allocation_base = self.allocation_base
        c.time = self.time
        c.input_strings = list(self.input_strings)
        c.output_strings = list(self.output_strings)
        c.input_size = self.input_size
        c.sinkholes = set(self.sinkholes)
        c.flag_bytes = self.flag_bytes
        return c
    def _combine(self, others):
        # Shared helper for merge()/widen(): adopt the largest
        # allocation_base among the other states; returns True iff the
        # (concrete) base actually changed.
        merging_occured = False
        new_allocation_base = max(o.allocation_base for o in others)
        if self.state.se.symbolic(new_allocation_base):
            raise ValueError("wat")
        # NOTE: `long` here means this code path is Python 2 only.
        concrete_allocation_base = (
            self.allocation_base
            if type(self.allocation_base) in (int, long) else
            self.state.se.eval(self.allocation_base)
        )
        concrete_new_allocation_base = (
            new_allocation_base
            if type(new_allocation_base) in (int, long) else
            self.state.se.eval(new_allocation_base)
        )
        if concrete_allocation_base != concrete_new_allocation_base:
            self.allocation_base = new_allocation_base
            merging_occured = True
        return merging_occured
    def merge(self, others, merge_conditions, common_ancestor=None):
        # merge_conditions/common_ancestor are unused; only allocation_base
        # is reconciled.
        return self._combine(others)
    def widen(self, others):
        return self._combine(others)
    ### HEAP MANAGEMENT
    def get_max_sinkhole(self, length):
        """
        Find a sinkhole which is large enough to support `length` bytes.
        This uses first-fit. The first sinkhole (ordered in descending order by their address)
        which can hold `length` bytes is chosen. If there are more than `length` bytes in the
        sinkhole, a new sinkhole is created representing the remaining bytes while the old
        sinkhole is removed.
        """
        ordered_sinks = sorted(list(self.sinkholes), key=operator.itemgetter(0), reverse=True)
        max_pair = None
        for addr, sz in ordered_sinks:
            if sz >= length:
                max_pair = (addr, sz)
                break
        if max_pair is None:
            return None
        # Carve the requested bytes off the END of the chosen hole; the low
        # part (if any) stays registered as a smaller sinkhole.
        remaining = max_pair[1] - length
        max_addr = max_pair[0] + remaining
        max_length = remaining
        self.sinkholes.remove(max_pair)
        if remaining:
            self.sinkholes.add((max_pair[0], max_length))
        return max_addr
    def add_sinkhole(self, address, length):
        """
        Add a sinkhole.
        Allow the possibility for the program to reuse the memory represented by the
        address length pair.
        """
        self.sinkholes.add((address, length))
# Register this plugin under the name 'cgc' so simulation states expose it
# by default.
SimStatePlugin.register_default('cgc', SimStateCGC)
|
f-prettyland/angr
|
angr/state_plugins/cgc.py
|
Python
|
bsd-2-clause
| 4,270
|
#!/usr/bin/python
# Packaging script for dddir. Installing the package registers a `dddir`
# console command that dispatches to dddir.__init__:main.
# NOTE(review): find_packages is imported but unused; `packages` is listed
# explicitly below.
from setuptools import setup, find_packages
setup(
    name='dddir',
    version='0.1',
    description='dddir - creates directories from a blueprint',
    author='Jan Oelze',
    author_email='hallo@janoelze.de',
    packages=['dddir'],
    entry_points={
        'console_scripts': [
            'dddir = dddir.__init__:main'
        ],
    },
)
|
janoelze/dddir
|
setup.py
|
Python
|
mit
| 367
|
# -*- encoding: utf-8 -*-
from supriya.tools.ugentools.Filter import Filter
class LPZ2(Filter):
    r'''A two zero fixed lowpass filter.
    ::
        >>> source = ugentools.In.ar(bus=0)
        >>> lpz_2 = ugentools.LPZ2.ar(
        ...     source=source,
        ...     )
        >>> lpz_2
        LPZ2.ar()
    '''
    ### CLASS VARIABLES ###
    __documentation_section__ = 'Filter UGens'
    __slots__ = ()
    # Inputs in signal order; `source` is this ugen's only input.
    _ordered_input_names = (
        'source',
        )
    # No calculation-rate restriction is declared here - presumably the
    # Filter base class supplies the default; confirm against Filter.
    _valid_calculation_rates = None
    ### INITIALIZER ###
    def __init__(
        self,
        calculation_rate=None,
        source=None,
        ):
        # Delegate entirely to Filter; LPZ2 adds no state of its own.
        Filter.__init__(
            self,
            calculation_rate=calculation_rate,
            source=source,
            )
    ### PUBLIC METHODS ###
    @classmethod
    def ar(
        cls,
        source=None,
        ):
        r'''Constructs an audio-rate LPZ2.
        ::
            >>> source = ugentools.In.ar(bus=0)
            >>> lpz_2 = ugentools.LPZ2.ar(
            ...     source=source,
            ...     )
            >>> lpz_2
            LPZ2.ar()
        Returns ugen graph.
        '''
        from supriya.tools import synthdeftools
        calculation_rate = synthdeftools.CalculationRate.AUDIO
        # _new_expanded handles multichannel expansion of the inputs.
        ugen = cls._new_expanded(
            calculation_rate=calculation_rate,
            source=source,
            )
        return ugen
    # def coeffs(): ...
    @classmethod
    def kr(
        cls,
        source=None,
        ):
        r'''Constructs a control-rate LPZ2.
        ::
            >>> source = ugentools.In.ar(bus=0)
            >>> lpz_2 = ugentools.LPZ2.kr(
            ...     source=source,
            ...     )
            >>> lpz_2
            LPZ2.kr()
        Returns ugen graph.
        '''
        from supriya.tools import synthdeftools
        calculation_rate = synthdeftools.CalculationRate.CONTROL
        ugen = cls._new_expanded(
            calculation_rate=calculation_rate,
            source=source,
            )
        return ugen
    # def magResponse(): ...
    # def magResponse2(): ...
    # def magResponse5(): ...
    # def magResponseN(): ...
    # def scopeResponse(): ...
    ### PUBLIC PROPERTIES ###
    @property
    def source(self):
        r'''Gets `source` input of LPZ2.
        ::
            >>> source = ugentools.In.ar(bus=0)
            >>> lpz_2 = ugentools.LPZ2.ar(
            ...     source=source,
            ...     )
            >>> lpz_2.source
            OutputProxy(
                source=In(
                    bus=0.0,
                    calculation_rate=CalculationRate.AUDIO,
                    channel_count=1
                    ),
                output_index=0
                )
        Returns ugen input.
        '''
        # Look the input up by name so this stays correct if the input
        # ordering ever changes.
        index = self._ordered_input_names.index('source')
        return self._inputs[index]
|
andrewyoung1991/supriya
|
supriya/tools/ugentools/LPZ2.py
|
Python
|
mit
| 2,895
|
from Soft64 import *
from Soft64.MipsR4300 import *
from System import *
from NLog import *
# Exercise the emulated CPU's TLB by installing three fake entries
# (IronPython script driven by the Soft64 emulator's .NET API).
logger = LogManager.GetLogger("TLB Python Script")
tlb = Machine.Current.DeviceCPU.Tlb
logger.Info("Creating fake entry 0")
entry = TLBEntry()
entry.VPN2 = VirtualPageNumber2(2, 0x345)
# NOTE(review): entry 0 gets no PfnEven/PfnOdd, unlike entries 1 and 2 -
# presumably deliberate (an entry with no valid pages); confirm.
tlb.AddEntry(0, entry)
logger.Info("Creating fake entry 1")
entry = TLBEntry()
entry.VPN2 = VirtualPageNumber2(2, 0x645)
# Entry 1: only the even page is mapped (to physical frame 0x2000).
entry.PfnEven = PageFrameNumber(0x2000);
entry.PfnEven.IsValid = True;
tlb.AddEntry(1, entry)
logger.Info("Creating fake entry 2")
entry = TLBEntry()
entry.VPN2 = VirtualPageNumber2(2, 0x445)
# Entry 2: only the odd page is mapped (to physical frame 0x3000).
entry.PfnOdd = PageFrameNumber(0x3000);
entry.PfnOdd.IsValid = True;
tlb.AddEntry(2, entry)
logger.Info("Done!")
|
bryanperris/Soft64-Bryan
|
Resources/BinaryFiles/Tests/Tests_TLB.py
|
Python
|
gpl-3.0
| 712
|
# Python - 2.7.6
def day_and_time(mins):
    """Convert minutes relative to Sunday 00:00 into a 'Day HH:MM' string.

    `mins` may be negative or exceed a week; it is wrapped modulo one week.
    """
    WEEK_MINUTES = 7 * 24 * 60
    total = mins % WEEK_MINUTES
    # Python's % already yields a non-negative result, but guard anyway to
    # mirror the original contract for any remainder convention.
    if total < 0:
        total += WEEK_MINUTES
    day_index, remainder = divmod(total, 24 * 60)
    hour, minute = divmod(remainder, 60)
    day_names = ('Sunday', 'Monday', 'Tuesday', 'Wednesday',
                 'Thursday', 'Friday', 'Saturday')
    return '{} {:0>2d}:{:0>2d}'.format(day_names[day_index], hour, minute)
|
RevansChen/online-judge
|
Codewars/7kyu/after-midnight/Python/solution1.py
|
Python
|
mit
| 452
|
import textwrap
from unittest.mock import MagicMock
import pytest
from logstapo.actions import run_actions, Action, SMTPAction
from logstapo.config import ConfigError
def test_run_actions(mock_config):
    """run_actions feeds each log's (unusual, unparsable) results only to the
    actions configured for that log; actions whose logs produced no results
    ('c') and logs configured with no actions ('nact') trigger nothing."""
    logs_config = {
        'both': {'actions': ['a']},
        'one1': {'actions': ['a', 'b']},
        'one2': {'actions': ['b']},
        'none': {'actions': ['c']},
        'nact': {'actions': []},
    }
    actions = {
        'a': MagicMock(spec=Action),
        'b': MagicMock(spec=Action),
        'c': MagicMock(spec=Action)
    }
    mock_config({'logs': logs_config, 'actions': actions})
    # Per-log results: (unusual lines, unparsable lines).
    res = {
        'both': (['x', 'y'], ['z']),
        'one1': (['x', 'y'], []),
        'one2': ([], ['z']),
        'none': ([], []),
        'nact': (['x', 'y'], ['z']),
    }
    run_actions(res)
    actions['a'].run.assert_called_once_with({'both': res['both'],
                                              'one1': res['one1']})
    actions['b'].run.assert_called_once_with({'one1': res['one1'],
                                              'one2': res['one2']})
    assert not actions['c'].run.called
def test_action_from_config(mocker):
    """Action.from_config looks up the registered action type by name and
    forwards the config dict; unknown names raise ConfigError."""
    actions = mocker.patch('logstapo.actions.ACTIONS', {'smtp': MagicMock(spec=Action)})
    Action.from_config('smtp', {'foo': 'bar'})
    actions['smtp'].assert_called_once_with({'foo': 'bar'})
    with pytest.raises(ConfigError):
        Action.from_config('test', {})
def test_smtpaction_invalid():
    """Incomplete or contradictory SMTP configs are rejected at construction:
    no recipient, ssl together with starttls, or username/password alone."""
    with pytest.raises(ConfigError):
        SMTPAction({})
    with pytest.raises(ConfigError):
        SMTPAction({'to': 'foo@bar.com', 'ssl': True, 'starttls': True})
    with pytest.raises(ConfigError):
        SMTPAction({'to': 'foo@bar.com', 'username': 'test'})
    with pytest.raises(ConfigError):
        SMTPAction({'to': 'foo@bar.com', 'password': 'test'})
@pytest.mark.parametrize(('recipients', 'expected'), (
    ('foo@bar.com', {'foo@bar.com'}),
    (['foo@bar.com'], {'foo@bar.com'}),
    (['test@example.com', 'foo@bar.com'], {'foo@bar.com', 'test@example.com'})
))
def test_smtpaction_recipients(recipients, expected):
    """A single address or a list of addresses is normalized to a sorted list."""
    action = SMTPAction({'to': recipients})
    assert action.recipients == sorted(expected)
@pytest.mark.parametrize(('from_', 'expected'), (
    (None, 'USER@DOMAIN'),
    ('foo', 'foo'),
))
def test_smtpaction_sender(mocker, from_, expected):
    """When 'from' is not configured, the sender defaults to user@fqdn."""
    mocker.patch('getpass.getuser', lambda: 'USER')
    mocker.patch('socket.getfqdn', lambda: 'DOMAIN')
    action = SMTPAction({'to': 'foo@bar.com', 'from': from_})
    assert action.sender == expected
@pytest.mark.parametrize('dry_run', (True, False))
@pytest.mark.parametrize('auth', (True, False))
@pytest.mark.parametrize(('ssl', 'starttls'), (
    (False, False),
    (False, True),
    (True, False),
))
def test_smtpaction_run(mocker, mock_config, dry_run, auth, ssl, starttls):
    """SMTPAction.run picks SMTP vs SMTP_SSL, honours starttls and login
    settings, builds the message headers from the config, and performs no
    SMTP traffic at all in dry-run mode."""
    mock_config({'debug': False, 'dry_run': dry_run})
    mocker.patch('logstapo.actions.debug_echo')
    mocker.patch('logstapo.actions.SMTPAction._build_msg', return_value='...')
    smtplib = mocker.patch('logstapo.actions.smtplib', autospec=True)
    # XXX: why doesn't autospec handle this?
    smtplib.SMTP.__name__ = 'SMTP'
    smtplib.SMTP_SSL.__name__ = 'SMTP_SSL'
    data = {'host': 'somehost', 'port': 12345, 'ssl': ssl, 'starttls': starttls,
            'from': 'sender@bar.com', 'to': 'foo@bar.com', 'subject': 'log stuff'}
    if auth:
        data.update({'username': 'user', 'password': 'pass'})
    action = SMTPAction(data)
    action.run({})
    cls = smtplib.SMTP_SSL if ssl else smtplib.SMTP
    if dry_run:
        assert not cls.called
    else:
        cls.assert_called_once_with('somehost', 12345)
        smtp = cls()
        assert smtp.starttls.called == starttls
        if auth:
            smtp.login.assert_called_once_with('user', 'pass')
        assert smtp.send_message.called
        # Inspect the email.Message handed to send_message.
        msg = smtp.send_message.call_args[0][0]
        assert msg['To'] == 'foo@bar.com'
        assert msg['From'] == 'sender@bar.com'
        assert msg['Subject'] == 'log stuff'
        assert msg.get_payload() == '...'
@pytest.mark.parametrize('group_by_source', (True, False))
def test_smtpaction_build_msg(group_by_source):
    """_build_msg renders one section per log with results, omitting empty
    subsections ('b' has no unusual lines, 'c' none unparsable) and logs with
    no results at all ('d'); with group=True unusual lines are grouped by
    their source (a1/a3 share 'sa1', so they precede a2)."""
    action = SMTPAction({'to': 'foo@bar.com', 'group': group_by_source})
    data = {
        'a': ([('a1', {'source': 'sa1'}),
               ('a2', {'source': 'sa2'}),
               ('a3', {'source': 'sa1'})],
              ['uA']),
        'b': ([], ['uB']),
        'c': ([('c1', {'source': 'sc'})], []),
        'd': ([], [])
    }
    msg = action._build_msg(data)
    if group_by_source:
        assert msg == textwrap.dedent('''
            Logstapo results for 'a'
            =-=-=-=-=-=-=-=-=-=-=-=-
            Unparsable lines
            ~~~~~~~~~~~~~~~~
            uA
            Unusual lines
            -------------
            a1
            a3
            a2
            Logstapo results for 'b'
            =-=-=-=-=-=-=-=-=-=-=-=-
            Unparsable lines
            ~~~~~~~~~~~~~~~~
            uB
            Logstapo results for 'c'
            =-=-=-=-=-=-=-=-=-=-=-=-
            Unusual lines
            -------------
            c1
        ''').strip()
    else:
        assert msg == textwrap.dedent('''
            Logstapo results for 'a'
            =-=-=-=-=-=-=-=-=-=-=-=-
            Unparsable lines
            ~~~~~~~~~~~~~~~~
            uA
            Unusual lines
            -------------
            a1
            a2
            a3
            Logstapo results for 'b'
            =-=-=-=-=-=-=-=-=-=-=-=-
            Unparsable lines
            ~~~~~~~~~~~~~~~~
            uB
            Logstapo results for 'c'
            =-=-=-=-=-=-=-=-=-=-=-=-
            Unusual lines
            -------------
            c1
        ''').strip()
|
ThiefMaster/logstapo
|
tests/test_actions.py
|
Python
|
mit
| 5,800
|
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for describing url maps."""
from googlecloudsdk.compute.lib import base_classes
class Describe(base_classes.GlobalDescriber):
    """Describe a URL map."""
    @staticmethod
    def Args(parser):
        # Register the standard global-describe flags, plus --fields scoped
        # to the urlMaps collection.
        base_classes.GlobalDescriber.Args(parser)
        base_classes.AddFieldsFlag(parser, 'urlMaps')
    @property
    def service(self):
        # Compute API service used to issue the describe request.
        return self.compute.urlMaps
    @property
    def resource_type(self):
        # Name of the resource collection within the Compute API.
        return 'urlMaps'
# Help text consumed by the gcloud CLI framework for this command.
Describe.detailed_help = {
    'brief': 'Describe a URL map',
    'DESCRIPTION': """\
        *{command}* displays all data associated with a URL map in a
        project.
        """,
}
|
harshilasu/LinkurApp
|
y/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/url_maps/describe.py
|
Python
|
gpl-3.0
| 666
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# numcodecs documentation build configuration file, created by
# sphinx-quickstart on Mon May 2 21:40:09 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from mock import Mock as MagicMock
PY2 = sys.version_info[0] == 2
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
# Modules that may be missing on the docs build host are replaced with mocks
# so autodoc can import numcodecs without them.
MOCK_MODULES = ['msgpack']
if PY2:
    # lzma is stdlib only on Python 3, so mock it when building under Python 2.
    MOCK_MODULES.append('lzma')
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'numpydoc',
'sphinx_issues',
]
numpydoc_show_class_members = False
numpydoc_class_members_toctree = False
issues_github_path = 'zarr-developers/numcodecs'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'numcodecs'
copyright = '2016, Alistair Miles'
author = 'Alistair Miles'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import numcodecs
version = numcodecs.__version__
# The full version, including alpha/beta/rc tags.
release = numcodecs.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'numcodecs v@@'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'numcodecsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'numcodecs.tex', 'numcodecs Documentation',
'Alistair Miles', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'numcodecs', 'numcodecs Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'numcodecs', 'numcodecs Documentation',
     author, 'numcodecs', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): this is the legacy URL-keyed form; newer Sphinx releases
# expect a named mapping such as {'python': ('https://docs.python.org/', None)}
# — confirm against the Sphinx version pinned for these docs.
intersphinx_mapping = {'https://docs.python.org/': None}
|
alimanfoo/numcodecs
|
docs/conf.py
|
Python
|
mit
| 10,126
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2013-2015 Serv. Tecnol. Avanzados
# Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import geonames_import
from . import l10n_es_toponyms_wizard
|
Jortolsa/l10n-spain
|
l10n_es_toponyms/wizard/__init__.py
|
Python
|
agpl-3.0
| 1,118
|
#!/usr/bin/python3
import ctypes as ct
DUMMY_LIB_PATH = 'libdummy.so.0.1.0'
class Endec(object):
    """ctypes wrapper around the dummy shared library (``libdummy``).

    Exposes ``enable``/``disable`` which marshal a buffer into the
    library's ``enable_slayer``/``disable_slayer`` entry points and return
    the transformed bytes together with the reported length.
    """

    def __init__(self):
        # Bug fix: the original assigned ``self._libdummy`` at class scope
        # (before __init__), where ``self`` is undefined, which raises
        # NameError the moment the class statement executes.  Load the
        # library here instead, once per instance.
        self._libdummy = ct.cdll.LoadLibrary(DUMMY_LIB_PATH)
        self._libdummy.setCountdown(999)

    def __del__(self):
        # Guard: if LoadLibrary raised inside __init__, _libdummy was never
        # set, and __del__ must not raise a secondary AttributeError.
        lib = getattr(self, '_libdummy', None)
        if lib is not None:
            lib.setCountdown(0)

    def enable(self, in_buffer):
        """Encode *in_buffer* (str) via enable_slayer.

        :param in_buffer: text payload; encoded to bytes before the call
        :return: tuple (output bytes, c_uint length reported by the library)
        :raises TypeError: when *in_buffer* is not a str
        """
        if not isinstance(in_buffer, str):
            raise TypeError('string type required')
        addr = ct.c_buffer(in_buffer.encode())
        ret = ct.c_buffer(''.encode(), 1024)  # 1 KiB output scratch buffer
        length = ct.pointer(ct.c_uint(0))
        self._libdummy.enable_slayer(addr, len(in_buffer), ret, length)
        return (ret.value, length.contents)

    def disable(self, in_buffer):
        """Decode *in_buffer* (bytes) via disable_slayer.

        :param in_buffer: binary payload produced by :meth:`enable`
        :return: tuple (output bytes, c_uint length reported by the library)
        :raises TypeError: when *in_buffer* is not bytes
        """
        if not isinstance(in_buffer, bytes):
            raise TypeError('bytes type required')
        addr = ct.c_buffer(in_buffer)
        ret = ct.c_buffer(''.encode(), 1024)  # 1 KiB output scratch buffer
        length = ct.pointer(ct.c_uint(0))
        self._libdummy.disable_slayer(addr, len(in_buffer), ret, length)
        return (ret.value, length.contents)
|
Zex/juicemachine
|
scripts/ts_lib_loader.py
|
Python
|
mit
| 1,443
|
import webapp2
from feedgen.feed import FeedGenerator
from datetime import datetime, tzinfo, timedelta
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import re
import difflib
from HTMLParser import HTMLParser
import uuid
#from pytz import utc
ZERO = timedelta(0)
class UTC(tzinfo):
    """Minimal concrete tzinfo for UTC: zero offset, zero DST."""

    def utcoffset(self, dt):
        # UTC is, by definition, zero offset from UTC.
        return ZERO

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        # UTC never observes daylight saving time.
        return ZERO


# Module-level singleton used to attach a timezone to stored datetimes.
utc = UTC()
class MainPage(webapp2.RequestHandler):
    """Landing page: a static HTML form that submits a URL to /feed."""

    def get(self):
        # Serve the form inline; no templates are used in this app.
        self.response.write('''
<html><head><title>Page2RSS</title></head>
<body style="padding: 15% 0%; text-align: center;">
<h2>Page to RSS</h2>
<br>
<form action="/feed" method=get>
URL: <input name=url> <input type=submit value="Get RSS Feed">
</form>
<br>
<h4>Warning: extremely poorly tested</h4>
<h5>Send bug reports and feature requests to
dspeyer@gmail.com</h5>
</body>
</html>
''')
class Page(ndb.Model):
    """A tracked page, keyed by its URL; remembers when it was last scraped."""

    last_scraped = ndb.DateTimeProperty()  # also keys the latest Scrape (str(last_scraped))
class Scrape(ndb.Model):
    """One snapshot of a page's stripped content (datastore child of a Page)."""

    content = ndb.TextProperty()         # stripped text/HTML of the page
    scraped_on = ndb.DateTimeProperty()  # when this snapshot was taken
class Diff(ndb.Model):
    """A rendered change entry (child of a Page), served as one RSS item."""

    title = ndb.TextProperty()          # human-readable summary line
    content = ndb.TextProperty()        # HTML body of the feed entry
    diffed_on = ndb.DateTimeProperty()  # when the diff was generated
    guid = ndb.StringProperty()         # stable id handed to RSS readers
def getlink(lis, key, base):
    """Resolve the URL attribute *key* from the (name, value) pairs in *lis*.

    A value containing '//' is treated as absolute and returned unchanged;
    any other value is joined onto *base* with a '/'.  Returns None when
    *key* is not present.
    """
    found = next((v for k, v in lis if k == key), None)
    if found is None:
        return None
    return found if '//' in found else base + '/' + found
class HtmlStripper(HTMLParser):
    """Reduce an HTML document to plain text plus <a>/<img> markup.

    Text inside <script>/<style> is dropped; links and images are re-emitted
    with their URLs resolved against *base* via ``getlink``; a newline is
    appended after each tag/data chunk that is not inside a link.
    """

    def __init__(self, base):
        HTMLParser.__init__(self)
        self.content = u''   # accumulated stripped output
        self.inlink = False  # True while inside an <a> element
        self.silent = False  # True inside <script>/<style>
        self.base = base     # base URL for resolving relative href/src

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            self.content += u'<a href="%s">' % getlink(attrs, 'href', self.base)
            self.inlink = True
        if tag == 'img':
            self.content += u'<img src="%s">' % getlink(attrs, 'src', self.base)
            if not self.inlink:
                self.content += u'\n'
        if tag in ['script', 'style']:
            self.silent = True

    def handle_endtag(self, tag):
        if tag == 'a' and self.inlink:
            self.content += u'</a>\n'
            # Bug fix: the flag was never reset, so after the first link the
            # parser behaved as if the whole rest of the document were inside
            # an <a>, suppressing every subsequent newline.
            self.inlink = False
        if tag in ['script', 'style']:
            self.silent = False

    def handle_data(self, data):
        if not self.silent:
            self.content += data
            if not self.inlink:
                self.content += '\n'
def fetch(url):
    """Fetch *url* and return its stripped text content, or False on non-200.

    Decoding falls back ascii -> utf-8 -> latin-1; the final re.sub branch
    strips non-ASCII bytes.  (Python 2 code: r.content is a byte string.)
    """
    r = urlfetch.fetch(url)
    if r.status_code != 200:
        return False
    try:
        content = r.content.decode('ascii')
    except UnicodeDecodeError:
        try:
            content = r.content.decode('utf-8')
        except UnicodeDecodeError:
            try:
                content = r.content.decode('latin-1')
            except UnicodeDecodeError:
                # NOTE(review): latin-1 maps every byte, so this fallback is
                # unreachable in practice.
                content = re.sub(r'[^\x00-\x7F]+',' ', r.content)
    # Strip markup down to text plus <a>/<img>, resolving URLs against *url*.
    stripper = HtmlStripper(base=url)
    stripper.feed(content)
    content = stripper.content
    return content
def get_last_scrape(url):
    """Return the most recent Scrape entity stored for *url*, or None."""
    page = ndb.Key('Page', url).get()
    if not page:
        return None
    # Scrape entities are keyed by the stringified timestamp that the
    # parent Page recorded in last_scraped.
    scrape = ndb.Key('Page', url, 'Scrape', str(page.last_scraped)).get()
    return scrape or None
def maybe_create_diff(url):
    """Scrape *url*; store a new Scrape plus a rendered Diff if it changed.

    Returns False when throttled, when the fetch fails, or when the content
    is unchanged; True when a new Diff entity was stored.
    """
    now = datetime.now()
    last_scrape = get_last_scrape(url)
    # Throttle repeat scrapes.  NOTE(review): one *second* seems very short
    # for a scrape throttle — possibly hours were intended; confirm.
    if last_scrape and now - last_scrape.scraped_on < timedelta(seconds=1):
        return False
    new_content = fetch(url)
    if not new_content:
        return False
    if last_scrape and new_content == last_scrape.content:
        return False
    # Create-or-update the Page and stamp the scrape time (which also keys
    # the Scrape entity below).
    page_key = ndb.Key('Page',url)
    page = page_key.get()
    if not page:
        page = Page(id=url)
    page.last_scraped = now
    page.put()
    scrape = Scrape(id=str(now),parent=page_key)
    scrape.content = new_content
    scrape.scraped_on = now
    scrape.put()
    diff = Diff(parent=page_key)
    diff.guid = uuid.uuid4().hex
    if last_scrape:
        diff.title = 'New content on %s between %s and %s'%(url,last_scrape.scraped_on,now)
        diff.content = u'<h4>%s:</h4>'%diff.title
        indiff=False
        # Wrap each run of added ('+') ndiff lines in a bordered <div>.
        # NOTE(review): if the diff ends mid-run (last line is an addition),
        # the closing </div> is never appended — confirm whether intended.
        for line in difflib.ndiff(last_scrape.content.split('\n'),
                                  new_content.split('\n')):
            if line[0]=='+':
                if not indiff:
                    diff.content += u'<div style="margin:1em; border: thin solid black; white-space:pre-line">'
                diff.content += line[1:]
                indiff=True
            else:
                if indiff:
                    diff.content += '</div>'
                    indiff=False
    else:
        # First scrape of this page: publish the whole content as one entry.
        diff.title = 'First Scrape of %s (on %s)'%(url,now)
        diff.content = u'<h4>%s:</h4>'%diff.title
        diff.content += u'<div style="white-space:pre-line">'
        diff.content += new_content
        diff.content += u'</div>'
    diff.diffed_on = now
    diff.put()
    return True
class Feed(webapp2.RequestHandler):
    """Handler for /feed: ensure a fresh diff exists, then render RSS."""

    def get(self):
        url = self.request.get('url')
        try:
            maybe_create_diff(url)
        except (urlfetch.InvalidURLError, urlfetch.DownloadError):
            self.response.write('<h4>Error: "%s" is not a fetchable URL</h4>'%url)
            if 'http' not in url:
                self.response.write('Maybe prepend http:// or https://?')
            return
        page_key = ndb.Key('Page',url)
        # Newest first; only the five most recent diffs are served/kept.
        diffs = Diff.query(ancestor=page_key).order(-Diff.diffed_on)
        fg=FeedGenerator()
        fg.title('Changes to %s' % url)
        fg.link(href='http://page2rss-174917.appspot.com/feed?%s'%url)
        fg.description('Changes to %s' % url)
        n=0
        for diff in diffs:
            # Backfill guids for Diff entities stored before guid existed.
            if not diff.guid:
                diff.guid = uuid.uuid4().hex
                diff.put()
            if n<5:
                fe = fg.add_entry()
                fe.title(diff.title)
                fe.link(href=url)
                # Stored datetimes are naive; attach UTC for the feed.
                fe.pubdate(diff.diffed_on.replace(tzinfo=utc))
                fe.content(u'<div>%s</div>'%diff.content, type='CDATA')
                fe.guid(diff.guid)
            else:
                # Older than the newest five: delete to bound storage.
                diff.key.delete()
            n+=1
        self.response.headers['Content-Type'] = 'application/rss+xml'
        self.response.write(fg.rss_str(pretty=True))
# URL routes: landing form at the site root, RSS generation at /feed.
app = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/feed',Feed)
    ], debug=True)
|
dspeyer/page2rss
|
main.py
|
Python
|
gpl-3.0
| 6,623
|
# coding: utf-8
# pylint: disable=too-many-lines
import inspect
import sys
from typing import TypeVar, Optional, Sequence, Iterable, List, Any
from owlmixin import util
from owlmixin.errors import RequiredError, UnknownPropertiesError, InvalidTypeError
from owlmixin.owlcollections import TDict, TIterator, TList
from owlmixin.owlenum import OwlEnum, OwlObjectEnum
from owlmixin.transformers import (
DictTransformer,
JsonTransformer,
YamlTransformer,
ValueTransformer,
traverse_dict,
TOption,
)
T = TypeVar("T", bound="OwlMixin")
def _is_generic(type_):
return hasattr(type_, "__origin__")
def assert_extra(cls_properties, arg_dict, cls):
    """Raise UnknownPropertiesError if *arg_dict* has keys not declared
    among the (name, type) pairs in *cls_properties*."""
    declared = {prop_name for prop_name, _ in cls_properties}
    unknown = set(arg_dict) - declared
    if unknown:
        raise UnknownPropertiesError(cls=cls, props=sorted(unknown))
def assert_none(value, type_, cls, name):
    """Raise RequiredError when a required property's *value* is None."""
    if value is not None:
        return
    raise RequiredError(cls=cls, prop=name, type_=type_)
def assert_types(value, types: tuple, cls, name):
    """Raise InvalidTypeError unless *value* is an instance of one of *types*."""
    if isinstance(value, types):
        return
    raise InvalidTypeError(cls=cls, prop=name, value=value, expected=types, actual=type(value))
def traverse(
    type_, name, value, cls, force_snake_case: bool, force_cast: bool, restrict: bool
) -> Any:
    """Validate and convert *value* against the annotation *type_*.

    Recursively handles the owlmixin generic containers (TList, TIterator,
    TDict, TOption) as well as OwlMixin subclasses, ValueTransformer types,
    and plain classes.

    :param type_: Annotation; may be a string or a ForwardRef, which is
        resolved in the namespace of the module that defines *cls*.
    :param name: Property name used in error messages (dotted for nesting).
    :param value: Raw value to convert.
    :param cls: Class being constructed (error reporting / name resolution).
    :raises RequiredError: when a required (non-TOption) value is None.
    :raises InvalidTypeError: when the value has an unexpected type.
    :raises RuntimeError: for unsupported generic origins.
    """
    # pylint: disable=too-many-return-statements,too-many-branches,too-many-arguments
    # String annotations are looked up in the defining module's namespace.
    if isinstance(type_, str):
        type_ = sys.modules[cls.__module__].__dict__.get(type_)
    if hasattr(type_, "__forward_arg__"):
        # `_ForwardRef` (3.6) or `ForwardRef` (>= 3.7) includes __forward_arg__
        # PEP 563 -- Postponed Evaluation of Annotations
        type_ = sys.modules[cls.__module__].__dict__.get(type_.__forward_arg__)
    if not _is_generic(type_):
        assert_none(value, type_, cls, name)
        if type_ is any:
            # NOTE(review): compares against the *builtin* `any`, not
            # typing.Any (handled just below) — presumably tolerates `any`
            # used as an annotation; confirm.
            return value
        if type_ is Any:
            return value
        if isinstance(value, type_):
            return value
        if issubclass(type_, OwlMixin):
            # Nested OwlMixin: accept an instance or a dict to convert.
            assert_types(value, (type_, dict), cls, name)
            return type_.from_dict(
                value, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
            )
        if issubclass(type_, ValueTransformer):
            return type_.from_value(value)
        if force_cast:
            return type_(value)
        assert_types(value, (type_,), cls, name)
        return value
    # Generic annotation: dispatch on its origin and recurse into its args.
    o_type = type_.__origin__
    g_type = type_.__args__
    if o_type == TList:
        assert_none(value, type_, cls, name)
        assert_types(value, (list,), cls, name)
        return TList(
            [
                traverse(g_type[0], f"{name}.{i}", v, cls, force_snake_case, force_cast, restrict)
                for i, v in enumerate(value)
            ]
        )
    if o_type == TIterator:
        assert_none(value, type_, cls, name)
        assert_types(value, (Iterable,), cls, name)
        # Lazy: elements are converted as the iterator is consumed.
        return TIterator(
            traverse(g_type[0], f"{name}.{i}", v, cls, force_snake_case, force_cast, restrict)
            for i, v in enumerate(value)
        )
    if o_type == TDict:
        assert_none(value, type_, cls, name)
        assert_types(value, (dict,), cls, name)
        return TDict(
            {
                k: traverse(
                    g_type[0], f"{name}.{k}", v, cls, force_snake_case, force_cast, restrict
                )
                for k, v in value.items()
            }
        )
    if o_type == TOption:
        v = value.get() if isinstance(value, TOption) else value
        # TODO: For `from_csvf`... needs to be simpler!!
        # Empty strings (csv cells) count as missing, unlike other falsy values.
        if (isinstance(v, str) and v) or (not isinstance(v, str) and v is not None):
            return TOption(
                traverse(g_type[0], name, v, cls, force_snake_case, force_cast, restrict)
            )
        return TOption(None)
    raise RuntimeError(f"This generics is not supported `{o_type}`")
class OwlMeta(type):
    """Metaclass that caches each class's methods in ``__methods_dict__``.

    The cache maps method name -> bound method; ``OwlMixin.from_dict``
    consults it to find per-property hook methods.
    """

    def __new__(cls, name, bases, class_dict):
        new_cls = type.__new__(cls, name, bases, class_dict)
        members = inspect.getmembers(new_cls, inspect.ismethod)
        new_cls.__methods_dict__ = dict(members)
        return new_cls
class OwlMixin(DictTransformer, JsonTransformer, YamlTransformer, metaclass=OwlMeta):
@classmethod
def from_dict(
cls,
d: dict,
*,
force_snake_case: bool = True,
force_cast: bool = False,
restrict: bool = True,
) -> T:
"""From dict to instance
:param d: Dict
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
Usage:
>>> from owlmixin.samples import Human, Food, Japanese
>>> human: Human = Human.from_dict({
... "id": 1,
... "name": "Tom",
... "favorites": [
... {"name": "Apple", "names_by_lang": {"en": "Apple", "de": "Apfel"}},
... {"name": "Orange"}
... ]
... })
>>> human.id
1
>>> human.name
'Tom'
>>> human.favorites[0].name
'Apple'
>>> human.favorites[0].names_by_lang.get()["de"]
'Apfel'
You can use default value
>>> taro: Japanese = Japanese.from_dict({
... "name": 'taro'
... }) # doctest: +NORMALIZE_WHITESPACE
>>> taro.name
'taro'
>>> taro.language
'japanese'
If you don't set `force_snake=False` explicitly, keys are transformed to snake case as following.
>>> human: Human = Human.from_dict({
... "--id": 1,
... "<name>": "Tom",
... "favorites": [
... {"name": "Apple", "namesByLang": {"en": "Apple"}}
... ]
... })
>>> human.id
1
>>> human.name
'Tom'
>>> human.favorites[0].names_by_lang.get()["en"]
'Apple'
You can allow extra parameters (like ``hogehoge``) if you set `restrict=False`.
>>> apple: Food = Food.from_dict({
... "name": "Apple",
... "hogehoge": "ooooooooooooooooooooo",
... }, restrict=False)
>>> apple.to_dict()
{'name': 'Apple'}
You can prohibit extra parameters (like ``hogehoge``) if you set `restrict=True` (which is default).
>>> human = Human.from_dict({
... "id": 1,
... "name": "Tom",
... "hogehoge1": "ooooooooooooooooooooo",
... "hogehoge2": ["aaaaaaaaaaaaaaaaaa", "iiiiiiiiiiiiiiiii"],
... "favorites": [
... {"name": "Apple", "namesByLang": {"en": "Apple", "de": "Apfel"}},
... {"name": "Orange"}
... ]
... }) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
owlmixin.errors.UnknownPropertiesError:
. ∧,,_∧ ,___________________
⊂ ( ・ω・ )つ- < Unknown properties error
/// /::/ `-------------------
|::|/⊂ヽノ|::|」
/ ̄ ̄旦 ̄ ̄ ̄/|
______/ | |
|------ー----ー|/
<BLANKLINE>
`owlmixin.samples.Human` has unknown properties ['hogehoge1', 'hogehoge2']!!
<BLANKLINE>
* If you want to allow unknown properties, set `restrict=False`
* If you want to disallow unknown properties, add `hogehoge1` and `hogehoge2` to owlmixin.samples.Human
<BLANKLINE>
If you specify wrong type...
>>> human: Human = Human.from_dict({
... "id": 1,
... "name": "ichiro",
... "favorites": ["apple", "orange"]
... }) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
owlmixin.errors.InvalidTypeError:
. ∧,,_∧ ,___________________
⊂ ( ・ω・ )つ- < Invalid Type error
/// /::/ `-------------------
|::|/⊂ヽノ|::|」
/ ̄ ̄旦 ̄ ̄ ̄/|
______/ | |
|------ー----ー|/
<BLANKLINE>
`owlmixin.samples.Human#favorites.0 = apple` doesn't match expected types.
Expected type is one of ["<class 'owlmixin.samples.Food'>", "<class 'dict'>"], but actual type is `<class 'str'>`
<BLANKLINE>
* If you want to force cast, set `force_cast=True`
* If you don't want to force cast, specify value which has correct type
<BLANKLINE>
If you don't specify required params... (ex. name
>>> human: Human = Human.from_dict({
... "id": 1
... }) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
owlmixin.errors.RequiredError:
. ∧,,_∧ ,___________________
⊂ ( ・ω・ )つ- < Required error
/// /::/ `-------------------
|::|/⊂ヽノ|::|」
/ ̄ ̄旦 ̄ ̄ ̄/|
______/ | |
|------ー----ー|/
<BLANKLINE>
`owlmixin.samples.Human#name: <class 'str'>` is empty!!
<BLANKLINE>
* If `name` is certainly required, specify anything.
* If `name` is optional, change type from `<class 'str'>` to `TOption[<class 'str'>]`
<BLANKLINE>
"""
if isinstance(d, cls):
return d
instance: T = cls() # type: ignore
d = util.replace_keys(d, {"self": "_self"}, force_snake_case)
properties = cls.__annotations__.items()
if restrict:
assert_extra(properties, d, cls)
for n, t in properties:
f = cls.__methods_dict__.get(f"_{cls.__name__}___{n}") # type: ignore
arg_v = f(d.get(n)) if f else d.get(n)
def_v = getattr(instance, n, None)
setattr(
instance,
n,
traverse(
type_=t,
name=n,
value=def_v if arg_v is None else arg_v,
cls=cls,
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
),
)
return instance
@classmethod
def from_optional_dict(
cls,
d: Optional[dict],
*,
force_snake_case: bool = True,
force_cast: bool = False,
restrict: bool = True,
) -> TOption[T]:
"""From dict to optional instance.
:param d: Dict
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
Usage:
>>> from owlmixin.samples import Human
>>> Human.from_optional_dict(None).is_none()
True
>>> Human.from_optional_dict({}).get() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
owlmixin.errors.RequiredError:
. ∧,,_∧ ,___________________
⊂ ( ・ω・ )つ- < Required error
/// /::/ `-------------------
|::|/⊂ヽノ|::|」
/ ̄ ̄旦 ̄ ̄ ̄/|
______/ | |
|------ー----ー|/
<BLANKLINE>
`owlmixin.samples.Human#id: <class 'int'>` is empty!!
<BLANKLINE>
* If `id` is certainly required, specify anything.
* If `id` is optional, change type from `<class 'int'>` to `TOption[<class 'int'>]`
<BLANKLINE>
"""
return TOption(
cls.from_dict(
d, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
)
if d is not None
else None
)
@classmethod
def from_dicts(
cls,
ds: List[dict],
*,
force_snake_case: bool = True,
force_cast: bool = False,
restrict: bool = True,
) -> TList[T]:
"""From list of dict to list of instance
:param ds: List of dict
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance
Usage:
>>> from owlmixin.samples import Human
>>> humans: TList[Human] = Human.from_dicts([
... {"id": 1, "name": "Tom", "favorites": [{"name": "Apple"}]},
... {"id": 2, "name": "John", "favorites": [{"name": "Orange"}]}
... ])
>>> humans[0].name
'Tom'
>>> humans[1].name
'John'
"""
return TList(
[
cls.from_dict(
d, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
)
for d in ds
]
)
@classmethod
def from_iterable_dicts(
cls,
ds: Iterable[dict],
*,
force_snake_case: bool = True,
force_cast: bool = False,
restrict: bool = True,
) -> TIterator[T]:
"""From iterable dict to iterable instance
:param ds: Iterable dict
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Iterator
Usage:
>>> from owlmixin.samples import Human
>>> humans: TIterator[Human] = Human.from_iterable_dicts([
... {"id": 1, "name": "Tom", "favorites": [{"name": "Apple"}]},
... {"id": 2, "name": "John", "favorites": [{"name": "Orange"}]}
... ])
>>> humans.next_at(0).get().name
'Tom'
>>> humans.next_at(0).get().name
'John'
"""
return TIterator(
cls.from_dict(
d, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
)
for d in ds
)
@classmethod
def from_optional_dicts(
cls,
ds: Optional[List[dict]],
*,
force_snake_case: bool = True,
force_cast: bool = False,
restrict: bool = True,
) -> TOption[TList[T]]:
"""From list of dict to optional list of instance.
:param ds: List of dict
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance
Usage:
>>> from owlmixin.samples import Human
>>> Human.from_optional_dicts(None).is_none()
True
>>> Human.from_optional_dicts([]).get()
[]
"""
return TOption(
cls.from_dicts(
ds, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
)
if ds is not None
else None
)
@classmethod
def from_optional_iterable_dicts(
cls,
ds: Optional[Iterable[dict]],
*,
force_snake_case: bool = True,
force_cast: bool = False,
restrict: bool = True,
) -> TOption[TIterator[T]]:
"""From iterable dict to optional iterable instance.
:param ds: Iterable dict
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Iterable instance
Usage:
>>> from owlmixin.samples import Human
>>> Human.from_optional_dicts(None).is_none()
True
>>> Human.from_optional_dicts([]).get()
[]
"""
return TOption(
cls.from_iterable_dicts(
ds, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
)
if ds is not None
else None
)
@classmethod
def from_dicts_by_key(
cls,
ds: dict,
*,
force_snake_case: bool = True,
force_cast: bool = False,
restrict: bool = True,
) -> TDict[T]:
"""From dict of dict to dict of instance
:param ds: Dict of dict
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Dict of instance
Usage:
>>> from owlmixin.samples import Human
>>> humans_by_name: TDict[Human] = Human.from_dicts_by_key({
... 'Tom': {"id": 1, "name": "Tom", "favorites": [{"name": "Apple"}]},
... 'John': {"id": 2, "name": "John", "favorites": [{"name": "Orange"}]}
... })
>>> humans_by_name['Tom'].name
'Tom'
>>> humans_by_name['John'].name
'John'
"""
return TDict(
{
k: cls.from_dict(
v, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
)
for k, v in ds.items()
}
)
@classmethod
def from_optional_dicts_by_key(
cls,
ds: Optional[dict],
*,
force_snake_case=True,
force_cast: bool = False,
restrict: bool = True,
) -> TOption[TDict[T]]:
"""From dict of dict to optional dict of instance.
:param ds: Dict of dict
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Dict of instance
Usage:
>>> from owlmixin.samples import Human
>>> Human.from_optional_dicts_by_key(None).is_none()
True
>>> Human.from_optional_dicts_by_key({}).get()
{}
"""
return TOption(
cls.from_dicts_by_key(
ds, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
)
if ds is not None
else None
)
@classmethod
def from_json(
cls, data: str, *, force_snake_case=True, force_cast: bool = False, restrict: bool = False
) -> T:
"""From json string to instance
:param data: Json string
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
Usage:
>>> from owlmixin.samples import Human
>>> human: Human = Human.from_json('''{
... "id": 1,
... "name": "Tom",
... "favorites": [
... {"name": "Apple", "names_by_lang": {"en": "Apple", "de": "Apfel"}},
... {"name": "Orange"}
... ]
... }''')
>>> human.id
1
>>> human.name
'Tom'
>>> human.favorites[0].names_by_lang.get()["de"]
'Apfel'
"""
return cls.from_dict(
util.load_json(data),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_jsonf(
cls,
fpath: str,
encoding: str = "utf8",
*,
force_snake_case=True,
force_cast: bool = False,
restrict: bool = False,
) -> T:
"""From json file path to instance
:param fpath: Json file path
:param encoding: Json file encoding
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
"""
return cls.from_dict(
util.load_jsonf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_json_to_list(
cls, data: str, *, force_snake_case=True, force_cast: bool = False, restrict: bool = False
) -> TList[T]:
"""From json string to list of instance
:param data: Json string
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance
Usage:
>>> from owlmixin.samples import Human
>>> humans: TList[Human] = Human.from_json_to_list('''[
... {"id": 1, "name": "Tom", "favorites": [{"name": "Apple"}]},
... {"id": 2, "name": "John", "favorites": [{"name": "Orange"}]}
... ]''')
>>> humans[0].name
'Tom'
>>> humans[1].name
'John'
"""
return cls.from_dicts(
util.load_json(data),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_json_to_iterator(
cls, data: str, *, force_snake_case=True, force_cast: bool = False, restrict: bool = False
) -> TIterator[T]:
"""From json string to iterable instance
:param data: Json string
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Iterable instance
Usage:
>>> from owlmixin.samples import Human
>>> humans: TIterator[Human] = Human.from_json_to_iterator('''[
... {"id": 1, "name": "Tom", "favorites": [{"name": "Apple"}]},
... {"id": 2, "name": "John", "favorites": [{"name": "Orange"}]}
... ]''')
>>> humans.next_at(1).get().name
'John'
>>> humans.next_at(0).is_none()
True
"""
return cls.from_iterable_dicts(
util.load_json(data),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_jsonf_to_list(
cls,
fpath: str,
encoding: str = "utf8",
*,
force_snake_case=True,
force_cast: bool = False,
restrict: bool = False,
) -> TList[T]:
"""From json file path to list of instance
:param fpath: Json file path
:param encoding: Json file encoding
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance
"""
return cls.from_dicts(
util.load_jsonf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_jsonf_to_iterator(
cls,
fpath: str,
encoding: str = "utf8",
*,
force_snake_case=True,
force_cast: bool = False,
restrict: bool = False,
) -> TIterator[T]:
"""From json file path to iterable instance
:param fpath: Json file path
:param encoding: Json file encoding
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Iterable instance
"""
return cls.from_iterable_dicts(
util.load_jsonf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_yaml(
cls, data: str, *, force_snake_case=True, force_cast: bool = False, restrict: bool = True
) -> T:
"""From yaml string to instance
:param data: Yaml string
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
Usage:
>>> from owlmixin.samples import Human
>>> human: Human = Human.from_yaml('''
... id: 1
... name: Tom
... favorites:
... - name: Apple
... names_by_lang:
... en: Apple
... de: Apfel
... - name: Orange
... ''')
>>> human.id
1
>>> human.name
'Tom'
>>> human.favorites[0].names_by_lang.get()["de"]
'Apfel'
"""
return cls.from_dict(
util.load_yaml(data),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_yamlf(
cls,
fpath: str,
encoding: str = "utf8",
*,
force_snake_case=True,
force_cast: bool = False,
restrict: bool = True,
) -> T:
"""From yaml file path to instance
:param fpath: Yaml file path
:param encoding: Yaml file encoding
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
"""
return cls.from_dict(
util.load_yamlf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_yaml_to_list(
cls, data: str, *, force_snake_case=True, force_cast: bool = False, restrict: bool = True
) -> TList[T]:
"""From yaml string to list of instance
:param data: Yaml string
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance
Usage:
>>> from owlmixin.samples import Human
>>> humans: TList[Human] = Human.from_yaml_to_list('''
... - id: 1
... name: Tom
... favorites:
... - name: Apple
... - id: 2
... name: John
... favorites:
... - name: Orange
... ''')
>>> humans[0].name
'Tom'
>>> humans[1].name
'John'
>>> humans[0].favorites[0].name
'Apple'
"""
return cls.from_dicts(
util.load_yaml(data),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_yaml_to_iterator(
cls, data: str, *, force_snake_case=True, force_cast: bool = False, restrict: bool = True
) -> TIterator[T]:
"""From yaml string to iterable instance
:param data: Yaml string
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Iterable instance
Usage:
>>> from owlmixin.samples import Human
>>> humans: TIterator[Human] = Human.from_yaml_to_iterator('''
... - id: 1
... name: Tom
... favorites:
... - name: Apple
... - id: 2
... name: John
... favorites:
... - name: Orange
... ''')
>>> human1 = humans.next_at(1).get()
>>> human1.name
'John'
>>> humans.next_at(0).is_none()
True
>>> human1.favorites[0].name
'Orange'
"""
return cls.from_iterable_dicts(
util.load_yaml(data),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_yamlf_to_list(
cls,
fpath: str,
encoding: str = "utf8",
*,
force_snake_case=True,
force_cast: bool = False,
restrict: bool = True,
) -> TList[T]:
"""From yaml file path to list of instance
:param fpath: Yaml file path
:param encoding: Yaml file encoding
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance
"""
return cls.from_dicts(
util.load_yamlf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_yamlf_to_iterator(
cls,
fpath: str,
encoding: str = "utf8",
*,
force_snake_case=True,
force_cast: bool = False,
restrict: bool = True,
) -> TIterator[T]:
"""From yaml file path to iterable instance
:param fpath: Yaml file path
:param encoding: Yaml file encoding
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Iterable instance
"""
return cls.from_iterable_dicts(
util.load_yamlf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_csvf_to_list(
    cls,
    fpath: str,
    fieldnames: Optional[Sequence[str]] = None,
    encoding: str = "utf8",
    *,
    force_snake_case: bool = True,
    restrict: bool = True,
) -> TList[T]:
    """From csv file path to list of instance

    :param fpath: Csv file path
    :param fieldnames: Specify csv header names if not included in the file
    :param encoding: Csv file encoding
    :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
    :param restrict: Prohibit extra parameters if True
    :return: List of Instance
    """
    # force_cast is always True here: CSV cells are read as strings,
    # so casting to the annotated field types is required.
    return cls.from_dicts(
        list(util.load_csvf(fpath, fieldnames, encoding)),
        force_snake_case=force_snake_case,
        force_cast=True,
        restrict=restrict,
    )
@classmethod
def from_csvf_to_iterator(
    cls,
    fpath: str,
    fieldnames: Optional[Sequence[str]] = None,
    encoding: str = "utf8",
    *,
    force_snake_case: bool = True,
    restrict: bool = True,
) -> TIterator[T]:
    """From csv file path to iterable instance

    :param fpath: Csv file path
    :param fieldnames: Specify csv header names if not included in the file
    :param encoding: Csv file encoding
    :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
    :param restrict: Prohibit extra parameters if True
    :return: Iterable Instance
    """
    # force_cast is always True here: CSV cells are read as strings,
    # so casting to the annotated field types is required.
    return cls.from_iterable_dicts(
        util.load_csvf(fpath, fieldnames, encoding),
        force_snake_case=force_snake_case,
        force_cast=True,
        restrict=restrict,
    )
@classmethod
def from_json_url(
    cls, url: str, *, force_snake_case: bool = True, force_cast: bool = False, restrict: bool = False
) -> T:
    """From url which returns json to instance

    :param url: Url which returns json
    :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
    :param force_cast: Cast forcibly if True
    :param restrict: Prohibit extra parameters if True
    :return: Instance

    NOTE(review): ``restrict`` defaults to False here, unlike the
    yaml/csv loaders where it defaults to True — confirm this is
    intentional (remote payloads often carry extra keys).
    """
    return cls.from_dict(
        util.load_json_url(url),
        force_snake_case=force_snake_case,
        force_cast=force_cast,
        restrict=restrict,
    )
|
tadashi-aikawa/owlmixin
|
owlmixin/__init__.py
|
Python
|
mit
| 34,064
|
__author__ = 'joseph'

import os
import sys
import unittest

# Make the package sources importable when this script is run from the
# test directory.  BUGFIX: sys.path entries must be *directories*; the
# original code also inserted paths to individual .py files
# ('../src/ChannelDebug.py', './ChannelDebugTest.py'), which Python
# silently ignores.  Insert the directories instead.
sys.path.insert(0, os.path.abspath('../src'))
sys.path.insert(0, os.path.abspath('.'))

# Imported so that unittest.main() discovers the test case in __main__.
from ChannelDebugTest import ChannelDebugTest

if __name__ == '__main__':
    unittest.main()
|
debugchannel/debugchannel-python-client
|
test/test.py
|
Python
|
mit
| 350
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for iond node under test"""
import decimal
import errno
import http.client
import json
import logging
import os
import re
import subprocess
import time
from .authproxy import JSONRPCException
from .util import (
assert_equal,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility: json.JSONDecodeError was added in 3.5;
# older versions raise plain ValueError on malformed JSON.
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)

# Seconds to wait for the node process to terminate before giving up.
BITCOIND_PROC_WAIT_TIMEOUT = 600
class TestNode():
    """A class for representing a iond node under test.

    This class contains:
    - state about the node (whether it's running, etc)
    - a Python subprocess.Popen object representing the running process
    - an RPC connection to the node
    - one or more P2P connections to the node

    To make things easier for the test writer, any unrecognised messages will
    be dispatched to the RPC connection."""

    def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir, use_cli=False):
        # i doubles as the node index for datadir naming, port selection
        # and logger naming.
        self.index = i
        self.datadir = os.path.join(dirname, "node" + str(i))
        self.rpchost = rpchost
        if timewait:
            self.rpc_timeout = timewait
        else:
            # Wait for up to 600 seconds for the RPC server to respond
            self.rpc_timeout = 600
        if binary is None:
            # Allow overriding the daemon binary via the environment.
            self.binary = os.getenv("BITCOIND", "iond")
        else:
            self.binary = binary
        self.stderr = stderr
        self.coverage_dir = coverage_dir
        # Most callers will just need to add extra args to the standard list below. For those callers that need more flexibity, they can just set the args property directly.
        self.extra_args = extra_args
        self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i]
        self.cli = TestNodeCLI(os.getenv("BITCOINCLI", "ion-cli"), self.datadir)
        # When True, unknown attribute access is routed to the CLI wrapper
        # instead of the RPC proxy (see __getattr__).
        self.use_cli = use_cli
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.url = None
        self.log = logging.getLogger('TestFramework.node%d' % i)
        self.cleanup_on_exit = True  # Whether to kill the node when this object goes away
        self.p2ps = []

    def __del__(self):
        # Ensure that we don't leave any bitcoind processes lying around after
        # the test ends
        if self.process and self.cleanup_on_exit:
            # Should only happen on test failure
            # Avoid using logger, as that may have already been shutdown when
            # this destructor is called.
            print("Cleaning up leftover process")
            self.process.kill()

    def __getattr__(self, name):
        """Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
        if self.use_cli:
            return getattr(self.cli, name)
        else:
            assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
            return getattr(self.rpc, name)

    def start(self, extra_args=None, stderr=None, *args, **kwargs):
        """Start the node."""
        if extra_args is None:
            extra_args = self.extra_args
        if stderr is None:
            stderr = self.stderr
        # Delete any existing cookie file -- if such a file exists (eg due to
        # unclean shutdown), it will get overwritten anyway by bitcoind, and
        # potentially interfere with our attempt to authenticate
        delete_cookie_file(self.datadir)
        self.process = subprocess.Popen(self.args + extra_args, stderr=stderr, *args, **kwargs)
        self.running = True
        self.log.debug("iond started, waiting for RPC to come up")

    def wait_for_rpc_connection(self):
        """Sets up an RPC connection to the iond process. Returns False if unable to connect."""
        # Poll at a rate of four times per second
        poll_per_s = 4
        # NOTE(review): fixed 5s pre-sleep before the first poll — presumably
        # to let the daemon bind its ports; confirm it is still needed.
        time.sleep(5)
        for _ in range(poll_per_s * self.rpc_timeout):
            # If the process died during startup, fail fast with its status.
            assert self.process.poll() is None, "iond exited with status %i during initialization" % self.process.returncode
            try:
                self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
                while self.rpc.getblockcount() < 0:
                    time.sleep(1)
                # If the call to getblockcount() succeeds then the RPC connection is up
                self.rpc_connected = True
                self.url = self.rpc.url
                self.log.debug("RPC successfully started")
                return
            except IOError as e:
                if e.errno != errno.ECONNREFUSED:  # Port not yet open?
                    raise  # unknown IO error
            except JSONRPCException as e:  # Initialization phase
                if e.error['code'] != -28:  # RPC in warmup?
                    raise  # unknown JSON RPC exception
            except ValueError as e:  # cookie file not found and no rpcuser or rpcassword. bitcoind still starting
                if "No RPC credentials" not in str(e):
                    raise
            time.sleep(1.0 / poll_per_s)
        raise AssertionError("Unable to connect to iond")

    def get_wallet_rpc(self, wallet_name):
        """Return an RPC/CLI handle scoped to the named wallet."""
        if self.use_cli:
            return self.cli("-rpcwallet={}".format(wallet_name))
        else:
            assert self.rpc_connected
            assert self.rpc
            wallet_path = "wallet/%s" % wallet_name
            # The proxy supports '/' to produce a sub-path-scoped proxy.
            return self.rpc / wallet_path

    def stop_node(self):
        """Stop the node."""
        if not self.running:
            return
        self.log.debug("Stopping node")
        try:
            # 'stop' is dispatched to the RPC connection via __getattr__.
            self.stop()
        except http.client.CannotSendRequest:
            self.log.exception("Unable to stop node.")
        del self.p2ps[:]

    def is_node_stopped(self):
        """Checks whether the node has stopped.

        Returns True if the node has stopped. False otherwise.
        This method is responsible for freeing resources (self.process)."""
        # NOTE(review): a fixed 20s sleep before every poll makes
        # wait_until_stopped very slow — confirm this delay is intentional.
        time.sleep(20)
        if not self.running:
            return True
        return_code = self.process.poll()
        if return_code is None:
            return False
        # process has stopped. Assert that it didn't return an error code.
        assert_equal(return_code, 0)
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.log.debug("Node stopped")
        return True

    def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
        # Poll is_node_stopped() until it returns True or timeout expires.
        wait_until(self.is_node_stopped, timeout=timeout)

    def node_encrypt_wallet(self, passphrase):
        """"Encrypts the wallet.

        This causes iond to shutdown, so this method takes
        care of cleaning up resources."""
        self.encryptwallet(passphrase)
        self.wait_until_stopped()

    def add_p2p_connection(self, p2p_conn, *args, **kwargs):
        """Add a p2p connection to the node.

        This method adds the p2p connection to the self.p2ps list and also
        returns the connection to the caller."""
        if 'dstport' not in kwargs:
            kwargs['dstport'] = p2p_port(self.index)
        if 'dstaddr' not in kwargs:
            kwargs['dstaddr'] = '127.0.0.1'
        p2p_conn.peer_connect(*args, **kwargs)
        self.p2ps.append(p2p_conn)
        return p2p_conn

    @property
    def p2p(self):
        """Return the first p2p connection

        Convenience property - most tests only use a single p2p connection to each
        node, so this saves having to write node.p2ps[0] many times."""
        assert self.p2ps, "No p2p connection"
        return self.p2ps[0]

    def disconnect_p2ps(self):
        """Close all p2p connections to the node."""
        for p in self.p2ps:
            p.peer_disconnect()
        del self.p2ps[:]
class TestNodeCLIAttr:
    """Callable proxy bound to one CLI command name.

    Instances are handed out by ``TestNodeCLI.__getattr__``; invoking one
    forwards the stored command (plus any arguments) to ``send_cli``.
    """

    def __init__(self, cli, command):
        # Owning CLI wrapper and the command this attribute represents.
        self.cli = cli
        self.command = command

    def __call__(self, *args, **kwargs):
        """Execute the command immediately and return its result."""
        return self.cli.send_cli(self.command, *args, **kwargs)

    def get_request(self, *args, **kwargs):
        """Return a zero-argument callable that runs the command later
        (useful for batching)."""
        def deferred():
            return self(*args, **kwargs)
        return deferred
class TestNodeCLI():
    """Interface to ion-cli for an individual node"""

    def __init__(self, binary, datadir):
        # Extra command-line options; populated on clones made by __call__.
        self.options = []
        self.binary = binary
        self.datadir = datadir
        self.input = None
        self.log = logging.getLogger('TestFramework.bitcoincli')

    def __call__(self, *options, input=None):
        # Calling the wrapper yields a fresh wrapper carrying the given
        # ion-cli command-line options and optional stdin payload.
        clone = TestNodeCLI(self.binary, self.datadir)
        clone.options = list(map(str, options))
        clone.input = input
        return clone

    def __getattr__(self, command):
        # Any unknown attribute becomes a CLI command proxy.
        return TestNodeCLIAttr(self, command)

    def batch(self, requests):
        """Run a sequence of prepared requests, collecting results/errors."""
        outcomes = []
        for req in requests:
            try:
                outcomes.append(dict(result=req()))
            except JSONRPCException as exc:
                outcomes.append(dict(error=exc))
        return outcomes

    def send_cli(self, command=None, *args, **kwargs):
        """Run ion-cli command. Deserializes returned string as python object."""
        positional = [str(arg) for arg in args]
        named = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
        assert not (positional and named), "Cannot use positional arguments and named arguments in the same ion-cli call"
        argv = [self.binary, "-datadir=" + self.datadir] + self.options
        if named:
            argv += ["-named"]
        if command is not None:
            argv += [command]
        argv += positional + named
        self.log.debug("Running bitcoin-cli command: %s" % command)
        proc = subprocess.Popen(argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        out, err = proc.communicate(input=self.input)
        rc = proc.poll()
        if rc:
            # Translate ion-cli's "error code / error message" output back
            # into a JSONRPCException when it matches the known format.
            match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', err)
            if match:
                code, message = match.groups()
                raise JSONRPCException(dict(code=int(code), message=message))
            # Unrecognised failure: surface stderr via CalledProcessError.
            raise subprocess.CalledProcessError(rc, self.binary, output=err)
        try:
            return json.loads(out, parse_float=decimal.Decimal)
        except JSONDecodeError:
            # Non-JSON output (e.g. plain text): return it stripped.
            return out.rstrip("\n")
|
cevap/ion
|
test/functional/test_framework/test_node.py
|
Python
|
mit
| 11,014
|
import json
import traceback
import unicodedata
import bel.nanopub.validate
import falcon
import structlog
log = structlog.getLogger(__name__)
class NanopubValidateResource(object):
    """Validate nanopubs"""

    def on_post(self, req, resp):
        """Validate the nanopub in the POST body and return the result.

        Raises falcon HTTP 400 for a missing nanopub and HTTP 422 for a
        malformed payload or a validation failure.
        """
        # Validation levels:
        #   complete - fill in any missing assertion/annotation validations
        #   force    - redo all validations
        #   cached   - only return cached/pre-generated validations
        validation_level = req.get_param('validation_level', default="complete")

        # Read and decode the request body as UTF-8 JSON.
        try:
            data = req.stream.read(req.content_length or 0)
            data = data.decode(encoding="utf-8")
            data = data.replace("\u00a0", " ")  # get rid of non-breaking spaces
            data = json.loads(data)
        except ValueError as e:
            raise falcon.HTTPUnprocessableEntity(
                title="Cannot process payload",
                description=f"Cannot process nanopub (maybe an encoding error? please use UTF-8 for JSON payload) error: {e}",
            )

        nanopub = {}
        if "nanopub" in data:
            nanopub["nanopub"] = data.get("nanopub")
        else:
            nanopub = None

        error_level = data.get("error_level", "WARNING")

        if nanopub:
            try:
                nanopub = bel.nanopub.validate.validate(nanopub, error_level=error_level, validation_level=validation_level)
                resp.media = nanopub
                resp.status = falcon.HTTP_200
            except Exception as e:
                # BUGFIX: traceback.print_exc() writes to stderr and returns
                # None, so the original logged the literal string "None".
                # format_exc() returns the traceback text for the log record.
                log.error(traceback.format_exc())
                raise falcon.HTTPUnprocessableEntity(
                    title="Cannot process nanopub", description=f"Cannot process nanopub: {e}"
                )
        else:
            raise falcon.HTTPBadRequest(
                title="Cannot process nanopub",
                description=f"No nanopub in payload to process. Please check your submission.",
            )
|
belbio/bel_api
|
app/resources/nanopubs.py
|
Python
|
apache-2.0
| 2,044
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implemenations based on AMQP.
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
# TODO(pekowsk): Remove import cfg and below comment in Havana.
# This import should no longer be needed when the amqp_rpc_single_reply_queue
# option is removed.
from oslo.config import cfg
from manila.openstack.common import excutils
from manila.openstack.common.gettextutils import _
from manila.openstack.common import local
from manila.openstack.common import log as logging
from manila.openstack.common.rpc import common as rpc_common
# TODO(pekowski): Remove this option in Havana.
amqp_opts = [
    cfg.BoolOpt('amqp_rpc_single_reply_queue',
                default=False,
                help='Enable a fast single reply queue if using AMQP based '
                     'RPC like RabbitMQ or Qpid.'),
]

cfg.CONF.register_opts(amqp_opts)

# Message key under which a per-message unique id is stored; used by
# _MsgIdCache for duplicate-delivery detection.
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
    """Class that implements a Pool of Connections."""

    def __init__(self, conf, connection_cls, *args, **kwargs):
        # connection_cls is the transport-specific Connection class
        # (e.g. kombu or qpid); instances are created lazily in create().
        self.connection_cls = connection_cls
        self.conf = conf
        kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
        # LIFO ordering: reuse the most recently returned connection first.
        kwargs.setdefault("order_as_stack", True)
        super(Pool, self).__init__(*args, **kwargs)
        self.reply_proxy = None

    # TODO(comstud): Timeout connections not used in a while
    def create(self):
        """Called by the eventlet pool when a new connection is needed."""
        LOG.debug(_('Pool creating new connection'))
        return self.connection_cls(self.conf)

    def empty(self):
        """Close every idle connection and drop the class-level pool."""
        while self.free_items:
            self.get().close()
        # Force a new connection pool to be created.
        # Note that this was added due to failing unit test cases. The issue
        # is the above "while loop" gets all the cached connections from the
        # pool and closes them, but never returns them to the pool, a pool
        # leak. The unit tests hang waiting for an item to be returned to the
        # pool. The unit tests get here via the teatDown() method. In the run
        # time code, it gets here via cleanup() and only appears in service.py
        # just before doing a sys.exit(), so cleanup() only happens once and
        # the leakage is not a problem.
        self.connection_cls.pool = None
# Guards one-time creation of the per-class connection pool.
_pool_create_sem = semaphore.Semaphore()


def get_connection_pool(conf, connection_cls):
    """Return the process-wide Pool for *connection_cls*, creating it on
    first use."""
    with _pool_create_sem:
        # Make sure only one thread tries to create the connection pool.
        if not connection_cls.pool:
            connection_cls.pool = Pool(conf, connection_cls)
    return connection_cls.pool
class ConnectionContext(rpc_common.Connection):
    """The class that is actually returned to the caller of
    create_connection().  This is essentially a wrapper around
    Connection that supports 'with'.  It can also return a new
    Connection, or one from a pool.  The function will also catch
    when an instance of this class is to be deleted.  With that
    we can return Connections to the pool on exceptions and so
    forth without making the caller be responsible for catching
    them.  If possible the function makes sure to return a
    connection to the pool.
    """

    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
        """Create a new connection, or get one from the pool"""
        self.connection = None
        self.conf = conf
        self.connection_pool = connection_pool
        if pooled:
            self.connection = connection_pool.get()
        else:
            # Dedicated (non-pooled) connection, optionally targeted at a
            # specific server via server_params.
            self.connection = connection_pool.connection_cls(
                conf,
                server_params=server_params)
        # Remembered so _done() knows whether to return or close.
        self.pooled = pooled

    def __enter__(self):
        """When with ConnectionContext() is used, return self"""
        return self

    def _done(self):
        """If the connection came from a pool, clean it up and put it back.
        If it did not come from a pool, close it.
        """
        if self.connection:
            if self.pooled:
                # Reset the connection so it's ready for the next caller
                # to grab from the pool
                self.connection.reset()
                self.connection_pool.put(self.connection)
            else:
                try:
                    self.connection.close()
                except Exception:
                    # Best-effort close; the connection is discarded anyway.
                    pass
            # Mark as released; further use raises InvalidRPCConnectionReuse.
            self.connection = None

    def __exit__(self, exc_type, exc_value, tb):
        """End of 'with' statement.  We're done here."""
        self._done()

    def __del__(self):
        """Caller is done with this connection.  Make sure we cleaned up."""
        self._done()

    def close(self):
        """Caller is done with this connection."""
        self._done()

    def create_consumer(self, topic, proxy, fanout=False):
        # Delegates to the wrapped transport connection.
        self.connection.create_consumer(topic, proxy, fanout)

    def create_worker(self, topic, proxy, pool_name):
        self.connection.create_worker(topic, proxy, pool_name)

    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
        self.connection.join_consumer_pool(callback,
                                           pool_name,
                                           topic,
                                           exchange_name)

    def consume_in_thread(self):
        self.connection.consume_in_thread()

    def __getattr__(self, key):
        """Proxy all other calls to the Connection instance"""
        if self.connection:
            return getattr(self.connection, key)
        else:
            # The connection was already returned/closed via _done().
            raise rpc_common.InvalidRPCConnectionReuse()
class ReplyProxy(ConnectionContext):
    """ Connection class for RPC replies / callbacks """

    def __init__(self, conf, connection_pool):
        # msg_id -> waiter mapping for in-flight calls on the shared
        # reply queue.
        self._call_waiters = {}
        self._num_call_waiters = 0
        self._num_call_waiters_wrn_threshhold = 10
        # One direct queue per process; replies are demultiplexed by msg_id.
        self._reply_q = 'reply_' + uuid.uuid4().hex
        super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
        self.declare_direct_consumer(self._reply_q, self._process_data)
        self.consume_in_thread()

    def _process_data(self, message_data):
        """Consumer callback: route a reply to the waiter for its msg_id."""
        msg_id = message_data.pop('_msg_id', None)
        waiter = self._call_waiters.get(msg_id)
        if not waiter:
            # Late/orphaned reply (caller timed out or already finished).
            LOG.warn(_('no calling threads waiting for msg_id : %s'
                       ', message : %s') % (msg_id, message_data))
        else:
            waiter.put(message_data)

    def add_call_waiter(self, waiter, msg_id):
        self._num_call_waiters += 1
        if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
            # Doubling threshold keeps the warning from spamming the log.
            LOG.warn(_('Number of call waiters is greater than warning '
                       'threshhold: %d. There could be a MulticallProxyWaiter '
                       'leak.') % self._num_call_waiters_wrn_threshhold)
            self._num_call_waiters_wrn_threshhold *= 2
        self._call_waiters[msg_id] = waiter

    def del_call_waiter(self, msg_id):
        self._num_call_waiters -= 1
        del self._call_waiters[msg_id]

    def get_reply_q(self):
        return self._reply_q
def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
              failure=None, ending=False, log_failure=True):
    """Sends a reply or an error on the channel signified by msg_id.

    Failure should be a sys.exc_info() tuple.

    :param ending: when True, marks the final message of a multicall so the
                   waiter stops iterating.
    """
    with ConnectionContext(conf, connection_pool) as conn:
        if failure:
            failure = rpc_common.serialize_remote_exception(failure,
                                                            log_failure)

        try:
            msg = {'result': reply, 'failure': failure}
        except TypeError:
            # Fallback for unserializable replies: represent each attribute
            # with repr().  BUGFIX: dict.iteritems() is Python 2 only;
            # .items() behaves identically and also works on Python 3.
            msg = {'result': dict((k, repr(v))
                   for k, v in reply.__dict__.items()),
                   'failure': failure}
        if ending:
            msg['ending'] = True
        _add_unique_id(msg)
        # If a reply_q exists, add the msg_id to the reply and pass the
        # reply_q to direct_send() to use it as the response queue.
        # Otherwise use the msg_id for backward compatibilty.
        if reply_q:
            msg['_msg_id'] = msg_id
            conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
        else:
            conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call"""

    def __init__(self, **kwargs):
        # Pull out the transport-level fields before handing the rest to
        # the common context.
        self.msg_id = kwargs.pop('msg_id', None)
        self.reply_q = kwargs.pop('reply_q', None)
        self.conf = kwargs.pop('conf')
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        """Return a copy that also carries conf, msg_id and reply_q."""
        values = self.to_dict()
        values.update(conf=self.conf,
                      msg_id=self.msg_id,
                      reply_q=self.reply_q)
        return self.__class__(**values)

    def reply(self, reply=None, failure=None, ending=False,
              connection_pool=None, log_failure=True):
        """Send *reply* (or *failure*) back to the caller, if one is waiting."""
        if not self.msg_id:
            return
        msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
                  reply, failure, ending, log_failure)
        if ending:
            # Final message sent; further reply() calls become no-ops.
            self.msg_id = None
def unpack_context(conf, msg):
    """Unpack context from msg."""
    context_dict = {}
    for key in list(msg.keys()):
        # NOTE(vish): Some versions of python don't like unicode keys
        # in kwargs.
        key = str(key)
        if key.startswith('_context_'):
            # Strip the '_context_' prefix (9 chars) and move the value
            # out of the message.
            context_dict[key[9:]] = msg.pop(key)
    context_dict.update(msg_id=msg.pop('_msg_id', None),
                        reply_q=msg.pop('_reply_q', None),
                        conf=conf)
    ctx = RpcContext.from_dict(context_dict)
    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
    return ctx
def pack_context(msg, context):
    """Pack context into msg.

    Values for message keys need to be less than 255 chars, so we pull
    context out into a bunch of separate keys. If we want to support
    more arguments in rabbit messages, we may want to do the same
    for args at some point.
    """
    # BUGFIX: dict.iteritems() only exists on Python 2; .items() behaves
    # identically here and works on both major versions.
    context_d = dict([('_context_%s' % key, value)
                      for (key, value) in context.to_dict().items()])
    msg.update(context_d)
class _MsgIdCache(object):
    """This class checks any duplicate messages."""

    # NOTE: This value is considered can be a configuration item, but
    # it is not necessary to change its value in most cases,
    # so let this value as static for now.
    DUP_MSG_CHECK_SIZE = 16

    def __init__(self, **kwargs):
        # Bounded FIFO of recently seen unique ids; old ids age out
        # automatically once maxlen is reached.
        self.prev_msgids = collections.deque([],
                                             maxlen=self.DUP_MSG_CHECK_SIZE)

    def check_duplicate_message(self, message_data):
        """AMQP consumers may read same message twice when exceptions occur
        before ack is returned. This method prevents doing it.
        """
        if UNIQUE_ID not in message_data:
            # Messages without a unique id cannot be checked.
            return
        msg_id = message_data[UNIQUE_ID]
        if msg_id in self.prev_msgids:
            raise rpc_common.DuplicateMessageError(msg_id=msg_id)
        self.prev_msgids.append(msg_id)
def _add_unique_id(msg):
    """Add unique_id for checking duplicate messages."""
    unique_id = uuid.uuid4().hex
    # Stamp the message in place so consumers can detect re-delivery.
    msg[UNIQUE_ID] = unique_id
    LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
class _ThreadPoolWithWait(object):
    """Base class for a delayed invocation manager used by
    the Connection class to start up green threads
    to handle incoming messages.
    """

    def __init__(self, conf, connection_pool):
        # Green-thread pool sized from configuration; subclasses spawn
        # their callbacks into it.
        self.conf = conf
        self.connection_pool = connection_pool
        self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)

    def wait(self):
        """Wait for all callback threads to exit."""
        self.pool.waitall()
class CallbackWrapper(_ThreadPoolWithWait):
    """Wraps a straight callback to allow it to be invoked in a green
    thread.
    """

    def __init__(self, conf, callback, connection_pool):
        """
        :param conf: cfg.CONF instance
        :param callback: a callable (probably a function)
        :param connection_pool: connection pool as returned by
                                get_connection_pool()
        """
        super(CallbackWrapper, self).__init__(conf=conf,
                                              connection_pool=connection_pool)
        self.callback = callback

    def __call__(self, message_data):
        # Hand the message to a green thread so the consumer loop is not
        # blocked by the callback.
        self.pool.spawn_n(self.callback, message_data)
class ProxyCallback(_ThreadPoolWithWait):
    """Calls methods on a proxy object based on method and args."""

    def __init__(self, conf, proxy, connection_pool):
        super(ProxyCallback, self).__init__(
            conf=conf,
            connection_pool=connection_pool,
        )
        # proxy: the dispatcher/manager whose methods are invoked.
        self.proxy = proxy
        self.msg_id_cache = _MsgIdCache()

    def __call__(self, message_data):
        """Consumer callback to call a method on a proxy object.

        Parses the message for validity and fires off a thread to call the
        proxy object method.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}
        """
        # It is important to clear the context here, because at this point
        # the previous context is stored in local.store.context
        if hasattr(local.store, 'context'):
            del local.store.context
        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
        # Drop re-delivered messages before doing any work.
        self.msg_id_cache.check_duplicate_message(message_data)
        ctxt = unpack_context(self.conf, message_data)
        method = message_data.get('method')
        args = message_data.get('args', {})
        version = message_data.get('version')
        namespace = message_data.get('namespace')
        if not method:
            # Malformed message: report back to the caller instead of
            # silently dropping it.
            LOG.warn(_('no method for message: %s') % message_data)
            ctxt.reply(_('No method for message: %s') % message_data,
                       connection_pool=self.connection_pool)
            return
        # Actual dispatch happens in a green thread so the consumer loop
        # can keep pulling messages.
        self.pool.spawn_n(self._process_data, ctxt, version, method,
                          namespace, args)

    def _process_data(self, ctxt, version, method, namespace, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
        (see rpc.dispatcher.RpcDispatcher), pass it the version,
        method, and args and let it dispatch as appropriate.  If not, use
        the old behavior of magically calling the specified method on the
        proxy we have here.
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, namespace,
                                       **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                # Stream each yielded value back as a separate reply.
                for x in rval:
                    ctxt.reply(x, None, connection_pool=self.connection_pool)
            else:
                ctxt.reply(rval, None, connection_pool=self.connection_pool)
            # This final None tells multicall that it is done.
            ctxt.reply(ending=True, connection_pool=self.connection_pool)
        except rpc_common.ClientException as e:
            # Expected application-level error: reply without a scary log.
            LOG.debug(_('Expected exception during message handling (%s)') %
                      e._exc_info[1])
            ctxt.reply(None, e._exc_info,
                       connection_pool=self.connection_pool,
                       log_failure=False)
        except Exception:
            # sys.exc_info() is deleted by LOG.exception().
            exc_info = sys.exc_info()
            LOG.error(_('Exception during message handling'),
                      exc_info=exc_info)
            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
class MulticallProxyWaiter(object):
    """Iterator over the replies of one call when using the shared reply
    queue (amqp_rpc_single_reply_queue=True).  Replies arrive via put()
    from the ReplyProxy consumer thread.
    """

    def __init__(self, conf, msg_id, timeout, connection_pool):
        self._msg_id = msg_id
        self._timeout = timeout or conf.rpc_response_timeout
        self._reply_proxy = connection_pool.reply_proxy
        self._done = False
        self._got_ending = False
        self._conf = conf
        self._dataqueue = queue.LightQueue()
        # Add this caller to the reply proxy's call_waiters
        self._reply_proxy.add_call_waiter(self, self._msg_id)
        self.msg_id_cache = _MsgIdCache()

    def put(self, data):
        """Called by the reply proxy to deliver one reply message."""
        self._dataqueue.put(data)

    def done(self):
        """Deregister from the reply proxy; idempotent."""
        if self._done:
            return
        self._done = True
        # Remove this caller from reply proxy's call_waiters
        self._reply_proxy.del_call_waiter(self._msg_id)

    def _process_data(self, data):
        """Turn one reply message into a result value (or record 'ending')."""
        result = None
        self.msg_id_cache.check_duplicate_message(data)
        if data['failure']:
            failure = data['failure']
            result = rpc_common.deserialize_remote_exception(self._conf,
                                                             failure)
        elif data.get('ending', False):
            self._got_ending = True
        else:
            result = data['result']
        return result

    def __iter__(self):
        """Return a result until we get a reply with an 'ending' flag"""
        if self._done:
            # BUGFIX (PEP 479): ``raise StopIteration`` inside a generator
            # becomes RuntimeError on Python 3.7+; a plain return ends the
            # generator with identical semantics on both Python 2 and 3.
            return
        while True:
            try:
                data = self._dataqueue.get(timeout=self._timeout)
                result = self._process_data(data)
            except queue.Empty:
                self.done()
                raise rpc_common.Timeout()
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.done()
            if self._got_ending:
                self.done()
                return  # BUGFIX (PEP 479): was ``raise StopIteration``
            if isinstance(result, Exception):
                # Remote side raised: re-raise locally and stop iterating.
                self.done()
                raise result
            yield result
#TODO(pekowski): Remove MulticallWaiter() in Havana.
class MulticallWaiter(object):
    """Legacy per-call reply iterator (amqp_rpc_single_reply_queue=False):
    owns a dedicated connection and consumes replies from it directly.
    """

    def __init__(self, conf, connection, timeout):
        self._connection = connection
        self._iterator = connection.iterconsume(timeout=timeout or
                                                conf.rpc_response_timeout)
        self._result = None
        self._done = False
        self._got_ending = False
        self._conf = conf
        self.msg_id_cache = _MsgIdCache()

    def done(self):
        """Tear down the consumer iterator and connection; idempotent."""
        if self._done:
            return
        self._done = True
        self._iterator.close()
        self._iterator = None
        self._connection.close()

    def __call__(self, data):
        """The consume() callback will call this.  Store the result."""
        self.msg_id_cache.check_duplicate_message(data)
        if data['failure']:
            failure = data['failure']
            self._result = rpc_common.deserialize_remote_exception(self._conf,
                                                                   failure)
        elif data.get('ending', False):
            self._got_ending = True
        else:
            self._result = data['result']

    def __iter__(self):
        """Return a result until we get a 'None' response from consumer"""
        if self._done:
            # BUGFIX (PEP 479): ``raise StopIteration`` inside a generator
            # becomes RuntimeError on Python 3.7+; a plain return ends the
            # generator with identical semantics on both Python 2 and 3.
            return
        while True:
            try:
                # BUGFIX: the ``next()`` builtin works on Python 2.6+ and
                # Python 3; ``.next()`` was Python 2 only.
                next(self._iterator)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.done()
            if self._got_ending:
                self.done()
                return  # BUGFIX (PEP 479): was ``raise StopIteration``
            result = self._result
            if isinstance(result, Exception):
                self.done()
                raise result
            yield result
def create_connection(conf, new, connection_pool):
    """Create a connection"""
    # A "new" connection bypasses the pool; otherwise hand out a pooled one.
    use_pool = not new
    return ConnectionContext(conf, connection_pool, pooled=use_pool)
# Guards one-time creation of the shared ReplyProxy.
_reply_proxy_create_sem = semaphore.Semaphore()


def multicall(conf, context, topic, msg, timeout, connection_pool):
    """Make a call that returns multiple times."""
    # TODO(pekowski): Remove all these comments in Havana.
    # For amqp_rpc_single_reply_queue = False,
    # Can't use 'with' for multicall, as it returns an iterator
    # that will continue to use the connection.  When it's done,
    # connection.close() will get called which will put it back into
    # the pool
    # For amqp_rpc_single_reply_queue = True,
    # The 'with' statement is mandatory for closing the connection
    LOG.debug(_('Making synchronous call on %s ...'), topic)
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    LOG.debug(_('MSG_ID is %s') % (msg_id))
    _add_unique_id(msg)
    pack_context(msg, context)

    # TODO(pekowski): Remove this flag and the code under the if clause
    # in Havana.
    if not conf.amqp_rpc_single_reply_queue:
        # Legacy path: dedicated connection + per-call reply consumer.
        conn = ConnectionContext(conf, connection_pool)
        wait_msg = MulticallWaiter(conf, conn, timeout)
        conn.declare_direct_consumer(msg_id, wait_msg)
        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    else:
        # Shared-reply-queue path: lazily create the process-wide ReplyProxy
        # and register this call as a waiter on it.
        with _reply_proxy_create_sem:
            if not connection_pool.reply_proxy:
                connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
        msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
        wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
        with ConnectionContext(conf, connection_pool) as conn:
            conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    return wait_msg
def call(conf, context, topic, msg, timeout, connection_pool):
    """Sends a message on a topic and wait for a response."""
    results = list(multicall(conf, context, topic, msg, timeout,
                             connection_pool))
    # NOTE(vish): return the last result from the multicall
    if results:
        return results[-1]
    return None
def cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a topic without waiting for a response."""
    LOG.debug(_('Making asynchronous cast on %s...'), topic)
    # Stamp the message with a unique id and the caller's context, then
    # fire-and-forget over a pooled connection.
    _add_unique_id(msg)
    pack_context(msg, context)
    payload = rpc_common.serialize_msg(msg)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, payload)
def fanout_cast(conf, context, topic, msg, connection_pool):
    """Broadcast ``msg`` to every consumer of the fanout exchange.

    Fire-and-forget: no response is awaited.
    """
    LOG.debug(_('Making asynchronous fanout cast...'))
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as connection:
        payload = rpc_common.serialize_msg(msg)
        connection.fanout_send(topic, payload)
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
    """Send ``msg`` on ``topic`` directly to the server described by
    ``server_params``, using a dedicated (unpooled) connection."""
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as connection:
        payload = rpc_common.serialize_msg(msg)
        connection.topic_send(topic, payload)
def fanout_cast_to_server(conf, context, server_params, topic, msg,
                          connection_pool):
    """Broadcast ``msg`` on a fanout exchange of one specific server,
    using a dedicated (unpooled) connection."""
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as connection:
        payload = rpc_common.serialize_msg(msg)
        connection.fanout_send(topic, payload)
def notify(conf, context, topic, msg, connection_pool, envelope):
    """Send a notification event on ``topic``.

    When ``envelope`` is true the message is wrapped in the rpc
    serialization envelope before being handed to notify_send().
    """
    LOG.debug(_('Sending %(event_type)s on %(topic)s'),
              dict(event_type=msg.get('event_type'),
                   topic=topic))
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as connection:
        payload = rpc_common.serialize_msg(msg) if envelope else msg
        connection.notify_send(topic, payload)
def cleanup(connection_pool):
    """Drain the given connection pool; a missing/falsy pool is a no-op."""
    if not connection_pool:
        return
    connection_pool.empty()
def get_control_exchange(conf):
    """Return the control exchange name from the configuration object."""
    return getattr(conf, 'control_exchange')
|
tucbill/manila
|
manila/openstack/common/rpc/amqp.py
|
Python
|
apache-2.0
| 25,306
|
from django.contrib import admin
from simple_history.admin import SimpleHistoryAdmin
from .models import DocumentContent, Pessoa, HistoricalPessoa, Document
# Register your models here.
@admin.register(DocumentContent)
class DocumentContentAdmin(SimpleHistoryAdmin):
    """Admin for DocumentContent; SimpleHistoryAdmin adds a history view."""

    # Columns shown on the changelist page.
    list_display = ['content', 'created_at', 'created_by']
# Register your models here.
@admin.register(Pessoa)
class PessoaAdmin(SimpleHistoryAdmin):
    """Admin for Pessoa; SimpleHistoryAdmin adds a history view."""

    # Columns shown on the changelist page.
    list_display = ['conteudo', 'user', 'contador', 'contador2']
@admin.register(HistoricalPessoa)
class HistoricalPessoaAdmin(admin.ModelAdmin):
    """Plain admin for the HistoricalPessoa model.

    Renamed from the misspelled ``HistorialPessoaAdmin``; nothing else in
    this module references the class by name (registration happens via
    the decorator).
    """
    pass
class DocumentContentInline(admin.StackedInline):
    """Stacked inline so DocumentContent rows can be edited on the
    Document admin page (see the Document admin below)."""
    model = DocumentContent
@admin.register(Document)
class DocumentAdmin(admin.ModelAdmin):
    """Admin for Document with inline DocumentContent editing.

    Renamed from ``Document``: the original class shadowed the imported
    ``Document`` model in this module's namespace after definition.
    (Registration still worked because the decorator captured the model
    before the class body executed, but the shadowing was a bug waiting
    to happen for any later code in this module.)
    """
    inlines = [DocumentContentInline]
|
luzfcb/documentos
|
src/core/admin.py
|
Python
|
mpl-2.0
| 757
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Program: Hourly Call Reports
# Description: Reports on # of inbound
# calls from each hour of the day. Then
# averages the hours for each day
# separately.
# Date: 2/2/15
# Author: Jeffrey Zic
import re
import fileinput
import sys
import datetime
from datetime import date, timedelta
import time
import os.path
import subprocess
import getpass
import Call_Detail_Record
class Call_Detail_Directory:

    """A collection of Call_Detail_Records."""

    def __init__(self):
        """Initialize an empty Call_Detail_Directory.

        call_detail_directory holds the Call_Detail_Record objects
        most recently parsed by readCalls().
        """
        self.call_detail_directory = []

    @staticmethod
    def usernamePrompt(inputFunc):
        """Return the username collected by the caller.

        :param inputFunc: the already-collected username string (callers
            pass the result of ``raw_input("Username: ")``)
        :return: username
        :rtype: str
        """
        return inputFunc

    @staticmethod
    def passwordPrompt():
        """Prompt the user for a password without echoing it.

        :return: password
        :rtype: str
        """
        return getpass.getpass("Password: ")

    @staticmethod
    def queryCudaTel(username, password, fname):
        """Query the CudaTel server for Call Detail Records.

        The JSON response is written to ``fname``.

        BUG FIX: the original formatted the script with the undefined
        name ``passwords``, raising NameError on every call.

        :param username: name of the user
        :param password: password for the user
        :param fname: path the server response is saved to
        """
        script = """
        curl -H 'content-type: application/json' '192.168.0.199/gui/cdr/cdr?__auth_user={user}&__auth_pass={password}&sortby=end_timestamp""" + \
            """&sortorder=asc&show_outbound=0&rows=5&between=January+01+2015&between=January+01+2015&page=1' > '{fileName}'
        """
        script = script.format(user=username, password=password,
                               fileName=fname)
        subprocess.call(['sh', '-c', script])

    def getLogin(self, un=None, pwd=None, fname='/log/calls'):
        """Get and verify login information for the CudaTel server.

        Prompts for any credential not supplied, performs a test query
        and retries until the server accepts the credentials.

        BUG FIXES vs. the original:
        * the prompts were default-argument expressions, so the user was
          prompted once at class-definition (import) time and the values
          were frozen for the life of the process;
        * on a rejected login the same wrong credentials were retried in
          an infinite loop; we now re-prompt;
        * the return referenced the undefined name ``passwords``.

        :param un: username; prompted for when None
        :param pwd: password; prompted for when None
        :param fname: file the test query response is written to
        :return: login
        :rtype: [string, string]
        """
        state = False
        while not state:
            username = un if un is not None else \
                self.usernamePrompt(raw_input("Username: "))
            password = pwd if pwd is not None else self.passwordPrompt()
            self.queryCudaTel(username, password, fname)
            with open(fname, 'r') as f:
                line = f.readline().strip()
            # Fail state if user info is incorrect.
            if line == '{"error":"FORBIDDEN"}':
                print("Incorrect username/password.")
                # Force fresh prompts on the retry instead of looping
                # forever with the same rejected credentials.
                un = None
                pwd = None
            else:
                state = True
        return [username, password]

    @staticmethod
    def _parse_cdr_file(f):
        """Parse an open CDR JSON dump into Call_Detail_Record objects.

        In the records the parameters always appear in the same order,
        so a new call starts after the last parameter of the previous
        call ("destination_type") has been seen.

        :param f: open file object positioned at the start of the dump
        :returns: [count, CDR_List] where count is the number of
            "end_timestamp" fields seen (i.e. the number of records)
        :rtype: [int, [Call_Detail_Record]]
        """
        count = 0
        CDR_List = []
        newCall = True
        newCDR = None
        for line in f:
            if newCall:
                newCDR = Call_Detail_Record.Call_Detail_Record()
                newCall = False
            words = line.rstrip('\n').partition(':')
            field = words[0].lstrip(' ').rstrip(' ')
            data = words[-1].rstrip(',').rstrip(' ').lstrip(' ')
            if field == '"end_timestamp"':
                newCDR.end_timestamp = data
                count += 1
            elif field == '"direction"':
                newCDR.direction = data
            elif field == '"destination_name"':
                newCDR.destination_name = data
            elif field == '"hangup_cause"':
                newCDR.hangup_cause = data
            elif field == '"caller_id_name"':
                newCDR.caller_id_name = data
            elif field == '"destination_type"':
                CDR_List.append(newCDR)
                newCall = True
        return [count, CDR_List]

    def get_calls(self, fname, login, page, start_date=None, end_date=None):
        """Get call metadata from the CudaTel server for a date range.

        The server returns at most 10000 records per query; for larger
        ranges increase ``page``.

        BUG FIXES vs. the original:
        * ``start_date``/``end_date`` defaulted to expressions evaluated
          once at import time, so a long-running process used stale
          dates; they are now computed per call;
        * removed the dead ``state``/``start_diff`` locals and the
          ``fname`` overwrite (the output path is hard-coded to
          './log/calls' by the shell script — NOTE(review): ``fname`` is
          therefore unused, kept only for signature compatibility).

        :param fname: unused, kept for backward compatibility
        :param login: [username, password] for the CCS
        :param page: page number of the call metadata to get
        :param start_date: first date to grab calls from (default: 30
            days ago)
        :param end_date: last date to grab calls from (default: today)
        :returns: [count, CDR_List]
        :rtype: [int, [Call_Detail_Record]]
        """
        if start_date is None:
            start_date = date.today() - timedelta(30)
        if end_date is None:
            end_date = date.today()
        username = login[0]
        password = login[1]
        script = """
        curl -H 'content-type: application/json' '192.168.0.199/gui/cdr/cdr?__auth_user={user}&__auth_pass={password}&sortby=end_timestamp""" + \
            """&sortorder=asc&show_outbound=0&rows=500000&between={first_date}&between={last_date}&page={page}' > './log/calls'
        """
        script = script.format(first_date=start_date, last_date=end_date,
                               user=username, password=password, page=page)
        # You shouldn't poll the server more than every 5 minutes; we
        # wait 6 to be sure.
        time.sleep(360)
        subprocess.call(['sh', '-c', script])
        with open('./log/calls', 'r') as f:
            return self._parse_cdr_file(f)

    def readCalls(self):
        """Read call metadata from the './log/calls' dump on disk.

        Stores the parsed records in self.call_detail_directory as well
        as returning them.

        :returns: list of Call_Detail_Record objects
        :rtype: [Call_Detail_Record]
        """
        with open('./log/calls', 'r') as f:
            CDR_List = self._parse_cdr_file(f)[1]
        self.call_detail_directory = CDR_List
        return CDR_List
|
Gixugif/CDRecording
|
Call_Detail_Directory.py
|
Python
|
gpl-3.0
| 7,758
|
"""
MagPy
Auxiliary input filter - WIC/WIK
Written by Roman Leonhardt June 2012
- contains test and read function, toDo: write function
"""
from magpy.stream import *
def isSFDMI(filename):
    """Check whether *filename* looks like spanish DMI format data.

    Time is in seconds relative to one day; a data line has 6 columns
    and starts with a number.

    BUG FIXES vs. the original:
    * ``len(temp) >= 9`` guarded an access to ``temp[9]``, which raises
      IndexError on a line of exactly 9 characters — the guard is now
      ``> 9``;
    * the file handle returned by open() was never closed.

    :param filename: path of the candidate file
    :returns: True if the first line matches the expected layout
    """
    try:
        with open(filename, 'rt') as fh:
            temp = fh.readline()
    except Exception:
        return False
    if len(temp) > 9:
        if temp[9] in ['o', '+', '-']:  # Prevent errors with GFZ kp
            return False
    sp = temp.split()
    if not len(sp) == 6:
        return False
    if not isNumber(sp[0]):
        return False
    #logging.info(" Found SFS file")
    return True
def isSFGSM(filename):
    """Check whether *filename* looks like spanish GSM format data.

    Time is in seconds relative to one day; a data line has 2 columns,
    the second being a total-field value sanity-checked to lie between
    20000 and 80000 (nT).

    BUG FIX: the file handle was opened but never closed; a ``with``
    block now guarantees closure.

    :param filename: path of the candidate file
    :returns: True if the first line matches the expected layout
    """
    try:
        with open(filename, 'rt') as fh:
            temp = fh.readline()
    except Exception:
        return False
    sp = temp.split()
    if len(sp) != 2:
        return False
    if not isNumber(sp[0]):
        return False
    try:
        if not 20000 < float(sp[1]) < 80000:
            return False
    except Exception:
        return False
    return True
def readSFDMI(filename, headonly=False, **kwargs):
    """
    Reading SF DMI format data.
    Looks like:
    0.03 99.11 -29.76 26.14 22.05 30.31
    5.04 98.76 -29.78 26.20 22.04 30.31
    10.01 98.85 -29.76 26.04 22.04 30.31
    15.15 98.63 -29.79 26.20 22.04 30.31
    20.12 98.85 -29.78 26.11 22.04 30.31
    first column are seconds of day; remaining columns are x, y, z
    (nT) and two temperatures (deg C).
    Optional kwargs 'starttime'/'endtime' restrict reading to files
    whose date (taken from the filename) falls inside the range.
    Returns a DataStream (empty when the filename date cannot be
    parsed or lies outside the requested range).
    """
    starttime = kwargs.get('starttime')
    endtime = kwargs.get('endtime')
    getfile = True
    fh = open(filename, 'rt')
    # read file and split text into channels
    stream = DataStream()
    if stream.header is None:
        headers = {}
    else:
        headers = stream.header
    data = []
    key = None
    # get day from filename (platform independent); filenames are
    # expected to be DDMMYYYY.<ext>
    splitpath = os.path.split(filename)
    daystring = splitpath[1].split('.')
    try:
        day = datetime.strftime(datetime.strptime(daystring[0], "%d%m%Y"),"%Y-%m-%d")
    except:
        logging.warning("Wrong dateformat in Filename %s" % daystring[0])
        fh.close()
        return DataStream([], headers)
    # Select only files within eventually defined time range
    if starttime:
        if not datetime.strptime(day,'%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime),'%Y-%m-%d'),'%Y-%m-%d'):
            getfile = False
    if endtime:
        if not datetime.strptime(day,'%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime),'%Y-%m-%d'),'%Y-%m-%d'):
            getfile = False
    if getfile:
        for line in fh:
            if line.isspace():
                # blank line
                continue
            else:
                row = LineStruct()
                elem = line.split()
                # only 6-column lines are data rows; anything else is
                # silently skipped
                if (len(elem) == 6):
                    # time = day + seconds-of-day as fraction of a day
                    row.time=date2num(datetime.strptime(day,"%Y-%m-%d"))+ float(elem[0])/86400
                    xval = float(elem[1])
                    yval = float(elem[2])
                    zval = float(elem[3])
                    row.x = xval
                    row.y = yval
                    row.z = zval
                    row.t1 = float(elem[4])
                    row.t2 = float(elem[5])
                    stream.add(row)
        stream.header['col-x'] = 'x'
        stream.header['col-y'] = 'y'
        stream.header['col-z'] = 'z'
        stream.header['col-t1'] = 'T1'
        stream.header['col-t2'] = 'T2'
        stream.header['unit-col-x'] = 'nT'
        stream.header['unit-col-y'] = 'nT'
        stream.header['unit-col-z'] = 'nT'
        stream.header['unit-col-t1'] = 'deg C'
        stream.header['unit-col-t2'] = 'deg C'
    else:
        # file outside the requested range: return an empty stream but
        # keep whatever header the DataStream carried
        headers = stream.header
        stream =[]
    fh.close()
    return DataStream(stream, headers)
def readSFGSM(filename, headonly=False, **kwargs):
    """
    Reading SF GSM format data.
    Looks like:
    22 42982.35
    52 42982.43
    82 42982.47
    first column are seconds of day, second is the scalar field f (nT).
    Optional kwargs 'starttime'/'endtime' restrict reading to files
    whose date (taken from the DDMMYYYY filename) falls in range.

    BUG FIXES vs. the original:
    * on a filename date-parse failure the file handle was leaked and a
      bare list was returned; we now close the handle and return an
      empty DataStream, consistent with readSFDMI.
    """
    starttime = kwargs.get('starttime')
    endtime = kwargs.get('endtime')
    getfile = True
    fh = open(filename, 'rt')
    # read file and split text into channels
    stream = DataStream()
    if stream.header is None:
        headers = {}
    else:
        headers = stream.header
    data = []
    key = None
    # get day from filename (platform independent)
    splitpath = os.path.split(filename)
    daystring = splitpath[1].split('.')
    try:
        day = datetime.strftime(datetime.strptime(daystring[0], "%d%m%Y"),"%Y-%m-%d")
    except:
        logging.warning("Wrong dateformat in Filename %s" % daystring[0])
        fh.close()
        return DataStream([], headers)
    # Select only files within eventually defined time range
    if starttime:
        if not datetime.strptime(day,'%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime),'%Y-%m-%d'),'%Y-%m-%d'):
            getfile = False
    if endtime:
        if not datetime.strptime(day,'%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime),'%Y-%m-%d'),'%Y-%m-%d'):
            getfile = False
    if getfile:
        for line in fh:
            if line.isspace():
                # blank line
                continue
            else:
                row = LineStruct()
                elem = line.split()
                # only 2-column lines are data rows
                if (len(elem) == 2):
                    # time = day + seconds-of-day as fraction of a day
                    row.time=date2num(datetime.strptime(day,"%Y-%m-%d"))+ float(elem[0])/86400
                    row.f = float(elem[1])
                    stream.add(row)
        stream.header['col-f'] = 'f'
        stream.header['unit-col-f'] = 'nT'
    else:
        # file outside the requested range: return an empty stream
        headers = stream.header
        stream =[]
    fh.close()
    return DataStream(stream, headers)
|
hschovanec-usgs/magpy
|
magpy/lib/format_sfs.py
|
Python
|
gpl-3.0
| 5,996
|
from __future__ import division
import numpy as np
from tensorprob import Model, Parameter, Normal, Exponential, Mix2
def test_mix2_fit():
    """Fit a two-component Mix2 (Normal + Exponential) model to data
    generated from known parameters and check the fitted values."""
    with Model() as model:
        mu = Parameter()
        sigma = Parameter(lower=1)
        a = Parameter(lower=0)
        f = Parameter(lower=0, upper=1)
        # Component bounds carve blinded windows out of the observables:
        # (21, 22) for the Normal, (8, 10) for the Exponential.
        X1 = Normal(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])
        X2 = Exponential(a, bounds=[(-np.inf, 8), (10, np.inf)])
        X12 = Mix2(f, X1, X2, bounds=[(6, 17), (18, 36)])
    model.observed(X12)
    # Starting values for the minimizer.
    model.initialize({
        mu: 23,
        sigma: 1.2,
        a: 0.2,
        f: 0.3,
    })
    # Generate some data to fit
    np.random.seed(42)
    # Exponential with scale 10 => rate a = 0.1.
    exp_data = np.random.exponential(10, 200000)
    exp_data = exp_data[(exp_data < 8) | (10 < exp_data)]
    # Include the data blinded by the Mix2 bounds as we use the len(norm_data)
    norm_data = np.random.normal(19, 2, 100000)
    norm_data = norm_data[
        ((6 < norm_data) & (norm_data < 17)) |
        ((18 < norm_data) & (norm_data < 21)) |
        ((22 < norm_data) & (norm_data < 36))
    ]
    data = np.concatenate([exp_data, norm_data])
    # Keep only the Mix2 observable windows.
    data = data[((6 < data) & (data < 17)) | ((18 < data) & (data < 36))]
    result = model.fit(data)
    assert result.success
    # Fitted parameters should recover the generating values.
    assert abs(model.state[mu] - 19) < 5e-3
    assert abs(model.state[sigma] - 2) < 5e-3
    assert abs(model.state[a] - 0.1) < 5e-4
    assert abs(model.state[f] - (len(norm_data)/len(data))) < 5e-4
def test_mix2_fit_with_mix2_input():
    """Fit a nested Mix2 model — Mix2(f_2, Mix2(f_1, Normal, Exponential),
    Exponential) — and check the fitted parameters."""
    with Model() as model:
        mu = Parameter()
        sigma = Parameter(lower=1, upper=4)
        a = Parameter(lower=0.06)
        b = Parameter(lower=0)
        f_1 = Parameter(lower=0, upper=1)
        f_2 = Parameter(lower=0, upper=1)
        # Component bounds carve blinded windows out of the observables.
        X1 = Normal(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])
        X2 = Exponential(a, bounds=[(-np.inf, 8), (10, 27), (31, np.inf)])
        X12 = Mix2(f_1, X1, X2, bounds=[(6, 17), (18, 36)])
        X3 = Exponential(b)
        # Outer mixture combines the inner Mix2 with a second Exponential.
        X123 = Mix2(f_2, X12, X3, bounds=[(6, 17), (18, 36)])
    model.observed(X123)
    # Starting values for the minimizer.
    model.initialize({
        mu: 23,
        sigma: 1.2,
        a: 0.2,
        b: 0.04,
        f_1: 0.3,
        f_2: 0.4
    })
    # Generate some data to fit
    np.random.seed(42)
    # Scale 10 => rate a = 0.1; filters mirror X2's blinded windows.
    exp_1_data = np.random.exponential(10, 200000)
    exp_1_data = exp_1_data[
        (6 < exp_1_data) &
        ((exp_1_data < 8) | (10 < exp_1_data)) &
        ((exp_1_data < 17) | (18 < exp_1_data)) &
        ((exp_1_data < 27) | (31 < exp_1_data)) &
        (exp_1_data < 36)
    ]
    # Scale 20 => rate b = 0.05.
    exp_2_data = np.random.exponential(20, 200000)
    exp_2_data = exp_2_data[
        (6 < exp_2_data) &
        ((exp_2_data < 17) | (18 < exp_2_data)) &
        (exp_2_data < 36)
    ]
    # Include the data blinded by the Mix2 bounds as we use the len(norm_data)
    norm_data = np.random.normal(19, 2, 100000)
    norm_data = norm_data[
        ((6 < norm_data) & (norm_data < 17)) |
        ((18 < norm_data) & (norm_data < 21)) |
        ((22 < norm_data) & (norm_data < 36))
    ]
    data = np.concatenate([exp_1_data, exp_2_data, norm_data])
    # Keep only the outer Mix2 observable windows.
    data = data[((6 < data) & (data < 17)) | ((18 < data) & (data < 36))]
    result = model.fit(data)
    assert result.success
    # Fitted parameters should recover the generating values; mixture
    # fractions follow from the relative sample sizes.
    assert abs(model.state[mu] - 19) < 3e-2
    assert abs(model.state[sigma] - 2) < 1e-3
    assert abs(model.state[a] - 0.1) < 1e-3
    assert abs(model.state[b] - 0.05) < 3e-4
    assert abs(model.state[f_1] - (len(norm_data)/(len(exp_1_data)+len(norm_data)))) < 5e-3
    assert abs(model.state[f_2] - ((len(exp_1_data)+len(norm_data))/len(data))) < 5e-4
|
ibab/tensorprob
|
tests/distributions/test_combinators.py
|
Python
|
mit
| 3,595
|
# yacon.models.hierarchy.py
import re, logging
from django.db import models
from django.template.defaultfilters import slugify
from treebeard.mp_tree import MP_Node
from yacon.models.common import Language, TimeTrackedModel, NodePermissionTypes
from yacon.models.pages import Page, MetaPage
from yacon.definitions import SLUG_LENGTH
logger = logging.getLogger(__name__)

# Valid slugs: zero or more word characters or hyphens (enforced by
# Node.validate_slug).  BUG FIX: the pattern is now a raw string — the
# original non-raw '\w' is an invalid escape sequence (DeprecationWarning
# since Python 3.6, scheduled to become an error).
MATCH_WORD = re.compile(r'^[-\w]*$')
# ============================================================================
# Exceptions
# ============================================================================
class BadSlug(Exception):
    """Raised when a slug is too long, contains characters outside
    [-0-9a-zA-Z_], or collides with an existing slug at the same level
    (see Node.validate_slug)."""
    pass
# ============================================================================
# Site and Page Hierarchy Management
# ============================================================================
class BaseNode(MP_Node, TimeTrackedModel):
    """Abstract base for tree nodes: combines treebeard's materialized
    path tree (MP_Node) with TimeTrackedModel's bookkeeping fields."""

    class Meta:
        abstract = True
        app_label = 'yacon'

    def has_children(self):
        """Return True when this node has at least one child."""
        return bool(self.get_children_count())
class Node(BaseNode):
    """A Site object represents a collection of page hiearchies and menus that
    are represented as a series of trees. The Node object is a single node in
    one of those trees.

    Each node is identified by a slug with a series of slugs forming a path to
    an individual Node. Slugs can be translated into multiple languages, thus
    allowing for multiple paths identifying a single unique Node. A path
    should never point to two different Nodes in a single Site but as this is
    computationally expensive to enforce it is left to the developer to
    ensure.

    It is highly suggested that the factory methods be used to construct new
    Node objects. There are two types of factory methods: those for creating
    root objects are on the Site class, those for creating children are on the
    Node class itself. Node objects are based on django-treebeard's MP_Node
    object and any of those methods are available, but the factory methods
    attempt to abstract some of the associated complexity and enforce rules
    around formatting of slugs, etc.
    """
    site = models.ForeignKey('yacon.Site')
    permission = models.CharField(max_length=3,
        choices=list(NodePermissionTypes), default=NodePermissionTypes.INHERIT)

    class Meta:
        app_label = 'yacon'

    def __unicode__(self):
        return 'Node(%s:%s)' % (self.name, self.slug)

    # -----------------------------------------------------------------------
    # Utility Methods
    def validate_slug(self, slug, auto_fix=False):
        """Raises a BadSlug exception if "slug" is not valid.

        When auto_fix is True no exception is raised; instead the slug is
        truncated/slugified/suffixed until it is unique among sibling
        nodes and pages, and the corrected slug is returned.
        """
        if len(slug) > SLUG_LENGTH:
            if not auto_fix:
                raise BadSlug('Max slug length is %d characters' % SLUG_LENGTH)

            # strip the slug to max length
            slug = slug[:SLUG_LENGTH]

        if not MATCH_WORD.search(slug):
            if not auto_fix:
                raise BadSlug('Slug must be of form [0-9a-zA-Z_-]*')

            # slugify the slug
            slug = slugify(slug)

        # find the slugs of all the children of this node as well as all the
        # pages in the node
        existing = NodeTranslation.objects.filter(
            node__in=self.get_children()).values_list('slug', flat=True)
        if slug in existing and not auto_fix:
            raise BadSlug('A child node already has the given slug')

        existing = list(existing)
        metapages = MetaPage.objects.filter(node=self)
        slugs = Page.objects.filter(metapage__in=metapages).values_list(
            'slug', flat=True)
        existing.extend(slugs)

        # pages of aliased MetaPages count towards collisions too
        aliased_metapages = MetaPage.objects.filter(alias__in=metapages)
        slugs = Page.objects.filter(metapage__in=aliased_metapages).values_list(
            'slug', flat=True)
        existing.extend(slugs)

        if slug in existing and not auto_fix:
            raise BadSlug('A page in this node already has the given slug.')

        if slug not in existing:
            return slug

        # attempt to auto_fix the slug: append -2, -3, ... until unique
        suffix = ''
        i = 2
        while True:
            new_slug = slug + suffix
            if new_slug not in existing:
                return new_slug

            suffix = '-%d' % i
            i += 1

    # -----------------------------------------------------------------------
    # Factory/Fetch Methods
    def create_child(self, name, slug, translations=None,
            permission=NodePermissionTypes.INHERIT):
        """Creates a Node object as a child of this Node. Name and slug for
        the default language are passed in. An optional dictionary of
        Language objects mapped to name/slug tuples can be used to
        populate other translations.

        :param name: name of Node in default language
        :param slug: slug for Node in default language
        :param translations: dictionary mapping language codes to tuples of
            name/slug pairs to be used to populate translations. Example:
            {Language('en-GB'):('Colour','colour'), Language('fr'):('Couleur',
            'couleur'), }
        :returns: newly created child Node
        :raises: BadSlug, if the slug contains any non-alpha-numeric
            character, exceeds 25 characters in length, or exists at this
            level already
        """
        # BUG FIX: `translations` previously defaulted to a shared mutable
        # dict which this method mutates below, leaking default-language
        # entries between unrelated calls.
        if translations is None:
            translations = {}

        # NOTE(review): a caller-supplied dict is still mutated here,
        # matching the original behaviour.
        translations[self.site.default_language] = (name, slug)

        # check for bad slugs
        for key, value in translations.items():
            (name, slug) = value
            self.validate_slug(slug)

        # no bad slugs, create the child node
        child = self.add_child(site=self.site, permission=permission)

        # add translations to child
        for key, value in translations.items():
            (name, slug) = value
            tx = NodeTranslation(node=child, language=key, name=name, slug=slug)
            tx.save()

        return child

    # -----------------------------------------------------------------------
    # Getter Methods
    @property
    def name(self):
        """Returns the name for this Node in the Site's default translation"""
        return self.get_name()

    @property
    def slug(self):
        """Returns the slug for this Node in the Site's default translation"""
        return self.get_slug()

    @property
    def has_missing_translations(self):
        """Returns True if there are languages in the site that there are no
        translations for in this node."""
        return self.nodetranslation_set.count() != self.site.language_count()

    @property
    def default_metapage(self):
        """Returns the MetaPage flagged as this Node's default, or None."""
        mps = self.metapage_set.filter(is_node_default=True)
        if len(mps) == 0:
            return None

        return mps[0]

    def get_default_page(self, language):
        """If this Node has an associated default MetaPage item return a
        translated Page for it in the given Language.

        :param language: Language object to use for getting the Page, can be
            None to get the default language
        :returns: Page representing the default page for this Node, or None if
            there isn't one, or isn't one in the given language
        """
        if self.default_metapage is None:
            return None

        if language is None:
            # no language passed in, use default language
            return self.default_metapage.get_default_translation()

        return self.default_metapage.get_translation(language)

    def get_name(self, language=None):
        """Returns the name for this Node in the given Language. If no
        Language is passed in then the Site's default Language is used.

        :param langauge: optional parameter specifying the Language to return
            the Node's name in. If not given the Site's default Language is
            used
        :returns: string containing desired Node name
        """
        if language is None:
            language = self.site.default_language

        try:
            tx = NodeTranslation.objects.get(node=self, language=language)
            return tx.name
        except NodeTranslation.DoesNotExist:
            return None

    def get_slug(self, language=None):
        """Returns the slug for this Node in the given Language. If no
        Language is passed in then the Site's default language is used.

        :param langauge: optional parameter specifying the Language to return
            the Node's slug in. If not given the Site's default Language is
            used
        :returns: string containing desired Node slug
        """
        if language is None:
            language = self.site.default_language

        try:
            tx = NodeTranslation.objects.get(node=self, language=language)
            return tx.slug
        except NodeTranslation.DoesNotExist:
            return None

    def has_slug(self, find_slug):
        """Returns true if one of the NodeTranslation for this Node contains
        the given slug.

        :param find_slug: slug to search for
        :returns: True if find_slug matches one of the slug translations
        """
        txs = NodeTranslation.objects.filter(node=self, slug=find_slug)
        return len(txs) > 0

    def language_of_slug(self, find_slug):
        """Returns the Language object in the NodeTranslation object that
        contains the given slug

        :param find_slug: slug to find the Language for
        :returns: Language object
        """
        tx = NodeTranslation.objects.get(node=self, slug=find_slug)
        return tx.language

    @property
    def effective_permission(self):
        """Returns the effective permission value of this Node, one of either
        the node's permission attribute, or if the attribute is INHERIT, it
        returns the inherited value.

        :returns: NodePermissionType Enum value
        """
        if self.permission != NodePermissionTypes.INHERIT:
            return self.permission

        # our permission value is inherit, need to determine what we're
        # inheriting; walk up the tree until an explicit value is found,
        # defaulting to PUBLIC at the root
        if self.is_root():
            return NodePermissionTypes.PUBLIC

        node = self.get_parent()
        while node:
            if node.permission != NodePermissionTypes.INHERIT:
                return node.permission

            if node.is_root():
                return NodePermissionTypes.PUBLIC

            node = node.get_parent()

    @property
    def permission_string(self):
        """Human readable form of this Node's own permission value."""
        return NodePermissionTypes.get_value(self.permission)

    @property
    def effective_permission_string(self):
        """Human readable form of this Node's effective permission."""
        return NodePermissionTypes.get_value(self.effective_permission)

    # -----------------------------------------------------------------------
    # Tree Walking Methods
    def _walk_tree_to_string(self, node, output, indent):
        """Breadth first walk of tree returning node as string

        :param node -- node to walk
        :param output -- list of lines containing a string from each node
            visited
        :param indent -- how much to indent the displayed node
        """
        output.append('%s%s (%s)' % (3*indent*' ', node.name, node.slug))
        for child in node.get_children():
            self._walk_tree_to_string(child, output, indent+1)

    def tree_to_string(self):
        """Returns a string representation of the sub-tree using the 'self'
        node as root"""
        output = []
        self._walk_tree_to_string(self, output, 0)
        return "\n".join(output)

    def node_to_path(self, language=None):
        """Returns a path string for this node

        :param language: optional parameter specifying the Language to express
            the path in. If none specified then Site object's default
            Language is used
        """
        if self.is_root():
            return '/'

        nodes = []
        for node in self.get_ancestors():
            nodes.append(node)

        # get_ancestors() will include root which we don't use in paths and
        # won't include us, so remove the first and add this node at the end
        nodes.pop(0)
        nodes.append(self)

        path = '/'
        for node in nodes:
            slug = node.get_slug(language)
            if slug is None:
                # no translation for this language anywhere along the path
                return None

            path += slug + '/'

        return path
class NodeTranslation(TimeTrackedModel):
    """Stores translations of Node names and slugs according to Language
    object.
    """
    language = models.ForeignKey(Language, related_name='+')
    # human readable name of the node in this language
    name = models.CharField(max_length=30)
    # URL path component for the node in this language
    slug = models.CharField(max_length=SLUG_LENGTH)
    node = models.ForeignKey(Node)

    class Meta:
        app_label = 'yacon'

    def get_path(self):
        """Return the full node path expressed in this translation's
        language (see Node.node_to_path); None when any ancestor lacks a
        slug in this language."""
        return self.node.node_to_path(self.language)
# ============================================================================
# Menu Management
# ============================================================================
class MenuItem(BaseNode):
    """A MenuItem is a node in a hierarchy that is displayed to the user,
    typically for navigation, that is independent of the document hierarchy.
    A MenuItem can be "clickable" and associated with a Metapage, or not
    "clickable" and simply a placeholder in the hierarchy.

    A Metapage is only allowed to be mapped to a single MenuItem across all
    menus. This allows for easy determination from the URL as to where in the
    menu the user is. If you want the same content to show up in two
    MenuItems then create a Metapage alias.
    """
    metapage = models.OneToOneField('yacon.MetaPage', blank=True, null=True,
        unique=True)
    link = models.TextField(blank=True)
    menu = models.ForeignKey('yacon.Menu')
    requires_login = models.BooleanField(default=False)
    requires_admin = models.BooleanField(default=False)

    class Meta:
        app_label = 'yacon'

    def __unicode__(self):
        return 'MenuItem(id=%s)' % self.id

    def create_child(self, metapage=None, link='', translations=None,
            requires_login=False, requires_admin=False):
        """Creates a MenuItem as a child of this one, in the same Menu.

        :param metapage: optional MetaPage this item points at
        :param link: optional link text
        :param translations: dictionary mapping Language objects to item
            names
        :returns: newly created child MenuItem
        """
        # BUG FIX (anti-pattern): `translations` previously defaulted to a
        # shared mutable dict; use a None sentinel instead.
        if translations is None:
            translations = {}

        child = self.add_child(metapage=metapage, link=link, menu=self.menu,
            requires_login=requires_login, requires_admin=requires_admin)
        for key, value in translations.items():
            MenuItemTranslation.objects.create(language=key, name=value,
                menuitem=child)

        return child

    # --------------------
    # Translation Access Methods
    def get_translation(self, language):
        """Returns a MenuItemTranslation object for this MenuItem in the given
        language.

        :param language: Language for the corresponding translation
        :returns: MenuItemTranslation object or None
        """
        try:
            return MenuItemTranslation.objects.get(menuitem=self,
                language=language)
        except MenuItemTranslation.DoesNotExist:
            return None

    def get_translations(self, ignore_default=False):
        """Returns a list of MenuItemTranslation objects representing the
        for this MenuItem.

        :param ignore_default: if True the default language will not be
            returned in the list; defaults to False
        :returns: list of MenuItemTranslation objects
        """
        try:
            txs = MenuItemTranslation.objects.filter(menuitem=self)
            if ignore_default:
                txs = txs.exclude(language=self.menu.site.default_language)

            return txs
        except MenuItemTranslation.DoesNotExist:
            return []

    def get_default_translation(self):
        """Returns the MenuItemTranslation object for the site default language

        :returns: MenuItemTranslation object or None
        """
        try:
            return MenuItemTranslation.objects.get(menuitem=self,
                language=self.menu.site.default_language)
        except MenuItemTranslation.DoesNotExist:
            return None

    @property
    def has_missing_translations(self):
        """Returns True if there are languages in the site that there are no
        translations for in this menu."""
        txs = self.menuitemtranslation_set.count()
        langs = self.menu.site.language_count()
        return txs != langs

    @property
    def can_move_out(self):
        """Returns True if this item is not at root depth."""
        depth = self.get_depth()
        return depth != 1

    @property
    def can_move_up(self):
        """Returns True if this item is not the first sibling."""
        first = self.get_first_sibling()
        return first != self

    @property
    def can_move_down(self):
        """Returns True if this item is not the last sibling."""
        last = self.get_last_sibling()
        return last != self
class Menu(TimeTrackedModel):
    """Represents a menu associated with a site. The name in this menu is for
    configuration purposes only, the MenuItems contained within it (and their
    translations) are what are displayed to the user.
    """
    name = models.CharField(max_length=30)
    site = models.ForeignKey('yacon.Site')

    @property
    def first_level(self):
        """Returns the MenuItems at the root (depth 1) of this menu."""
        return MenuItem.objects.filter(menu=self, depth=1)

    def create_child(self, metapage=None, link='', translations=None,
            requires_login=False, requires_admin=False):
        """Creates a new root-level MenuItem in this menu.

        :param metapage: optional MetaPage the item points at
        :param link: optional link text
        :param translations: dictionary mapping Language objects to item
            names
        :returns: newly created MenuItem
        """
        # BUG FIX (anti-pattern): `translations` previously defaulted to a
        # shared mutable dict; use a None sentinel instead.
        if translations is None:
            translations = {}

        item = MenuItem.add_root(metapage=metapage, link=link, menu=self,
            requires_login=requires_login, requires_admin=requires_admin)
        for key, value in translations.items():
            MenuItemTranslation.objects.create(language=key, name=value,
                menuitem=item)

        return item
class MenuItemTranslation(TimeTrackedModel):
    """Stores translations of MenuItem names according to Language object.
    """
    language = models.ForeignKey(Language, related_name='+')
    # display name of the menu item in this language
    name = models.CharField(max_length=30)
    menuitem = models.ForeignKey(MenuItem)

    class Meta:
        app_label = 'yacon'

    def __unicode__(self):
        return 'MenuItemTranslation: %s (%s)' % (self.name, self.language.code)

    @property
    def page(self):
        """Returns the Page that corresponds to this language for the MetaPage
        that this item's menuitem points to. Essentially, where this item
        points.

        The result is memoized on the instance after the first lookup.
        NOTE(review): assumes self.menuitem.metapage is not None (the
        field is nullable) — an AttributeError results otherwise; confirm
        callers only use this on clickable items.
        """
        if hasattr(self, '_cached_page'):
            return self._cached_page

        self._cached_page = self.menuitem.metapage.get_translation(
            self.language)
        return self._cached_page
|
cltrudeau/django-yacon
|
yacon/models/hierarchy.py
|
Python
|
mit
| 18,597
|
#!/Users/tony/Projects/zooplankton/repositories/python/gizehmoviepy/bin/python
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <esr@thyrsus.com>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver(object):
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
Set verbosity flag from top of stack.
"""
self.verbose = int(self.do_pop())
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack = [item] + self.stack
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
top = self.stack[0]
self.stack = self.stack[1:]
return top
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
if hasattr(self, 'format'): # If it's an image, do a real copy
dup = self.stack[0].copy()
else:
dup = self.stack[0]
self.stack = [dup] + self.stack
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1> [<image:pic2> [<image:pic3> [<image:pic4>]]]
Merge top-of stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
self.dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower> <image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
def do_draft(self):
"""usage: draft <string:mode> <int:xsize> <int:ysize>
Configure the loader for a given mode and size.
"""
mode = self.do_pop()
xsize = int(self.do_pop())
ysize = int(self.do_pop())
self.push(self.draft(mode, (xsize, ysize)))
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
from PIL import ImageFilter
filter = eval("ImageFilter." + self.do_pop().upper())
image = self.do_pop()
self.push(image.filter(filter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
"""usage: extrema
Push minimum and maximum pixel values of the top image.
"""
extrema = self.do_pop().extrema()
self.push(extrema[1])
self.push(extrema[0])
def do_offset(self):
"""usage: offset <int:xoffset> <int:yoffset> <image:pic1>
Offset the pixels in the top image.
"""
xoff = int(self.do_pop())
yoff = int(self.do_pop())
image = self.do_pop()
self.push(image.offset(xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset> <image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
transpose = self.do_pop().upper()
image = self.do_pop()
self.push(image.transpose(transpose))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
from PIL import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
from PIL import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
def do_color(self):
"""usage: color <image:pic1>
Enhance color in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Color(image)
self.push(enhancer.enhance(factor))
def do_contrast(self):
"""usage: contrast <image:pic1>
Enhance contrast in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Contrast(image)
self.push(enhancer.enhance(factor))
def do_brightness(self):
"""usage: brightness <image:pic1>
Enhance brightness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Brightness(image)
self.push(enhancer.enhance(factor))
def do_sharpness(self):
"""usage: sharpness <image:pic1>
Enhance sharpness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Sharpness(image)
self.push(enhancer.enhance(factor))
# The interpreter loop
def execute(self, list):
"Interpret a list of PILDriver commands."
list.reverse()
while len(list) > 0:
self.push(list[0])
list = list[1:]
if self.verbose:
print("Stack: " + repr(self.stack))
top = self.top()
if not isinstance(top, str):
continue
funcname = "do_" + top
if not hasattr(self, funcname):
continue
else:
self.do_pop()
func = getattr(self, funcname)
func()
if __name__ == '__main__':
    import sys

    # Command-line tokens, if any, form the initial program; otherwise
    # drop into an interactive read-eval loop.
    driver = PILDriver()
    args = sys.argv[1:]
    if args:
        driver.execute(args)
    else:
        print("PILDriver says hello.")
        while True:
            try:
                if sys.version_info[0] >= 3:
                    line = input('pildriver> ')
                else:
                    line = raw_input('pildriver> ')
            except EOFError:
                print("\nPILDriver says goodbye.")
                break
            driver.execute(line.split())
            print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
|
Ibuprofen/gizehmoviepy
|
bin/pildriver.py
|
Python
|
mit
| 15,563
|
# -*- coding: utf-8 -*-
# [HARPIA PROJECT]
#
#
# S2i - Intelligent Industrial Systems
# DAS - Automation and Systems Department
# UFSC - Federal University of Santa Catarina
# Copyright: 2006 - 2007 Luis Carlos Dill Junges (lcdjunges@yahoo.com.br), Clovis Peruchi Scotti (scotti@ieee.org),
# Guilherme Augusto Rutzen (rutzen@das.ufsc.br), Mathias Erdtmann (erdtmann@gmail.com) and S2i (www.s2i.das.ufsc.br)
# 2007 - 2009 Clovis Peruchi Scotti (scotti@ieee.org), S2i (www.s2i.das.ufsc.br)
#
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further information, check the COPYING file distributed with this software.
#
#----------------------------------------------------------------------
from harpia.GladeWindow import GladeWindow
from harpia.amara import binderytools as bt
import gtk
from harpia.s2icommonproperties import S2iCommonProperties
#i18n
import os
import gettext
# i18n setup: message catalogs live under $HARPIA_DATA_DIR/po.
APP='harpia'
DIR=os.environ['HARPIA_DATA_DIR']+'po'
# `_` is the plain gettext.gettext; textdomain(APP) below makes APP the
# default domain it translates from.
_ = gettext.gettext
gettext.bindtextdomain(APP, DIR)
gettext.textdomain(APP)
#----------------------------------------------------------------------
class Properties( GladeWindow, S2iCommonProperties ):
    """Property-editing dialog for the "rotate" block.

    Loads the glade/rotate.glade UI, fills its widgets from the block's
    properties XML, and writes edited values back when the user confirms.
    """

    #----------------------------------------------------------------------

    def __init__( self, PropertiesXML, S2iBlockProperties):
        # PropertiesXML: amara binding of the block's <properties> XML.
        # S2iBlockProperties: accessor for the block's XML and colors.
        self.m_sDataDir = os.environ['HARPIA_DATA_DIR']
        filename = self.m_sDataDir+'glade/rotate.glade'
        self.m_oPropertiesXML = PropertiesXML
        self.m_oS2iBlockProperties = S2iBlockProperties
        # Widget names fetched from the glade file into self.widgets.
        widget_list = [
            'Properties',
            'isAtCenter',
            'isAtPoint',
            'isScalling',
            'isFilling',
            'xC',
            'yC',
            'propBackgroundColor',
            'propBorderColor',
            'propHelpView',
            'prop_confirm'
            ]
        # Signal handler names wired up by GladeWindow.
        handlers = [
            'on_prop_cancel_clicked',
            'on_prop_confirm_clicked',
            'on_propBackColorButton_clicked',
            'on_propBorderColorButton_clicked'
            ]
        top_window = 'Properties'
        GladeWindow.__init__(self, filename, top_window, widget_list, handlers)
        self.widgets['Properties'].set_icon_from_file(self.m_sDataDir+"images/harpia_ave.png")
        #load properties values
        # Each <property> element is matched by name; "true"/"false"
        # strings toggle the corresponding check/radio buttons.
        for Property in self.m_oPropertiesXML.properties.block.property:
            if Property.name == "xC":
                self.widgets['xC'].set_value( float(Property.value) );
            if Property.name == "yC":
                self.widgets['yC'].set_value( float(Property.value) );
            if Property.name == "isFilling":
                if Property.value == "true":
                    self.widgets['isFilling'].set_active( True );
                else:
                    self.widgets['isFilling'].set_active( False );
            if Property.name == "isScalling":
                if Property.value == "true":
                    self.widgets['isScalling'].set_active( True );
                else:
                    self.widgets['isScalling'].set_active( False );
            if Property.name == "isCenter":
                if Property.value == "true":
                    self.widgets['isAtCenter'].set_active( True );
                else:
                    self.widgets['isAtPoint'].set_active( True );
        #load border color
        # Colors arrive as 8-bit channels; * 257 scales 0-255 to the
        # 16-bit 0-65535 range gtk.gdk.Color expects.
        self.m_oBorderColor = self.m_oS2iBlockProperties.GetBorderColor()
        t_nBorderRed = self.m_oBorderColor[0] * 257
        t_nBorderGreen = self.m_oBorderColor[1] * 257
        t_nBorderBlue = self.m_oBorderColor[2] * 257
        t_oBorderColor = gtk.gdk.Color(red=t_nBorderRed,green=t_nBorderGreen,blue=t_nBorderBlue)
        self.widgets['propBorderColor'].modify_bg(gtk.STATE_NORMAL,t_oBorderColor)
        #load block color
        self.m_oBackColor = self.m_oS2iBlockProperties.GetBackColor()
        t_nBackRed = self.m_oBackColor[0] * 257
        t_nBackGreen = self.m_oBackColor[1] * 257
        t_nBackBlue = self.m_oBackColor[2] * 257
        t_oBackColor = gtk.gdk.Color(red=t_nBackRed,green=t_nBackGreen,blue=t_nBackBlue)
        self.widgets['propBackgroundColor'].modify_bg(gtk.STATE_NORMAL,t_oBackColor)
        #load help text
        t_oS2iHelp = bt.bind_file(self.m_sDataDir+"help/rotate"+ _("_en.help"))
        t_oTextBuffer = gtk.TextBuffer()
        t_oTextBuffer.set_text( unicode( str( t_oS2iHelp.help.content) ) )
        self.widgets['propHelpView'].set_buffer( t_oTextBuffer )

    #----------------------------------------------------------------------

    def __del__(self):
        pass

    #----------------------------------------------------------------------

    def on_prop_cancel_clicked( self, *args ):
        """Discard edits and close the dialog."""
        self.widgets['Properties'].destroy()

    #----------------------------------------------------------------------

    def on_prop_confirm_clicked( self, *args ):
        """Write widget values back into the XML and close the dialog."""
        # grab_focus() forces any in-progress spinbutton edit to commit.
        self.widgets['prop_confirm'].grab_focus()
        for Property in self.m_oPropertiesXML.properties.block.property:
            if Property.name == "xC":
                Property.value = unicode(self.widgets['xC'].get_value())
            if Property.name == "yC":
                Property.value = unicode(self.widgets['yC'].get_value())
            if Property.name == "isCenter":
                if self.widgets['isAtCenter'].get_active():
                    Property.value = u"true"
                else:
                    Property.value = u"false"
            if Property.name == "isFilling":
                if self.widgets['isFilling'].get_active():
                    Property.value = u"true"
                else:
                    Property.value = u"false"
            if Property.name == "isScalling":
                if self.widgets['isScalling'].get_active():
                    Property.value = u"true"
                else:
                    Property.value = u"false"
        self.m_oS2iBlockProperties.SetPropertiesXML( self.m_oPropertiesXML )
        self.m_oS2iBlockProperties.SetBorderColor( self.m_oBorderColor )
        self.m_oS2iBlockProperties.SetBackColor( self.m_oBackColor )
        self.widgets['Properties'].destroy()

    #----------------------------------------------------------------------

    def on_propBackColorButton_clicked(self,*args):
        """Pick a new background color and store it as 8-bit channels."""
        t_oColor = self.RunColorSelection()
        if t_oColor <> None:
            self.widgets['propBackgroundColor'].modify_bg(gtk.STATE_NORMAL,t_oColor)
            # / 257 converts GTK's 16-bit channels back to 0-255.
            self.m_oBackColor[0] = t_oColor.red / 257
            self.m_oBackColor[1] = t_oColor.green / 257
            self.m_oBackColor[2] = t_oColor.blue / 257

    #----------------------------------------------------------------------

    def on_propBorderColorButton_clicked(self,*args):
        """Pick a new border color and store it as 8-bit channels."""
        t_oColor = self.RunColorSelection()
        if t_oColor <> None:
            self.widgets['propBorderColor'].modify_bg(gtk.STATE_NORMAL,t_oColor)
            self.m_oBorderColor[0] = t_oColor.red / 257
            self.m_oBorderColor[1] = t_oColor.green / 257
            self.m_oBorderColor[2] = t_oColor.blue / 257
#----------------------------------------------------------------------
#propProperties = Properties()()
#propProperties.show( center=0 )
|
erggo/Harpy
|
harpia/bpGUI/rotate.py
|
Python
|
gpl-3.0
| 7,466
|
#!/usr/bin/python
"""Test of check menu item output."""
from macaroon.playback import *
sequence = MacroSequence()
import utils
sequence.append(KeyComboAction("<Control>f"))
sequence.append(TypeAction("Application class"))
sequence.append(KeyComboAction("Return"))
sequence.append(KeyComboAction("Return"))
sequence.append(KeyComboAction("<Alt>p"))
sequence.append(PauseAction(3000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"1. Arrow to first check menu item - not checked",
["BRAILLE LINE: 'gtk3-demo-application application Application Class frame < > Hide Titlebar when maximized check menu item'",
" VISIBLE: '< > Hide Titlebar when maximized', cursor=1",
"SPEECH OUTPUT: 'Hide Titlebar when maximized check menu item not checked'"]))
sequence.append(KeyComboAction("Return"))
sequence.append(KeyComboAction("Escape"))
sequence.append(KeyComboAction("<Alt>p"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Arrow to first check menu item - checked",
["BRAILLE LINE: 'gtk3-demo-application application Application Class frame <x> Hide Titlebar when maximized check menu item'",
" VISIBLE: '<x> Hide Titlebar when maximized', cursor=1",
"SPEECH OUTPUT: 'Hide Titlebar when maximized check menu item checked'"]))
sequence.append(KeyComboAction("Return"))
sequence.append(KeyComboAction("Escape"))
sequence.append(KeyComboAction("<Alt>p"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"3. Arrow to first check menu item - not checked",
["BRAILLE LINE: 'gtk3-demo-application application Application Class frame < > Hide Titlebar when maximized check menu item'",
" VISIBLE: '< > Hide Titlebar when maximized', cursor=1",
"SPEECH OUTPUT: 'Hide Titlebar when maximized check menu item not checked'"]))
sequence.append(KeyComboAction("Escape"))
sequence.append(KeyComboAction("<Alt>F4"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
pvagner/orca
|
test/keystrokes/gtk3-demo/role_check_menu_item.py
|
Python
|
lgpl-2.1
| 2,208
|
import dataset
import os

# Source: legacy SQLite database file in the working directory.
# old_db_path = os.path.join("proyectos_de_ley", "leyes_sqlite3.db")
old_db_path = os.path.join("leyes_sqlite3.db")

# NOTE(review): credentials are embedded in the URL ("PASSWORD" looks
# like a placeholder) -- prefer reading them from the environment.
new_db = dataset.connect("postgresql://proyectosdeley:PASSWORD@localhost:5432/pdl")
old_db = dataset.connect("sqlite:///" + old_db_path)

# Copy each table wholesale from SQLite into PostgreSQL.  The original
# repeated the same query/insert pair once per table; a loop keeps the
# table list in one place.  Table names are trusted literals, so the
# string interpolation below is not an injection risk.
for table_name in ("pdl_proyecto", "pdl_slug", "pdl_seguimientos"):
    rows = old_db.query("select * from %s" % table_name)
    new_db[table_name].insert_many(rows)
|
proyectosdeley/proyectos_de_ley
|
migrate_db2postgres.py
|
Python
|
mit
| 597
|
#!/usr/bin/env python
import os
# mozharness configuration for B2G "otoro" engineering builds.  String
# values contain %-style placeholders (e.g. %(branch)s, %(buildid)s)
# interpolated by the consuming scripts at runtime.
config = {
    # Build pipeline steps, executed in order.
    "default_actions": [
        'clobber',
        'checkout-sources',
        'get-blobs',
        'update-source-manifest',
        'build',
        'build-symbols',
        'make-updates',
        'prep-upload',
        'upload',
        # bug 1222227 - temporarily disable for S3 migration
        # 'make-socorro-json',
        # 'upload-source-manifest',
    ],
    # Upload targets: "default" is the private host, "public" the
    # product-delivery host.
    "upload": {
        "default": {
            "ssh_key": os.path.expanduser("~/.ssh/b2gbld_dsa"),
            "ssh_user": "b2gbld",
            "upload_remote_host": "pvtbuilds.pvt.build.mozilla.org",
            "upload_remote_path": "/pvt/mozilla.org/b2gotoro/tinderbox-builds/%(branch)s-%(target)s/%(buildid)s",
            "upload_remote_symlink": "/pvt/mozilla.org/b2gotoro/tinderbox-builds/%(branch)s-%(target)s/latest",
            "upload_remote_nightly_path": "/pvt/mozilla.org/b2gotoro/nightly/%(branch)s-%(target)s/%(year)04i/%(month)02i/%(year)04i-%(month)02i-%(day)02i-%(hour)02i-%(minute)02i-%(second)02i",
            "upload_remote_nightly_symlink": "/pvt/mozilla.org/b2gotoro/nightly/%(branch)s-%(target)s/latest",
            "upload_dep_target_exclusions": [],
        },
        "public": {
            "ssh_key": os.path.expanduser("~/.ssh/ffxbld_rsa"),
            "ssh_user": "ffxbld",
            "upload_remote_host": "upload.ffxbld.productdelivery.prod.mozaws.net",
            "post_upload_cmd": "post_upload.py --tinderbox-builds-dir %(branch)s-%(target)s -p b2g -i %(buildid)s --revision %(revision)s --release-to-tinderbox-dated-builds",
            "post_upload_nightly_cmd": "post_upload.py --tinderbox-builds-dir %(branch)s-%(target)s -b %(branch)s-%(target)s -p b2g -i %(buildid)s --revision %(revision)s --release-to-tinderbox-dated-builds --release-to-latest --release-to-dated",
        },
    },
    # Shared VCS checkout locations on the build machine.
    "gittool_share_base": "/builds/git-shared/git",
    "gittool_base_mirror_urls": [],
    "vcs_share_base": "/builds/hg-shared",
    "exes": {
        "tooltool.py": "/tools/tooltool.py",
        "python": "/tools/python27/bin/python2.7",
    },
    # Source-manifest publication settings.
    "manifest": {
        "upload_remote_host": "stage.mozilla.org",
        "upload_remote_basepath": "/pub/mozilla.org/b2g/manifests/nightly/%(version)s",
        "depend_upload_remote_basepath": "/pub/mozilla.org/b2g/manifests/depend/%(branch)s/%(platform)s/%(buildid)s",
        "ssh_key": os.path.expanduser("~/.ssh/b2gbld_dsa"),
        "ssh_user": "b2gbld",
        # Branch name -> release version string.
        "branches": {
            'mozilla-b2g37_v2_2': '2.2.0',
            'mozilla-b2g44_v2_5': '2.5.0',
            'mozilla-central': '2.6.0',
        },
        "translate_hg_to_git": True,
        "translate_base_url": "http://cruncher.build.mozilla.org/mapper/{project}/{vcs}/{rev}",
        "target_suffix": "-eng",
    },
    # Environment exported to the build subprocesses.
    "env": {
        "CCACHE_DIR": "/builds/ccache",
        "CCACHE_COMPRESS": "1",
        "CCACHE_UMASK": "002",
        "GAIA_OPTIMIZE": "1",
        "B2G_UPDATER": "1",
        "B2G_SYSTEM_APPS": "1",
        "WGET_OPTS": "-c -q",
        "PATH": "/tools/python27/bin:%(PATH)s",
        "B2G_UPDATE_CHANNEL": "default",
    },
    "clobberer_url": "https://api.pub.build.mozilla.org/clobberer/lastclobber",
    "is_automation": True,
    "variant": "eng",
    "target_suffix": "-eng",
    # Mapping of upstream git remotes to their mozilla.org mirrors.
    "repo_mirror_dir": "/builds/git-shared/repo",
    "repo_remote_mappings": {
        'https://android.googlesource.com/': 'https://git.mozilla.org/external/aosp',
        'git://codeaurora.org/': 'https://git.mozilla.org/external/caf',
        'https://git.mozilla.org/b2g': 'https://git.mozilla.org/b2g',
        'git://github.com/mozilla-b2g/': 'https://git.mozilla.org/b2g',
        'git://github.com/mozilla/': 'https://git.mozilla.org/b2g',
        'https://git.mozilla.org/releases': 'https://git.mozilla.org/releases',
        'http://android.git.linaro.org/git-ro/': 'https://git.mozilla.org/external/linaro',
        'git://github.com/apitrace/': 'https://git.mozilla.org/external/apitrace',
    },
}
|
cstipkovic/spidermonkey-research
|
testing/mozharness/configs/b2g/releng-otoro-eng.py
|
Python
|
mpl-2.0
| 3,997
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-07-28 14:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean `borrada` field (default False) to `asistencia`.

    NOTE(review): 'borrada' is Spanish for 'deleted' -- presumably a
    soft-delete flag; confirm against the model's usage.
    """

    dependencies = [
        ('gestioneide', '0044_perfil'),
    ]

    operations = [
        migrations.AddField(
            model_name='asistencia',
            name='borrada',
            field=models.BooleanField(default=False),
        ),
    ]
|
Etxea/gestioneide
|
gestioneide/migrations/0045_asistencia_borrada.py
|
Python
|
gpl-3.0
| 448
|
from pytradfri.const import ROOT_DEVICES
from pytradfri.gateway import Gateway
def test_get_device():
    """get_device builds a GET command addressing the device endpoint."""
    command = Gateway().get_device(123)
    assert command.method == 'get'
    assert command.path == [ROOT_DEVICES, 123]
|
r41d/pytradfri
|
tests/test_gateway.py
|
Python
|
mit
| 249
|
"""Int textbox class."""
from invisible_ui.elements import Textbox
class IntTextbox(Textbox):
    """A Textbox whose input is restricted to decimal digits."""

    def __init__(self, parent, title, value="", hidden=False):
        # Restrict keystrokes to digits; everything else is delegated
        # to the base Textbox unchanged.
        digits = "1234567890"
        super().__init__(parent, title, value=value, hidden=hidden,
                         allowedChars=digits)
|
chrisnorman7/invisible_ui
|
invisible_ui/elements/extras/intTextbox.py
|
Python
|
gpl-2.0
| 342
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteEnvironment
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2beta1_generated_Environments_DeleteEnvironment_sync]
from google.cloud import dialogflow_v2beta1
def sample_delete_environment():
    """Synchronously delete a Dialogflow environment (generated sample)."""
    # Create a client
    client = dialogflow_v2beta1.EnvironmentsClient()

    # Initialize request argument(s); "name_value" is a placeholder for
    # the environment's full resource name.
    request = dialogflow_v2beta1.DeleteEnvironmentRequest(
        name="name_value",
    )

    # Make the request
    client.delete_environment(request=request)
# [END dialogflow_v2beta1_generated_Environments_DeleteEnvironment_sync]
|
googleapis/python-dialogflow
|
samples/generated_samples/dialogflow_v2beta1_generated_environments_delete_environment_sync.py
|
Python
|
apache-2.0
| 1,447
|
#
# Copyright Red Hat, Inc. 2014
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
#
'''
Unit tests for testing some bug.py magic
'''
import pickle
import sys
import unittest
from tests import StringIO
from bugzilla import RHBugzilla
from bugzilla.bug import _Bug
# Module-level client shared by the tests below; cookie and token files
# are explicitly disabled (None), so no stored credentials are loaded.
rhbz = RHBugzilla(cookiefile=None, tokenfile=None)
class BugTest(unittest.TestCase):
    """Tests for _Bug attribute aliasing, repr/str, and pickle support."""
    maxDiff = None
    bz = rhbz

    def testBasic(self):
        """A raw bug dict is exposed through both native and aliased names."""
        # Mix of native XMLRPC field names (short_desc, cf_fixed_in) and
        # names the bug object should alias (summary, fixed_in, ...).
        data = {
            "bug_id": 123456,
            "status": "NEW",
            "assigned_to": "foo@bar.com",
            "component": "foo",
            "product": "bar",
            "short_desc": "some short desc",
            "cf_fixed_in": "nope",
            "fixed_in": "1.2.3.4",
            "devel_whiteboard": "some status value",
        }
        bug = _Bug(bugzilla=self.bz, dict=data)

        def _assert_bug():
            # Aliases must work in both directions, e.g. component ->
            # components and devel_whiteboard -> cf_devel_whiteboard.
            self.assertEqual(hasattr(bug, "component"), True)
            self.assertEqual(getattr(bug, "components"), ["foo"])
            self.assertEqual(getattr(bug, "product"), "bar")
            self.assertEqual(hasattr(bug, "short_desc"), True)
            self.assertEqual(getattr(bug, "summary"), "some short desc")
            self.assertEqual(bool(getattr(bug, "cf_fixed_in")), True)
            self.assertEqual(getattr(bug, "fixed_in"), "1.2.3.4")
            self.assertEqual(bool(getattr(bug, "cf_devel_whiteboard")), True)
            self.assertEqual(getattr(bug, "devel_whiteboard"),
                "some status value")

        _assert_bug()
        self.assertEqual(str(bug),
            "#123456 NEW       - foo@bar.com - some short desc")
        self.assertTrue(repr(bug).startswith("<Bug #123456"))

        # This triggers some code in __getattr__
        dir(bug)

        # Test special pickle support: the bugzilla connection is not
        # picklable, so it should round-trip as None and be reattachable.
        if hasattr(sys.version_info, "major") and sys.version_info.major >= 3:
            from io import BytesIO
            fd = BytesIO()
        else:
            fd = StringIO()
        pickle.dump(bug, fd)
        fd.seek(0)
        bug = pickle.load(fd)
        self.assertEqual(getattr(bug, "bugzilla"), None)
        bug.bugzilla = self.bz
        _assert_bug()

    def testBugNoID(self):
        """Constructing a bug without any ID field must raise TypeError."""
        try:
            _Bug(bugzilla=self.bz, dict={"component": "foo"})
            raise AssertionError("Expected lack of ID failure.")
        except TypeError:
            pass
|
pombredanne/python-bugzilla
|
tests/bug.py
|
Python
|
gpl-2.0
| 2,445
|
# Short type code -> human-readable display label.  The codes look like
# classification-system prefixes (NAICS, SOC, CIP) -- confirm against
# the consumers of this map.
TYPEMAP = {
    "geo": "Location",
    "cip": "Education",
    "naics": "Industry",
    "soc": "Occupation",
    "story": "Story",
    "map": "Map"
}

# Ordered story list for the home page feed.  `featured` marks entries
# for prominent display; NOTE(review): some entries omit the key --
# confirm consumers treat a missing key the same as False.
HOMEFEED = [
    {
        "link": "/story/04-04-2016_customStory/",
        "featured": True
    },
    {
        "link": "/story/04-04-2016_men-still-dominate/"
    },
    {
        "link": "/story/04-04-2016_characteristics-power-occupations/",
        "featured": True
    },
    {
        "link": "/story/04-04-2016_are-we-having-fun-yet/",
        "featured": False
    },
    {
        "link": "/story/04-04-2016_dangerous-running-mates/",
        "featured": False
    },
]
|
tgarland1/datausa-site
|
datausa/general/home.py
|
Python
|
agpl-3.0
| 663
|
import csv
def fix_turnstile_data(filenames):
    '''
    Split multi-entry MTA turnstile rows into one entry per row.

    Each input row is a 3-field identifier (C/A, UNIT, SCP) followed by
    any number of 5-field observation groups (date, time, description,
    cumulative entries, cumulative exits), e.g.:

        A002,R051,02-00-00,05-28-11,00:00:00,REGULAR,003178521,001100739

    For every file in `filenames` this writes "updated_" + filename,
    where each output row is the 3-field identifier plus exactly one
    5-field group, preserving field order.

    Example source data:
    http://web.mta.info/developers/data/nyct/turnstile/turnstile_110507.txt
    '''
    for name in filenames:
        with open(name) as raw_data:
            reader = csv.reader(raw_data)
            # BUGFIX: the original opened the output as 'wb' (a Python 2
            # idiom) which breaks the text-mode csv API under Python 3.
            # The csv docs require text mode with newline='' so the
            # writer controls line endings itself.
            with open('updated_' + name, 'w', newline='') as updated_data:
                writer = csv.writer(updated_data)
                for row in reader:
                    key = row[:3]
                    writer.writerows(
                        key + row[start:start + 5]
                        for start in range(3, len(row), 5))
|
kwailamchan/programming-languages
|
python/data_science/NYC/wrangle05_fix_turnstile_data.py
|
Python
|
mit
| 1,781
|
# *- coding: utf-8 -*-
# mailbox.py
# Copyright (C) 2013-2015 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
IMAP Mailbox.
"""
import re
import logging
import os
import cStringIO
import StringIO
import time
from collections import defaultdict
from email.utils import formatdate
from twisted.internet import defer
from twisted.internet import reactor
from twisted.python import log
from twisted.mail import imap4
from zope.interface import implements
from leap.common.check import leap_assert
from leap.common.check import leap_assert_type
from leap.bitmask.mail.constants import INBOX_NAME, MessageFlags
from leap.bitmask.mail.imap.messages import IMAPMessage
logger = logging.getLogger(__name__)
# TODO LIST
# [ ] Restore profile_cmd instrumentation
# [ ] finish the implementation of IMailboxListener
# [ ] implement the rest of ISearchableMailbox
"""
If the environment variable `LEAP_SKIPNOTIFY` is set, we avoid
notifying clients of new messages. Use during stress tests.
"""
# NOTE: os.environ.get returns the raw string, so any non-empty value of
# LEAP_SKIPNOTIFY (even "0") disables notifications.
NOTIFY_NEW = not os.environ.get('LEAP_SKIPNOTIFY', False)
# When LEAP_PROFILE_IMAPCMD is set, IMAP commands get timing instrumentation.
PROFILE_CMD = os.environ.get('LEAP_PROFILE_IMAPCMD', False)
if PROFILE_CMD:
    def _debugProfiling(result, cmdname, start):
        # Pass-through callback: logs elapsed milliseconds and returns the
        # result unchanged so it can sit in the middle of a callback chain.
        took = (time.time() - start) * 1000
        log.msg("CMD " + cmdname + " TOOK: " + str(took) + " msec")
        return result
    def do_profile_cmd(d, name):
        """
        Add the profiling debug to the passed callback.
        :param d: deferred
        :param name: name of the command
        :type name: str
        """
        # Only defined when PROFILE_CMD is set; callers guard on PROFILE_CMD.
        d.addCallback(_debugProfiling, name, time.time())
        d.addErrback(lambda f: log.msg(f.getTraceback()))
# Default flags set on a freshly created mailbox (see IMAPMailbox.getFlags).
INIT_FLAGS = (MessageFlags.SEEN_FLAG, MessageFlags.ANSWERED_FLAG,
              MessageFlags.FLAGGED_FLAG, MessageFlags.DELETED_FLAG,
              MessageFlags.DRAFT_FLAG, MessageFlags.RECENT_FLAG,
              MessageFlags.LIST_FLAG)
def make_collection_listener(mailbox):
    """
    Wrap a mailbox in a class that hashes and compares by mailbox name.
    Dicts and sets holding these wrappers treat two wrappers around a
    mailbox with the same name as a single entry, so the listener
    collections kept by MessageCollection never accumulate duplicate
    instances of the same mailbox.
    """
    class HashableMailbox(object):
        def __init__(self, wrapped):
            self.mbox = wrapped
            # Exposed because the pixelated adaptor reads this attribute
            # directly (see #8083).
            self.mailbox_name = wrapped.mbox_name
        def __hash__(self):
            return hash(self.mbox.mbox_name)
        def __eq__(self, other):
            return other.mbox.mbox_name == self.mbox.mbox_name
        def notify_new(self):
            # Forward notifications to the wrapped mailbox.
            self.mbox.notify_new()
    return HashableMailbox(mailbox)
class IMAPMailbox(object):
    """
    A Soledad-backed IMAP mailbox.
    Implements the high-level method needed for the Mailbox interfaces.
    The low-level database methods are contained in the generic
    MessageCollection class. We receive an instance of it and it is made
    accessible in the `collection` attribute.
    """
    implements(
        imap4.IMailbox,
        imap4.IMailboxInfo,
        imap4.ISearchableMailbox,
        # XXX I think we do not need to implement CloseableMailbox, do we?
        # We could remove ourselves from the collectionListener, although I
        # think it simply will be garbage collected.
        # imap4.ICloseableMailbox
        imap4.IMessageCopier)
    init_flags = INIT_FLAGS
    # Status item names handled by requestStatus (RFC 3501 STATUS command).
    CMD_MSG = "MESSAGES"
    CMD_RECENT = "RECENT"
    CMD_UIDNEXT = "UIDNEXT"
    CMD_UIDVALIDITY = "UIDVALIDITY"
    CMD_UNSEEN = "UNSEEN"
    # Class-level registry mapping mailbox name -> set of listeners, shared
    # by every IMAPMailbox instance (see the `listeners` property).
    # TODO we should turn this into a datastructure with limited capacity
    _listeners = defaultdict(set)
    def __init__(self, collection, rw=1):
        """
        :param collection: instance of MessageCollection
        :type collection: MessageCollection
        :param rw: read-and-write flag for this mailbox
        :type rw: int
        """
        self.rw = rw
        self._uidvalidity = None
        self.collection = collection
        # Register ourselves so the collection calls notify_new whenever
        # its message count changes.
        self.collection.addListener(make_collection_listener(self))
    @property
    def mbox_name(self):
        # Name of the underlying mailbox, delegated to the collection.
        return self.collection.mbox_name
    @property
    def listeners(self):
        """
        Returns listeners for this mbox.
        The server itself is a listener to the mailbox.
        so we can notify it (and should!) after changes in flags
        and number of messages.
        :rtype: set
        """
        return self._listeners[self.mbox_name]
    def get_imap_message(self, message):
        """
        Wrap a collection message in an IMAPMessage.
        :return: a Deferred; the IMAPMessage constructor is handed `d` and
                 is responsible for firing it.
        :rtype: Deferred
        """
        d = defer.Deferred()
        IMAPMessage(message, store=self.collection.store, d=d)
        return d
    # FIXME this grows too crazily when many instances are fired, like
    # during imaptest stress testing. Should have a queue of limited size
    # instead.
    def addListener(self, listener):
        """
        Add a listener to the listeners queue.
        The server adds itself as a listener when there is a SELECT,
        so it can send EXIST commands.
        :param listener: listener to add
        :type listener: an object that implements IMailboxListener
        """
        # When notifications are globally disabled there is no point in
        # tracking listeners at all.
        if not NOTIFY_NEW:
            return
        listeners = self.listeners
        logger.debug('adding mailbox listener: %s. Total: %s' % (
            listener, len(listeners)))
        listeners.add(listener)
    def removeListener(self, listener):
        """
        Remove a listener from the listeners queue.
        :param listener: listener to remove
        :type listener: an object that implements IMailboxListener
        """
        self.listeners.remove(listener)
    def getFlags(self):
        """
        Returns the flags defined for this mailbox.
        :returns: tuple of flags for this mailbox
        :rtype: tuple of str
        """
        flags = self.collection.mbox_wrapper.flags
        if not flags:
            # Fall back to the module-level defaults when the wrapper has
            # no flags stored yet.
            flags = self.init_flags
        # NOTE: under Python 2 (this module imports cStringIO) map returns
        # a list, not an iterator.
        flags_str = map(str, flags)
        return flags_str
    def setFlags(self, flags):
        """
        Sets flags for this mailbox.
        :param flags: a tuple with the flags
        :type flags: tuple of str
        """
        # XXX this is setting (overriding) old flags.
        # Better pass a mode flag
        leap_assert(isinstance(flags, tuple),
                    "flags expected to be a tuple")
        return self.collection.set_mbox_attr("flags", flags)
    def getUIDValidity(self):
        """
        Return the unique validity identifier for this mailbox.
        :return: unique validity identifier
        :rtype: int
        """
        # The creation timestamp doubles as the UIDVALIDITY value.
        return self.collection.get_mbox_attr("created")
    def getUID(self, message_number):
        """
        Return the UID of a message in the mailbox
        .. note:: this implementation does not make much sense RIGHT NOW,
        but in the future will be useful to get absolute UIDs from
        message sequence numbers.
        :param message: the message sequence number.
        :type message: int
        :rtype: int
        :return: the UID of the message.
        """
        # TODO support relative sequences. The (imap) message should
        # receive a sequence number attribute: a deferred is not expected
        return message_number
    def getUIDNext(self):
        """
        Return the likely UID for the next message added to this
        mailbox. Currently it returns the higher UID incremented by
        one.
        :return: deferred with int
        :rtype: Deferred
        """
        d = self.collection.get_uid_next()
        return d
    def getMessageCount(self):
        """
        Returns the total count of messages in this mailbox.
        :return: deferred with int
        :rtype: Deferred
        """
        return self.collection.count()
    def getUnseenCount(self):
        """
        Returns the number of messages with the 'Unseen' flag.
        :return: count of messages flagged `unseen`
        :rtype: int
        """
        return self.collection.count_unseen()
    def getRecentCount(self):
        """
        Returns the number of messages with the 'Recent' flag.
        :return: count of messages flagged `recent`
        :rtype: int
        """
        return self.collection.count_recent()
    def isWriteable(self):
        """
        Get the read/write status of the mailbox.
        :return: 1 if mailbox is read-writeable, 0 otherwise.
        :rtype: int
        """
        # XXX We don't need to store it in the mbox doc, do we?
        # return int(self.collection.get_mbox_attr('rw'))
        return self.rw
    def getHierarchicalDelimiter(self):
        """
        Returns the character used to delimite hierarchies in mailboxes.
        :rtype: str
        """
        return '/'
    def requestStatus(self, names):
        """
        Handles a status request by gathering the output of the different
        status commands.
        :param names: a list of strings containing the status commands
        :type names: iter
        """
        r = {}
        maybe = defer.maybeDeferred
        if self.CMD_MSG in names:
            r[self.CMD_MSG] = maybe(self.getMessageCount)
        if self.CMD_RECENT in names:
            r[self.CMD_RECENT] = maybe(self.getRecentCount)
        if self.CMD_UIDNEXT in names:
            r[self.CMD_UIDNEXT] = maybe(self.getUIDNext)
        if self.CMD_UIDVALIDITY in names:
            r[self.CMD_UIDVALIDITY] = maybe(self.getUIDValidity)
        if self.CMD_UNSEEN in names:
            r[self.CMD_UNSEEN] = maybe(self.getUnseenCount)
        # r.keys() and r.values() iterate in a consistent order for an
        # unmodified dict, so zipping keys with the gathered values is safe.
        def as_a_dict(values):
            return dict(zip(r.keys(), values))
        d = defer.gatherResults(r.values())
        d.addCallback(as_a_dict)
        return d
    def addMessage(self, message, flags, date=None, notify_just_mdoc=True):
        """
        Adds a message to this mailbox.
        :param message: the raw message
        :type message: str
        :param flags: flag list
        :type flags: list of str
        :param date: timestamp
        :type date: str, or None
        :param notify_just_mdoc:
            boolean passed to the wrapper.create method, to indicate whether
            we're insterested in being notified right after the mdoc has been
            written (as it's the first doc to be written, and quite small, this
            is faster, though potentially unsafe).
            Setting it to True improves a *lot* the responsiveness of the
            APPENDS: we just need to be notified when the mdoc is saved, and
            let's just expect that the other parts are doing just fine. This
            will not catch any errors when the inserts of the other parts
            fail, but on the other hand allows us to return very quickly,
            which seems a good compromise given that we have to serialize the
            appends.
            However, some operations like the saving of drafts need to wait for
            all the parts to be saved, so if some heuristics are met down in
            the call chain a Draft message will unconditionally set this flag
            to False, and therefore ignoring the setting of this flag here.
        :type notify_just_mdoc: bool
        :return: a deferred that will be triggered with the UID of the added
                 message.
        """
        # TODO should raise ReadOnlyMailbox if not rw.
        # TODO have a look at the cases for internal date in the rfc
        # XXX we could treat the message as an IMessage from here
        # TODO change notify_just_mdoc to something more meaningful, like
        # fast_insert_notify?
        # TODO notify_just_mdoc *sometimes* make the append tests fail.
        # have to find a better solution for this. A workaround could probably
        # be to have a list of the ongoing deferreds related to append, so that
        # we queue for later all the requests having to do with these.
        # A better solution will probably involve implementing MULTIAPPEND
        # extension or patching imap server to support pipelining.
        # Normalize file-like buffers to a plain string (Python 2 only).
        if isinstance(message, (cStringIO.OutputType, StringIO.StringIO)):
            message = message.getvalue()
        leap_assert_type(message, basestring)
        if flags is None:
            flags = tuple()
        else:
            flags = tuple(str(flag) for flag in flags)
        if date is None:
            # Default the internal date to "now" in RFC 2822 format.
            date = formatdate(time.time())
        d = self.collection.add_msg(message, flags, date=date,
                                    notify_just_mdoc=notify_just_mdoc)
        d.addErrback(lambda failure: log.err(failure))
        return d
    def notify_new(self, *args):
        """
        Notify of new messages to all the listeners.
        This will be called indirectly by the underlying collection, that will
        notify this IMAPMailbox whenever there are changes in the number of
        messages in the collection, since we have added ourselves to the
        collection listeners.
        :param args: ignored.
        """
        if not NOTIFY_NEW:
            return
        def cbNotifyNew(result):
            exists, recent = result
            for listener in self.listeners:
                listener.newMessages(exists, recent)
        d = self._get_notify_count()
        d.addCallback(cbNotifyNew)
        d.addCallback(self.collection.cb_signal_unread_to_ui)
        d.addErrback(lambda failure: log.err(failure))
    def _get_notify_count(self):
        """
        Get message count and recent count for this mailbox.
        :return: a deferred that will fire with a tuple, with number of
                 messages and number of recent messages.
        :rtype: Deferred
        """
        # XXX this is way too expensive in cases like multiple APPENDS.
        # We should have a way of keep a cache or do a self-increment for that
        # kind of calls.
        d_exists = defer.maybeDeferred(self.getMessageCount)
        d_recent = defer.maybeDeferred(self.getRecentCount)
        d_list = [d_exists, d_recent]
        def log_num_msg(result):
            exists, recent = tuple(result)
            logger.debug("NOTIFY (%r): there are %s messages, %s recent" % (
                self.mbox_name, exists, recent))
            return result
        d = defer.gatherResults(d_list)
        d.addCallback(log_num_msg)
        return d
    # commands, do not rename methods
    def destroy(self):
        """
        Called before this mailbox is permanently deleted.
        Should cleanup resources, and set the \\Noselect flag
        on the mailbox.
        """
        # XXX this will overwrite all the existing flags
        # should better simply addFlag
        self.setFlags((MessageFlags.NOSELECT_FLAG,))
        def remove_mbox(_):
            uuid = self.collection.mbox_uuid
            d = self.collection.mbox_wrapper.delete(self.collection.store)
            d.addCallback(
                lambda _: self.collection.mbox_indexer.delete_table(uuid))
            return d
        # Delete all documents first, then the mailbox wrapper and its
        # index table.
        d = self.deleteAllDocs()
        d.addCallback(remove_mbox)
        return d
    def expunge(self):
        """
        Remove all messages flagged \\Deleted
        """
        if not self.isWriteable():
            raise imap4.ReadOnlyMailbox
        return self.collection.delete_all_flagged()
    def _get_message_fun(self, uid):
        """
        Return the proper method to get a message for this mailbox, depending
        on the passed uid flag.
        :param uid: If true, the IDs specified in the query are UIDs;
                    otherwise they are message sequence IDs.
        :type uid: bool
        :rtype: callable
        """
        # bool indexes the two-element list: False -> by sequence number,
        # True -> by UID.
        get_message_fun = [
            self.collection.get_message_by_sequence_number,
            self.collection.get_message_by_uid][uid]
        return get_message_fun
    def _get_messages_range(self, messages_asked, uid=True):
        """
        Bound the message sequence and, for UID queries, narrow it down to
        the UIDs that actually exist in the collection.
        NOTE(review): when `uid` is false the filtering step is skipped and
        the bounded sequence is returned as-is.
        :rtype: Deferred
        """
        def get_range(messages_asked):
            return self._filter_msg_seq(messages_asked)
        d = self._bound_seq(messages_asked, uid)
        if uid:
            d.addCallback(get_range)
        d.addErrback(lambda f: log.err(f))
        return d
    def _bound_seq(self, messages_asked, uid):
        """
        Put an upper bound to a messages sequence if this is open.
        :param messages_asked: IDs of the messages.
        :type messages_asked: MessageSet
        :return: a Deferred that will fire with a MessageSet
        """
        def set_last_uid(last_uid):
            messages_asked.last = last_uid
            return messages_asked
        def set_last_seq(all_uid):
            messages_asked.last = len(all_uid)
            return messages_asked
        if not messages_asked.last:
            try:
                iter(messages_asked)
            except TypeError:
                # looks like we cannot iterate
                if uid:
                    d = self.collection.get_last_uid()
                    d.addCallback(set_last_uid)
                else:
                    d = self.collection.all_uid_iter()
                    d.addCallback(set_last_seq)
                return d
        return defer.succeed(messages_asked)
    def _filter_msg_seq(self, messages_asked):
        """
        Filter a message sequence returning only the ones that do exist in the
        collection.
        :param messages_asked: IDs of the messages.
        :type messages_asked: MessageSet
        :rtype: set
        """
        # TODO we could pass the asked sequence to the indexer
        # all_uid_iter, and bound the sql query instead.
        def filter_by_asked(all_msg_uid):
            set_asked = set(messages_asked)
            set_exist = set(all_msg_uid)
            return set_asked.intersection(set_exist)
        d = self.collection.all_uid_iter()
        d.addCallback(filter_by_asked)
        return d
    def fetch(self, messages_asked, uid):
        """
        Retrieve one or more messages in this mailbox.
        from rfc 3501: The data items to be fetched can be either a single atom
        or a parenthesized list.
        :param messages_asked: IDs of the messages to retrieve information
                               about
        :type messages_asked: MessageSet
        :param uid: If true, the IDs are UIDs. They are message sequence IDs
                    otherwise.
        :type uid: bool
        :rtype: deferred with a generator that yields two-tuples of
                (message id, IMAPMessage)
        """
        get_msg_fun = self._get_message_fun(uid)
        getimapmsg = self.get_imap_message
        def get_imap_messages_for_range(msg_range):
            def _get_imap_msg(messages):
                d_imapmsg = []
                # just in case we got bad data in here
                for msg in filter(None, messages):
                    d_imapmsg.append(getimapmsg(msg))
                return defer.gatherResults(d_imapmsg, consumeErrors=True)
            def _zip_msgid(imap_messages):
                zipped = zip(
                    list(msg_range), imap_messages)
                return (item for item in zipped)
            # XXX not called??
            def _unset_recent(sequence):
                reactor.callLater(0, self.unset_recent_flags, sequence)
                return sequence
            d_msg = []
            for msgid in msg_range:
                # XXX We want cdocs because we "probably" are asked for the
                # body. We should be smarter at do_FETCH and pass a parameter
                # to this method in order not to prefetch cdocs if they're not
                # going to be used.
                d_msg.append(get_msg_fun(msgid, get_cdocs=True))
            d = defer.gatherResults(d_msg, consumeErrors=True)
            d.addCallback(_get_imap_msg)
            d.addCallback(_zip_msgid)
            d.addErrback(lambda failure: log.err(failure))
            return d
        d = self._get_messages_range(messages_asked, uid)
        d.addCallback(get_imap_messages_for_range)
        d.addErrback(lambda failure: log.err(failure))
        return d
    def fetch_flags(self, messages_asked, uid):
        """
        A fast method to fetch all flags, tricking just the
        needed subset of the MIME interface that's needed to satisfy
        a generic FLAGS query.
        Given how LEAP Mail is supposed to work without local cache,
        this query is going to be quite common, and also we expect
        it to be in the form 1:* at the beginning of a session, so
        it's not bad to fetch all the FLAGS docs at once.
        :param messages_asked: IDs of the messages to retrieve information
                               about
        :type messages_asked: MessageSet
        :param uid: If 1, the IDs are UIDs. They are message sequence IDs
                    otherwise.
        :type uid: int
        :return: A tuple of two-tuples of message sequence numbers and
                 flagsPart, which is a only a partial implementation of
                 MessagePart.
        :rtype: tuple
        """
        # is_sequence = True if uid == 0 else False
        # XXX FIXME -----------------------------------------------------
        # imap/tests, or muas like mutt, it will choke until we implement
        # sequence numbers. This is an easy hack meanwhile.
        is_sequence = False
        # ---------------------------------------------------------------
        if is_sequence:
            raise NotImplementedError(
                "FETCH FLAGS NOT IMPLEMENTED FOR MESSAGE SEQUENCE NUMBERS YET")
        # The heavy lifting happens asynchronously in _do_fetch_flags,
        # which fires `d` with the result.
        d = defer.Deferred()
        reactor.callLater(0, self._do_fetch_flags, messages_asked, uid, d)
        if PROFILE_CMD:
            do_profile_cmd(d, "FETCH-ALL-FLAGS")
        return d
    def _do_fetch_flags(self, messages_asked, uid, d):
        """
        :param messages_asked: IDs of the messages to retrieve information
                               about
        :type messages_asked: MessageSet
        :param uid: If 1, the IDs are UIDs. They are message sequence IDs
                    otherwise.
        :type uid: int
        :param d: deferred whose callback will be called with result.
        :type d: Deferred
        :rtype: A generator that yields two-tuples of message sequence numbers
                and flagsPart
        """
        # Minimal stand-in for MessagePart: just enough for a FLAGS query.
        class flagsPart(object):
            def __init__(self, uid, flags):
                self.uid = uid
                self.flags = flags
            def getUID(self):
                return self.uid
            def getFlags(self):
                return map(str, self.flags)
        def pack_flags(result):
            _uid, _flags = result
            return _uid, flagsPart(_uid, _flags)
        def get_flags_for_seq(sequence):
            d_all_flags = []
            for msgid in sequence:
                # TODO implement sequence numbers here too
                d_flags_per_uid = self.collection.get_flags_by_uid(msgid)
                d_flags_per_uid.addCallback(pack_flags)
                d_all_flags.append(d_flags_per_uid)
            gotflags = defer.gatherResults(d_all_flags)
            gotflags.addCallback(get_uid_flag_generator)
            return gotflags
        def get_uid_flag_generator(result):
            generator = (item for item in result)
            d.callback(generator)
        d_seq = self._get_messages_range(messages_asked, uid)
        d_seq.addCallback(get_flags_for_seq)
        return d_seq
    @defer.inlineCallbacks
    def fetch_headers(self, messages_asked, uid):
        """
        A fast method to fetch all headers, tricking just the
        needed subset of the MIME interface that's needed to satisfy
        a generic HEADERS query.
        Given how LEAP Mail is supposed to work without local cache,
        this query is going to be quite common, and also we expect
        it to be in the form 1:* at the beginning of a session, so
        **MAYBE** it's not too bad to fetch all the HEADERS docs at once.
        :param messages_asked: IDs of the messages to retrieve information
                               about
        :type messages_asked: MessageSet
        :param uid: If true, the IDs are UIDs. They are message sequence IDs
                    otherwise.
        :type uid: bool
        :return: A tuple of two-tuples of message sequence numbers and
                 headersPart, which is a only a partial implementation of
                 MessagePart.
        :rtype: tuple
        """
        # TODO implement sequences
        is_sequence = True if uid == 0 else False
        if is_sequence:
            raise NotImplementedError(
                "FETCH HEADERS NOT IMPLEMENTED FOR SEQUENCE NUMBER YET")
        # Minimal stand-in for MessagePart: just enough for a HEADERS query.
        class headersPart(object):
            def __init__(self, uid, headers):
                self.uid = uid
                self.headers = headers
            def getUID(self):
                return self.uid
            def getHeaders(self, _):
                return dict(
                    (str(key), str(value))
                    for key, value in
                    self.headers.items())
        messages_asked = yield self._bound_seq(messages_asked, uid)
        seq_messg = yield self._filter_msg_seq(messages_asked)
        result = []
        for msgid in seq_messg:
            msg = yield self.collection.get_message_by_uid(msgid)
            headers = headersPart(msgid, msg.get_headers())
            result.append((msgid, headers))
        defer.returnValue(iter(result))
    def store(self, messages_asked, flags, mode, uid):
        """
        Sets the flags of one or more messages.
        :param messages: The identifiers of the messages to set the flags
        :type messages: A MessageSet object with the list of messages requested
        :param flags: The flags to set, unset, or add.
        :type flags: sequence of str
        :param mode: If mode is -1, these flags should be removed from the
                     specified messages. If mode is 1, these flags should be
                     added to the specified messages. If mode is 0, all
                     existing flags should be cleared and these flags should be
                     added.
        :type mode: -1, 0, or 1
        :param uid: If true, the IDs specified in the query are UIDs;
                    otherwise they are message sequence IDs.
        :type uid: bool
        :return: A deferred, that will be called with a dict mapping message
                 sequence numbers to sequences of str representing the flags
                 set on the message after this operation has been performed.
        :rtype: deferred
        :raise ReadOnlyMailbox: Raised if this mailbox is not open for
                                read-write.
        """
        if not self.isWriteable():
            log.msg('read only mailbox!')
            raise imap4.ReadOnlyMailbox
        # The heavy lifting happens asynchronously in _do_store, which
        # fires `d` with the result dict.
        d = defer.Deferred()
        reactor.callLater(0, self._do_store, messages_asked, flags,
                          mode, uid, d)
        if PROFILE_CMD:
            do_profile_cmd(d, "STORE")
        d.addCallback(self.collection.cb_signal_unread_to_ui)
        d.addErrback(lambda f: log.err(f))
        return d
    def _do_store(self, messages_asked, flags, mode, uid, observer):
        """
        Helper method, invoke set_flags method in the IMAPMessageCollection.
        See the documentation for the `store` method for the parameters.
        :param observer: a deferred that will be called with the dictionary
                         mapping UIDs to flags after the operation has been
                         done.
        :type observer: deferred
        """
        # TODO we should prevent client from setting Recent flag
        get_msg_fun = self._get_message_fun(uid)
        leap_assert(not isinstance(flags, basestring),
                    "flags cannot be a string")
        flags = tuple(flags)
        def set_flags_for_seq(sequence):
            def return_result_dict(list_of_flags):
                result = dict(zip(list(sequence), list_of_flags))
                observer.callback(result)
                return result
            d_all_set = []
            for msgid in sequence:
                d = get_msg_fun(msgid)
                d.addCallback(lambda msg: self.collection.update_flags(
                    msg, flags, mode))
                d_all_set.append(d)
            got_flags_setted = defer.gatherResults(d_all_set)
            got_flags_setted.addCallback(return_result_dict)
            return got_flags_setted
        d_seq = self._get_messages_range(messages_asked, uid)
        d_seq.addCallback(set_flags_for_seq)
        return d_seq
    # ISearchableMailbox
    def search(self, query, uid):
        """
        Search for messages that meet the given query criteria.
        Warning: this is half-baked, and it might give problems since
        it offers the SearchableInterface.
        We'll be implementing it asap.
        :param query: The search criteria
        :type query: list
        :param uid: If true, the IDs specified in the query are UIDs;
                    otherwise they are message sequence IDs.
        :type uid: bool
        :return: A list of message sequence numbers or message UIDs which
                 match the search criteria or a C{Deferred} whose callback
                 will be invoked with such a list.
        :rtype: C{list} or C{Deferred}
        """
        # TODO see if we can raise w/o interrupting flow
        # :raise IllegalQueryError: Raised when query is not valid.
        # example query:
        #  ['UNDELETED', 'HEADER', 'Message-ID',
        # XXX fixme, does not exist
        #   '52D44F11.9060107@dev.bitmask.net']
        # TODO hardcoding for now! -- we'll support generic queries later on
        # but doing a quickfix for avoiding duplicate saves in the draft
        # folder. # See issue #4209
        if len(query) > 2:
            if query[1] == 'HEADER' and query[2].lower() == "message-id":
                msgid = str(query[3]).strip()
                logger.debug("Searching for %s" % (msgid,))
                d = self.collection.get_uid_from_msgid(str(msgid))
                d.addCallback(lambda result: [result])
                return d
        # nothing implemented for any other query
        logger.warning("Cannot process query: %s" % (query,))
        return []
    # IMessageCopier
    def copy(self, message):
        """
        Copy the given message object into this mailbox.
        :param message: an IMessage implementor
        :type message: LeapMessage
        :return: a deferred that will be fired with the message
                 uid when the copy succeed.
        :rtype: Deferred
        """
        # if PROFILE_CMD:
        #     do_profile_cmd(d, "COPY")
        # A better place for this would be the COPY/APPEND dispatcher
        # in server.py, but qtreactor hangs when I do that, so this seems
        # to work fine for now.
        # d.addCallback(lambda r: self.reactor.callLater(0, self.notify_new))
        # deferLater(self.reactor, 0, self._do_copy, message, d)
        # return d
        d = self.collection.copy_msg(message.message,
                                     self.collection.mbox_uuid)
        return d
    # convenience fun
    def deleteAllDocs(self):
        """
        Delete all docs in this mailbox
        """
        # FIXME not implemented
        return self.collection.delete_all_docs()
    def unset_recent_flags(self, uid_seq):
        """
        Unset Recent flag for a sequence of UIDs.
        """
        # FIXME not implemented
        return self.collection.unset_recent_flags(uid_seq)
    def __repr__(self):
        """
        Representation string for this mailbox.
        """
        return u"<IMAPMailbox: mbox '%s' (%s)>" % (
            self.mbox_name, self.collection.count())
# Case-insensitive prefix match against the canonical inbox name.
_INBOX_RE = re.compile(INBOX_NAME, re.IGNORECASE)
def normalize_mailbox(name):
    """
    Return a normalized representation of the mailbox ``name``.
    An initial 'inbox' component, whatever its casing, is rewritten to
    the canonical (uppercase) form; the rest of the name is preserved.
    :param name: the name of the mailbox
    :type name: unicode
    :rtype: unicode
    """
    # XXX maybe it would make sense to normalize common folders too:
    # trash, sent, drafts, etc...
    if _INBOX_RE.match(name) is None:
        return name
    # ensure initial INBOX is uppercase
    return INBOX_NAME + name[len(INBOX_NAME):]
|
kalikaneko/bitmask-dev
|
src/leap/bitmask/mail/imap/mailbox.py
|
Python
|
gpl-3.0
| 33,130
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Byacc(AutotoolsPackage):
    """Berkeley Yacc is an LALR(1) parser generator. Berkeley Yacc has
    been made as compatible as possible with AT&T Yacc. Berkeley Yacc
    can accept any input specification that conforms to the AT&T Yacc
    documentation. Specifications that take advantage of undocumented
    features of AT&T Yacc will probably be rejected."""
    # NOTE: this packages the 'grandseiken' fork of byacc on GitHub, not
    # the upstream invisible-island.net distribution.
    homepage = "https://github.com/grandseiken/byacc"
    git      = "https://github.com/grandseiken/byacc.git"
    version('master', branch='master')
    # Building from a git checkout needs the full autotools chain to
    # regenerate the configure machinery.
    depends_on('m4',       type='build')
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool',  type='build')
|
rspavel/spack
|
var/spack/repos/builtin/packages/byacc/package.py
|
Python
|
lgpl-2.1
| 908
|
import statsmodels.api as sm
from . import common_fields
from . import make_gaps
from . import tools
from .device_event import make_alarm_event
def apply_loess(solution, num_days, gaps):
    """Solves the blood glucose equation over specified period of days
       and applies a loess smoothing regression to the data
       Returns numpy arrays for glucose and time values
    """
    # smbg values are taken from the solution before any gaps are made
    smbg_gluc, smbg_time = solution[:, 1], solution[:, 2]
    # punch gaps into the cbg data, if requested
    solution = make_gaps.gaps(solution, num_days=num_days, gaps=gaps)
    cbg_gluc, cbg_time = solution[:, 1], solution[:, 2]
    # smooth the cbg trace with a lowess regression; the fraction is
    # chosen so the smoothing window spans about 1.5 minutes of data
    smoothing_distance = 1.5  # minutes
    fraction = smoothing_distance / (num_days * 60 * 24) * 100
    smoothed = sm.nonparametric.lowess(
        cbg_gluc, cbg_time, frac=fraction, is_sorted=True)
    # lowess returns [time, glucose] columns
    return smoothed[:, 1], smoothed[:, 0], smbg_gluc, smbg_time
def cbg(gluc, timesteps, zonename):
    """ construct cbg events
        gluc -- a list of glucose values at each timestep
        timesteps -- a list of epoch times
        zonename -- name of timezone in effect
    """
    events = []
    for reading, when in zip(gluc, timesteps):
        entry = common_fields.add_common_fields('cbg', {}, when, zonename)
        entry["value"] = tools.convert_to_mmol(reading)
        entry["units"] = "mmol/L"
        if reading > 400:
            # out-of-range high: annotate and cap the stored value at 401
            entry["annotation"] = [{"code": "bg/out-of-range", "threshold": 400, "value": "high"}]
            entry["value"] = tools.convert_to_mmol(401)
        elif reading < 40:
            # out-of-range low: annotate, cap at 39, and emit a device
            # meta alarm for the low insulin reading
            entry["annotation"] = [{"code": "bg/out-of-range", "threshold": 40, "value": "low"}]
            entry["value"] = tools.convert_to_mmol(39)
            events.append(make_alarm_event(when, zonename))
        events.append(entry)
    return events
|
tidepool-org/dfaker
|
dfaker/cbg.py
|
Python
|
bsd-2-clause
| 2,185
|
'''
:since: 10/09/2016
:author: oblivion
'''
import json
import os.path
import logging
from serverhud import ws
from serverhud.ws import logger
import tornado.ioloop
from functools import partial
from watchdog.events import FileSystemEventHandler
class AccessHandler(FileSystemEventHandler):
    """
    Watchdog event handler that tails the HTTPd access log.
    Whenever the watched log file is modified, the access count and the
    last log line are re-read and pushed to every registered callback on
    the tornado IOLoop.
    """
    # Count of created handlers; also used to hand out instance ids.
    instances = 0
    def __init__(self, handler, *args, **kwargs):
        """
        :param handler: callable invoked with a dict like
                        {"accesses": int, "lastline": str} on every update.
        """
        self.logger = logging.getLogger(__name__)
        self.logger.debug("Creating WebSocket file access handler")
        super(AccessHandler, self).__init__(*args, **kwargs)
        self.handlers = [handler]
        self.accesses = 0
        self.lastline = ""
        AccessHandler.instances += 1
        self.logger.debug(str(AccessHandler.instances) + " active handlers")
        self.id = AccessHandler.instances
        # Prime the counters from the current log contents.
        self.read_access_log()
    def handle(self, data):
        """Dispatch *data* to every registered handler callback."""
        self.logger.debug("(" + str(self.id) + ") Handling: " + str(data))
        for handler in self.handlers:
            self.logger.debug("(" + str(self.id) + ") Calling handler: " + str(handler))
            # Schedule on the IOLoop rather than calling directly, since
            # watchdog delivers events on its own observer thread.
            tornado.ioloop.IOLoop.instance().add_callback(partial(handler, data))
    def read_access_log(self):
        """
        Count the lines of the access log and remember its last line.
        Sets ``self.accesses`` to -1 when the log cannot be opened.
        """
        self.logger.debug("(" + str(self.id) + ") Reading log file.")
        # Fix: initialise before the loop so an *empty* log file (e.g.
        # right after logrotate) does not raise UnboundLocalError --
        # enumerate yields nothing for an empty file, leaving the loop
        # variables unbound.
        line_number = 0
        line = ""
        try:
            with open(ws.config.CONFIG['ACCESS_LOG']) as log_file:
                for line_number, line in enumerate(log_file, 1):
                    pass
            if (self.accesses <= line_number):
                self.accesses = line_number
            else:
                # Try compensating when logrotate empties the file.
                self.accesses += line_number
            self.lastline = line
        except FileNotFoundError:
            self.logger.exception("Could not open HTTPd access log.")
            self.accesses = -1
    def on_modified(self, event):
        """Watchdog callback: re-read the log if the watched file changed."""
        filename = os.path.basename(event.src_path)
        self.logger.debug("(" + str(self.id) + ") Access: " + filename)
        if (filename == os.path.basename(ws.config.CONFIG['ACCESS_LOG'])):
            self.read_access_log()
            self.handle({"accesses": self.accesses, "lastline": self.lastline})
    def add_handler(self, handler):
        """Register an additional callback."""
        self.handlers.append(handler)
    def remove_handler(self, handler):
        """
        Unregister *handler* if present.
        NOTE(review): the class-wide ``instances`` counter is decremented
        per removed callback here, while it is incremented per AccessHandler
        instance in __init__ -- confirm this asymmetry is intended.
        """
        if handler in self.handlers:
            self.handlers.remove(handler)
            AccessHandler.instances -= 1
            self.logger.debug(str(AccessHandler.instances) + " active handlers")
|
deadbok/server-hud
|
serverhud/ws/access.py
|
Python
|
gpl-2.0
| 2,509
|
from __future__ import absolute_import, unicode_literals
import os
from django import VERSION as DJANGO_VERSION
from django.utils.translation import ugettext_lazy as _
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for conveniently
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", (_("Media Library"), "media-library"),)),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, _("Top navigation bar"), "pages/menus/dropdown.html"),
# (2, _("Left-hand tree"), "pages/menus/tree.html"),
# (3, _("Footer"), "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# (_("Image"),),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# (_("Another name"),),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the django-modeltranslation will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False
########################
# MAIN DJANGO SETTINGS #
########################
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
LANGUAGES = (
    ('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# ID of the django.contrib.sites Site record this settings module serves.
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Mezzanine's backend extends Django auth with per-site permissions.
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
# Database connection settings; the ENGINE suffix and credentials are
# expected to be filled in (typically via local_settings.py).
DATABASES = {
    "default": {
        # Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
        "ENGINE": "django.db.backends.",
        # DB name or path to database file if using sqlite3.
        "NAME": "",
        # Not used with sqlite3.
        "USER": "",
        # Not used with sqlite3.
        "PASSWORD": "",
        # Set to empty string for localhost. Not used with sqlite3.
        "HOST": "",
        # Set to empty string for default. Not used with sqlite3.
        "PORT": "",
    }
}
#########
# PATHS #
#########
# Full filesystem path to the project.
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
# Name of the settings package (directory containing this file).
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
# Repository root; BASE_DIR is kept as an alias for Django conventions.
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP
# Django template engine configuration. Mezzanine's host_themes loader is
# listed first so per-host theme overrides win over app templates.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [
            os.path.join(PROJECT_ROOT, "templates")
        ],
        "OPTIONS": {
            "context_processors": [
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
                "django.template.context_processors.debug",
                "django.template.context_processors.i18n",
                "django.template.context_processors.static",
                "django.template.context_processors.media",
                "django.template.context_processors.request",
                "django.template.context_processors.tz",
                "mezzanine.conf.context_processors.settings",
                "mezzanine.pages.context_processors.page",
            ],
            "builtins": [
                "mezzanine.template.loader_tags",
            ],
            "loaders": [
                "mezzanine.template.loaders.host_themes.Loader",
                "django.template.loaders.filesystem.Loader",
                "django.template.loaders.app_directories.Loader",
            ]
        },
    },
]
# The "builtins" template option only exists on Django 1.9+.
if DJANGO_VERSION < (1, 9):
    del TEMPLATES[0]["OPTIONS"]["builtins"]
################
# APPLICATIONS #
################
# Django contrib apps followed by the Mezzanine apps; mezzanine.boot must
# come before the other mezzanine apps so EXTRA_MODEL_FIELDS can patch models.
INSTALLED_APPS = (
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.redirects",
    "django.contrib.sessions",
    "django.contrib.sites",
    "django.contrib.sitemaps",
    "django.contrib.staticfiles",
    "mezzanine.boot",
    "mezzanine.conf",
    "mezzanine.core",
    "mezzanine.generic",
    "mezzanine.pages",
    "mezzanine.blog",
    "mezzanine.forms",
    "mezzanine.galleries",
    "mezzanine.twitter",
    # "mezzanine.accounts",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
# UpdateCacheMiddleware must be first and FetchFromCacheMiddleware last so
# the full-page cache wraps everything else.
MIDDLEWARE = (
    "mezzanine.core.middleware.UpdateCacheMiddleware",
    'django.contrib.sessions.middleware.SessionMiddleware',
    # Uncomment if using internationalisation or localisation
    # 'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    "mezzanine.core.request.CurrentRequestMiddleware",
    "mezzanine.core.middleware.RedirectFallbackMiddleware",
    "mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
    "mezzanine.core.middleware.SitePermissionMiddleware",
    "mezzanine.pages.middleware.PageMiddleware",
    "mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Django < 1.10 only understands the old MIDDLEWARE_CLASSES setting.
if DJANGO_VERSION < (1, 10):
    MIDDLEWARE_CLASSES = MIDDLEWARE
    del MIDDLEWARE
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
# (Mezzanine's set_dynamic_settings performs the import check.)
OPTIONAL_APPS = (
    "debug_toolbar",
    "django_extensions",
    "compressor",
    PACKAGE_NAME_FILEBROWSER,
    PACKAGE_NAME_GRAPPELLI,
)
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
# Instead of doing "from .local_settings import *", we use exec so that
# local_settings has full access to everything defined in this module.
# Also force into sys.modules so it's visible to Django's autoreload.
f = os.path.join(PROJECT_APP_PATH, "local_settings.py")
if os.path.exists(f):
    import sys
    import imp
    # Register a real module object under the package namespace so that
    # Django's autoreloader watches local_settings.py for changes.
    module_name = "%s.local_settings" % PROJECT_APP
    module = imp.new_module(module_name)
    module.__file__ = f
    sys.modules[module_name] = module
    # exec (rather than import) so local_settings can read AND override
    # every name already defined in this module's global scope.
    exec(open(f, "rb").read())
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
    from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
    # Mezzanine not installed (e.g. running from the fabfile) -- the
    # static settings above are still importable without it.
    pass
else:
    # Rewrites globals() in place to fill in sensible derived defaults.
    set_dynamic_settings(globals())
|
molokov/mezzanine
|
mezzanine/project_template/project_name/settings.py
|
Python
|
bsd-2-clause
| 11,708
|
# First run the _ImportScript.py so that these don't have to be imported:
#import numpy as np
#import math
#import beatbox
#import os.path
#import healpy as hp
# Fixed seed so mock-data runs are reproducible.
np.random.seed(1)
# declaring initial objects
#You=beatbox.Multiverse(truncated_nmax=2, truncated_nmin=1, truncated_lmax=8, truncated_lmin=2)
# NOTE(review): np, beatbox, os, hp and MOCK are assumed to be provided by
# the _ImportScript.py run mentioned in the header -- confirm before running.
beatbox.You.create_original_Universe()
#make a realization of the sky to be used as a mock:
beatbox.You.initiate_simulated_universe()
# Calculate C_yy from the 100 posterior sample Commander Planck CMB temperature maps
# or load the C_yy matrix if already calculated
if not os.path.isfile('../data/covCyy_lmax%d_lmin%d.txt' % (beatbox.Multiverse.truncated_lmax, beatbox.Multiverse.truncated_lmin)):
    beatbox.You.read_Planck_samples()
    beatbox.You.calculate_covariance_matrix(filename='lmax%d_lmin%d' % (beatbox.Multiverse.truncated_lmax, beatbox.Multiverse.truncated_lmin))
else:
    beatbox.You.load_covariance_matrix(filename='covCyy_lmax%d_lmin%d.txt' % (beatbox.Multiverse.truncated_lmax, beatbox.Multiverse.truncated_lmin))
# Calculate the inverse of the a_y covariance matrix
beatbox.You.calculate_sdv_Cyy_inverse()
# Fake Cyy matrix
#beatbox.You.inv_Cyy = np.diag(1000*np.ones(beatbox.You.inv_Cyy.shape[1]))
#print "this is a fake Cyy inverse"
if MOCK == 1:
    # Use the simulated Universe as mock data
    # First, generate one realization of noise
    noise = beatbox.You.generate_one_realization_of_noise()
    # Add the noise to the realization of the sky map
    datamap = beatbox.You.all_simulated_universes[-1].ay2ayreal_for_inference(beatbox.You.all_simulated_universes[-1].ay)+noise
    datamap = datamap.T
    # Plot the generated T map
    beatbox.You.all_simulated_universes[-1].show_CMB_T_map(from_perspective_of="observer")
    # Plot the Mock Univers with the noise
    MockUniverse=beatbox.Universe()
    MockUniverse.ayreal2ay_for_mapping(datamap)
    MockUniverse.ay2alm(MockUniverse.ay)
    MockUniverse.show_CMB_T_map( from_perspective_of="observer")
#_______________________________________________________________
else:
    # Use a real Commander Planck sample map as the data instead of a mock.
    beatbox.You.all_data_universes = np.append(beatbox.You.all_data_universes, beatbox.Universe())
    beatbox.You.all_data_universes[-1].read_in_CMB_T_map(from_this = 'data/commander_32band_Clsamples100/cmb_Cl_c0001_k00031.fits')
    beatbox.You.all_data_universes[-1].decompose_T_map_into_spherical_harmonics()
    beatbox.You.all_data_universes[-1].alm2ay()
    beatbox.You.all_data_universes[-1].ay2alm(beatbox.You.all_data_universes[-1].ay)
    #beatbox.You.all_data_universes[-1].show_CMB_T_map(from_perspective_of="observer")
    datamap = beatbox.You.all_data_universes[-1].ay2ayreal_for_inference(beatbox.You.all_data_universes[-1].ay)
    # Plot the generated T map
    beatbox.You.all_data_universes[-1].show_CMB_T_map(from_perspective_of="observer")
#________________________________________________________________
# Reconstruct the potential
beatbox.You.solve_for_3D_potential(datamap)
# Give those reconstructed f_n's to one realization of the Universe
We = beatbox.Universe()
# NOTE(review): "reconstrunct_fn" matches the (misspelled) attribute name
# defined in the beatbox package -- do not "correct" it here alone.
We.fn = beatbox.You.reconstrunct_fn
#We.fn = beatbox.You.all_simulated_universes[-1].fn * 0.5
We.transform_3D_potential_into_alm(truncated_nmax=We.truncated_nmax, truncated_nmin=We.truncated_nmin,truncated_lmax=We.truncated_lmax, truncated_lmin=We.truncated_lmin,usedefault=1, fn=1)
We.show_CMB_T_map(from_perspective_of="observer")
if MOCK == 1:
    # Plot the residuals:
    hp.mollview(MockUniverse.Tmap-We.Tmap, rot=(-90,0,0), title="CMB graviational potential fluctuations as seen from inside the LSS, l_max=%d, Tmap diff" % We.truncated_lmax)
    # residuals of the Tmap:
    WeRes = beatbox.Universe()
    WeRes.alm = (beatbox.You.all_simulated_universes[-1].alm-We.alm)
    #WeRes.ay2alm(WeRes.ay)
    WeRes.NSIDE = 256
    WeRes.Tmap = hp.alm2map(WeRes.alm,WeRes.NSIDE)
    hp.mollview(WeRes.Tmap, rot=(-90,0,0),title="CMB graviational potential fluctuations as seen from inside the LSS, l_max=%d, alms diff" % We.truncated_lmax)
    #WeRes.show_CMB_T_map( from_perspective_of="observer")
else:
    # Plot the residuals:
    hp.mollview(beatbox.You.all_data_universes[-1].Tmap-We.Tmap, rot=(-90,0,0),title="CMB graviational potential fluctuations as seen from inside the LSS, l_max=%d, Tmap diff" % We.truncated_lmax)
    # residuals of the Tmap:
    WeRes = beatbox.Universe()
    WeRes.alm = (beatbox.You.all_data_universes[-1].alm-We.alm)
    #WeRes.ay2alm(WeRes.ay)
    WeRes.NSIDE = 256
    WeRes.Tmap = hp.alm2map(WeRes.alm,WeRes.NSIDE)
    hp.mollview(WeRes.Tmap, rot=(-90,0,0),title="CMB graviational potential fluctuations as seen from inside the LSS, l_max=%d, alms diff" % We.truncated_lmax)
    #WeRes.show_CMB_T_map( from_perspective_of="observer")
|
drphilmarshall/Music
|
Scripts/_ReconstructionScript.py
|
Python
|
mit
| 4,797
|
import datetime
import uuid
from sqlalchemy import (
Column, Index, String, Text, DateTime, Integer, ForeignKey, Table, func)
from sqlalchemy.orm import relationship
from sqlalchemy.dialects.mysql import DOUBLE
from porick import app, db
# Moderation lifecycle states stored in Quote.status.
QSTATUS = {'unapproved': 0,
           'approved': 1,
           'disapproved': 2,
           'reported': 3,
           'deleted': 4}
def now():
    """Return the current UTC time (naive datetime).

    NOTE(review): utcnow() is naive and deprecated in newer Pythons;
    switching to aware datetimes would change comparisons against
    existing DB rows -- confirm before migrating.
    """
    return datetime.datetime.utcnow()
class Tag(db.Model):
    """A free-form tag attachable to quotes (see QuoteToTag)."""
    __tablename__ = 'tags'
    __table_args__ = {'mysql_engine': 'InnoDB',
                      'sqlite_autoincrement': True}
    # Composite primary key of (id, tag); id autoincrements on sqlite.
    id = Column(Integer, nullable=False, primary_key=True)
    tag = Column(String(255), nullable=False, primary_key=True)
# Many-to-many association tables (no mapped classes needed).
# Quote <-> Tag
QuoteToTag = Table('quote_to_tag', db.Model.metadata,
    Column('quote_id', Integer, ForeignKey('quotes.id')),
    Column('tag_id', Integer, ForeignKey('tags.id'))
)
# User <-> favourited Quote
Favourites = Table('favourites', db.Model.metadata,
    Column('user_id', Integer, ForeignKey('users.id')),
    Column('quote_id', Integer, ForeignKey('quotes.id'))
)
# User <-> reported Quote, with the report timestamp.
ReportedQuotes = Table('reported_quotes', db.Model.metadata,
    Column('user_id', Integer, ForeignKey('users.id')),
    Column('quote_id', Integer, ForeignKey('quotes.id')),
    Column('time', DateTime, nullable=False, default=now)
)
# User <-> deleted Quote, with the deletion timestamp.
DeletedQuotes = Table('deleted_quotes', db.Model.metadata,
    Column('user_id', Integer, ForeignKey('users.id')),
    Column('quote_id', Integer, ForeignKey('quotes.id')),
    Column('time', DateTime, nullable=False, default=now)
)
class User(db.Model):
    """A registered site user; level 1 marks an administrator."""
    __tablename__ = 'users'
    __table_args__ = {'mysql_engine': 'InnoDB',
                      'sqlite_autoincrement': True}
    id = Column(Integer, nullable=False, primary_key=True)
    username = Column(String(32), nullable=False, unique=True)
    # 60 chars fits a bcrypt hash -- TODO confirm hashing scheme in auth code.
    password = Column(String(60), nullable=False)
    level = Column(Integer, nullable=False, default=0)
    email = Column(String(64), nullable=False)
    favourites = relationship("Quote", secondary=Favourites)
    reported_quotes = relationship("Quote", secondary=ReportedQuotes)
    deleted_quotes = relationship("Quote", secondary=DeletedQuotes)
    @property
    def is_admin(self):
        # Only level 1 is treated as admin; other levels are ordinary users.
        return self.level == 1
# Quote <-> submitting User (used as a single-value relationship on Quote).
QuoteToUser = Table('quote_to_user', db.Model.metadata,
    Column('quote_id', Integer, ForeignKey('quotes.id')),
    Column('user_id', Integer, ForeignKey('users.id'))
)
class VoteToUser(db.Model):
    """Records a single user's vote ('up'/'down') on a quote."""
    __tablename__ = 'vote_to_user'
    quote_id = Column(Integer, ForeignKey('quotes.id'), primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'), primary_key=True)
    # Direction string, e.g. 'up' or 'down' (see Quote.upvotes/downvotes).
    direction = Column(String(4), nullable=False)
    user = relationship("User")
class PasswordReset(db.Model):
    """A time-limited password reset request keyed by a random UUID."""
    __tablename__ = 'password_resets'
    user_id = Column(Integer, ForeignKey('users.id'), primary_key=True)
    # 36 chars: canonical string form of uuid4.
    key = Column(String(36), nullable=False, default=uuid.uuid4)
    created = Column(DateTime, nullable=False, default=now)
    user = relationship("User")
    @property
    def is_valid(self):
        # Valid until PASSWORD_RESET_REQUEST_EXPIRY hours after creation.
        expiry = datetime.timedelta(hours=app.config['PASSWORD_RESET_REQUEST_EXPIRY'])
        return now() < self.created + expiry
class Quote(db.Model):
    """A submitted quote with rating, moderation status and tag/vote links."""
    __tablename__ = 'quotes'
    __table_args__ = {'mysql_engine': 'InnoDB',
                      'sqlite_autoincrement': True}
    id = Column(Integer, nullable=False, primary_key=True)
    body = Column(Text, nullable=False)
    notes = Column(Text, nullable=True)
    # rating = upvotes - downvotes; votes = total cast. Kept denormalised.
    rating = Column(Integer, nullable=False, default=0)
    votes = Column(Integer, nullable=False, default=0)
    submitted = Column(DateTime, nullable=False, default=now)
    # One of the QSTATUS values; defaults to 'unapproved' (0).
    status = Column(Integer, nullable=False, default=0)
    score = Column(DOUBLE(unsigned=True), nullable=False, default=1)
    tags = relationship("Tag", secondary=QuoteToTag)
    submitted_by = relationship("User", secondary=QuoteToUser, uselist=False)
    voters = relationship("VoteToUser")
    @property
    def upvotes(self):
        # Count of 'up' votes among the per-user vote rows.
        return len([v for v in self.voters if v.direction == 'up'])
    @property
    def downvotes(self):
        # Count of 'down' votes among the per-user vote rows.
        return len([v for v in self.voters if v.direction == 'down'])
# ORDER BY clauses for each browse area.
# NOTE(review): 'controversial' divides rating by votes at the SQL level;
# behavior for votes == 0 depends on the database -- verify on MySQL.
AREA_ORDER_MAP = {
    'best': [Quote.rating.desc()],
    'worst': [Quote.rating],
    'random': [func.rand()],
    'controversial': [Quote.votes, Quote.rating/Quote.votes]
}
# Newest-first is the default listing order.
DEFAULT_ORDER = [Quote.submitted.desc()]
|
stesh/porick-flask
|
porick/models.py
|
Python
|
apache-2.0
| 4,414
|
"""
.. versionadded:: 2017.7
Management of Zabbix Valuemap object over Zabbix API.
:codeauthor: Jakub Sliva <jakub.sliva@ultimum.io>
"""
import json
import logging
from salt.exceptions import SaltException
log = logging.getLogger(__name__)
def __virtual__():
    """Expose these states only when the ``zabbix`` execution module,
    including its ``run_query`` function, has been loaded (which implies
    its third-party dependencies imported successfully)."""
    # Guard-clause form: bail out early when the execution module is absent.
    if "zabbix.run_query" not in __salt__:
        return False, "Import zabbix or other needed modules failed."
    return True
def present(name, params, **kwargs):
    """
    Create the Zabbix Value map object or, if it differs, update it according
    to the defined parameters.

    :param name: Zabbix Value map name
    :param params: Definition of the Zabbix Value map
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    .. code-block:: yaml

        zabbix-valuemap-present:
            zabbix_valuemap.present:
                - name: Number mapping
                - params:
                      mappings:
                          - value: 1
                            newvalue: one
                          - value: 2
                            newvalue: two
    """
    zabbix_id_mapper = __salt__["zabbix.get_zabbix_id_mapper"]()
    dry_run = __opts__["test"]
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    # Create input params substituting functions with their results
    params["name"] = name
    input_params = __salt__["zabbix.substitute_params"](params, **kwargs)
    log.info(
        "Zabbix Value map: input params: %s",
        str(json.dumps(input_params, indent=4)),
    )
    search = {"output": "extend", "selectMappings": "extend", "filter": {"name": name}}
    # GET Value map object if exists
    valuemap_get = __salt__["zabbix.run_query"]("valuemap.get", search, **kwargs)
    log.info(
        "Zabbix Value map: valuemap.get result: %s",
        str(json.dumps(valuemap_get, indent=4)),
    )
    # Exactly one match means the object already exists; anything else is
    # treated as "absent".
    existing_obj = (
        __salt__["zabbix.substitute_params"](valuemap_get[0], **kwargs)
        if valuemap_get and len(valuemap_get) == 1
        else False
    )
    if existing_obj:
        diff_params = __salt__["zabbix.compare_params"](input_params, existing_obj)
        # Bug fix: the format string contained a stray "{" before %s,
        # corrupting this log line.
        log.info(
            "Zabbix Value map: input params: %s",
            str(json.dumps(input_params, indent=4)),
        )
        log.info(
            "Zabbix Value map: Object comparison result. Differences: %s",
            str(diff_params),
        )
        if diff_params:
            # Carry the object id over so the update targets the right row.
            diff_params[zabbix_id_mapper["valuemap"]] = existing_obj[
                zabbix_id_mapper["valuemap"]
            ]
            log.info(
                "Zabbix Value map: update params: %s",
                str(json.dumps(diff_params, indent=4)),
            )
            if dry_run:
                ret["result"] = True
                ret["comment"] = 'Zabbix Value map "{}" would be fixed.'.format(name)
                ret["changes"] = {
                    name: {
                        "old": (
                            'Zabbix Value map "{}" differs '
                            "in following parameters: {}".format(name, diff_params)
                        ),
                        "new": (
                            'Zabbix Value map "{}" would correspond to definition.'.format(
                                name
                            )
                        ),
                    }
                }
            else:
                valuemap_update = __salt__["zabbix.run_query"](
                    "valuemap.update", diff_params, **kwargs
                )
                log.info(
                    "Zabbix Value map: valuemap.update result: %s",
                    str(valuemap_update),
                )
                if valuemap_update:
                    ret["result"] = True
                    ret["comment"] = 'Zabbix Value map "{}" updated.'.format(name)
                    ret["changes"] = {
                        name: {
                            "old": (
                                'Zabbix Value map "{}" differed '
                                "in following parameters: {}".format(name, diff_params)
                            ),
                            "new": 'Zabbix Value map "{}" fixed.'.format(name),
                        }
                    }
        else:
            # No differences: nothing to do.
            ret["result"] = True
            ret[
                "comment"
            ] = 'Zabbix Value map "{}" already exists and corresponds to a definition.'.format(
                name
            )
    else:
        if dry_run:
            ret["result"] = True
            ret["comment"] = 'Zabbix Value map "{}" would be created.'.format(name)
            ret["changes"] = {
                name: {
                    "old": 'Zabbix Value map "{}" does not exist.'.format(name),
                    "new": (
                        'Zabbix Value map "{}" would be created '
                        "according definition.".format(name)
                    ),
                }
            }
        else:
            # ACTION.CREATE
            valuemap_create = __salt__["zabbix.run_query"](
                "valuemap.create", input_params, **kwargs
            )
            log.info(
                "Zabbix Value map: valuemap.create result: %s",
                str(valuemap_create),
            )
            if valuemap_create:
                ret["result"] = True
                ret["comment"] = 'Zabbix Value map "{}" created.'.format(name)
                ret["changes"] = {
                    name: {
                        "old": 'Zabbix Value map "{}" did not exist.'.format(name),
                        "new": (
                            'Zabbix Value map "{}" created according definition.'.format(
                                name
                            )
                        ),
                    }
                }
    return ret
def absent(name, **kwargs):
    """
    Makes the Zabbix Value map to be absent (either does not exist or delete it).
    :param name: Zabbix Value map name
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
    .. code-block:: yaml
        zabbix-valuemap-absent:
            zabbix_valuemap.absent:
                - name: Value map name
    """
    dry_run = __opts__["test"]
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    # Resolve the value map's id; a SaltException means it does not exist.
    try:
        object_id = __salt__["zabbix.get_object_id_by_params"](
            "valuemap", {"filter": {"name": name}}, **kwargs
        )
    except SaltException:
        object_id = False
    if not object_id:
        # Already absent: success with no changes.
        ret["result"] = True
        ret["comment"] = 'Zabbix Value map "{}" does not exist.'.format(name)
    else:
        if dry_run:
            # test=True: report what would happen without touching Zabbix.
            ret["result"] = True
            ret["comment"] = 'Zabbix Value map "{}" would be deleted.'.format(name)
            ret["changes"] = {
                name: {
                    "old": 'Zabbix Value map "{}" exists.'.format(name),
                    "new": 'Zabbix Value map "{}" would be deleted.'.format(name),
                }
            }
        else:
            valuemap_delete = __salt__["zabbix.run_query"](
                "valuemap.delete", [object_id], **kwargs
            )
            if valuemap_delete:
                ret["result"] = True
                ret["comment"] = 'Zabbix Value map "{}" deleted.'.format(name)
                ret["changes"] = {
                    name: {
                        "old": 'Zabbix Value map "{}" existed.'.format(name),
                        "new": 'Zabbix Value map "{}" deleted.'.format(name),
                    }
                }
    return ret
|
saltstack/salt
|
salt/states/zabbix_valuemap.py
|
Python
|
apache-2.0
| 8,306
|
from rest_framework.serializers import Serializer
from rest_framework_expander.context import ExpanderContext
from rest_framework_expander.exceptions import ExpanderFieldMissing, ExpanderDepthBreached
from rest_framework_expander.settings import expander_settings
class ExpanderParser(object):
    """
    Parses the expander query parameters.
    """
    # Settings snapshot taken at class-definition time.
    expansion_key = expander_settings.EXPANSION_KEY
    expansion_item_separator = expander_settings.EXPANSION_ITEM_SEPARATOR
    expansion_path_separator = expander_settings.EXPANSION_PATH_SEPARATOR
    fail_on_depth_breached = expander_settings.FAIL_ON_DEPTH_BREACHED
    fail_on_field_missing = expander_settings.FAIL_ON_FIELD_MISSING
    max_depth = expander_settings.MAX_DEPTH
    def __init__(self, adapter):
        # adapter supplies the serializer context (incl. the request) and
        # the root object serializer to resolve expansion paths against.
        self.adapter = adapter
    def parse(self):
        """
        Returns the root expander context by parsing the query parameters.
        """
        root = ExpanderContext(None, None)
        request = self.adapter.context['request']
        param = request.query_params.get(self.expansion_key)
        if not param:
            return root
        # Each item is a dotted path, e.g. "author.profile".
        for item in param.split(self.expansion_item_separator):
            # Split one extra segment so a breach is detectable.
            parts = item.split(self.expansion_path_separator, self.max_depth + 1)
            if self.max_depth < len(parts):
                if self.fail_on_depth_breached:
                    raise ExpanderDepthBreached()
                else:
                    # Silently truncate the path to the allowed depth.
                    parts = parts[:self.max_depth]
            serializer = self.adapter.object_serializer
            node = root
            for part in parts:
                # Only nested Serializer fields can be expanded.
                if not isinstance(serializer.fields.get(part), Serializer):
                    if self.fail_on_field_missing:
                        raise ExpanderFieldMissing()
                    else:
                        break
                serializer = serializer.fields[part]
                # Unwrap ListSerializer-style wrappers to the item serializer.
                while hasattr(serializer, 'child'):
                    serializer = serializer.child
                # Reuse an existing child context so shared prefixes merge.
                if part not in node.children:
                    node.children[part] = ExpanderContext(node, serializer)
                node = node.children[part]
        return root
|
pombredanne/drf-expander
|
rest_framework_expander/parsers.py
|
Python
|
isc
| 2,182
|
#!/usr/bin/python
import urllib2
import sys
import os
import os.path
import tarfile
#-------------------------------------------------------------------------------
# FUNCTION: DOWNLOAD FILE
#-------------------------------------------------------------------------------
def download(url, filename):
print "Fetching " + url
webfile = urllib2.urlopen(url)
diskfile = open(filename,"wb")
diskfile.write(webfile.read())
diskfile.close()
webfile.close()
#-------------------------------------------------------------------------------
# MAIN
#-------------------------------------------------------------------------------
# Local name for the downloaded archive (Python 2 script).
destfile = "external.tar.gz"
#check if file was downloaded before.
if (os.path.isfile(destfile)):
    print "File already downloaded..."
else:
    download("https://github.com/downloads/OpenWebGlobe/WebViewer/external.tar.gz", destfile)
# extract archive in "WebViewer/external"
print "Extracting external files..."
tar = tarfile.open(destfile)
# Archive entries are rooted one level up from this scripts/ directory.
tar.extractall("../")
tar.close()
print "Ok."
print "Done."
|
Hvitnov/WebViewer
|
scripts/download_external.py
|
Python
|
mit
| 1,073
|
import math
from math import sin, cos
import numarray
from OpenGL.GL import *
from flapp.pmath.vec3 import *
from flapp.glDrawUtils import DrawAxis
class GlobeLayer:
    """One level-of-detail layer: a grid of height samples (Python 2)."""
    def __init__(self, latSize, lonSize):
        self.latSize = latSize
        self.lonSize = lonSize
        print "Creating layer:", self.latSize, " x " , self.lonSize
        # NOTE(review): this builds lonSize rows of latSize columns, but
        # consumers index heights[latIndex][lonIndex] -- the axes only agree
        # because every layer is square; confirm before using non-square grids.
        self.heights = numarray.array([[0.0]*self.latSize]*self.lonSize, type=numarray.Float32)
class GlobeData:
    """Container for the globe's LOD layers; layer indices are 1-based."""
    def __init__(self, radius=6377830., heightsPerUnit = 0.000001):
        # NOTE(review): radius and heightsPerUnit are accepted but never
        # used here -- presumably intended to size the layers; confirm.
        self.layer1 = GlobeLayer(4, 4)
        self.layer2 = GlobeLayer(8, 8)
        self.layer3 = GlobeLayer(16, 16)
        # Index 0 is a placeholder so getLayer(1..3) maps to layer1..layer3.
        self.layers = [None, self.layer1, self.layer2, self.layer3]
    def getLayer(self, index):
        return self.layers[index]
class Globe:
    """A spherical globe rendered from layered height samples."""
    def __init__(self, globeData, radius=6377830., heightsPerUnit = 0.000001):
        # Earth radius is 6,377,830 meters. For now make this similarly sized
        self.radius = radius
        self.circumference = 2 * math.pi * self.radius
        self.globeData = globeData
        # lat range is -90 90 --> array[180]
        # lon range is -180 180 --> array[360]
        # self.latSizeLayer1 = int(round(self.circumference / 2.0 * heightsPerUnit))
        # self.lonSizeLayer1 = int(round(self.circumference * heightsPerUnit))
        self.camera = None
        self.drawLayerIndex = 2 # for debug drawing
    def setCamera(self, camera):
        """Remember the camera used when rendering."""
        self.camera = camera
    def _latLonToCartesian(self, lat, lon, alt):
        """Convert latitude/longitude (in degrees) plus altitude to a Vec3.

        Bug fix: math.sin/math.cos expect radians, so the spherical angles
        are converted from degrees first; the original passed degrees
        straight through, producing a scrambled sphere.
        """
        zenith = math.radians(90 - lat)
        azimuth = math.radians(lon)
        x = alt * sin (zenith) * cos (azimuth)
        y = alt * sin (zenith) * sin (azimuth)
        z = alt * cos (zenith)
        return Vec3(x,y,z)
    def drawFull(self, renderer):
        """Draw the debug layer of the globe as latitude triangle strips."""
        DrawAxis(self.radius)
        layer = self.globeData.getLayer(self.drawLayerIndex)
        glPushAttrib(GL_ENABLE_BIT)
        glDisable(GL_LIGHTING)
        glColor3f(0,0,0.5)
        for latIndex in range(layer.latSize-1):
            lat = (float(latIndex) / layer.latSize * 180) - 90.
            # Bug fix: force float division; under Python 2 the original
            # integer division collapsed higherLat to -90 for every strip
            # except the last.
            higherLat = (float(latIndex+1) / layer.latSize * 180) - 90.
            glBegin(GL_TRIANGLE_STRIP)
            for lonIndex in range(4): # range(layer.lonSize):
                lon = (float(lonIndex) / layer.lonSize * 360) + -180.
                cartPos1 = self._latLonToCartesian(lat, lon, self.radius + layer.heights[latIndex][lonIndex])
                glNormal3fv(normalizeV3(cartPos1).asTuple()) # cartPos - vec(0,0,0)
                glVertex3fv(cartPos1.asTuple())
                cartPos2 = self._latLonToCartesian(higherLat, lon, self.radius + layer.heights[latIndex+1][lonIndex])
                glNormal3fv(normalizeV3(cartPos2).asTuple())
                glVertex3fv(cartPos2.asTuple())
            glEnd()
        glPopAttrib()
    draw = drawFull
    def getHeightAtLatLong(self, lat, lon):
        """Look up the layer-1 height sample nearest to (lat, lon) degrees."""
        # lat 0 == equator; +90 north pole; -90 south pole.
        # lon 0 == prime meridian; +/-180 the antipodal meridian.
        # Normalise lat and lon into the 0-1 range.
        normLat = ((lat + 90 ) % 180.) / 180.
        normLon = ((lon + 180 ) % 360.) / 360.
        latIndex = int(round(normLat * self.latSizeLayer1))
        lonIndex = int(round(normLon * self.lonSizeLayer1))
        # Bug fix: index with the computed grid indices; the original
        # computed them and then indexed with the raw lat/lon values.
        # NOTE(review): latSizeLayer1/heightsLayer1 are never assigned in
        # __init__ (commented out) -- this method cannot work until they
        # are restored; confirm intended data source.
        return self.heightsLayer1[latIndex][lonIndex]
    getHeight = getHeightAtLatLong
|
rpwagner/tiled-display
|
flapp/globe.py
|
Python
|
apache-2.0
| 3,743
|
from microservices.queues.client import Client
# Connect to the broker, obtain the 'basic_queue' queue and publish a
# single greeting payload.
client = Client()
q = client.queue('basic_queue')
q.publish({"message": "Hello, world!"})
|
aclef/microservices
|
examples/queue/hello_world_client.py
|
Python
|
mit
| 140
|
#!/usr/bin/python2.7
import numpy as np
import numpy.core.multiarray
import cv2
def to_hsv(img):
    """Return *img* converted from BGR to HSV color space."""
    return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
def detect_white(img):
    """Return the number of pixels in *img* that fall in the "white" HSV
    band (any hue, low saturation, high value)."""
    hsv_img = to_hsv(img)
    sensitivity = 100
    lower_white = np.array([0,0,255-sensitivity], dtype=np.uint8)
    upper_white = np.array([255,sensitivity,255], dtype=np.uint8)
    # Threshold the HSV image to get only white colors
    mask = cv2.inRange(hsv_img, lower_white, upper_white)
    # Dead code removed: the masked image (cv2.bitwise_and) was computed
    # but never used -- only the pixel count matters.
    return cv2.countNonZero(mask)
def detect_red(img):
    """Count the pixels of a BGR image that fall in a low-hue red HSV range.

    Args:
        img: BGR image (as returned by cv2.imread).

    Returns:
        int: number of pixels whose HSV value is in the red range.

    NOTE(review): only hues 0-10 are covered; reds that wrap around to
    hues ~170-180 in OpenCV's HSV space are not counted -- confirm intent.
    """
    hsv_img = to_hsv(img)
    lower_red = np.array([0, 50, 50], dtype=np.uint8)
    upper_red = np.array([10, 255, 255], dtype=np.uint8)
    # Threshold the HSV image to get only red colors
    mask = cv2.inRange(hsv_img, lower_red, upper_red)
    # Cleanup: the unused cv2.bitwise_and result from the original was
    # dropped; only the pixel count is needed.
    return cv2.countNonZero(mask)
def to_gray(img):
    """Convert a BGR image to single-channel grayscale."""
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def color_detect(img):
    """Print the crossing decision for a detected pedestrian-signal crop.

    Prints "CROSS" when white pixels dominate, otherwise "DO NOT CROSS".
    """
    whiteCount = detect_white(img)
    redCount = detect_red(img)
    if whiteCount > redCount:
        print("CROSS")
    else:
        # redCount >= whiteCount is guaranteed when the branch above is
        # false, so the original's redundant elif test is dropped.
        print("DO NOT CROSS")
if __name__ == '__main__':
    # Detection loop: find pedestrian-signal candidates with a Haar
    # cascade, then classify the last match's crop by color.
    cv2.namedWindow("output", cv2.WINDOW_NORMAL)
    cascade = cv2.CascadeClassifier('cascade.xml')
    #cap = cv2.VideoCapture("video1.mp4")
    while 1:
        #ret, img = cap.read()
        # Re-reads the same still image each iteration (video capture
        # path is commented out).
        img = cv2.imread("image2.bmp")
        #gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = to_gray(img)
        matches = cascade.detectMultiScale(gray, 1.3, 5)
        lightFound = False
        for (x,y,w,h) in matches:
            cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
            #cropped = img[y: y + h, x: x + w]
            # Crop the central 80% of the match to avoid border noise.
            cropped = img[y + int(0.1*h): y + int(0.9*h), x + int(0.1*w): x + int(0.9*w)]
            #print("Match found!\n")
            lightFound = True
        cv2.imshow("output",img)
        #cv2.imshow("img",cropped)
        if lightFound == True:
            # NOTE(review): only the crop of the LAST match is classified
            # when several matches are found -- confirm this is intended.
            color_detect(cropped)
        # ESC exits the loop.
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
    #cap.release()
    cv2.destroyAllWindows()
|
lazim2142/carrt_goggles
|
src/crosswalk_detector.py
|
Python
|
mit
| 2,463
|
#!/usr/bin/env python
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import moviedata
class MainWindow(QMainWindow):
    """Main application window showing the movie collection in a table."""

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        # In-memory movie collection backing the table view.
        self.movies = moviedata.MovieContainer()
        self.table = QTableWidget()
        self.setCentralWidget(self.table)

    def updateTable(self, current=None):
        """Rebuild the table widget from self.movies.

        Args:
            current: presumably the movie to re-select after rebuilding
                -- TODO confirm; this view of the file ends before the
                selection logic that would use it.
        """
        self.table.clear()
        self.table.setRowCount(len(self.movies))
        self.table.setColumnCount(5)
        self.table.setHorizontalHeaderLabels(['Title',
                'Year', 'Mins', 'Acquired', 'Notes'])
        self.table.setAlternatingRowColors(True)
        # Read-only table with single whole-row selection.
        self.table.setEditTriggers(QTableWidget.NoEditTriggers)
        self.table.setSelectionBehavior(QTableWidget.SelectRows)
        self.table.setSelectionMode(QTableWidget.SingleSelection)
        selected = None
|
opensvn/python
|
mymovies.py
|
Python
|
gpl-2.0
| 877
|
# encoding: utf-8
# Copyright 2013 maker
# License
"""
Events module forms
"""
from django import forms
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from maker.events.models import Event
from maker.core.models import Object, Location
from maker.core.decorators import preprocess_form
import datetime
# Apply shared form preprocessing (import-time side effects from maker.core).
preprocess_form()
class MassActionForm(forms.Form):
    """
    Mass action form for Reports
    """
    # Single action selector: delete permanently or move to trash.
    delete = forms.ChoiceField(label=_("Delete"), choices=(('', '-----'), ('delete', _('Delete Completely')),
                                                           ('trash', _('Move to Trash'))), required=False)

    # The object the chosen action is applied to (set via kwargs).
    instance = None

    def __init__(self, user, *args, **kwargs):
        # NOTE(review): `user` is accepted but unused here -- presumably
        # kept for signature parity with other maker forms; confirm.
        # Pop 'instance' before the base Form sees the kwargs.
        if 'instance' in kwargs:
            self.instance = kwargs['instance']
            del kwargs['instance']
        super(MassActionForm, self).__init__(*args, **kwargs)

    def save(self, *args, **kwargs):
        "Process form"
        # Apply the selected mass action to the attached instance, if any.
        if self.instance:
            if self.is_valid():
                if self.cleaned_data['delete']:
                    if self.cleaned_data['delete'] == 'delete':
                        self.instance.delete()
                    if self.cleaned_data['delete'] == 'trash':
                        # Soft delete: flag the object as trashed.
                        self.instance.trash = True
                        self.instance.save()
class EventForm(forms.ModelForm):
    """ Event form: create/edit an Event with datepicker widgets and a
    permission-filtered, autocompleting location field. """

    def _set_initial(self, field, value):
        "Sets initial value"
        # NOTE(review): stub -- body intentionally empty in this version.

    def __init__(self, user=None, date=None, hour=None, *args, **kwargs):
        super(EventForm, self ).__init__(*args, **kwargs)

        # Labels and widgets for the basic fields.
        self.fields['name'].label = _('Title')
        self.fields['name'].widget = forms.TextInput(attrs={'size':'30'})
        # Restrict locations to those the user may access ('x' mode) and
        # wire the add-location popup plus the ajax autocomplete lookup.
        self.fields['location'].queryset = Object.filter_permitted(user, Location.objects, mode='x')
        self.fields['location'].widget.attrs.update({'popuplink': reverse('identities_location_add')})
        self.fields['location'].widget.attrs.update({'class': 'autocomplete',
                                                     'callback': reverse('identities_ajax_location_lookup')})
        self.fields['location'].label = _("Location")
        self.fields['start'].label = _("Start")
        self.fields['end'].label = _("End")
        self.fields['details'].label = _("Details")

        # Pre-fill the end date from a "YYYY-MM-DD" string (and optional
        # hour, defaulting to noon) passed in by the calendar views.
        if date:
            rdate = None
            try:
                rdate = datetime.datetime.strptime(date, "%Y-%m-%d")
                if hour:
                    hour = int(hour)
                else:
                    hour = 12
                rdate = datetime.datetime(year=rdate.year,
                                          month=rdate.month,
                                          day=rdate.day,
                                          hour=hour)
                self.fields['end'].initial = rdate
            except ValueError:
                # Malformed date/hour: leave the field without an initial.
                pass

        # Set datepicker
        self.fields['start'].widget.attrs.update({'class': 'datetimepicker'})
        self.fields['end'].widget.attrs.update({'class': 'datetimepicker'})
        # Expose initial values as unix timestamps ('%s') for the JS picker.
        if self.fields['start'].initial:
            self.fields['start'].widget.attrs.update({'initial': self.fields['start'].initial.strftime('%s')})
        if self.fields['end'].initial:
            self.fields['end'].widget.attrs.update({'initial': self.fields['end'].initial.strftime('%s')})
        if 'instance' in kwargs:
            instance = kwargs['instance']
            if instance.start:
                self.fields['start'].widget.attrs.update({'initial': instance.start.strftime('%s')})
            if instance.end:
                self.fields['end'].widget.attrs.update({'initial': instance.end.strftime('%s')})

    def clean_end(self):
        "Make sure end date is greater than start date, when specified"
        # Bug fix: the original wrapped the comparison AND the raise in a
        # bare `except: pass`, which swallowed the ValidationError itself,
        # so the end-before-start check could never fire. Only missing
        # fields (failed earlier validation) are tolerated now.
        try:
            start = self.cleaned_data['start']
            end = self.cleaned_data['end']
        except KeyError:
            return self.cleaned_data.get('end')
        if start and end and end < start:
            raise forms.ValidationError(_("End date can not be before the start date"))
        return end

    class Meta:
        "Event"
        model = Event
        fields = ('name', 'location', 'start', 'end', 'details')
class GoToDateForm(forms.Form):
    """
    Go to date form definition

    A single optional date field used to jump the calendar to a date.
    """

    def __init__(self, date, *args, **kwargs):
        super(GoToDateForm, self).__init__(*args, **kwargs)
        goto_field = forms.DateField(label=_("Go to date"), required = False)
        goto_field.widget.attrs.update({'class': 'datepicker'})
        self.fields['goto'] = goto_field
class FilterForm(forms.Form):
    """
    Filters for Events

    Provides a from/to date range; validation rejects ranges where the
    To date precedes the From date.
    """

    def __init__(self, *args, **kwargs):
        super(FilterForm, self).__init__(*args, **kwargs)
        self.fields['datefrom'] = forms.DateField(label=_("Date From"))
        self.fields['datefrom'].widget.attrs.update({'class': 'datepicker'})
        self.fields['dateto'] = forms.DateField(label=_("Date To"))
        self.fields['dateto'].widget.attrs.update({'class': 'datepicker'})

    def clean_dateto(self):
        "Clean date_to"
        dateto = self.cleaned_data.get('dateto')
        datefrom = self.cleaned_data.get('datefrom')
        # 'datefrom' is absent from cleaned_data when it failed its own
        # validation, so only compare when both dates are present (the
        # original KeyError'd in that case).
        if datefrom is not None and dateto is not None and dateto < datefrom:
            raise forms.ValidationError("From date can not be greater than To date.")
        # Bug fix: Django clean_<field> methods must return the cleaned
        # value; the original returned None, discarding the date.
        return dateto
|
alejo8591/maker
|
events/forms.py
|
Python
|
mit
| 5,514
|
## @file
# Apply fixup to VTF binary image for FFS Raw section
#
# Copyright (c) 2008, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
import sys

# VBox - begin
#filename = sys.argv[1] - VBox changed it to:
# OnlyPaddImage is a bool used as an integer offset (False == 0,
# True == 1) so the positional arguments shift by one when the flag
# '--only-padd-image' is given.
OnlyPaddImage = (sys.argv[1] == '--only-padd-image');
SrcFilename = sys.argv[1 + OnlyPaddImage]
DstFilename = sys.argv[2 + OnlyPaddImage]
if len(sys.argv) > 3 + OnlyPaddImage:
    raise Exception("Too many arguments");
# VBox - end

#if filename.lower().find('ia32') >= 0: - VBox changed it to:
if OnlyPaddImage:
    # Padding-only mode: prepend NOP (0x90) bytes so that
    # len(output) + 4 is a multiple of 8.
    #d = open(sys.argv[1], 'rb').read() - VBox changed it to:
    d = open(SrcFilename, 'rb').read()
    # First compute the rounded-up target size, ...
    c = ((len(d) + 4 + 7) & ~7) - 4
    if c > len(d):
        # ... then reduce it to the number of padding bytes needed.
        # NOTE(review): when len(d) + 4 is already 8-byte aligned, c
        # equals len(d) and is NOT reduced, so a whole image worth of
        # NOPs gets written -- confirm whether this should be `>=`.
        c -= len(d)
    #f = open(sys.argv[1], 'wb') - VBox changed it to:
    f = open(DstFilename, 'wb')
    f.write('\x90' * c)
    f.write(d)
    f.close()
else:
    from struct import pack

    # x86 page-table entry flag bits (names follow the long-mode
    # paging-structure format).
    PAGE_PRESENT = 0x01
    PAGE_READ_WRITE = 0x02
    PAGE_USER_SUPERVISOR = 0x04
    PAGE_WRITE_THROUGH = 0x08
    PAGE_CACHE_DISABLE = 0x010
    PAGE_ACCESSED = 0x020
    PAGE_DIRTY = 0x040
    PAGE_PAT = 0x080
    PAGE_GLOBAL = 0x0100
    PAGE_2M_MBO = 0x080
    PAGE_2M_PAT = 0x01000

    def NopAlign4k(s):
        # Prepend NOPs so the image length becomes a 4 KiB multiple.
        c = ((len(s) + 0xfff) & ~0xfff) - len(s)
        return ('\x90' * c) + s

    def PageDirectoryEntries4GbOf2MbPages(baseAddress):
        # 0x800 (2048) PDEs of 2 MiB pages covering 4 GiB from baseAddress.
        s = ''
        for i in range(0x800):
            i = (
                baseAddress + long(i << 21) +
                PAGE_2M_MBO +
                PAGE_CACHE_DISABLE +
                PAGE_ACCESSED +
                PAGE_DIRTY +
                PAGE_READ_WRITE +
                PAGE_PRESENT
                )
            s += pack('Q', i)
        return s

    def PageDirectoryPointerTable4GbOf2MbPages(pdeBase):
        # 512 PDPT entries; only the first 4 reference distinct PDE pages
        # (min(i, 3)), the remainder alias the 4th page.
        s = ''
        for i in range(0x200):
            i = (
                pdeBase +
                (min(i, 3) << 12) +
                PAGE_CACHE_DISABLE +
                PAGE_ACCESSED +
                PAGE_READ_WRITE +
                PAGE_PRESENT
                )
            s += pack('Q', i)
        return s

    def PageMapLevel4Table4GbOf2MbPages(pdptBase):
        # 512 PML4 entries all pointing at the single PDPT (min(i, 0)).
        s = ''
        for i in range(0x200):
            i = (
                pdptBase +
                (min(i, 0) << 12) +
                PAGE_CACHE_DISABLE +
                PAGE_ACCESSED +
                PAGE_READ_WRITE +
                PAGE_PRESENT
                )
            s += pack('Q', i)
        return s

    def First4GbPageEntries(topAddress):
        # Lay out PDE, PDPT and PML4T back to back just below topAddress.
        PDE = PageDirectoryEntries4GbOf2MbPages(0L)
        pml4tBase = topAddress - 0x1000
        pdptBase = pml4tBase - 0x1000
        pdeBase = pdptBase - len(PDE)
        PDPT = PageDirectoryPointerTable4GbOf2MbPages(pdeBase)
        PML4T = PageMapLevel4Table4GbOf2MbPages(pdptBase)
        return PDE + PDPT + PML4T

    def AlignAndAddPageTables():
        # Read the source image, align it to 4 KiB, prepend the page
        # tables (plus 4 filler NOPs) and write the result.
        #d = open(sys.argv[1], 'rb').read() - VBox changed it to:
        d = open(SrcFilename, 'rb').read()
        code = NopAlign4k(d)
        topAddress = 0x100000000 - len(code)
        d = ('\x90' * 4) + First4GbPageEntries(topAddress) + code
        #f = open(sys.argv[1], 'wb') - VBox changed it to:
        f = open(DstFilename, 'wb')
        f.write(d)
        f.close()

    AlignAndAddPageTables()
|
carmark/vbox
|
src/VBox/Devices/EFI/Firmware/UefiCpuPkg/ResetVector/Vtf0/Tools/FixupForRawSection.py
|
Python
|
gpl-2.0
| 3,932
|
#!/usr/bin/env python
# Copyright (c) 2017,2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from mock import MagicMock
import f5_cccl.exceptions as exceptions
import f5_cccl.resource.ltm.monitor.monitor as target
# Sample monitor configurations, one per supported monitor type,
# indexed by the fixtures below.
api_monitors_cfg = [
    { "name": "myhttp",
      "type": "http",
      "send": "GET /\r\n",
      "recv": "SERVER" },
    { "name": "my_ping",
      "type": "icmp" },
    { "name": "my_tcp",
      "type": "tcp" },
    { "name": "myhttp",
      "type": "https",
      "send": "GET /\r\n",
      "recv": "HTTPS-SERVER" }
]

# Common monitor name/partition used by the tests below.
name = "test_monitor"
partition = "Test"
@pytest.fixture
def http_monitor():
    """Sample HTTP monitor config."""
    return api_monitors_cfg[0]


@pytest.fixture
def icmp_monitor():
    """Sample ICMP (ping) monitor config."""
    return api_monitors_cfg[1]


@pytest.fixture
def tcp_monitor():
    """Sample TCP monitor config."""
    return api_monitors_cfg[2]


@pytest.fixture
def https_monitor():
    """Sample HTTPS monitor config."""
    return api_monitors_cfg[3]
def test__eq__():
    """Two monitors with identical name and partition compare equal."""
    first = target.Monitor(name=name, partition=partition)
    second = target.Monitor(name=name, partition=partition)
    assert first == second
def test__init__():
    """A default Monitor gets standard interval/timeout and no send/recv."""
    monitor = target.Monitor(name=name, partition=partition)
    assert monitor

    data = monitor.data
    assert data
    assert data['interval'] == 5
    assert data['timeout'] == 16
    assert not data.get('send', None)
    assert not data.get('recv', None)
def test__init__xtra_params():
    """Known extra params ('send') are kept; unknown ones ('foo') dropped."""
    properties = {'foo': 'xtra1', 'send': "GET /\r\n"}
    monitor = target.Monitor(name=name,
                             partition=partition,
                             **properties)
    assert monitor

    monitor_data = monitor.data
    assert monitor_data
    assert monitor_data['interval'] == 5
    assert monitor_data['timeout'] == 16
    # Bug fix: the original asserted .get('send', "GET /\r\n"), whose
    # truthy default made the assertion pass even if 'send' was missing.
    assert monitor_data.get('send') == "GET /\r\n"
    assert not monitor_data.get('foo', None)
def test__str__():
    """str() of a Monitor embeds partition, name and the class type."""
    monitor = target.Monitor(name=name,
                             partition=partition)
    # The repr of the Monitor class object itself is interpolated as the type.
    class_str = "<class \'f5_cccl.resource.ltm.monitor.monitor.Monitor\'>"
    assert str(monitor) == (
        "Monitor(partition: Test, name: test_monitor, type: {})".format(
            class_str))
def test_uri_path():
    """_uri_path is abstract on the Monitor base class."""
    monitor = target.Monitor(name=name, partition=partition)
    with pytest.raises(NotImplementedError):
        monitor._uri_path(MagicMock())
def test_invalid_interval_and_timeout():
    """interval > timeout must be rejected for every monitor type."""
    # Bug fix: list() only copied the outer list, so the interval/timeout
    # mutations below leaked into the shared api_monitors_cfg dicts and
    # could poison the other tests in this module. Deep-copy instead.
    import copy
    monitors = copy.deepcopy(api_monitors_cfg)
    for mon in monitors:
        mon['interval'] = 10
        mon['timeout'] = 5
        with pytest.raises(ValueError):
            target.Monitor(partition=partition, **mon)
|
ryan-talley/f5-cccl
|
f5_cccl/resource/ltm/monitor/test/test_monitor.py
|
Python
|
apache-2.0
| 3,156
|
"""TI Common module."""
# standard library
import logging
import re
from typing import Dict, List, Optional
from urllib.parse import quote
# third-party
import jmespath
from requests import Session
# first-party
from tcex.backports import cached_property
from tcex.exit.error_codes import handle_error
# get tcex logger
logger = logging.getLogger('tcex')
class ThreatIntelUtils:
    """Threat Intelligence Common Methods"""

    INDICATOR = 'Indicator'
    GROUP = 'Group'

    def __init__(self, session_tc: Session) -> None:
        """Initialize class properties.

        Args:
            session_tc: An authenticated requests Session pointed at the
                ThreatConnect API.
        """
        self.session_tc = session_tc

        # properties
        self.log = logger
        self._ti = None

    @property
    def resolvable_variables(self) -> Dict:
        """Return a dict of all the supported resolvable variables.

        Each entry in the dict has the variables corresponding url and jmspath for their values
        if available. Some variables do not have this data since no api endpoint is supported.
        """
        return {
            '${API_USERS}': {'url': '/v3/security/users', 'jmspath': 'data[*].userName'},
            '${ARTIFACT_TYPES}': {'url': '/v3/artifactTypes', 'jmspath': 'data[*].name'},
            '${ATTRIBUTES}': {'url': '/v3/attributeTypes', 'jmspath': 'data[*].name'},
            # Special Case since there is no api endpoint (resolved locally
            # from group_types in resolve_variables).
            '${GROUP_TYPES}': None,
            '${INDICATOR_TYPES}': {
                'url': '/v2/types/indicatorTypes',
                'jmspath': 'data.indicatorType[*].name',
            },
            '${OWNERS}': {'url': '/v3/security/owners', 'jmspath': 'data[*].name'},
            '${USER_GROUPS}': {'url': '/v3/security/userGroups', 'jmspath': 'data[*].name'},
            '${USERS}': {'url': '/v3/security/users', 'jmspath': 'data[*].userName'},
            '${WORKFLOW_TEMPLATES}': {
                'url': '/v3/workflowTemplates?tql=active = true',
                'jmspath': 'data[*].name',
            },
        }

    def _association_types(self) -> None:
        """Retrieve Custom Indicator Associations types from the ThreatConnect API."""
        # NOTE(review): stub -- intentionally empty in this version.

    @staticmethod
    def expand_indicators(indicator: str) -> List[str]:
        """Process indicators expanding file hashes/custom indicators into multiple entries.

        Args:
            indicator: A " : " delimited string.

        Returns:
            A list of indicators split on " : " (at most three entries).
        """
        if indicator.count(' : ') > 0:
            # handle all multi-valued indicators types (file hashes and custom indicators)
            indicator_list = []

            iregx_pattern = (
                # group 1 - lazy capture everything to first <space>:<space> or end of line
                r'^(.*?(?=\s\:\s|$))?'
                r'(?:\s\:\s)?'  # remove <space>:<space>
                # group 2 - look behind for <space>:<space>, lazy capture everything
                # to look ahead (optional <space>):<space> or end of line
                r'((?<=\s\:\s).*?(?=(?:\s)?\:\s|$))?'
                r'(?:(?:\s)?\:\s)?'  # remove (optional <space>):<space>
                # group 3 - look behind for <space>:<space>, lazy capture everything
                # to look ahead end of line
                r'((?<=\s\:\s).*?(?=$))?$'
            )
            iregx = re.compile(iregx_pattern)

            indicators = iregx.search(indicator)
            if indicators is not None:
                indicator_list = list(indicators.groups())
        else:
            # handle all single valued indicator types (address, host, etc)
            indicator_list = [indicator]

        return indicator_list

    @property
    def group_types(self) -> List[str]:
        """Return all defined ThreatConnect Group types.

        Returns:
            A list of ThreatConnect Group types.
        """
        # Bug fix: the original returned a dict_keys view, which
        # contradicts the annotation and breaks callers that concatenate
        # it with a list (see validate_intel_types).
        return list(self.group_types_data.keys())

    @property
    def group_types_data(self) -> Dict[str, dict]:
        """Return supported ThreatConnect Group types.

        Maps display name -> API branch and entity names.
        """
        return {
            'Adversary': {'apiBranch': 'adversaries', 'apiEntity': 'adversary'},
            'Attack Pattern': {'apiBranch': 'attackPatterns', 'apiEntity': 'attackPattern'},
            'Campaign': {'apiBranch': 'campaigns', 'apiEntity': 'campaign'},
            'Course of Action': {'apiBranch': 'coursesOfAction', 'apiEntity': 'courseOfAction'},
            'Document': {'apiBranch': 'documents', 'apiEntity': 'document'},
            'Email': {'apiBranch': 'emails', 'apiEntity': 'email'},
            'Event': {'apiBranch': 'events', 'apiEntity': 'event'},
            'Incident': {'apiBranch': 'incidents', 'apiEntity': 'incident'},
            'Intrusion Set': {'apiBranch': 'intrusionSets', 'apiEntity': 'intrusionSet'},
            'Malware': {'apiBranch': 'malware', 'apiEntity': 'malware'},
            'Report': {'apiBranch': 'reports', 'apiEntity': 'report'},
            'Signature': {'apiBranch': 'signatures', 'apiEntity': 'signature'},
            'Tactic': {'apiBranch': 'tactics', 'apiEntity': 'tactic'},
            'Task': {'apiBranch': 'tasks', 'apiEntity': 'task'},
            'Threat': {'apiBranch': 'threats', 'apiEntity': 'threat'},
            'Tool': {'apiBranch': 'tools', 'apiEntity': 'tool'},
            'Vulnerability': {'apiBranch': 'vulnerabilities', 'apiEntity': 'vulnerability'},
        }

    def get_type_from_api_entity(self, api_entity: str) -> Optional[str]:
        """Return the object type as a string given an api entity value.

        Annotation corrected from ``dict``: the comparison below is
        against the string stored under 'apiEntity'.

        Args:
            api_entity: The apiEntity value (e.g. 'adversary').

        Returns:
            str, None: The type name (e.g. 'Adversary') or None.
        """
        merged = self.group_types_data.copy()
        merged.update(self.indicator_types_data)
        for key, value in merged.items():
            if value.get('apiEntity') == api_entity:
                return key
        return None

    @cached_property
    def indicator_associations_types_data(self) -> Dict[str, dict]:
        """Return ThreatConnect associations type data.

        Retrieve the data from the API if it hasn't already been retrieved.

        Returns:
            (dict): A dictionary of ThreatConnect associations types keyed by name.
        """
        _association_types = {}

        # Dynamically create custom indicator class
        r = self.session_tc.get('/v2/types/associationTypes')

        # check for bad status code and response that is not JSON
        if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
            self.log.warning(
                'feature=threat-intel-common, event=association-types-download, status=failure'
            )
            return _association_types

        # validate successful API results
        data: dict = r.json()
        if data.get('status') != 'Success':
            self.log.warning(
                'feature=threat-intel-common, event=association-types-download, status=failure'
            )
            return _association_types

        # TODO: [low] make an Model for this data and return model?
        try:
            # Association Type Name is not a unique value at this time, but should be.
            # Bug fix: the original accumulated into a local dict and then
            # returned the (always empty) _association_types.
            for association in data.get('data', {}).get('associationType', []):
                _association_types[association.get('name')] = association
        except Exception as e:
            handle_error(code=200, message_values=[e])

        return _association_types

    @cached_property
    def indicator_types(self) -> List[str]:
        """Return ThreatConnect Indicator types.

        Retrieve the data from the API if it hasn't already been retrieved.

        Returns:
            (list): A list of ThreatConnect Indicator types.
        """
        return list(self.indicator_types_data.keys())

    def resolve_variables(self, inputs: List[str]) -> List[str]:
        """Resolve all of the provided inputs if appropriate.

        Args:
            inputs: A list of strings to resolve if string matches a entry in the
                "resolved_variables" dict.
        """
        resolved_inputs = []
        for input_ in inputs:
            if not input_:
                # Preserve positions: falsy inputs resolve to None.
                resolved_inputs.append(None)
                continue

            if input_.strip() not in self.resolvable_variables:
                # Not a resolvable variable; pass through unchanged.
                resolved_inputs.append(input_)
                continue

            input_ = input_.strip()
            if input_ == '${GROUP_TYPES}':
                # Group types have no API endpoint; resolve locally.
                for type_ in self.group_types:
                    resolved_inputs.append(type_)
                continue

            resolvable_variable_details = self.resolvable_variables[input_]
            r = self.session_tc.get(resolvable_variable_details.get('url'))
            if not r.ok:
                raise RuntimeError('Could not retrieve indicator types from ThreatConnect API.')
            json_ = r.json()

            # No TQL filter to filter out API users during REST call so have to do it manually here.
            if input_ in ['${API_USERS}', '${USERS}']:
                temp_data = []
                for item in json_.get('data', []):
                    if item.get('role') == 'Api User' and input_ == '${API_USERS}':
                        temp_data.append(item)
                    elif item.get('role') != 'Api User' and input_ == '${USERS}':
                        temp_data.append(item)
                json_['data'] = temp_data

            for item in jmespath.search(resolvable_variable_details.get('jmspath'), json_):
                resolved_inputs.append(str(item))

        return resolved_inputs

    @cached_property
    def indicator_types_data(self) -> Dict[str, dict]:
        """Return ThreatConnect indicator types data.

        Retrieve the data from the API if it hasn't already been retrieved.

        Returns:
            (dict): A dictionary of ThreatConnect Indicator data keyed by type name.
        """
        # retrieve data from API
        r = self.session_tc.get('/v2/types/indicatorTypes')

        # TODO: [low] use handle error instead
        if not r.ok:
            raise RuntimeError('Could not retrieve indicator types from ThreatConnect API.')

        _indicator_types = {}
        for itd in r.json().get('data', {}).get('indicatorType'):
            _indicator_types[itd.get('name')] = itd
        return _indicator_types

    @staticmethod
    def safe_indicator(indicator: str) -> str:
        """Format indicator value for safe HTTP request.

        Args:
            indicator: Indicator to URL Encode

        Returns:
            (str): The urlencoded string
        """
        if indicator is not None:
            indicator = quote(indicator, safe='~')
        return indicator

    @staticmethod
    def safe_rt(resource_type: str, lower: Optional[bool] = False) -> str:
        """Format the Resource Type.

        Takes Custom Indicator types with a space character and return a *safe* string.

        (e.g. *User Agent* is converted to User_Agent or user_agent.)

        Args:
            resource_type: The resource type to format.
            lower: Return type in all lower case

        Returns:
            (str): The formatted resource type.
        """
        if resource_type is not None:
            resource_type = resource_type.replace(' ', '_')
            if lower:
                resource_type = resource_type.lower()
        return resource_type

    @staticmethod
    def safe_group_name(
        group_name: str, group_max_length: Optional[int] = 100, ellipsis: Optional[bool] = True
    ) -> str:
        """Truncate group name to match limit breaking on space and optionally add an ellipsis.

        .. note:: Currently the ThreatConnect group name limit is 100 characters.

        Args:
            group_name: The raw group name to be truncated.
            group_max_length: The max length of the group name.
            ellipsis: If true the truncated name will have ' ...' appended.

        Returns:
            (str): The truncated group name with optional ellipsis.
        """
        ellipsis_value = ''
        if ellipsis:
            ellipsis_value = ' ...'

        if group_name is not None and len(group_name) > group_max_length:
            # split name by spaces and reset group_name
            group_name_array = group_name.split(' ')
            group_name = ''

            # Re-assemble word by word until the next word (plus the
            # ellipsis) would exceed the limit.
            for word in group_name_array:
                word = f'{word}'
                if (len(group_name) + len(word) + len(ellipsis_value)) >= group_max_length:
                    group_name = f'{group_name}{ellipsis_value}'
                    group_name = group_name.lstrip(' ')
                    break
                group_name += f' {word}'
        return group_name

    @staticmethod
    def safe_tag(tag: str) -> str:
        """Encode and truncate tag to match limit (128 characters) of ThreatConnect API.

        Args:
            tag: The tag to be truncated

        Returns:
            (str): The truncated and quoted tag.
        """
        if tag is not None:
            # Truncate first, then URL-encode the remainder.
            tag = quote(tag[:128], safe='~')
        return tag

    @staticmethod
    def safe_url(url: str) -> str:
        """Encode value for safe HTTP request.

        Args:
            url (str): The string to URL Encode.

        Returns:
            (str): The urlencoded string.
        """
        if url is not None:
            url: str = quote(url, safe='~')
        return url

    @property
    def victim_asset_types(self) -> list:
        """Return all defined ThreatConnect Asset types.

        Returns:
            (list): A list of ThreatConnect Asset types.
        """
        return [
            'EmailAddress',
            'SocialNetwork',
            'NetworkAccount',
            'WebSite',
            'Phone',
        ]

    def validate_intel_types(self, types_list: List[str], restrict_to: Optional[str] = None):
        """Validate that Types contained in types_list are valid Intel Types.

        :param types_list: list of types to validate. An exception is raised if a member of this
            list is not a valid intel type.
        :param restrict_to: If None, types_list will be validated to contain valid Indicator
            or Group types. If not None, this value must be set to self.INDICATOR or self.GROUP,
            and types_list will be validated to contain only Indicators or Groups depending on this
            value.
        """
        if restrict_to is not None and restrict_to not in [self.INDICATOR, self.GROUP]:
            raise ValueError(f'restrict_to must be {self.INDICATOR} or {self.GROUP}')

        if not isinstance(types_list, list):
            # Bug fix: original message read "must be a a list".
            raise TypeError('types_list must be a list.')

        if restrict_to:
            valid_types = self.group_types if restrict_to == self.GROUP else self.indicator_types
        else:
            # Coerce both operands to lists: group_types historically
            # returned a dict view, which made this concat raise TypeError.
            valid_types = list(self.indicator_types) + list(self.group_types)

        if any(_type not in valid_types for _type in types_list):
            raise ValueError(f'Type list "{types_list}" contains invalid Intel Type.')
|
ThreatConnect-Inc/tcex
|
tcex/api/tc/utils/threat_intel_utils.py
|
Python
|
apache-2.0
| 14,975
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.core import exceptions
from telemetry import decorators
from telemetry.page import action_runner as action_runner_module
class TestNotSupportedOnPlatformError(Exception):
  """PageTest Exception raised when a required feature is unavailable.

  The feature required to run the test could be part of the platform,
  hardware configuration, or browser.
  """


class MultiTabTestAppCrashError(Exception):
  """PageTest Exception raised after browser or tab crash for multi-tab tests.

  Used to abort the test rather than try to recover from an unknown state.
  """


class Failure(Exception):
  """PageTest Exception raised when an undesired but designed-for problem."""


class MeasurementFailure(Failure):
  """PageTest Exception raised when page measurement fails.

  Specialization of Failure for problems that occur during the
  measurement phase (the original docstring was a copy of Failure's).
  """
class PageTest(object):
  """A class styled on unittest.TestCase for creating page-specific tests.

  Test should override ValidateAndMeasurePage to perform test
  validation and page measurement as necessary.

     class BodyChildElementMeasurement(PageTest):
       def ValidateAndMeasurePage(self, page, tab, results):
         body_child_count = tab.EvaluateJavaScript(
             'document.body.children.length')
         results.AddValue(scalar.ScalarValue(
             page, 'body_children', 'count', body_child_count))

  Args:
    discard_first_run: Discard the first run of this page. This is
        usually used with page_repeat and pageset_repeat options.
  """

  def __init__(self,
               needs_browser_restart_after_each_page=False,
               discard_first_result=False,
               clear_cache_before_each_run=False):
    super(PageTest, self).__init__()

    self.options = None
    self._needs_browser_restart_after_each_page = (
        needs_browser_restart_after_each_page)
    self._discard_first_result = discard_first_result
    self._clear_cache_before_each_run = clear_cache_before_each_run
    self._close_tabs_before_run = True

  @property
  def is_multi_tab_test(self):
    """Returns True if the test opens multiple tabs.

    If the test overrides TabForPage, it is deemed a multi-tab test.
    Multi-tab tests do not retry after tab or browser crashes, whereas
    single-tab tests do. That is because the state of multi-tab tests
    (e.g., how many tabs are open, etc.) is unknown after crashes.
    """
    # Identity check on the underlying function detects an override.
    return self.TabForPage.__func__ is not PageTest.TabForPage.__func__

  @property
  def discard_first_result(self):
    """When set to True, the first run of the test is discarded.  This is
    useful for cases where it's desirable to have some test resource cached so
    the first run of the test can warm things up. """
    return self._discard_first_result

  @discard_first_result.setter
  def discard_first_result(self, discard):
    self._discard_first_result = discard

  @property
  def clear_cache_before_each_run(self):
    """When set to True, the browser's disk and memory cache will be cleared
    before each run."""
    return self._clear_cache_before_each_run

  @property
  def close_tabs_before_run(self):
    """When set to True, all tabs are closed before running the test for the
    first time."""
    return self._close_tabs_before_run

  @close_tabs_before_run.setter
  def close_tabs_before_run(self, close_tabs):
    self._close_tabs_before_run = close_tabs

  def RestartBrowserBeforeEachPage(self):
    """ Should the browser be restarted for the page?

    This returns true if the test needs to unconditionally restart the
    browser for each page. It may be called before the browser is started.
    """
    return self._needs_browser_restart_after_each_page

  def StopBrowserAfterPage(self, browser, page):  # pylint: disable=W0613
    """Should the browser be stopped after the page is run?

    This is called after a page is run to decide whether the browser needs to
    be stopped to clean up its state. If it is stopped, then it will be
    restarted to run the next page.

    A test that overrides this can look at both the page and the browser to
    decide whether it needs to stop the browser.
    """
    return False

  def CustomizeBrowserOptions(self, options):
    """Override to add test-specific options to the BrowserOptions object"""

  def WillStartBrowser(self, platform):
    """Override to manipulate the browser environment before it launches."""

  def DidStartBrowser(self, browser):
    """Override to customize the browser right after it has launched."""

  def SetOptions(self, options):
    """Sets the BrowserFinderOptions instance to use."""
    self.options = options

  def WillNavigateToPage(self, page, tab):
    """Override to do operations before the page is navigated, notably Telemetry
    will already have performed the following operations on the browser before
    calling this function:
    * Ensure only one tab is open.
    * Call WaitForDocumentReadyStateToComplete on the tab."""

  def DidNavigateToPage(self, page, tab):
    """Override to do operations right after the page is navigated and after
    all waiting for completion has occurred."""

  @decorators.Deprecated(
      2015, 8, 10, 'This hook is deprecated. Please use DidRunPage hook instead')
  def CleanUpAfterPage(self, page, tab):
    """Called after the test run method was run, even if it failed."""

  def DidRunPage(self, platform):
    """Called after the test run method was run, even if it failed."""

  def TabForPage(self, page, browser):   # pylint: disable=W0613
    """Override to select a different tab for the page.  For instance, to
    create a new tab for every page, return browser.tabs.New()."""
    try:
      return browser.tabs[0]
    # The tab may have gone away in some case, so we create a new tab and retry
    # (See crbug.com/496280)
    except exceptions.DevtoolsTargetCrashException as e:
      logging.error('Tab may have crashed: %s' % str(e))
      browser.tabs.New()
      # See comment in shared_page_state.WillRunStory for why this waiting
      # is needed.
      browser.tabs[0].WaitForDocumentReadyStateToBeComplete()
      return browser.tabs[0]

  def ValidateAndMeasurePage(self, page, tab, results):
    """Override to check test assertions and perform measurement.

    When adding measurement results, call results.AddValue(...) for
    each result. Raise an exception or add a failure.FailureValue on
    failure. page_test.py also provides several base exception classes
    to use.

    Prefer metric value names that are in accordance with python
    variable style. e.g., metric_name. The name 'url' must not be used.

    Put together:
      def ValidateAndMeasurePage(self, page, tab, results):
        res = tab.EvaluateJavaScript('2+2')
        if res != 4:
          raise Exception('Oh, wow.')
        results.AddValue(scalar.ScalarValue(
            page, 'two_plus_two', 'count', res))

    Args:
      page: A telemetry.page.Page instance.
      tab: A telemetry.core.Tab instance.
      results: A telemetry.results.PageTestResults instance.
    """
    raise NotImplementedError

  # Deprecated: do not use this hook. (crbug.com/470147)
  def RunNavigateSteps(self, page, tab):
    """Navigates the tab to the page URL attribute.

    Runs the 'navigate_steps' page attribute as a compound action.
    """
    action_runner = action_runner_module.ActionRunner(
        tab, skip_waits=page.skip_waits)
    page.RunNavigateSteps(action_runner)
|
lihui7115/ChromiumGStreamerBackend
|
tools/telemetry/telemetry/page/page_test.py
|
Python
|
bsd-3-clause
| 7,601
|
# -*- coding: utf-8 -*-
from django import forms
from django_countries.fields import countries
from api.models import Event
from api.models.events import EventTheme, EventAudience
class AddEventForm(forms.ModelForm):
    """Public submission form for a new :class:`Event`.

    Collects the event's descriptive fields plus a contact email for the
    submitter (``user_email``); ``clean()`` enforces that the event's end
    date does not precede its start date.
    """

    # Error messages shared by the submitter's contact email field.
    email_errors = {
        'required': u'Please enter a valid email, so we can contact you in case of questions.',
        'invalid': u'Can you please check if this is a valid email address?',
    }

    user_email = forms.EmailField(
        required=True,
        label='Your contact email',
        widget=forms.EmailInput(
            attrs={
                "class": "form-control"}),
        error_messages=email_errors,
    )

    class Meta:
        model = Event
        fields = ['title',
                  'organizer',
                  'description',
                  'geoposition',
                  'location',
                  'country',
                  'start_date',
                  'end_date',
                  'event_url',
                  'contact_person',
                  'audience',
                  'theme',
                  'picture',
                  'tags',
                  'user_email',
                  ]
        widgets = {
            'title': forms.TextInput(attrs={"class": "form-control",
                                            "placeholder": "How do you call this event?"}),
            'organizer': forms.TextInput(attrs={"class": "form-control",
                                                "placeholder": "Who is organizing this event?"}),
            'description': forms.Textarea(attrs={"class": "form-control",
                                                 "placeholder": "Tell us a bit about your event."}),
            'location': forms.TextInput(attrs={"id": "autocomplete", "class": "form-control",
                                               "placeholder": "Where will the event be taking place?", }),
            'start_date': forms.TextInput(attrs={"id": "id_datepicker_start", "class": "form-control",
                                                 "autocomplete": "off",
                                                 "placeholder": "When does it start?"}),
            'end_date': forms.TextInput(attrs={"id": "id_datepicker_end", "class": "form-control",
                                               "autocomplete": "off", "placeholder": "When does it end?"}),
            'event_url': forms.TextInput(attrs={"class": "form-control",
                                                "placeholder": "Do you have a website with more information?"}),
            'contact_person': forms.TextInput(attrs={"class": "form-control",
                                                     "placeholder": "Would you like to display a contact email?"}),
            'audience': forms.CheckboxSelectMultiple(),
            'theme': forms.CheckboxSelectMultiple(),
            'tags': forms.TextInput(attrs={"class": "form-control",
                                           "placeholder": "example: Python, Django, Slovenia"}),
        }
        labels = {
            'title': 'Event title',
            'organizer': 'Organizer(s)',
            'description': 'Description',
            'location': 'Location',
            'country': 'Country',
            'start_date': 'Start date',
            'end_date': 'End date',
            'event_url': 'Website',
            'contact_person': 'Contact',
            'picture': 'Image',
            'audience': 'Audience',
            'theme': 'Theme',
            'tags': 'Tags',
        }
        help_texts = {
            'start_date': "Example: YYYY/MM/DD h:m",
            'end_date': "Example: YYYY/MM/DD h:m",
            'audience': "Who is the event for?",
            'theme': "Which aspect of coding will your event cover?",
            'picture': 'Larger images will be resized to 256 x 512 pixels. Maximum upload size is 256 x 1024.',
        }
        error_messages = {
            'title': {
                'required': u'Please enter a title for your event.',
                'invalid': u'Can you please check if this is a valid title?',
            },
            'organizer': {
                'required': u'Please enter an organizer.',
                'invalid': u'Can you please check if this is a valid organizer?',
            },
            'description': {
                'required': u'Please write a short description of what the event is about.',
                'invalid': u'Please check if the description only contains regular text.',
            },
            'geoposition': {
                'invalid': u'Please enter valid coordinates.'
            },
            'location': {
                'required': u'Please enter a location.',
                'invalid': u'Please check your event\'s location',
            },
            'country': {
                'required': u'The event\'s location should be in Europe.',
                'invalid': u'Make sure the event country is written in English.',
            },
            'start_date': {
                'required': u'Please enter a valid date and time (example: 2014-10-22 18:00).',
                'invalid': u'This doesn\'t seem like a valid date and time. Can you check, please?',
            },
            'end_date': {
                'required': u'Please enter a valid date and time (example: 2014-10-22 20:00).',
                'invalid': u'This doesn\'t seem like a valid date and time. Can you check, please?',
            },
            'event_url': {
                'invalid': u'Please enter a valid web address starting with http://',
            },
            'contact_person': {
                'invalid': u'Please enter a valid email address.',
            },
            'picture': {
                'invalid': u'Make sure this is a valid image.',
            },
            'audience': {
                'required': u'If unsure, choose Other and provide more information in the description.',
                'invalid': u'Choose one or more of the provided choices.',
            },
            'theme': {
                'required': u'If unsure, choose Other and provide more information in the description.',
                'invalid': u'Choose one or more of the provided choices.',
            },
            'tags': {
                'required': u'Please type in some tags to categorize the event',
                'invalid': u'Please enter tags in plain text, separated by commas.',
            },
        }

    def clean(self):
        """Cross-field validation: the event must end after it starts."""
        cleaned_data = super(AddEventForm, self).clean()
        start_date = cleaned_data.get('start_date')
        end_date = cleaned_data.get('end_date')
        if start_date and end_date and end_date < start_date:
            # add_error() both records the message and removes 'end_date'
            # from cleaned_data; assigning to self._errors directly (as
            # before) left the invalid value in cleaned_data.
            self.add_error('end_date',
                           u'End date should be greater than start date.')
        return cleaned_data
class ReportEventForm(forms.ModelForm):
    """Form used by organizers to report the outcome of an event."""

    error_css_class = 'has-error'
    required_css_class = 'required'

    class Meta:
        model = Event
        fields = [
            'participants_count',
            'average_participant_age',
            'percentage_of_females',
            'codeweek_for_all_participation_code',
            'name_for_certificate',
        ]
        help_texts = {
            'codeweek_for_all_participation_code': "Optional. You can put here your Codeweek4All challenge code, if you got one. If you're not participating, just ignore this field.",
            'percentage_of_females': "Required. Please provide a rough estimate, even if you don't have exact data. A number from 0 to 100.",
            'average_participant_age': "Required. Please provide a rough estimate, even if you don't have exact data. A number greater than 0.",
            'participants_count': "Required. Please provide a rough estimate, even if you don't have exact data. A number greater than 0.",
            'name_for_certificate': "Required. Change this to the name of the event organizer who will be issued a certificate of participation in Code Week. Please use only ASCII (Latin) letters. Letters with accents, umlauts and the like are not supported.",
        }

    def __init__(self, *args, **kwargs):
        super(ReportEventForm, self).__init__(*args, **kwargs)
        # Every field is mandatory by default...
        for field in self.fields.values():
            field.required = True
        # ...except the optional Codeweek4All challenge code.
        self.fields['codeweek_for_all_participation_code'].required = False
class SearchEventForm(forms.Form):
    """Filter form for the event search page.

    Filters by free-text query, past/upcoming, country, theme and
    audience. Initial values may be injected through the constructor
    keywords ``country_code``, ``past_events``, ``search``, ``theme``
    and ``audience``.
    """

    # NOTE(review): this mutates django_countries' private ``_countries``
    # list at class-definition time; it works but relies on a private API
    # and should eventually move to the COUNTRIES_OVERRIDE setting.
    countries._countries.append(Event.CUSTOM_COUNTRY_ENTRIES[0])
    countries._countries.append(Event.CUSTOM_COUNTRY_ENTRIES[1])
    # XK is temp code for Kosovo; remove from COUNTRIES_OVERRIDE when
    # Kosovo gets its own ISO code and is added to django-countries
    if 'Kosovo' not in list(dict(countries._countries).values()):
        countries._countries.append((u'XK', u'Kosovo'))

    q = forms.CharField(
        required=False,
        widget=forms.TextInput(
            attrs={
                'placeholder': 'Search for event name or tag',
                'class': 'form-control'}))
    past = forms.ChoiceField(
        label='Include past events',
        required=False,
        choices=(('yes', 'yes'), ('no', 'no')),
        widget=forms.RadioSelect(attrs={'class': 'search-form-element'}),
    )
    country = forms.ChoiceField(
        label='Select country',
        required=False,
        widget=forms.Select(attrs={'class': 'search-form-element'}),
        choices=countries
    )
    theme = forms.ModelMultipleChoiceField(
        queryset=EventTheme.objects.all(),
        label='Theme',
        required=False,
        widget=forms.CheckboxSelectMultiple(
            attrs={
                'class': 'search-form-element'}),
    )
    audience = forms.ModelMultipleChoiceField(
        queryset=EventAudience.objects.all(),
        label='Audience',
        required=False,
        widget=forms.CheckboxSelectMultiple(
            attrs={
                'class': 'search-form-element'}),
    )

    def __init__(self, *args, **kwargs):
        # Pop our custom keywords before handing the rest to forms.Form.
        country_code = kwargs.pop('country_code', None)
        # Default to None (like the other keywords) instead of raising
        # KeyError when the caller does not supply 'past_events'.
        past_events = kwargs.pop('past_events', None)
        search_query = kwargs.pop('search', None)
        theme = kwargs.pop('theme', None)
        audience = kwargs.pop('audience', None)
        super(SearchEventForm, self).__init__(*args, **kwargs)
        if country_code:
            self.fields['country'].initial = country_code
        self.fields['past'].initial = past_events
        if search_query:
            self.fields['q'].initial = search_query
        if theme:
            self.fields['theme'].initial = theme
        if audience:
            self.fields['audience'].initial = audience
|
codeeu/coding-events
|
web/forms/event_form.py
|
Python
|
mit
| 10,889
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# cleanexe - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
# Show detailed tracebacks in the browser when this CGI script fails.
cgitb.enable()
from shared.functionality.cleanexe import main
from shared.cgiscriptstub import run_cgi_script
# Delegate request parsing/response handling to the shared CGI wrapper,
# which invokes the cleanexe functionality's main() entry point.
run_cgi_script(main)
|
heromod/migrid
|
mig/cgi-bin/cleanexe.py
|
Python
|
gpl-2.0
| 1,104
|
"""
Some analysis of the gift file:
we begin by creating a list of the volumes of each gift, output to the file 'presentVolumes.csv'
as a file with each volume on a separate line
input is the presents.csv file from the challenge website
Tim Dellinger
Kaggle Santa Challenge, Christmas 2013
"""
from numpy import product, array
import csv
# first we read presents.csv into memory as a list of lists: [ [length1,width1,height1], ... ]
# I like the list instead of a dictionary since it preserves the original order
giftDimensions = []
with open('presents.csv', 'rb') as f:
f.readline() # skip the header
fcsv = csv.reader(f)
for row in fcsv:
giftDimensions.append(map(int,row[1:])) # strip the first element (presentID), keep the dimensions as ints
volumes = []
# volume is simply length * width * height)
for dims in giftDimensions:
volumes.append( product(dims))
with open('betterPresentVolumes.csv','w') as f:
for v in volumes:
f.write (str(v)+'\n') # each line is a volume, then a newline character
# if you have a peek at the output file, the last volume will have a newline
# so you'll see a blank line afterwards. This is actually useful since all
# lines will end with a newline character (including the last), so the last
# line won't have to be treated differently than the precending lines
|
timdellinger/kaggle-xmas-2013
|
calculateVolumes.py
|
Python
|
unlicense
| 1,516
|
# -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
m = pytest.importorskip("pybind11_tests.virtual_functions")
from pybind11_tests import ConstructorStats # noqa: E402
def test_override(capture, msg):
    """Python overrides of C++ virtuals are dispatched through C++ callers,
    and calling an un-overridden pure virtual raises RuntimeError."""
    class ExtendedExampleVirt(m.ExampleVirt):
        def __init__(self, state):
            super(ExtendedExampleVirt, self).__init__(state + 1)
            self.data = "Hello world"

        def run(self, value):
            print('ExtendedExampleVirt::run(%i), calling parent..' % value)
            return super(ExtendedExampleVirt, self).run(value + 1)

        def run_bool(self):
            print('ExtendedExampleVirt::run_bool()')
            return False

        def get_string1(self):
            return "override1"

        def pure_virtual(self):
            print('ExtendedExampleVirt::pure_virtual(): %s' % self.data)

    class ExtendedExampleVirt2(ExtendedExampleVirt):
        def __init__(self, state):
            super(ExtendedExampleVirt2, self).__init__(state + 1)

        def get_string2(self):
            return "override2"

    # Plain C++ instance: base implementation runs, pure virtual raises.
    ex12 = m.ExampleVirt(10)
    with capture:
        assert m.runExampleVirt(ex12, 20) == 30
    assert capture == """
        Original implementation of ExampleVirt::run(state=10, value=20, str1=default1, str2=default2)
    """  # noqa: E501 line too long

    with pytest.raises(RuntimeError) as excinfo:
        m.runExampleVirtVirtual(ex12)
    assert msg(excinfo.value) == 'Tried to call pure virtual function "ExampleVirt::pure_virtual"'

    # Python subclass: overrides are hit, and super() reaches the C++ base.
    ex12p = ExtendedExampleVirt(10)
    with capture:
        assert m.runExampleVirt(ex12p, 20) == 32
    assert capture == """
        ExtendedExampleVirt::run(20), calling parent..
        Original implementation of ExampleVirt::run(state=11, value=21, str1=override1, str2=default2)
    """  # noqa: E501 line too long
    with capture:
        assert m.runExampleVirtBool(ex12p) is False
    assert capture == "ExtendedExampleVirt::run_bool()"
    with capture:
        m.runExampleVirtVirtual(ex12p)
    assert capture == "ExtendedExampleVirt::pure_virtual(): Hello world"

    # Second-level Python subclass: both overrides visible from C++.
    ex12p2 = ExtendedExampleVirt2(15)
    with capture:
        assert m.runExampleVirt(ex12p2, 50) == 68
    assert capture == """
        ExtendedExampleVirt::run(50), calling parent..
        Original implementation of ExampleVirt::run(state=17, value=51, str1=override1, str2=override2)
    """  # noqa: E501 line too long

    # Lifetime bookkeeping: all three instances freed, no copies made.
    cstats = ConstructorStats.get(m.ExampleVirt)
    assert cstats.alive() == 3
    del ex12, ex12p, ex12p2
    assert cstats.alive() == 0
    assert cstats.values() == ['10', '11', '17']
    assert cstats.copy_constructions == 0
    assert cstats.move_constructions >= 0
def test_alias_delay_initialization1(capture):
    """`A` only initializes its trampoline class when we inherit from it

    If we just create and use an A instance directly, the trampoline
    initialization is bypassed and we only initialize an A() instead
    (for performance reasons).
    """
    class B(m.A):
        def __init__(self):
            super(B, self).__init__()

        def f(self):
            print("In python f()")

    # C++ version: no trampoline output (PyA is never constructed).
    with capture:
        a = m.A()
        m.call_f(a)
        del a
        pytest.gc_collect()
    assert capture == "A.f()"

    # Python version: the PyA trampoline wraps the call and the Python
    # override is dispatched.
    with capture:
        b = B()
        m.call_f(b)
        del b
        pytest.gc_collect()
    assert capture == """
        PyA.PyA()
        PyA.f()
        In python f()
        PyA.~PyA()
    """
def test_alias_delay_initialization2(capture):
    """`A2`, unlike the above, is configured to always initialize the alias

    While the extra initialization and extra class layer has small virtual
    dispatch performance penalty, it also allows us to do more things with
    the trampoline class such as defining local variables and performing
    construction/destruction.
    """
    class B2(m.A2):
        def __init__(self):
            super(B2, self).__init__()

        def f(self):
            print("In python B2.f()")

    # No python subclass version: the PyA2 trampoline is constructed even
    # for plain C++ instances (both constructor overloads).
    with capture:
        a2 = m.A2()
        m.call_f(a2)
        del a2
        pytest.gc_collect()
        a3 = m.A2(1)
        m.call_f(a3)
        del a3
        pytest.gc_collect()
    assert capture == """
        PyA2.PyA2()
        PyA2.f()
        A2.f()
        PyA2.~PyA2()
        PyA2.PyA2()
        PyA2.f()
        A2.f()
        PyA2.~PyA2()
    """

    # Python subclass version: the Python override replaces A2.f().
    with capture:
        b2 = B2()
        m.call_f(b2)
        del b2
        pytest.gc_collect()
    assert capture == """
        PyA2.PyA2()
        PyA2.f()
        In python B2.f()
        PyA2.~PyA2()
    """
# PyPy: Reference count > 1 causes call with noncopyable instance
# to fail in ncv1.print_nc()
@pytest.mark.xfail("env.PYPY")
@pytest.mark.skipif(not hasattr(m, "NCVirt"), reason="NCVirt test broken on ICPC")
def test_move_support():
    """Virtual-method return values are moved (not copied) back into C++
    when the Python side holds no extra reference to them."""
    class NCVirtExt(m.NCVirt):
        def get_noncopyable(self, a, b):
            # Constructs and returns a new instance:
            nc = m.NonCopyable(a * a, b * b)
            return nc

        def get_movable(self, a, b):
            # Return a referenced copy
            self.movable = m.Movable(a, b)
            return self.movable

    class NCVirtExt2(m.NCVirt):
        def get_noncopyable(self, a, b):
            # Keep a reference: this is going to throw an exception
            self.nc = m.NonCopyable(a, b)
            return self.nc

        def get_movable(self, a, b):
            # Return a new instance without storing it
            return m.Movable(a, b)

    ncv1 = NCVirtExt()
    assert ncv1.print_nc(2, 3) == "36"
    assert ncv1.print_movable(4, 5) == "9"
    ncv2 = NCVirtExt2()
    assert ncv2.print_movable(7, 7) == "14"
    # Don't check the exception message here because it differs under debug/non-debug mode
    with pytest.raises(RuntimeError):
        ncv2.print_nc(9, 9)

    # Lifetime checks: the noncopyable result was never copied; the
    # movable result was copied exactly once (the referenced case).
    nc_stats = ConstructorStats.get(m.NonCopyable)
    mv_stats = ConstructorStats.get(m.Movable)
    assert nc_stats.alive() == 1
    assert mv_stats.alive() == 1
    del ncv1, ncv2
    assert nc_stats.alive() == 0
    assert mv_stats.alive() == 0
    assert nc_stats.values() == ['4', '9', '9', '9']
    assert mv_stats.values() == ['4', '5', '7', '7']
    assert nc_stats.copy_constructions == 0
    assert mv_stats.copy_constructions == 1
    assert nc_stats.move_constructions >= 0
    assert mv_stats.move_constructions >= 0
def test_dispatch_issue(msg):
    """#159: virtual function dispatch has problems with similar-named functions"""
    class PyClass1(m.DispatchIssue):
        def dispatch(self):
            return "Yay.."

    class PyClass2(m.DispatchIssue):
        def dispatch(self):
            # Re-entrancy check: calling the (pure virtual) base dispatch
            # must still raise, even from inside another override...
            with pytest.raises(RuntimeError) as excinfo:
                super(PyClass2, self).dispatch()
            assert msg(excinfo.value) == 'Tried to call pure virtual function "Base::dispatch"'

            # ...and dispatching through a *different* instance must hit
            # that instance's override, not this one.
            p = PyClass1()
            return m.dispatch_issue_go(p)

    b = PyClass2()
    assert m.dispatch_issue_go(b) == "Yay.."
def test_override_ref():
    """#392/397: overriding reference-returning functions"""
    o = m.OverrideTest("asdf")

    # Not allowed (see associated .cpp comment)
    # i = o.str_ref()
    # assert o.str_ref() == "asdf"
    assert o.str_value() == "asdf"

    assert o.A_value().value == "hi"
    # A_ref() returns a reference, so mutations through the returned
    # object are visible on subsequent reads.
    a = o.A_ref()
    assert a.value == "hi"
    a.value = "bye"
    assert a.value == "bye"
def test_inherited_virtuals():
    """Overrides work through multi-level C++ hierarchies, for both the
    repeated-trampoline (``*_Repeat``) and templated-trampoline (``*_Tpl``)
    binding styles, including overrides at each Python subclass level."""
    class AR(m.A_Repeat):
        def unlucky_number(self):
            return 99

    class AT(m.A_Tpl):
        def unlucky_number(self):
            return 999

    obj = AR()
    assert obj.say_something(3) == "hihihi"
    assert obj.unlucky_number() == 99
    assert obj.say_everything() == "hi 99"

    obj = AT()
    assert obj.say_something(3) == "hihihi"
    assert obj.unlucky_number() == 999
    assert obj.say_everything() == "hi 999"

    # Plain C++ B/C instances use their own C++ overrides.
    for obj in [m.B_Repeat(), m.B_Tpl()]:
        assert obj.say_something(3) == "B says hi 3 times"
        assert obj.unlucky_number() == 13
        assert obj.lucky_number() == 7.0
        assert obj.say_everything() == "B says hi 1 times 13"

    for obj in [m.C_Repeat(), m.C_Tpl()]:
        assert obj.say_something(3) == "B says hi 3 times"
        assert obj.unlucky_number() == 4444
        assert obj.lucky_number() == 888.0
        assert obj.say_everything() == "B says hi 1 times 4444"

    # Python override on top of C (repeat style), chaining to the C++ base.
    class CR(m.C_Repeat):
        def lucky_number(self):
            return m.C_Repeat.lucky_number(self) + 1.25

    obj = CR()
    assert obj.say_something(3) == "B says hi 3 times"
    assert obj.unlucky_number() == 4444
    assert obj.lucky_number() == 889.25
    assert obj.say_everything() == "B says hi 1 times 4444"

    # Subclass with no overrides behaves exactly like the C++ class.
    class CT(m.C_Tpl):
        pass

    obj = CT()
    assert obj.say_something(3) == "B says hi 3 times"
    assert obj.unlucky_number() == 4444
    assert obj.lucky_number() == 888.0
    assert obj.say_everything() == "B says hi 1 times 4444"

    # Second-level Python subclasses compose their overrides.
    class CCR(CR):
        def lucky_number(self):
            return CR.lucky_number(self) * 10

    obj = CCR()
    assert obj.say_something(3) == "B says hi 3 times"
    assert obj.unlucky_number() == 4444
    assert obj.lucky_number() == 8892.5
    assert obj.say_everything() == "B says hi 1 times 4444"

    class CCT(CT):
        def lucky_number(self):
            return CT.lucky_number(self) * 1000

    obj = CCT()
    assert obj.say_something(3) == "B says hi 3 times"
    assert obj.unlucky_number() == 4444
    assert obj.lucky_number() == 888000.0
    assert obj.say_everything() == "B says hi 1 times 4444"

    class DR(m.D_Repeat):
        def unlucky_number(self):
            return 123

        def lucky_number(self):
            return 42.0

    for obj in [m.D_Repeat(), m.D_Tpl()]:
        assert obj.say_something(3) == "B says hi 3 times"
        assert obj.unlucky_number() == 4444
        assert obj.lucky_number() == 888.0
        assert obj.say_everything() == "B says hi 1 times 4444"

    obj = DR()
    assert obj.say_something(3) == "B says hi 3 times"
    assert obj.unlucky_number() == 123
    assert obj.lucky_number() == 42.0
    assert obj.say_everything() == "B says hi 1 times 123"

    # Overriding all three virtuals at once (templated style).
    class DT(m.D_Tpl):
        def say_something(self, times):
            return "DT says:" + (' quack' * times)

        def unlucky_number(self):
            return 1234

        def lucky_number(self):
            return -4.25

    obj = DT()
    assert obj.say_something(3) == "DT says: quack quack quack"
    assert obj.unlucky_number() == 1234
    assert obj.lucky_number() == -4.25
    assert obj.say_everything() == "DT says: quack 1234"

    class DT2(DT):
        def say_something(self, times):
            return "DT2: " + ('QUACK' * times)

        def unlucky_number(self):
            return -3

    # Python subclass of a templated trampoline base.
    class BT(m.B_Tpl):
        def say_something(self, times):
            return "BT" * times

        def unlucky_number(self):
            return -7

        def lucky_number(self):
            return -1.375

    obj = BT()
    assert obj.say_something(3) == "BTBTBT"
    assert obj.unlucky_number() == -7
    assert obj.lucky_number() == -1.375
    assert obj.say_everything() == "BT -7"
def test_issue_1454():
    # Fix issue #1454 (crash when acquiring/releasing GIL on another thread in Python 2.7)
    # Both calls simply must not crash; there is no return value to check.
    m.test_gil()
    m.test_gil_from_thread()
|
google-research/motion_imitation
|
third_party/unitree_legged_sdk/pybind11/tests/test_virtual_functions.py
|
Python
|
apache-2.0
| 11,417
|
#!/usr/bin/env python
#
# mock_data.py: utility classes and functions for generating test data
# Copyright (C) University of Manchester 2014 Peter Briggs
#
########################################################################
#
# mock_utils.py
#
#########################################################################
"""mock_utils
Utility classes and functions for generating test data and directory
structures, intended to be used in unit tests.
* TestUtils provides a set of class methods for creating arbitrary files,
directories and symlinks.
* BaseExampleDir can be used as a base class for building and interrogating
disposable example directories.
* ExampleDirScooby, ExampleDirSpiders and ExampleDirLanguages are
classes that can be used to make instant test directory structures of
varying complexity.
"""
#######################################################################
# Import modules that this module depends on
#######################################################################
import os
import tempfile
import shutil
import copy
import bcftbx.Md5sum
#######################################################################
# Modules constants
#######################################################################
class TestUtils:
    """Utilities to help with setting up/running tests etc

    All helpers are classmethods operating on paths; they return the
    path of whatever they created so callers can chain/clean up.
    """
    @classmethod
    def make_file(cls, filename, text, basedir=None):
        """Create test file containing *text*.

        If *filename* is None a unique temporary file is created
        (honouring *basedir* if supplied); otherwise *filename* is used,
        joined onto *basedir* when given. Returns the file's path.
        """
        if filename is None:
            # mkstemp returns (fd, path); close the fd explicitly —
            # the original code discarded it, leaking a descriptor.
            fd, filename = tempfile.mkstemp(dir=basedir, text=True)
            os.close(fd)
        elif basedir is not None:
            filename = os.path.join(basedir, filename)
        # Context manager guarantees the handle is closed even if the
        # write raises.
        with open(filename, 'w') as fp:
            fp.write(text)
        return filename

    @classmethod
    def make_dir(cls, dirname=None):
        """Create test directory

        Makes *dirname*, or a fresh temporary directory when it is None.
        Returns the directory path.
        """
        if dirname is None:
            dirname = tempfile.mkdtemp()
        else:
            os.mkdir(dirname)
        return dirname

    @classmethod
    def make_sub_dir(cls, basedir, dirname):
        """Create a subdirectory in an existing directory

        Intermediate directories are created as needed; an already
        existing subdirectory is left untouched. Returns its path.
        """
        subdir = os.path.join(basedir, dirname)
        if not os.path.exists(subdir):
            os.makedirs(subdir)
        return subdir

    @classmethod
    def make_sym_link(cls, target, link_name=None, basedir=None):
        """Create a symbolic link pointing at *target*.

        *link_name* defaults to the basename of *target*; *basedir*, if
        given, is prepended to the link name. Returns the link's path.
        """
        if link_name is None:
            link_name = os.path.basename(target)
        if basedir is not None:
            link_name = os.path.join(basedir, link_name)
        os.symlink(target, link_name)
        return link_name

    @classmethod
    def remove_dir(cls, dirname):
        """Remove directory *dirname* and everything beneath it."""
        shutil.rmtree(dirname)
# Base class for making test data directories
class BaseExampleDir:
    """Base class for making test data directories

    Create, populate and destroy directory with test data.

    Typically you should subclass the BaseExampleDir and then
    use method calls to add files, links and directories. For
    example:

    >>> class MyExampleDir(BaseExampleDir):
    >>>    def __init__(self):
    >>>       BaseExampleDir.__init__(self)
    >>>       self.add_file("Test","This is a test file")
    >>>

    Then to use in a program or unit test method:

    >>> d = MyExampleDir()
    >>> d.create_directory()
    >>> # do stuff
    >>> d.delete_directory()

    There are also methods to get information about the directory
    structure, for example:

    >>> files = d.filelist() # List all files and links, return full paths

    Paths are implicitly relative to the base directory, which
    is a temporary directory created automatically when the
    'create_directory' method is invoked.
    """
    def __init__(self):
        # Base directory on disk; None until create_directory() runs.
        self.dirn = None
        # Relative paths of regular files, with their text content.
        self.files = []
        self.content = {}
        # Relative paths of symlinks, with their targets.
        self.links = []
        self.targets = {}
        # Relative paths of every (sub)directory, parents included.
        self.dirs = []
    def add_dir(self,path):
        """Register directory *path*, including all its parent dirs.

        If the example dir already exists on disk, the directory is
        also created immediately.
        """
        if path not in self.dirs:
            dirpath = path
            # Walk up the path, registering each ancestor exactly once.
            while dirpath:
                if dirpath not in self.dirs:
                    self.dirs.append(dirpath)
                dirpath = os.path.dirname(dirpath)
            if self.dirn is not None:
                TestUtils.make_sub_dir(self.dirn,path)
    def add_file(self,path,content=''):
        """Register file *path* with text *content*.

        Parent directories are registered implicitly; if the example dir
        already exists on disk, the file is created immediately.
        """
        self.files.append(path)
        self.content[path] = content
        self.add_dir(os.path.dirname(path))
        if self.dirn is not None:
            TestUtils.make_file(path,self.content[path],basedir=self.dirn)
    def add_link(self,path,target=None):
        """Register symlink *path* pointing at *target*.

        *target* may be relative to the link's own directory (and may be
        dangling); if the example dir already exists on disk, the link
        is created immediately.
        """
        self.links.append(path)
        self.targets[path] = target
        self.add_dir(os.path.dirname(path))
        if self.dirn is not None:
            TestUtils.make_sym_link(self.targets[path],path,basedir=self.dirn)
    def path(self,filen):
        """Return *filen* prefixed with the base directory (if created)."""
        if self.dirn is not None:
            return os.path.join(self.dirn,filen)
        else:
            return filen
    def filelist(self,include_links=True,include_dirs=False,full_path=True):
        """Return a sorted list of the registered files.

        include_links: also include symlinks, except links that resolve
          to directories (those need include_dirs=True as well).
        include_dirs: also include the registered directories.
        full_path: prefix entries with the base directory.
        """
        filelist = copy.copy(self.files)
        if include_links:
            for link in self.links:
                # Resolve the link target relative to the link's own
                # directory to decide whether it points at a directory.
                resolved_link = os.path.join(os.path.dirname(self.path(link)),
                                             os.readlink(self.path(link)))
                if not os.path.isdir(resolved_link) or include_dirs:
                    filelist.append(link)
        if include_dirs:
            filelist.extend(copy.copy(self.dirs))
        filelist.sort()
        if full_path:
            filelist = [self.path(x) for x in filelist]
        return filelist
    def create_directory(self,dirname=None):
        """Materialise everything registered so far into a directory.

        Uses *dirname*, or a fresh temporary directory when None.
        Returns the base directory path.
        """
        self.dirn = TestUtils.make_dir(dirname=dirname)
        # Directories first, then files, then links (links may point at
        # files/dirs created in the earlier passes).
        for d in self.dirs:
            TestUtils.make_sub_dir(self.dirn,d)
        for f in self.files:
            TestUtils.make_file(f,self.content[f],basedir=self.dirn)
        for l in self.links:
            TestUtils.make_sym_link(self.targets[l],l,basedir=self.dirn)
        return self.dirn
    def delete_directory(self):
        """Remove the on-disk directory (if any) and forget its path."""
        if self.dirn is not None:
            shutil.rmtree(self.dirn)
            self.dirn = None
    def checksum_for_file(self,path):
        """Return the MD5 checksum of the (relative) file *path*."""
        return bcftbx.Md5sum.md5sum(self.path(path))
class ExampleDirScooby(BaseExampleDir):
    """Small test data directory with files and subdirectories
    """
    def __init__(self):
        BaseExampleDir.__init__(self)
        # One file at the top level, one per character subdirectory.
        self.add_file("test.txt", "This is a test file")
        for character in ("fred", "daphne", "thelma", "shaggy", "scooby"):
            self.add_file("%s/test.txt" % character,
                          "This is another test file")
class ExampleDirSpiders(BaseExampleDir):
    """Small test data directory with files and links
    """
    def __init__(self):
        BaseExampleDir.__init__(self)
        # Files (two with identical content, for duplicate detection)
        rhyme = "The itsy-bitsy spider\nClimbed up the chimney spout"
        self.add_file("spider.txt", rhyme)
        self.add_file("spider2.txt", rhyme)
        self.add_file("fly.txt",
                      "'Come into my parlour'\nSaid the spider to the fly")
        # Working symbolic links, followed by two deliberately broken
        # ones (their target doesn't exist).
        for link, target in (("itsy-bitsy.txt", "spider.txt"),
                             ("itsy-bitsy2.txt", "spider2.txt"),
                             ("broken.txt", "missing.txt"),
                             ("broken2.txt", "missing.txt")):
            self.add_link(link, target)
class ExampleDirLanguages(BaseExampleDir):
    """Test data directory with more complicated structure and linking
    """
    def __init__(self):
        BaseExampleDir.__init__(self)
        # Phrase files, some nested several directories deep.
        for path, text in (("hello", "Hello!"),
                           ("goodbye", "Goodbye!"),
                           ("spanish/hola", "Hello!"),
                           ("spanish/adios", "Goodbye!"),
                           ("welsh/north_wales/maen_ddrwg_gen_i", "Sorry!"),
                           ("welsh/south_wales/maen_flin_da_fi", "Sorry!"),
                           ("icelandic/takk_fyrir", "Thank you!")):
            self.add_file(path, text)
        # Links to files at the top level...
        self.add_link("hi", "hello")
        self.add_link("bye", "goodbye")
        # ...and a directory of links pointing back up at the language
        # directories (relative targets).
        self.add_dir("countries")
        for link, target in (("countries/spain", "../spanish"),
                             ("countries/north_wales", "../welsh/north_wales"),
                             ("countries/south_wales", "../welsh/south_wales"),
                             ("countries/iceland", "../icelandic")):
            self.add_link(link, target)
|
fw1121/genomics
|
bcftbx/test/mock_data.py
|
Python
|
artistic-2.0
| 8,581
|
import requests
from requests import Session
from requests.exceptions import HTTPError
try:
from urllib.parse import urlencode, quote
except:
from urllib import urlencode, quote
import json
import math
from random import uniform
import time
from collections import OrderedDict
from sseclient import SSEClient
import threading
import socket
from oauth2client.service_account import ServiceAccountCredentials
from gcloud import storage
from requests.packages.urllib3.contrib.appengine import is_appengine_sandbox
from requests_toolbelt.adapters import appengine
import python_jwt as jwt
from Crypto.PublicKey import RSA
import datetime
def initialize_app(config):
    """Create a Firebase app from a configuration dict.

    config must contain at least 'apiKey', 'authDomain', 'databaseURL'
    and 'storageBucket' (see Firebase.__init__); 'serviceAccount' is
    optional.
    """
    return Firebase(config)
class Firebase:
    """ Firebase Interface """
    def __init__(self, config):
        self.api_key = config["apiKey"]
        self.auth_domain = config["authDomain"]
        self.database_url = config["databaseURL"]
        self.storage_bucket = config["storageBucket"]
        # Service-account credentials; stays None for API-key-only apps.
        self.credentials = None
        # Shared HTTP session (connection pooling + retry adapters below).
        self.requests = requests.Session()
        if config.get("serviceAccount"):
            scopes = [
                'https://www.googleapis.com/auth/firebase.database',
                'https://www.googleapis.com/auth/userinfo.email',
                "https://www.googleapis.com/auth/cloud-platform"
            ]
            # "serviceAccount" may be a path to a JSON keyfile (str) or
            # an already-parsed keyfile dict.
            service_account_type = type(config["serviceAccount"])
            if service_account_type is str:
                self.credentials = ServiceAccountCredentials.from_json_keyfile_name(config["serviceAccount"], scopes)
            if service_account_type is dict:
                self.credentials = ServiceAccountCredentials.from_json_keyfile_dict(config["serviceAccount"], scopes)
        if is_appengine_sandbox():
            # Fix error in standard GAE environment
            # is releated to https://github.com/kennethreitz/requests/issues/3187
            # ProtocolError('Connection aborted.', error(13, 'Permission denied'))
            adapter = appengine.AppEngineAdapter(max_retries=3)
        else:
            adapter = requests.adapters.HTTPAdapter(max_retries=3)
        # Mount the retrying adapter for both schemes on the shared session.
        for scheme in ('http://', 'https://'):
            self.requests.mount(scheme, adapter)

    def auth(self):
        # Authentication service bound to this app's key/session/credentials.
        return Auth(self.api_key, self.requests, self.credentials)

    def database(self):
        # Realtime Database service for this app.
        return Database(self.credentials, self.api_key, self.database_url, self.requests)

    def storage(self):
        # Cloud Storage service for this app.
        return Storage(self.credentials, self.storage_bucket, self.requests)
class Auth:
""" Authentication Service """
def __init__(self, api_key, requests, credentials):
self.api_key = api_key
self.current_user = None
self.requests = requests
self.credentials = credentials
def sign_in_with_email_and_password(self, email, password):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"email": email, "password": password, "returnSecureToken": True})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
self.current_user = request_object.json()
return request_object.json()
def create_custom_token(self, uid, additional_claims=None):
service_account_email = self.credentials.service_account_email
private_key = RSA.importKey(self.credentials._private_key_pkcs8_pem)
payload = {
"iss": service_account_email,
"sub": service_account_email,
"aud": "https://identitytoolkit.googleapis.com/google.identity.identitytoolkit.v1.IdentityToolkit",
"uid": uid
}
if additional_claims:
payload["claims"] = additional_claims
exp = datetime.timedelta(minutes=60)
return jwt.generate_jwt(payload, private_key, "RS256", exp)
def sign_in_with_custom_token(self, token):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyCustomToken?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"returnSecureToken": True, "token": token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def refresh(self, refresh_token):
request_ref = "https://securetoken.googleapis.com/v1/token?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"grantType": "refresh_token", "refreshToken": refresh_token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
request_object_json = request_object.json()
# handle weirdly formatted response
user = {
"userId": request_object_json["user_id"],
"idToken": request_object_json["id_token"],
"refreshToken": request_object_json["refresh_token"]
}
return user
def get_account_info(self, id_token):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getAccountInfo?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"idToken": id_token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def send_email_verification(self, id_token):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getOobConfirmationCode?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"requestType": "VERIFY_EMAIL", "idToken": id_token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def send_password_reset_email(self, email):
    """Ask Firebase to e-mail a password-reset link to *email*."""
    endpoint = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getOobConfirmationCode?key={0}".format(self.api_key)
    body = json.dumps({"requestType": "PASSWORD_RESET", "email": email})
    response = requests.post(
        endpoint,
        headers={"content-type": "application/json; charset=UTF-8"},
        data=body,
    )
    raise_detailed_error(response)
    return response.json()
def verify_password_reset_code(self, reset_code, new_password):
    """Complete a password reset using the out-of-band code from the e-mail."""
    endpoint = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/resetPassword?key={0}".format(self.api_key)
    body = json.dumps({"oobCode": reset_code, "newPassword": new_password})
    response = requests.post(
        endpoint,
        headers={"content-type": "application/json; charset=UTF-8"},
        data=body,
    )
    raise_detailed_error(response)
    return response.json()
def create_user_with_email_and_password(self, email, password):
    """Sign up a new email/password user and return the token payload."""
    endpoint = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/signupNewUser?key={0}".format(self.api_key)
    body = json.dumps({"email": email, "password": password, "returnSecureToken": True})
    response = requests.post(
        endpoint,
        headers={"content-type": "application/json; charset=UTF-8"},
        data=body,
    )
    raise_detailed_error(response)
    return response.json()
class Database:
    """Firebase Realtime Database service.

    The query methods (``order_by_*``, ``start_at``, ``child``, ...) form a
    fluent builder: they accumulate state in ``self.path`` and
    ``self.build_query`` and return ``self``.  The request methods (``get``,
    ``push``, ``set``, ``update``, ``remove``, ``stream``) consume and reset
    that state, so each chained expression serves exactly one request.
    """
    def __init__(self, credentials, api_key, database_url, requests):
        # Normalise the base URL so paths can be appended directly.
        if not database_url.endswith('/'):
            url = ''.join([database_url, '/'])
        else:
            url = database_url
        self.credentials = credentials
        self.api_key = api_key
        self.database_url = url
        self.requests = requests
        # Fluent-builder state; reset after each request is built.
        self.path = ""
        self.build_query = {}
        # generate_key() bookkeeping: keeps push IDs unique and ordered when
        # several keys are generated within the same millisecond.
        self.last_push_time = 0
        self.last_rand_chars = []
    def order_by_key(self):
        """Order query results by child key."""
        self.build_query["orderBy"] = "$key"
        return self
    def order_by_value(self):
        """Order query results by child value."""
        self.build_query["orderBy"] = "$value"
        return self
    def order_by_child(self, order):
        """Order query results by the given child field name."""
        self.build_query["orderBy"] = order
        return self
    def start_at(self, start):
        """Limit results to entries >= *start* (requires an orderBy)."""
        self.build_query["startAt"] = start
        return self
    def end_at(self, end):
        """Limit results to entries <= *end* (requires an orderBy)."""
        self.build_query["endAt"] = end
        return self
    def equal_to(self, equal):
        """Limit results to entries equal to *equal* (requires an orderBy)."""
        self.build_query["equalTo"] = equal
        return self
    def limit_to_first(self, limit_first):
        """Return only the first *limit_first* results."""
        self.build_query["limitToFirst"] = limit_first
        return self
    def limit_to_last(self, limit_last):
        """Return only the last *limit_last* results."""
        self.build_query["limitToLast"] = limit_last
        return self
    def shallow(self):
        """Request only the top-level keys at the location."""
        self.build_query["shallow"] = True
        return self
    def child(self, *args):
        """Append path segments (joined with '/') to the current path."""
        new_path = "/".join([str(arg) for arg in args])
        if self.path:
            self.path += "/{}".format(new_path)
        else:
            # Strip a leading slash on the first segment.
            if new_path.startswith("/"):
                new_path = new_path[1:]
            self.path = new_path
        return self
    def build_request_url(self, token):
        """Turn the accumulated path/query state into a REST URL, then reset
        the builder state for the next query."""
        parameters = {}
        if token:
            parameters['auth'] = token
        for param in list(self.build_query):
            # The REST API expects strings quoted and booleans lower-cased.
            if type(self.build_query[param]) is str:
                parameters[param] = quote('"' + self.build_query[param] + '"')
            elif type(self.build_query[param]) is bool:
                parameters[param] = "true" if self.build_query[param] else "false"
            else:
                parameters[param] = self.build_query[param]
        # reset path and build_query for next query
        request_ref = '{0}{1}.json?{2}'.format(self.database_url, self.path, urlencode(parameters))
        self.path = ""
        self.build_query = {}
        return request_ref
    def build_headers(self, token=None):
        """Build request headers; adds an OAuth bearer token when admin
        (service-account) credentials are set and no user token was given."""
        headers = {"content-type": "application/json; charset=UTF-8"}
        if not token and self.credentials:
            access_token = self.credentials.get_access_token().access_token
            headers['Authorization'] = 'Bearer ' + access_token
        return headers
    def get(self, token=None, json_kwargs={}):
        """Execute the built query and wrap the result in a PyreResponse.

        NOTE(review): ``json_kwargs={}`` is a shared mutable default; it is
        never mutated here, but a ``None`` default would be safer.
        """
        # Keep a reference to the query: build_request_url() resets the builder.
        build_query = self.build_query
        query_key = self.path.split("/")[-1]
        request_ref = self.build_request_url(token)
        # headers
        headers = self.build_headers(token)
        # do request
        request_object = self.requests.get(request_ref, headers=headers)
        raise_detailed_error(request_object)
        request_dict = request_object.json(**json_kwargs)
        # if primitive or simple query return
        if isinstance(request_dict, list):
            return PyreResponse(convert_list_to_pyre(request_dict), query_key)
        if not isinstance(request_dict, dict):
            return PyreResponse(request_dict, query_key)
        if not build_query:
            return PyreResponse(convert_to_pyre(request_dict.items()), query_key)
        # return keys if shallow
        if build_query.get("shallow"):
            return PyreResponse(request_dict.keys(), query_key)
        # otherwise sort
        sorted_response = None
        if build_query.get("orderBy"):
            if build_query["orderBy"] == "$key":
                sorted_response = sorted(request_dict.items(), key=lambda item: item[0])
            elif build_query["orderBy"] == "$value":
                sorted_response = sorted(request_dict.items(), key=lambda item: item[1])
            else:
                sorted_response = sorted(request_dict.items(), key=lambda item: item[1][build_query["orderBy"]])
        return PyreResponse(convert_to_pyre(sorted_response), query_key)
    def push(self, data, token=None, json_kwargs={}):
        """POST *data*, letting Firebase generate a new child key."""
        request_ref = self.check_token(self.database_url, self.path, token)
        self.path = ""
        headers = self.build_headers(token)
        request_object = self.requests.post(request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8"))
        raise_detailed_error(request_object)
        return request_object.json()
    def set(self, data, token=None, json_kwargs={}):
        """PUT *data*, overwriting whatever is at the current path."""
        request_ref = self.check_token(self.database_url, self.path, token)
        self.path = ""
        headers = self.build_headers(token)
        request_object = self.requests.put(request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8"))
        raise_detailed_error(request_object)
        return request_object.json()
    def update(self, data, token=None, json_kwargs={}):
        """PATCH *data*, merging it into the current path."""
        request_ref = self.check_token(self.database_url, self.path, token)
        self.path = ""
        headers = self.build_headers(token)
        request_object = self.requests.patch(request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8"))
        raise_detailed_error(request_object)
        return request_object.json()
    def remove(self, token=None):
        """DELETE the data at the current path."""
        request_ref = self.check_token(self.database_url, self.path, token)
        self.path = ""
        headers = self.build_headers(token)
        request_object = self.requests.delete(request_ref, headers=headers)
        raise_detailed_error(request_object)
        return request_object.json()
    def stream(self, stream_handler, token=None, stream_id=None):
        """Open a server-sent-events stream on the current path; each parsed
        message is passed to *stream_handler*."""
        request_ref = self.build_request_url(token)
        return Stream(request_ref, stream_handler, self.build_headers, stream_id)
    def check_token(self, database_url, path, token):
        """Build a plain REST URL, appending ?auth= when a user token is given."""
        if token:
            return '{0}{1}.json?auth={2}'.format(database_url, path, token)
        else:
            return '{0}{1}.json'.format(database_url, path)
    def generate_key(self):
        """Generate a Firebase-style push ID: 8 base-64 timestamp characters
        followed by 12 random characters, kept monotonically increasing for
        keys generated within the same millisecond."""
        push_chars = '-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz'
        now = int(time.time() * 1000)
        duplicate_time = now == self.last_push_time
        self.last_push_time = now
        # Encode the millisecond timestamp as 8 base-64 characters (MSB first).
        time_stamp_chars = [0] * 8
        for i in reversed(range(0, 8)):
            time_stamp_chars[i] = push_chars[now % 64]
            now = int(math.floor(now / 64))
        new_id = "".join(time_stamp_chars)
        if not duplicate_time:
            # Fresh millisecond: draw 12 new random character indices.
            for i in range(0, 12):
                self.last_rand_chars.append(int(math.floor(uniform(0, 1) * 64)))
        else:
            # Same millisecond as the previous ID: bump the random part so the
            # new key still sorts after the previous one.
            for i in range(0, 11):
                if self.last_rand_chars[i] == 63:
                    self.last_rand_chars[i] = 0
                self.last_rand_chars[i] += 1
        for i in range(0, 12):
            new_id += push_chars[self.last_rand_chars[i]]
        return new_id
    def sort(self, origin, by_key):
        """Return a new PyreResponse with *origin*'s items re-sorted by the
        child field *by_key* (client-side sort)."""
        # unpack pyre objects
        pyres = origin.each()
        new_list = []
        for pyre in pyres:
            new_list.append(pyre.item)
        # sort
        data = sorted(dict(new_list).items(), key=lambda item: item[1][by_key])
        return PyreResponse(convert_to_pyre(data), origin.key())
class Storage:
    """ Storage Service: uploads/downloads against a Firebase storage bucket.

    With admin (service-account) credentials, operations go through the
    google-cloud-storage client; otherwise the Firebase REST endpoint is used.
    """
    def __init__(self, credentials, storage_bucket, requests):
        self.storage_bucket = "https://firebasestorage.googleapis.com/v0/b/" + storage_bucket
        self.credentials = credentials
        self.requests = requests
        # Builder state: path segments accumulated by child().
        self.path = ""
        if credentials:
            client = storage.Client(credentials=credentials, project=storage_bucket)
            self.bucket = client.get_bucket(storage_bucket)
    def child(self, *args):
        """Append path segments (joined with '/'); returns self for chaining."""
        new_path = "/".join(args)
        if self.path:
            self.path += "/{}".format(new_path)
        else:
            # Strip a leading slash on the first segment.
            if new_path.startswith("/"):
                new_path = new_path[1:]
            self.path = new_path
        return self
    def put(self, file, token=None):
        """Upload *file* (a filename string or an open file object) to the
        current path, authenticated with a user token, admin credentials, or
        no auth at all (bucket rules permitting)."""
        # reset path
        path = self.path
        self.path = None
        # NOTE(review): path is reset to None here but to "" in __init__;
        # calling get_url()/download() without a fresh child() chain would hit
        # None.startswith — confirm intended usage is one chain per request.
        if isinstance(file, str):
            file_object = open(file, 'rb')
        else:
            file_object = file
        request_ref = self.storage_bucket + "/o?name={0}".format(path)
        if token:
            headers = {"Authorization": "Firebase " + token}
            request_object = self.requests.post(request_ref, headers=headers, data=file_object)
            raise_detailed_error(request_object)
            return request_object.json()
        elif self.credentials:
            blob = self.bucket.blob(path)
            if isinstance(file, str):
                return blob.upload_from_filename(filename=file)
            else:
                return blob.upload_from_file(file_obj=file)
        else:
            # Unauthenticated upload.
            request_object = self.requests.post(request_ref, data=file_object)
            raise_detailed_error(request_object)
            return request_object.json()
    def delete(self, name):
        """Delete a blob by name (admin credentials only)."""
        self.bucket.delete_blob(name)
    def download(self, filename, token=None):
        """Download the blob at the current path into local file *filename*."""
        # remove leading backlash
        path = self.path
        url = self.get_url(token)
        self.path = None
        if path.startswith('/'):
            path = path[1:]
        if self.credentials:
            blob = self.bucket.get_blob(path)
            blob.download_to_filename(filename)
        else:
            # Stream the body in chunks instead of loading it all in memory.
            r = requests.get(url, stream=True)
            if r.status_code == 200:
                with open(filename, 'wb') as f:
                    for chunk in r:
                        f.write(chunk)
    def get_url(self, token):
        """Return the public media URL for the current path (consumes path)."""
        path = self.path
        self.path = None
        if path.startswith('/'):
            path = path[1:]
        if token:
            return "{0}/o/{1}?alt=media&token={2}".format(self.storage_bucket, quote(path, safe=''), token)
        return "{0}/o/{1}?alt=media".format(self.storage_bucket, quote(path, safe=''))
    def list_files(self):
        """List all blobs in the bucket (admin credentials only)."""
        return self.bucket.list_blobs()
def raise_detailed_error(request_object):
    """Re-raise any HTTP error from *request_object* with the response body
    attached, so callers can see the server-side error message."""
    try:
        request_object.raise_for_status()
    except HTTPError as original:
        # raise detailed error message
        # TODO: Check if we get a { "error" : "Permission denied." } and handle automatically
        raise HTTPError(original, request_object.text)
def convert_to_pyre(items):
    """Wrap each (key, value) pair in *items* in a Pyre object."""
    return [Pyre(pair) for pair in items]
def convert_list_to_pyre(items):
    """Wrap a Firebase list response in Pyre objects keyed by position.

    Bug fix: the original used ``items.index(item)``, which returns the index
    of the *first* equal element — so duplicated values all received the same
    (wrong) positional key — and the repeated scans made the conversion
    quadratic.  ``enumerate`` yields each element's true position in O(n).
    """
    return [Pyre([index, item]) for index, item in enumerate(items)]
class PyreResponse:
    """Wrapper around a database query result.

    ``pyres`` is either a list of Pyre objects (dict/list responses) or a
    primitive value; ``query_key`` is the last path segment of the query.
    """
    def __init__(self, pyres, query_key):
        self.pyres = pyres          # list of Pyre objects, or a primitive
        self.query_key = query_key  # key the query was made under
    def val(self):
        """Unpack the response into plain Python values.

        Returns a list when Firebase answered with a list, an OrderedDict when
        it answered with an object, the primitive itself otherwise, and None
        for an empty result (previously ``self.pyres[0]`` raised IndexError).
        """
        if isinstance(self.pyres, list):
            # Bug fix: guard the empty result before indexing element 0.
            if not self.pyres:
                return None
            # unpack pyres into OrderedDict
            pyre_list = []
            # if firebase response was a list
            if isinstance(self.pyres[0].key(), int):
                for pyre in self.pyres:
                    pyre_list.append(pyre.val())
                return pyre_list
            # if firebase response was a dict with keys
            for pyre in self.pyres:
                pyre_list.append((pyre.key(), pyre.val()))
            return OrderedDict(pyre_list)
        else:
            # return primitive or simple query results
            return self.pyres
    def key(self):
        """Return the key the query was made under."""
        return self.query_key
    def each(self):
        """Return the list of Pyre objects (None for primitive responses)."""
        if isinstance(self.pyres, list):
            return self.pyres
class Pyre:
    """A single (key, value) pair taken from a database response."""
    def __init__(self, item):
        # item is a two-element sequence: (key, value).  The raw attribute is
        # part of the public surface (Database.sort reads pyre.item).
        self.item = item
    def val(self):
        """Return the value half of the pair."""
        return self.item[1]
    def key(self):
        """Return the key half of the pair."""
        return self.item[0]
class KeepAuthSession(Session):
    """
    A session that doesn't drop Authentication on redirects between domains.
    """
    def rebuild_auth(self, prepared_request, response):
        # Deliberately a no-op: the base Session strips the Authorization
        # header on cross-domain redirects; overriding with `pass` keeps it.
        pass
class ClosableSSEClient(SSEClient):
    """SSEClient subclass that can be shut down from another thread."""
    def __init__(self, *args, **kwargs):
        # Flag checked by _connect(); close() flips it to stop reconnects.
        self.should_connect = True
        super(ClosableSSEClient, self).__init__(*args, **kwargs)
    def _connect(self):
        if self.should_connect:
            super(ClosableSSEClient, self)._connect()
        else:
            # NOTE(review): StopIteration raised inside a generator becomes
            # RuntimeError under PEP 479 (Python 3.7+) — confirm how the
            # SSEClient iterator consumes this.
            raise StopIteration()
    def close(self):
        # Stop reconnect attempts and force the underlying socket shut to
        # unblock a thread stuck in a streaming read.
        self.should_connect = False
        self.retry = 0
        # Reaches through requests/urllib3 private internals to get the socket.
        self.resp.raw._fp.fp.raw._sock.shutdown(socket.SHUT_RDWR)
        self.resp.raw._fp.fp.raw._sock.close()
class Stream:
    """Background SSE stream that feeds parsed event dicts to a handler callback."""
    def __init__(self, url, stream_handler, build_headers, stream_id):
        self.build_headers = build_headers
        self.url = url
        self.stream_handler = stream_handler
        self.stream_id = stream_id
        self.sse = None
        self.thread = None
        # The stream starts listening immediately on construction.
        self.start()
    def make_session(self):
        """
        Return a custom session object to be passed to the ClosableSSEClient.
        """
        session = KeepAuthSession()
        return session
    def start(self):
        """Spawn the listener thread."""
        self.thread = threading.Thread(target=self.start_stream)
        self.thread.start()
        return self
    def start_stream(self):
        """Thread body: iterate server-sent events and dispatch each message."""
        self.sse = ClosableSSEClient(self.url, session=self.make_session(), build_headers=self.build_headers)
        for msg in self.sse:
            if msg:
                msg_data = json.loads(msg.data)
                msg_data["event"] = msg.event
                if self.stream_id:
                    msg_data["stream_id"] = self.stream_id
                self.stream_handler(msg_data)
    def close(self):
        """Stop the stream and join the listener thread.

        Bug fix: the wait loop used ``and``, so it exited as soon as
        ``self.sse`` was set even though the ``resp`` attribute needed by
        ``ClosableSSEClient.close()`` might not exist yet.  With ``or`` we
        wait until the client exists *and* has a response.
        """
        while not self.sse or not hasattr(self.sse, 'resp'):
            time.sleep(0.001)
        self.sse.running = False
        self.sse.close()
        self.thread.join()
        return self
|
ininex/geofire-python
|
resource/lib/python2.7/site-packages/pyrebase/pyrebase.py
|
Python
|
mit
| 21,697
|
# import the TypeFinder code to this folder
import matplotlib
import astropy
from TypeFinder import *
|
elliesch/UltracoolTypingKit
|
Tests/__init__.py
|
Python
|
bsd-3-clause
| 101
|
def issubstring(s1, s2):
    """Return the index of the first occurrence of s1 within s2, or -1.

    Bug fix: the original tested ``j + 1 == M`` after the inner loop, but
    ``j`` also holds the mismatch position after a ``break``, so a mismatch on
    the *last* pattern character (e.g. s1="ab", s2="ac..") was falsely
    reported as a match.  An empty pattern also crashed with an unbound ``j``;
    it now matches at index 0 (consistent with str.find).
    """
    M = len(s1)
    N = len(s2)
    for i in range(N - M + 1):
        # for/else: the else clause runs only when no break occurred,
        # i.e. all M characters matched at offset i.
        for j in range(M):
            if s2[i + j] != s1[j]:
                break
        else:
            return i
    return -1
if __name__ == "__main__":
    # Quick manual check: "ford" starts at index 2 of "avfordc".
    pattern = "ford"
    text = "avfordc"
    print(issubstring(pattern, text))
|
prashantas/MyDataScience
|
GeneralPython/PyDataStructure/isSubstring.py
|
Python
|
bsd-2-clause
| 360
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 15 20:55:19 2016
@author: ajaver
"""
import json
import os
from collections import OrderedDict
import zipfile
import numpy as np
import pandas as pd
import tables
from tierpsy.helper.misc import print_flush
from tierpsy.analysis.feat_create.obtainFeaturesHelper import WormStats
from tierpsy.helper.params import read_unit_conversions, read_ventral_side, read_fps
def getWCONMetaData(fname, READ_FEATURES=False, provenance_step='FEAT_CREATE'):
    """Read experiment metadata and software provenance from a features HDF5
    file and return them as an OrderedDict ready for WCON export."""
    def _order_metadata(metadata_dict):
        # Canonical WCON metadata field order; unknown keys go last, sorted.
        ordered_fields = ['strain', 'timestamp', 'gene', 'chromosome', 'allele',
                          'strain_description', 'sex', 'stage', 'ventral_side', 'media', 'arena', 'food',
                          'habituation', 'who', 'protocol', 'lab', 'software']
        extra_fields = metadata_dict.keys() - set(ordered_fields)
        ordered_fields += sorted(extra_fields)
        ordered_metadata = OrderedDict()
        for field in ordered_fields:
            if field in metadata_dict:
                ordered_metadata[field] = metadata_dict[field]
        return ordered_metadata
    with tables.File(fname, 'r') as fid:
        if not '/experiment_info' in fid:
            experiment_info = {}
        else:
            experiment_info = fid.get_node('/experiment_info').read()
            experiment_info = json.loads(experiment_info.decode('utf-8'))
        # Provenance carries the tracker commit hashes used to create the file.
        provenance_tracking = fid.get_node('/provenance_tracking/' + provenance_step).read()
        provenance_tracking = json.loads(provenance_tracking.decode('utf-8'))
        commit_hash = provenance_tracking['commit_hash']
        # Older files use the project's previous name (MWTracker).
        if 'tierpsy' in commit_hash:
            tierpsy_version = commit_hash['tierpsy']
        else:
            tierpsy_version = commit_hash['MWTracker']
        MWTracker_ver = {"name":"tierpsy (https://github.com/ver228/tierpsy-tracker)",
                         "version": tierpsy_version,
                         "featureID":"@OMG"}
        if not READ_FEATURES:
            experiment_info["software"] = MWTracker_ver
        else:
            #add open_worm_analysis_toolbox info and save as a list of "softwares"
            open_worm_ver = {"name":"open_worm_analysis_toolbox (https://github.com/openworm/open-worm-analysis-toolbox)",
                             "version":commit_hash['open_worm_analysis_toolbox'],
                             "featureID":""}
            experiment_info["software"] = [MWTracker_ver, open_worm_ver]
    return _order_metadata(experiment_info)
def __reformatForJson(A):
    """Make an array JSON-friendly: round to ~3 significant figures, replace
    NaN with None, and return a bare scalar for single-element arrays."""
    if isinstance(A, (int, float)):
        return A
    # Use only finite, non-zero entries to choose the rounding precision.
    good = ~np.isnan(A) & (A != 0)
    dd = A[good]
    if dd.size > 0:
        # Number of decimals that keeps ~3 significant digits of the
        # smallest-magnitude entry (floored at 2 decimals).
        dd = np.abs(np.floor(np.log10(np.abs(dd)))-2)
        precision = max(2, int(np.min(dd)))
        A = np.round(A.astype(np.float64), precision)
    A = np.where(np.isnan(A), None, A)
    #wcon specification require to return a single number if it is only one element list
    if A.size == 1:
        return A[0]
    else:
        return A.tolist()
def __addOMGFeat(fid, worm_feat_time, worm_id):
    """Collect time-series and event features for one worm into an OrderedDict.

    ``worm_feat_time`` is the per-worm slice of the features_timeseries table;
    event features are read from ``/features_events/worm_<id>`` in ``fid``.
    """
    worm_features = OrderedDict()
    # add time series features.  .items() replaces .iteritems(), which was
    # removed in pandas 2.0; it iterates the same (column_name, Series) pairs.
    for col_name, col_dat in worm_feat_time.items():
        if not col_name in ['worm_index', 'timestamp']:
            worm_features[col_name] = col_dat.values
    worm_path = '/features_events/worm_%i' % worm_id
    worm_node = fid.get_node(worm_path)
    # add event features
    for feature_name in worm_node._v_children:
        feature_path = worm_path + '/' + feature_name
        worm_features[feature_name] = fid.get_node(feature_path)[:]
    return worm_features
def _get_ventral_side(features_file):
    """Map the stored ventral_side annotation to a WCON contour-type code."""
    ventral_side = read_ventral_side(features_file)
    if ventral_side and ventral_side != 'unknown':
        #we will merge the ventral and dorsal contours so the ventral contour is clockwise
        return 'CW'
    # unannotated or unknown orientation -> WCON "unknown" marker
    return '?'
def _getData(features_file, READ_FEATURES=False, IS_FOR_WCON=True):
    """Collect per-worm data (skeleton, contour, timestamps and optionally
    features) from a features HDF5 file as a list of OrderedDicts."""
    if IS_FOR_WCON:
        # WCON requires custom (non-standard) fields to carry a lab prefix.
        lab_prefix = '@OMG '
    else:
        lab_prefix = ''
    with pd.HDFStore(features_file, 'r') as fid:
        if not '/features_timeseries' in fid:
            return {} #empty file nothing to do here
        features_timeseries = fid['/features_timeseries']
        feat_time_group_by_worm = features_timeseries.groupby('worm_index');
    ventral_side = _get_ventral_side(features_file)
    with tables.File(features_file, 'r') as fid:
        #fps used to adjust timestamp to real time
        fps = read_fps(features_file)
        #get pointers to some useful data
        skeletons = fid.get_node('/coordinates/skeletons')
        dorsal_contours = fid.get_node('/coordinates/dorsal_contours')
        ventral_contours = fid.get_node('/coordinates/ventral_contours')
        #let's append the data of each individual worm as a element in a list
        all_worms_feats = []
        #group by iterator will return sorted worm indexes
        for worm_id, worm_feat_time in feat_time_group_by_worm:
            worm_id = int(worm_id)
            #read worm skeletons data
            worm_skel = skeletons[worm_feat_time.index]
            worm_dor_cnt = dorsal_contours[worm_feat_time.index]
            worm_ven_cnt = ventral_contours[worm_feat_time.index]
            #start ordered dictionary with the basic features
            worm_basic = OrderedDict()
            worm_basic['id'] = str(worm_id)
            worm_basic['head'] = 'L'
            worm_basic['ventral'] = ventral_side
            worm_basic['ptail'] = worm_ven_cnt.shape[1]-1 #index starting with 0
            worm_basic['t'] = worm_feat_time['timestamp'].values/fps #convert from frames to seconds
            worm_basic['x'] = worm_skel[:, :, 0]
            worm_basic['y'] = worm_skel[:, :, 1]
            # Merge ventral and dorsal contours into one closed polygon
            # (dorsal side reversed so the outline runs continuously).
            contour = np.hstack((worm_ven_cnt, worm_dor_cnt[:, ::-1, :]))
            worm_basic['px'] = contour[:, :, 0]
            worm_basic['py'] = contour[:, :, 1]
            if READ_FEATURES:
                worm_features = __addOMGFeat(fid, worm_feat_time, worm_id)
                for feat in worm_features:
                    worm_basic[lab_prefix + feat] = worm_features[feat]
            if IS_FOR_WCON:
                # Round/clean every numeric field for JSON serialisation.
                for x in worm_basic:
                    if not x in ['id', 'head', 'ventral', 'ptail']:
                        worm_basic[x] = __reformatForJson(worm_basic[x])
            #append features
            all_worms_feats.append(worm_basic)
    return all_worms_feats
def _getUnits(features_file, READ_FEATURES=False):
    """Build the WCON "units" dictionary for the exported fields."""
    fps_out, microns_per_pixel_out, _ = read_unit_conversions(features_file)
    xy_units = microns_per_pixel_out[1]
    time_units = fps_out[2]
    units = OrderedDict()
    units["size"] = "mm" #size of the plate
    units['t'] = time_units #frames or seconds
    for field in ['x', 'y', 'px', 'py']:
        units[field] = xy_units #(pixels or micrometers)
    if READ_FEATURES:
        #TODO how to change microns to pixels when required
        ws = WormStats()
        # .items() replaces .iteritems(), which was removed in pandas 2.0
        # (and never existed on Python 3 dicts); same (key, value) pairs.
        for field, unit in ws.features_info['units'].items():
            units['@OMG ' + field] = unit
    return units
def exportWCONdict(features_file, READ_FEATURES=False):
    """Assemble the full WCON document (metadata, units, data) as an OrderedDict."""
    metadata = getWCONMetaData(features_file, READ_FEATURES)
    data = _getData(features_file, READ_FEATURES)
    units = _getUnits(features_file, READ_FEATURES)
    #units = {x:units[x].replace('degrees', '1') for x in units}
    #units = {x:units[x].replace('radians', '1') for x in units}
    return OrderedDict([
        ('metadata', metadata),
        ('units', units),
        ('data', data),
    ])
def getWCOName(features_file):
    """Derive the output .wcon.zip filename from a _features.hdf5 path."""
    return features_file.replace('_features.hdf5', '.wcon.zip')
def exportWCON(features_file, READ_FEATURES=False):
    """Export a features HDF5 file to a zipped WCON file alongside it."""
    base_name = os.path.basename(features_file).replace('_features.hdf5', '')
    print_flush("{} Exporting data to WCON...".format(base_name))
    wcon_dict = exportWCONdict(features_file, READ_FEATURES)
    wcon_file = getWCOName(features_file)
    #with gzip.open(wcon_file, 'wt') as fid:
    #    json.dump(wcon_dict, fid, allow_nan=False)
    # Store the compact JSON inside the zip under the same name minus ".zip".
    with zipfile.ZipFile(wcon_file, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
        zip_name = os.path.basename(wcon_file).replace('.zip', '')
        wcon_txt = json.dumps(wcon_dict, allow_nan=False, separators=(',', ':'))
        zf.writestr(zip_name, wcon_txt)
    # Fix: the user-facing message was misspelled ("Finised to export").
    print_flush("{} Finished exporting to WCON.".format(base_name))
if __name__ == '__main__':
    # Ad-hoc manual test against a local single-worm recording.
    features_file = '/Users/ajaver/OneDrive - Imperial College London/Local_Videos/single_worm/global_sample_v3/883 RC301 on food R_2011_03_07__11_10_27___8___1_features.hdf5'
    #exportWCON(features_file)
    wcon_file = getWCOName(features_file)
    wcon_dict = exportWCONdict(features_file)
    # Pretty-printed here (indent=4), unlike the compact form in exportWCON().
    wcon_txt = json.dumps(wcon_dict, allow_nan=False, indent=4)
    #%%
    with zipfile.ZipFile(wcon_file, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
        zip_name = os.path.basename(wcon_file).replace('.zip', '')
        zf.writestr(zip_name, wcon_txt)
    #%%
    # import wcon
    # wc = wcon.WCONWorms()
    # wc = wc.load_from_file(JSON_path, validate_against_schema = False)
|
ljschumacher/tierpsy-tracker
|
tierpsy/analysis/wcon_export/exportWCON.py
|
Python
|
mit
| 9,522
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Splinter would be proud."""
# Source sentence: the four turtles plus the tagline, separated by '. '.
TEENAGE_MUTANT_NINJAS = ('Michaelangelo. Leonardo. Rafael. Donatello. Heroes '
                         'in a half shell.')
# Splitting on '. ' yields the four names plus 'Heroes in a half shell.'
# (the final period stays attached to the last element).
TURTLE_POWER = TEENAGE_MUTANT_NINJAS.split('. ')
|
rrafiringa/is210-week-03-warmup
|
task_05.py
|
Python
|
mpl-2.0
| 258
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi, Guewen Baconnier
# Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Account Credit Control',
'version': '0.1',
'author': 'Camptocamp',
'maintainer': 'Camptocamp',
'category': 'Finance',
'complexity': "normal",
'depends': ['base', 'account',
'email_template', 'report_webkit'],
'description': """
Credit Control
==============
Configuration
-------------
Configure the policies and policy levels in ``Accounting > Configuration >
Credit Control > Credit Policies``.
You can define as many policy levels as you need.
Configure a tolerance for the Credit control and a default policy
applied on all partners in each company, under the Accounting tab.
You are able to specify a particular policy for one partner or one invoice.
Usage
-----
Menu entries are located in ``Accounting > Periodical Processing > Credit
Control``.
Create a new "run" in the ``Credit Control Run`` menu with the controlling date.
Then, use the ``Compute credit lines`` button. All the credit control lines will
be generated. You can find them in the ``Credit Control Lines`` menu.
On each generated line, you have many choices:
* Send a email
* Print a letter
* Change the state (so you can ignore or reopen lines)
""",
'website': 'http://www.camptocamp.com',
'data': ["report/report.xml",
"data.xml",
"line_view.xml",
"account_view.xml",
"partner_view.xml",
"policy_view.xml",
"run_view.xml",
"company_view.xml",
"wizard/credit_control_emailer_view.xml",
"wizard/credit_control_marker_view.xml",
"wizard/credit_control_printer_view.xml",
"security/ir.model.access.csv"],
'demo_xml': ["credit_control_demo.xml"],
'tests': [],
'installable': True,
'license': 'AGPL-3',
'application': True
}
|
eneldoserrata/marcos_openerp
|
marcos_addons/account_credit_control/__openerp__.py
|
Python
|
agpl-3.0
| 2,707
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
from common import write_fake_link
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
write_fake_link(test)
test.write('myfortran.py', r"""
import getopt
import sys
comment = '#' + sys.argv[1]
opts, args = getopt.getopt(sys.argv[2:], 'co:')
for opt, arg in opts:
if opt == '-o': out = arg
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
for l in infile.readlines():
if l[:len(comment)] != comment:
outfile.write(l)
sys.exit(0)
""")
# Test non-default file suffix: .f/.F for F08
test.write('SConstruct', """
env = Environment(LINK = r'%(_python_)s mylink.py',
LINKFLAGS = [],
F77 = r'%(_python_)s myfortran.py f77',
F08 = r'%(_python_)s myfortran.py f08',
F08FILESUFFIXES = ['.f', '.F', '.f08', '.F08'],
tools = ['default', 'f08'])
env.Program(target = 'test01', source = 'test01.f')
env.Program(target = 'test02', source = 'test02.F')
env.Program(target = 'test03', source = 'test03.f08')
env.Program(target = 'test04', source = 'test04.F08')
env.Program(target = 'test05', source = 'test05.f77')
env.Program(target = 'test06', source = 'test06.F77')
""" % locals())
test.write('test01.f', "This is a .f file.\n#link\n#f08\n")
test.write('test02.F', "This is a .F file.\n#link\n#f08\n")
test.write('test03.f08', "This is a .f08 file.\n#link\n#f08\n")
test.write('test04.F08', "This is a .F08 file.\n#link\n#f08\n")
test.write('test05.f77', "This is a .f77 file.\n#link\n#f77\n")
test.write('test06.F77', "This is a .F77 file.\n#link\n#f77\n")
test.run(arguments = '.', stderr = None)
test.must_match('test01' + _exe, "This is a .f file.\n")
test.must_match('test02' + _exe, "This is a .F file.\n")
test.must_match('test03' + _exe, "This is a .f08 file.\n")
test.must_match('test04' + _exe, "This is a .F08 file.\n")
test.must_match('test05' + _exe, "This is a .f77 file.\n")
test.must_match('test06' + _exe, "This is a .F77 file.\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
andrewyoung1991/scons
|
test/Fortran/F08FILESUFFIXES2.py
|
Python
|
mit
| 3,301
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pypinyin.runner import get_parser
def test_default():
    """CLI defaults: pinyin func, zh4ao style, '-' separator, no heteronym."""
    opts = get_parser().parse_args(['你好'])
    assert opts.func == 'pinyin'
    assert opts.style == 'zh4ao'
    assert opts.separator == '-'
    assert not opts.heteronym
    assert opts.hans == '你好'
    assert opts.errors == 'default'
def test_custom():
    """Every CLI option can be overridden on the command line."""
    argv = [
        '--func', 'slug',
        '--style', 'zhao',
        '--separator', ' ',
        '--errors', 'ignore',
        '--heteronym',
        '你好啊',
    ]
    opts = get_parser().parse_args(argv)
    assert opts.func == 'slug'
    assert opts.style == 'zhao'
    assert opts.separator == ' '
    assert opts.errors == 'ignore'
    assert opts.heteronym
    assert opts.hans == '你好啊'
if __name__ == '__main__':
    # Allow running this test module directly by delegating to pytest's CLI.
    import pytest
    pytest.cmdline.main()
|
mozillazg/python-pinyin
|
tests/test_cmd.py
|
Python
|
mit
| 1,023
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Optional, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
from . import models
from ._configuration import PolicyClientConfiguration
from .operations import PolicyAssignmentsOperations, PolicyDefinitionsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class PolicyClient:
    """To manage and control access to your resources, you can define customized policies and assign them at a scope.

    :ivar policy_definitions: PolicyDefinitionsOperations operations
    :vartype policy_definitions:
     azure.mgmt.resource.policy.v2016_12_01.operations.PolicyDefinitionsOperations
    :ivar policy_assignments: PolicyAssignmentsOperations operations
    :vartype policy_assignments:
     azure.mgmt.resource.policy.v2016_12_01.operations.PolicyAssignmentsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The ID of the target subscription.
    :type subscription_id: str
    :param base_url: Service URL. Default value is 'https://management.azure.com'.
    :type base_url: str
    """

    def __init__(
        self,
        credential: "TokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = PolicyClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Collect the generated model classes so msrest can (de)serialize
        # them by name.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        # Validation is performed server-side; skip client-side checks.
        self._serialize.client_side_validation = False
        self.policy_definitions = PolicyDefinitionsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.policy_assignments = PolicyAssignmentsOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(
        self,
        request,  # type: HttpRequest
        **kwargs: Any
    ) -> HttpResponse:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = client._send_request(request)
        <HttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.HttpResponse
        """
        # Copy before mutating so the caller's request object is untouched.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    def close(self):
        # type: () -> None
        self._client.close()

    def __enter__(self):
        # type: () -> PolicyClient
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details):
        # type: (Any) -> None
        self._client.__exit__(*exc_details)
|
Azure/azure-sdk-for-python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2016_12_01/_policy_client.py
|
Python
|
mit
| 4,159
|
#!/usr/bin/python
"""
Constructed Data
"""
import sys
from copy import deepcopy as _deepcopy
from .errors import DecodingError, EncodingError, \
MissingRequiredParameter, InvalidParameterDatatype, InvalidTag
from .debugging import ModuleLogger, bacpypes_debugging
from .primitivedata import Atomic, ClosingTag, OpeningTag, Tag, TagList, \
Unsigned
# some debugging
# module-level debug flag (0 = disabled) and logger, per the bacpypes
# debugging convention used throughout this package
_debug = 0
_log = ModuleLogger(globals())
#
# Element
#
class Element:

    """Describes one member of a Sequence or Choice: the attribute name it is
    stored under, the datatype class used to encode/decode it, an optional
    BACnet context tag number, and whether the member may be omitted."""

    def __init__(self, name, klass, context=None, optional=False):
        self.name = name
        self.klass = klass
        self.context = context
        self.optional = optional

    def __repr__(self):
        # assemble the description pieces, then wrap in the instance banner
        bits = ["%s(%s" % (self.__class__.__name__, self.name)]
        bits.append(" " + self.klass.__name__)
        if self.context is not None:
            bits.append(", context=%r" % (self.context,))
        if self.optional:
            bits.append(", optional")
        bits.append(")")
        return "<%s instance at 0x%08x>" % ("".join(bits), id(self))
#
# Sequence
#
@bacpypes_debugging
class Sequence(object):

    """An ordered collection of named, typed members (a BACnet SEQUENCE).

    Subclasses describe their members in ``sequenceElements`` as a list of
    :class:`Element` instances; :meth:`encode` and :meth:`decode` walk that
    list in order.  Member values live as attributes on the instance, with
    ``None`` meaning "absent".
    """

    sequenceElements = []

    def __init__(self, *args, **kwargs):
        """
        Create a sequence element, optionally providing attribute/property values.

        Keyword arguments naming a sequence element are consumed here; all
        other arguments are forwarded to the superclass so Sequence can be
        mixed into cooperative hierarchies.
        """
        if _debug: Sequence._debug("__init__ %r %r", args, kwargs)

        # split out the keyword arguments that belong to this class
        my_kwargs = {}
        other_kwargs = {}
        for element in self.sequenceElements:
            if element.name in kwargs:
                my_kwargs[element.name] = kwargs[element.name]
        for kw in kwargs:
            if kw not in my_kwargs:
                other_kwargs[kw] = kwargs[kw]
        if _debug: Sequence._debug(" - my_kwargs: %r", my_kwargs)
        if _debug: Sequence._debug(" - other_kwargs: %r", other_kwargs)

        # call some superclass, if there is one
        super(Sequence, self).__init__(*args, **other_kwargs)

        # set the attribute/property values for the ones provided
        for element in self.sequenceElements:
            setattr(self, element.name, my_kwargs.get(element.name, None))

    def encode(self, taglist):
        """Append the encoding of every element, in declaration order, to *taglist*.

        :param taglist: a TagList to receive the encoded tags
        :raises MissingRequiredParameter: a non-optional element is None
        :raises TypeError: a value does not match its declared element class
        """
        if _debug: Sequence._debug("encode %r", taglist)
        global _sequence_of_classes, _list_of_classes

        # make sure we're dealing with a tag list
        if not isinstance(taglist, TagList):
            raise TypeError("TagList expected")

        for element in self.sequenceElements:
            value = getattr(self, element.name, None)
            if element.optional and value is None:
                continue
            if not element.optional and value is None:
                raise MissingRequiredParameter("%s is a missing required element of %s" % (element.name, self.__class__.__name__))
            if (element.klass in _sequence_of_classes) or (element.klass in _list_of_classes):
                # might need to encode an opening tag
                if element.context is not None:
                    taglist.append(OpeningTag(element.context))
                if _debug: Sequence._debug(" - build sequence helper: %r %r", element.klass, value)
                helper = element.klass(value)

                # encode the value
                helper.encode(taglist)

                # might need to encode a closing tag
                if element.context is not None:
                    taglist.append(ClosingTag(element.context))
            elif issubclass(element.klass, (Atomic, AnyAtomic)):
                # a helper cooperates between the atomic value and the tag
                if _debug: Sequence._debug(" - build helper: %r %r", element.klass, value)
                helper = element.klass(value)

                # build a tag and encode the data into it
                tag = Tag()
                helper.encode(tag)

                # convert it to context encoding iff necessary
                if element.context is not None:
                    tag = tag.app_to_context(element.context)

                # now append the tag
                taglist.append(tag)
            elif isinstance(value, element.klass):
                # might need to encode an opening tag
                if element.context is not None:
                    taglist.append(OpeningTag(element.context))

                # encode the value
                value.encode(taglist)

                # might need to encode a closing tag
                if element.context is not None:
                    taglist.append(ClosingTag(element.context))
            else:
                raise TypeError("%s must be of type %s" % (element.name, element.klass.__name__))

    def decode(self, taglist):
        """Decode elements in declaration order from *taglist*, setting the
        corresponding attributes on self.

        Optional elements that are absent are set to None (or [] for
        sequence/list-typed elements).

        :raises MissingRequiredParameter: a required element is absent
        :raises InvalidTag: an unexpected context/opening/closing tag
        :raises InvalidParameterDatatype: an application tag of the wrong type
        """
        if _debug: Sequence._debug("decode %r", taglist)
        global _sequence_of_classes, _list_of_classes

        # make sure we're dealing with a tag list
        if not isinstance(taglist, TagList):
            raise TypeError("TagList expected")

        for element in self.sequenceElements:
            tag = taglist.Peek()
            if _debug: Sequence._debug(" - element, tag: %r, %r", element, tag)

            # no more elements
            if tag is None:
                if element.optional:
                    # omitted optional element
                    setattr(self, element.name, None)
                elif (element.klass in _sequence_of_classes) or (element.klass in _list_of_classes):
                    # empty list
                    setattr(self, element.name, [])
                else:
                    raise MissingRequiredParameter("%s is a missing required element of %s" % (element.name, self.__class__.__name__))

            # we have been enclosed in a context
            elif tag.tagClass == Tag.closingTagClass:
                if not element.optional:
                    raise MissingRequiredParameter("%s is a missing required element of %s" % (element.name, self.__class__.__name__))

                # omitted optional element
                setattr(self, element.name, None)

            # check for a sequence element
            elif element.klass in _sequence_of_classes:
                # check for context encoding
                if element.context is not None:
                    if tag.tagClass != Tag.openingTagClass or tag.tagNumber != element.context:
                        if not element.optional:
                            raise MissingRequiredParameter("%s expected opening tag %d" % (element.name, element.context))
                        else:
                            # omitted optional element
                            setattr(self, element.name, [])
                            continue
                    taglist.Pop()

                # a helper cooperates between the atomic value and the tag
                helper = element.klass()
                helper.decode(taglist)

                # now save the value
                setattr(self, element.name, helper.value)

                # check for context closing tag
                if element.context is not None:
                    tag = taglist.Pop()
                    if tag.tagClass != Tag.closingTagClass or tag.tagNumber != element.context:
                        raise InvalidTag("%s expected closing tag %d" % (element.name, element.context))

            # check for an any atomic element
            elif issubclass(element.klass, AnyAtomic):
                # convert it to application encoding
                if element.context is not None:
                    raise InvalidTag("%s any atomic with context tag %d" % (element.name, element.context))

                if tag.tagClass != Tag.applicationTagClass:
                    if not element.optional:
                        raise InvalidParameterDatatype("%s expected any atomic application tag" % (element.name,))
                    else:
                        setattr(self, element.name, None)
                        continue

                # consume the tag
                taglist.Pop()

                # a helper cooperates between the atomic value and the tag
                helper = element.klass(tag)

                # now save the value
                setattr(self, element.name, helper.value)

            # check for specific kind of atomic element, or the context says what kind
            elif issubclass(element.klass, Atomic):
                # convert it to application encoding
                if element.context is not None:
                    if tag.tagClass != Tag.contextTagClass or tag.tagNumber != element.context:
                        if not element.optional:
                            raise InvalidTag("%s expected context tag %d" % (element.name, element.context))
                        else:
                            setattr(self, element.name, None)
                            continue
                    tag = tag.context_to_app(element.klass._app_tag)
                else:
                    if tag.tagClass != Tag.applicationTagClass or tag.tagNumber != element.klass._app_tag:
                        if not element.optional:
                            raise InvalidParameterDatatype("%s expected application tag %s" % (element.name, Tag._app_tag_name[element.klass._app_tag]))
                        else:
                            setattr(self, element.name, None)
                            continue

                # consume the tag
                taglist.Pop()

                # a helper cooperates between the atomic value and the tag
                helper = element.klass(tag)

                # now save the value
                setattr(self, element.name, helper.value)

            # NOTE: a second "elif issubclass(element.klass, AnyAtomic)" branch
            # used to follow here; it was unreachable because the identical
            # test above always matched first, so it has been removed.

            # some kind of structure
            else:
                if element.context is not None:
                    if tag.tagClass != Tag.openingTagClass or tag.tagNumber != element.context:
                        if not element.optional:
                            raise InvalidTag("%s expected opening tag %d" % (element.name, element.context))
                        else:
                            setattr(self, element.name, None)
                            continue
                    taglist.Pop()

                try:
                    # make a backup of the tag list in case the structure manages to
                    # decode some content but not all of it.  This is not supposed to
                    # happen if the ASN.1 has been formed correctly.
                    backup = taglist.tagList[:]

                    # build a value and decode it
                    value = element.klass()
                    value.decode(taglist)

                    # save the result
                    setattr(self, element.name, value)
                except (DecodingError, InvalidTag) as err:
                    # if the context tag was matched, the substructure has to be decoded
                    # correctly.
                    if element.context is None and element.optional:
                        # omitted optional element
                        setattr(self, element.name, None)

                        # restore the backup
                        taglist.tagList = backup
                    else:
                        raise

                if element.context is not None:
                    tag = taglist.Pop()
                    if (not tag) or tag.tagClass != Tag.closingTagClass or tag.tagNumber != element.context:
                        raise InvalidTag("%s expected closing tag %d" % (element.name, element.context))

    def debug_contents(self, indent=1, file=sys.stdout, _ids=None):
        """Write a human-readable dump of the element values to *file*."""
        global _sequence_of_classes, _list_of_classes

        for element in self.sequenceElements:
            value = getattr(self, element.name, None)
            if element.optional and value is None:
                continue
            if not element.optional and value is None:
                file.write("%s%s is a missing required element of %s\n" % ("    " * indent, element.name, self.__class__.__name__))
                continue
            if (element.klass in _sequence_of_classes) or (element.klass in _list_of_classes):
                file.write("%s%s\n" % ("    " * indent, element.name))
                helper = element.klass(value)
                helper.debug_contents(indent+1, file, _ids)
            elif issubclass(element.klass, (Atomic, AnyAtomic)):
                file.write("%s%s = %r\n" % ("    " * indent, element.name, value))
            elif isinstance(value, element.klass):
                file.write("%s%s\n" % ("    " * indent, element.name))
                value.debug_contents(indent+1, file, _ids)
            else:
                file.write("%s%s must be a %s\n" % ("    " * indent, element.name, element.klass.__name__))

    def dict_contents(self, use_dict=None, as_class=dict):
        """Return the contents of an object as a dict."""
        if _debug: Sequence._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
        global _sequence_of_classes, _list_of_classes

        # make/extend the dictionary of content
        if use_dict is None:
            use_dict = as_class()

        # loop through the elements
        for element in self.sequenceElements:
            value = getattr(self, element.name, None)
            if value is None:
                continue

            if (element.klass in _sequence_of_classes) or (element.klass in _list_of_classes):
                helper = element.klass(value)
                mapped_value = helper.dict_contents(as_class=as_class)
            elif issubclass(element.klass, Atomic):
                mapped_value = value                    ### ambiguous
            elif issubclass(element.klass, AnyAtomic):
                mapped_value = value.value              ### ambiguous
            elif isinstance(value, element.klass):
                mapped_value = value.dict_contents(as_class=as_class)
            else:
                continue

            # update the dictionary being built (previously the isinstance
            # branch also set the item itself, a redundant duplicate call)
            use_dict.__setitem__(element.name, mapped_value)

        # return what we built/updated
        return use_dict
#
# SequenceOf
#
# cache of generated classes keyed by element class, plus a reverse
# membership map used elsewhere for "is this a SequenceOf class" checks
_sequence_of_map = {}
_sequence_of_classes = {}
@bacpypes_debugging
def SequenceOf(klass):
    """Function to return a class that can encode and decode a list of
    some other type.

    Generated classes are cached in ``_sequence_of_map`` so repeated calls
    with the same *klass* return the identical class object.  Nesting
    (``SequenceOf(SequenceOf(...))``) and sequences of arrays are rejected.
    """
    if _debug: SequenceOf._debug("SequenceOf %r", klass)
    global _sequence_of_map
    global _sequence_of_classes, _array_of_classes

    # if this has already been built, return the cached one
    if klass in _sequence_of_map:
        if _debug: SequenceOf._debug(" - found in cache")
        return _sequence_of_map[klass]

    # no SequenceOf(SequenceOf(...)) allowed
    if klass in _sequence_of_classes:
        raise TypeError("nested sequences disallowed")
    # no SequenceOf(ArrayOf(...)) allowed
    if klass in _array_of_classes:
        raise TypeError("sequences of arrays disallowed")

    # define a generic class for lists
    @bacpypes_debugging
    class _SequenceOf:

        # set to *klass* after the class statement, below
        subtype = None

        def __init__(self, value=None):
            if _debug: _SequenceOf._debug("(%r)__init__ %r (subtype=%r)", self.__class__.__name__, value, self.subtype)
            if value is None:
                self.value = []
            elif isinstance(value, list):
                self.value = value
            else:
                raise TypeError("invalid constructor datatype")

        def append(self, value):
            # atomic subtypes accept anything; AnyAtomic requires an Atomic
            # instance; otherwise the value must be an instance of subtype
            if issubclass(self.subtype, Atomic):
                pass
            elif issubclass(self.subtype, AnyAtomic) and not isinstance(value, Atomic):
                raise TypeError("instance of an atomic type required")
            elif not isinstance(value, self.subtype):
                raise TypeError("%s value required" % (self.subtype.__name__,))
            self.value.append(value)

        def __len__(self):
            return len(self.value)

        def __getitem__(self, item):
            return self.value[item]

        def __iter__(self):
            return iter(self.value)

        def encode(self, taglist):
            """Append the encoding of every item to *taglist*."""
            if _debug: _SequenceOf._debug("(%r)encode %r", self.__class__.__name__, taglist)
            for value in self.value:
                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    # a helper cooperates between the atomic value and the tag
                    helper = self.subtype(value)

                    # build a tag and encode the data into it
                    tag = Tag()
                    helper.encode(tag)

                    # now encode the tag
                    taglist.append(tag)
                elif isinstance(value, self.subtype):
                    # it must have its own encoder
                    value.encode(taglist)
                else:
                    raise TypeError("%s must be a %s" % (value, self.subtype.__name__))

        def decode(self, taglist):
            """Consume items from *taglist* until it is exhausted or a
            closing tag (end of the enclosing context) is seen."""
            if _debug: _SequenceOf._debug("(%r)decode %r", self.__class__.__name__, taglist)
            while len(taglist) != 0:
                tag = taglist.Peek()
                if tag.tagClass == Tag.closingTagClass:
                    return

                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    if _debug: _SequenceOf._debug(" - building helper: %r %r", self.subtype, tag)
                    taglist.Pop()

                    # a helper cooperates between the atomic value and the tag
                    helper = self.subtype(tag)

                    # save the value
                    self.value.append(helper.value)
                else:
                    if _debug: _SequenceOf._debug(" - building value: %r", self.subtype)
                    # build an element
                    value = self.subtype()

                    # let it decode itself
                    value.decode(taglist)

                    # save what was built
                    self.value.append(value)

        def debug_contents(self, indent=1, file=sys.stdout, _ids=None):
            """Write a human-readable dump of the items to *file*."""
            i = 0
            for value in self.value:
                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    file.write("%s[%d] = %r\n" % ("    " * indent, i, value))
                elif isinstance(value, self.subtype):
                    file.write("%s[%d]" % ("    " * indent, i))
                    value.debug_contents(indent+1, file, _ids)
                else:
                    file.write("%s[%d] %s must be a %s" % ("    " * indent, i, value, self.subtype.__name__))
                i += 1

        def dict_contents(self, use_dict=None, as_class=dict):
            # return sequences as arrays
            mapped_value = []

            for value in self.value:
                if issubclass(self.subtype, Atomic):
                    mapped_value.append(value)              ### ambiguous
                elif issubclass(self.subtype, AnyAtomic):
                    mapped_value.append(value.value)        ### ambiguous
                elif isinstance(value, self.subtype):
                    mapped_value.append(value.dict_contents(as_class=as_class))

            # return what we built
            return mapped_value

    # constrain it to a list of a specific type of item
    setattr(_SequenceOf, 'subtype', klass)
    _SequenceOf.__name__ = 'SequenceOf' + klass.__name__
    if _debug: SequenceOf._debug(" - build this class: %r", _SequenceOf)

    # cache this type
    _sequence_of_map[klass] = _SequenceOf
    _sequence_of_classes[_SequenceOf] = 1

    # return this new type
    return _SequenceOf
#
# List
#
class List(object):
    """Marker base class so classes generated by ListOf() can be detected
    with isinstance()."""
    pass
#
# ListOf
#
# cache of generated ListOf classes keyed by element class, plus the
# reverse membership map used for "is this a ListOf class" checks
_list_of_map = {}
_list_of_classes = {}
@bacpypes_debugging
def ListOf(klass):
    """Function to return a class that can encode and decode a list of
    some other type.

    Generated classes are cached in ``_list_of_map`` so repeated calls with
    the same *klass* return the identical class object.  Nesting
    (``ListOf(ListOf(...))``) and lists of arrays are rejected.
    """
    if _debug: ListOf._debug("ListOf %r", klass)
    global _list_of_map
    global _list_of_classes, _array_of_classes

    # if this has already been built, return the cached one
    if klass in _list_of_map:
        # fix: this debug call previously went through SequenceOf._debug,
        # mis-attributing the message to the wrong factory
        if _debug: ListOf._debug(" - found in cache")
        return _list_of_map[klass]

    # no ListOf(ListOf(...)) allowed
    if klass in _list_of_classes:
        raise TypeError("nested lists disallowed")
    # no ListOf(ArrayOf(...)) allowed
    if klass in _array_of_classes:
        raise TypeError("lists of arrays disallowed")

    # define a generic class for lists
    @bacpypes_debugging
    class _ListOf(List):

        # set to *klass* after the class statement, below
        subtype = None

        def __init__(self, value=None):
            if _debug: _ListOf._debug("(%r)__init__ %r (subtype=%r)", self.__class__.__name__, value, self.subtype)
            if value is None:
                self.value = []
            elif isinstance(value, list):
                self.value = value
            else:
                raise TypeError("invalid constructor datatype")

        def append(self, value):
            # atomic subtypes accept anything; AnyAtomic requires an Atomic
            # instance; otherwise the value must be an instance of subtype
            if issubclass(self.subtype, Atomic):
                pass
            elif issubclass(self.subtype, AnyAtomic) and not isinstance(value, Atomic):
                raise TypeError("instance of an atomic type required")
            elif not isinstance(value, self.subtype):
                raise TypeError("%s value required" % (self.subtype.__name__,))
            self.value.append(value)

        def __len__(self):
            return len(self.value)

        def __getitem__(self, item):
            return self.value[item]

        def encode(self, taglist):
            """Append the encoding of every item to *taglist*."""
            if _debug: _ListOf._debug("(%r)encode %r", self.__class__.__name__, taglist)
            for value in self.value:
                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    # a helper cooperates between the atomic value and the tag
                    helper = self.subtype(value)

                    # build a tag and encode the data into it
                    tag = Tag()
                    helper.encode(tag)

                    # now encode the tag
                    taglist.append(tag)
                elif isinstance(value, self.subtype):
                    # it must have its own encoder
                    value.encode(taglist)
                else:
                    raise TypeError("%s must be a %s" % (value, self.subtype.__name__))

        def decode(self, taglist):
            """Consume items from *taglist* until it is exhausted or a
            closing tag (end of the enclosing context) is seen."""
            if _debug: _ListOf._debug("(%r)decode %r", self.__class__.__name__, taglist)
            while len(taglist) != 0:
                tag = taglist.Peek()
                if tag.tagClass == Tag.closingTagClass:
                    return

                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    if _debug: _ListOf._debug(" - building helper: %r %r", self.subtype, tag)
                    taglist.Pop()

                    # a helper cooperates between the atomic value and the tag
                    helper = self.subtype(tag)

                    # save the value
                    self.value.append(helper.value)
                else:
                    if _debug: _ListOf._debug(" - building value: %r", self.subtype)
                    # build an element
                    value = self.subtype()

                    # let it decode itself
                    value.decode(taglist)

                    # save what was built
                    self.value.append(value)

        def debug_contents(self, indent=1, file=sys.stdout, _ids=None):
            """Write a human-readable dump of the items to *file*."""
            i = 0
            for value in self.value:
                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    file.write("%s[%d] = %r\n" % ("    " * indent, i, value))
                elif isinstance(value, self.subtype):
                    file.write("%s[%d]" % ("    " * indent, i))
                    value.debug_contents(indent+1, file, _ids)
                else:
                    file.write("%s[%d] %s must be a %s" % ("    " * indent, i, value, self.subtype.__name__))
                i += 1

        def dict_contents(self, use_dict=None, as_class=dict):
            # return sequences as arrays
            mapped_value = []

            for value in self.value:
                if issubclass(self.subtype, Atomic):
                    mapped_value.append(value)              ### ambiguous
                elif issubclass(self.subtype, AnyAtomic):
                    mapped_value.append(value.value)        ### ambiguous
                elif isinstance(value, self.subtype):
                    mapped_value.append(value.dict_contents(as_class=as_class))

            # return what we built
            return mapped_value

    # constrain it to a list of a specific type of item
    setattr(_ListOf, 'subtype', klass)
    _ListOf.__name__ = 'ListOf' + klass.__name__
    if _debug: ListOf._debug(" - build this class: %r", _ListOf)

    # cache this type
    _list_of_map[klass] = _ListOf
    _list_of_classes[_ListOf] = 1

    # return this new type
    return _ListOf
#
# Array
#
# Arrays of things are a derived class of Array to make it easier to check
# to see if a property is an array of something.
#
class Array(object):
    """Marker base class so classes generated by ArrayOf() can be detected
    with isinstance() (e.g. to tell whether a property is an array)."""
    pass
#
# ArrayOf
#
# cache of generated ArrayOf classes keyed by (klass, fixed_length, prototype),
# plus the reverse membership map used for "is this an ArrayOf class" checks
_array_of_map = {}
_array_of_classes = {}
def ArrayOf(klass, fixed_length=None, prototype=None):
    """Function to return a class that can encode and decode a list of
    some other type.

    The generated class stores its items in a length-prefixed list: index 0
    holds the element count (see __len__/append below), and the items follow
    at indices 1..count.  *fixed_length* pins the array size; *prototype*
    supplies the fill value/object used when growing a fixed-length array.
    """
    global _array_of_map
    global _array_of_classes, _sequence_of_classes

    # check the parameters for consistency
    if issubclass(klass, Atomic):
        if prototype is None:
            pass
        elif not klass.is_valid(prototype):
            raise ValueError("prototype %r not valid for %s" % (prototype, klass.__name__))
    else:
        if prototype is None:
            ### TODO This should be an error, a prototype should always be
            ### required for non-atomic types, even if it's only klass()
            ### for a default object which will be deep copied
            pass
        elif not isinstance(prototype, klass):
            raise ValueError("prototype %r not valid for %s" % (prototype, klass.__name__))

    # build a signature of the parameters
    array_signature = (klass, fixed_length, prototype)

    # if this has already been built, return the cached one
    if array_signature in _array_of_map:
        return _array_of_map[array_signature]

    # no ArrayOf(ArrayOf(...)) allowed
    if klass in _array_of_classes:
        raise TypeError("nested arrays disallowed")
    # no ArrayOf(SequenceOf(...)) allowed
    if klass in _sequence_of_classes:
        raise TypeError("arrays of SequenceOf disallowed")

    # define a generic class for arrays
    # NOTE: this inner class intentionally shares the name of the enclosing
    # factory; it is renamed via __name__ below before being returned
    @bacpypes_debugging
    class ArrayOf(Array):

        subtype = None
        fixed_length = None
        prototype = None

        def __init__(self, value=None):
            if value is None:
                # element 0 is the count; start with an empty array
                self.value = [0]
                if self.fixed_length is not None:
                    self.fix_length(self.fixed_length)
            elif isinstance(value, list):
                if (self.fixed_length is not None) and (len(value) != self.fixed_length):
                    raise ValueError("invalid array length")
                self.value = [len(value)]
                self.value.extend(value)
            else:
                raise TypeError("invalid constructor datatype")

        def fix_length(self, new_length):
            """Grow or shrink the array to *new_length*, filling new slots
            from the prototype (or default-constructed subtype values)."""
            if len(self.value) > new_length + 1:
                # trim off the excess
                del self.value[new_length + 1:]
            elif len(self.value) < new_length + 1:
                # how many do we need
                element_count = new_length - len(self.value) + 1

                # extend or append
                if issubclass(self.subtype, Atomic):
                    if self.prototype is None:
                        extend_value = self.subtype().value
                    else:
                        extend_value = self.prototype
                    self.value.extend( [extend_value] * element_count )
                else:
                    for i in range(element_count):
                        if self.prototype is None:
                            append_value = self.subtype()
                        else:
                            # deep copy so each slot gets an independent object
                            append_value = _deepcopy(self.prototype)
                        self.value.append(append_value)

            self.value[0] = new_length

        def append(self, value):
            if self.fixed_length is not None:
                raise TypeError("fixed length array")
            if issubclass(self.subtype, Atomic):
                pass
            elif issubclass(self.subtype, AnyAtomic) and not isinstance(value, Atomic):
                raise TypeError("instance of an atomic type required")
            elif not isinstance(value, self.subtype):
                raise TypeError("%s value required" % (self.subtype.__name__,))
            self.value.append(value)
            # keep the length prefix in sync
            self.value[0] = len(self.value) - 1

        def __len__(self):
            return self.value[0]

        def __getitem__(self, item):
            # no wrapping index
            if (item < 0) or (item > self.value[0]):
                raise IndexError("index out of range")
            return self.value[item]

        def __setitem__(self, item, value):
            # no wrapping index
            if (item < 0) or (item > self.value[0]):
                raise IndexError("index out of range")

            # special length handling for index 0
            if item == 0:
                if (self.fixed_length is not None):
                    if (value != self.value[0]):
                        raise TypeError("fixed length array")
                    return
                self.fix_length(value)
            else:
                self.value[item] = value

        def __delitem__(self, item):
            if self.fixed_length is not None:
                raise TypeError("fixed length array")

            # no wrapping index
            if (item < 1) or (item > self.value[0]):
                raise IndexError("index out of range")

            # delete the item and update the length
            del self.value[item]
            self.value[0] -= 1

        def __iter__(self):
            # skip the length prefix
            return iter(self.value[1:])

        def index(self, value):
            # only search through values
            for i in range(1, self.value[0] + 1):
                if value == self.value[i]:
                    return i

            # not found
            raise ValueError("%r not in array" % (value,))

        def remove(self, item):
            if self.fixed_length is not None:
                raise TypeError("fixed length array")

            # find the index of the item and delete it
            indx = self.index(item)
            self.__delitem__(indx)

        def encode(self, taglist):
            """Append the encoding of every item (length prefix excluded)."""
            if _debug: ArrayOf._debug("(%r)encode %r", self.__class__.__name__, taglist)
            for value in self.value[1:]:
                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    # a helper cooperates between the atomic value and the tag
                    helper = self.subtype(value)

                    # build a tag and encode the data into it
                    tag = Tag()
                    helper.encode(tag)

                    # now encode the tag
                    taglist.append(tag)
                elif isinstance(value, self.subtype):
                    # it must have its own encoder
                    value.encode(taglist)
                else:
                    raise TypeError("%s must be a %s" % (value, self.subtype.__name__))

        def decode(self, taglist):
            """Rebuild self.value (with length prefix) from *taglist*."""
            if _debug: ArrayOf._debug("(%r)decode %r", self.__class__.__name__, taglist)

            # start with an empty array
            new_value = []

            while len(taglist) != 0:
                tag = taglist.Peek()
                if tag.tagClass == Tag.closingTagClass:
                    break

                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    if _debug: ArrayOf._debug(" - building helper: %r %r", self.subtype, tag)
                    taglist.Pop()

                    # a helper cooperates between the atomic value and the tag
                    helper = self.subtype(tag)

                    # save the value
                    new_value.append(helper.value)
                else:
                    if _debug: ArrayOf._debug(" - building value: %r", self.subtype)
                    # build an element
                    value = self.subtype()

                    # let it decode itself
                    value.decode(taglist)

                    # save what was built
                    new_value.append(value)

            # check the length
            if self.fixed_length is not None:
                if self.fixed_length != len(new_value):
                    raise ValueError("invalid array length")

            # update the length
            self.value = [len(new_value)] + new_value

        def encode_item(self, item, taglist):
            """Encode a single item; index 0 encodes the length as Unsigned."""
            if _debug: ArrayOf._debug("(%r)encode_item %r %r", self.__class__.__name__, item, taglist)
            if item == 0:
                # a helper cooperates between the atomic value and the tag
                helper = Unsigned(self.value[0])

                # build a tag and encode the data into it
                tag = Tag()
                helper.encode(tag)

                # now encode the tag
                taglist.append(tag)
            else:
                value = self.value[item]

                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    # a helper cooperates between the atomic value and the tag
                    helper = self.subtype(self.value[item])

                    # build a tag and encode the data into it
                    tag = Tag()
                    helper.encode(tag)

                    # now encode the tag
                    taglist.append(tag)
                elif isinstance(value, self.subtype):
                    # it must have its own encoder
                    value.encode(taglist)
                else:
                    raise TypeError("%s must be a %s" % (value, self.subtype.__name__))

        def decode_item(self, item, taglist):
            """Decode a single item into self.value.

            NOTE(review): every branch replaces self.value wholesale (a bare
            count for item 0, or a single decoded item) rather than updating
            self.value[item] in place — presumably intentional for
            single-element reads (e.g. ReadProperty with an array index),
            but worth confirming against callers.
            """
            if _debug: ArrayOf._debug("(%r)decode_item %r %r", self.__class__.__name__, item, taglist)
            if item == 0:
                # a helper cooperates between the atomic value and the tag
                helper = Unsigned(taglist.Pop())

                # save the value
                self.value = helper.value
            elif issubclass(self.subtype, (Atomic, AnyAtomic)):
                if _debug: ArrayOf._debug(" - building helper: %r", self.subtype)

                # a helper cooperates between the atomic value and the tag
                helper = self.subtype(taglist.Pop())

                # save the value
                self.value = helper.value
            else:
                if _debug: ArrayOf._debug(" - building value: %r", self.subtype)
                # build an element
                value = self.subtype()

                # let it decode itself
                value.decode(taglist)

                # save what was built
                self.value = value

        def debug_contents(self, indent=1, file=sys.stdout, _ids=None):
            """Write a human-readable dump, length prefix first."""
            try:
                value_list = enumerate(self.value)
            except TypeError:
                # self.value may not be a sequence (see decode_item above)
                file.write("%s(non-sequence) %r\n" % ("    " * indent, self.value))
                return

            for i, value in value_list:
                if i == 0:
                    file.write("%slength = %d\n" % ("    " * indent, value))
                elif issubclass(self.subtype, (Atomic, AnyAtomic)):
                    file.write("%s[%d] = %r\n" % ("    " * indent, i, value))
                elif isinstance(value, self.subtype):
                    file.write("%s[%d]\n" % ("    " * indent, i))
                    value.debug_contents(indent+1, file, _ids)
                else:
                    file.write("%s%s must be a %s" % ("    " * indent, value, self.subtype.__name__))

        def dict_contents(self, use_dict=None, as_class=dict):
            # return arrays as arrays
            # NOTE(review): this iterates self.value including the index-0
            # length prefix — confirm whether the count should be skipped
            mapped_value = []

            for value in self.value:
                if issubclass(self.subtype, Atomic):
                    mapped_value.append(value)              ### ambiguous
                elif issubclass(self.subtype, AnyAtomic):
                    mapped_value.append(value.value)        ### ambiguous
                elif isinstance(value, self.subtype):
                    mapped_value.append(value.dict_contents(as_class=as_class))

            # return what we built
            return mapped_value

    # constrain it to a list of a specific type of item
    setattr(ArrayOf, 'subtype', klass)
    setattr(ArrayOf, 'fixed_length', fixed_length)
    setattr(ArrayOf, 'prototype', prototype)

    # update the name
    ArrayOf.__name__ = 'ArrayOf' + klass.__name__

    # cache this type
    _array_of_map[array_signature] = ArrayOf
    _array_of_classes[ArrayOf] = 1

    # return this new type
    return ArrayOf
#
# Choice
#
@bacpypes_debugging
class Choice(object):
choiceElements = []
def __init__(self, **kwargs):
"""
Create a choice element, optionally providing attribute/property values.
There should only be one, but that is not strictly enforced.
"""
if _debug: Choice._debug("__init__ %r", kwargs)
# split out the keyword arguments that belong to this class
my_kwargs = {}
other_kwargs = {}
for element in self.choiceElements:
if element.name in kwargs:
my_kwargs[element.name] = kwargs[element.name]
for kw in kwargs:
if kw not in my_kwargs:
other_kwargs[kw] = kwargs[kw]
if _debug: Choice._debug(" - my_kwargs: %r", my_kwargs)
if _debug: Choice._debug(" - other_kwargs: %r", other_kwargs)
# call some superclass, if there is one
super(Choice, self).__init__(**other_kwargs)
# set the attribute/property values for the ones provided
for element in self.choiceElements:
setattr(self, element.name, my_kwargs.get(element.name, None))
def encode(self, taglist):
if _debug: Choice._debug("(%r)encode %r", self.__class__.__name__, taglist)
for element in self.choiceElements:
value = getattr(self, element.name, None)
if value is None:
continue
if issubclass(element.klass, (Atomic, AnyAtomic)):
# a helper cooperates between the atomic value and the tag
helper = element.klass(value)
# build a tag and encode the data into it
tag = Tag()
helper.encode(tag)
# convert it to context encoding
if element.context is not None:
tag = tag.app_to_context(element.context)
# now encode the tag
taglist.append(tag)
break
elif isinstance(value, element.klass):
# encode an opening tag
if element.context is not None:
taglist.append(OpeningTag(element.context))
# encode the value
value.encode(taglist)
# encode a closing tag
if element.context is not None:
taglist.append(ClosingTag(element.context))
break
else:
raise TypeError("%s must be a %s" % (element.name, element.klass.__name__))
else:
raise AttributeError("missing choice of %s" % (self.__class__.__name__,))
def decode(self, taglist):
if _debug: Choice._debug("(%r)decode %r", self.__class__.__name__, taglist)
global _sequence_of_classes, _list_of_classes
# peek at the element
tag = taglist.Peek()
if tag is None:
raise AttributeError("missing choice of %s" % (self.__class__.__name__,))
if tag.tagClass == Tag.closingTagClass:
raise AttributeError("missing choice of %s" % (self.__class__.__name__,))
# keep track of which one was found
foundElement = {}
# figure out which choice it is
for element in self.choiceElements:
if _debug: Choice._debug(" - checking choice: %s", element.name)
# check for a sequence element
if (element.klass in _sequence_of_classes) or (element.klass in _list_of_classes):
# check for context encoding
if element.context is None:
raise NotImplementedError("choice of a SequenceOf must be context encoded")
# match the context tag number
if tag.tagClass != Tag.contextTagClass or tag.tagNumber != element.context:
continue
taglist.Pop()
# a helper cooperates between the atomic value and the tag
helper = element.klass()
helper.decode(taglist)
# now save the value
foundElement[element.name] = helper.value
# check for context closing tag
tag = taglist.Pop()
if tag.tagClass != Tag.closingTagClass or tag.tagNumber != element.context:
raise InvalidTag("%s expected closing tag %d" % (element.name, element.context))
# done
if _debug: Choice._debug(" - found choice (sequence)")
break
# check for an atomic element
elif issubclass(element.klass, (Atomic, AnyAtomic)):
# convert it to application encoding
if element.context is not None:
if tag.tagClass != Tag.contextTagClass or tag.tagNumber != element.context:
continue
tag = tag.context_to_app(element.klass._app_tag)
else:
if tag.tagClass != Tag.applicationTagClass or tag.tagNumber != element.klass._app_tag:
continue
# consume the tag
taglist.Pop()
# a helper cooperates between the atomic value and the tag
helper = element.klass(tag)
# now save the value
foundElement[element.name] = helper.value
# done
if _debug: Choice._debug(" - found choice (atomic)")
break
# some kind of structure
else:
# check for context encoding
if element.context is None:
raise NotImplementedError("choice of non-atomic data must be context encoded")
if tag.tagClass != Tag.openingTagClass or tag.tagNumber != element.context:
continue
taglist.Pop()
# build a value and decode it
value = element.klass()
value.decode(taglist)
# now save the value
foundElement[element.name] = value
# check for the correct closing tag
tag = taglist.Pop()
if tag.tagClass != Tag.closingTagClass or tag.tagNumber != element.context:
raise InvalidTag("%s expected closing tag %d" % (element.name, element.context))
# done
if _debug: Choice._debug(" - found choice (structure)")
break
else:
raise AttributeError("missing choice of %s" % (self.__class__.__name__,))
# now save the value and None everywhere else
for element in self.choiceElements:
setattr(self, element.name, foundElement.get(element.name, None))
def debug_contents(self, indent=1, file=sys.stdout, _ids=None):
    """Write a human-readable dump of the chosen element to *file*."""
    pad = " " * indent
    for element in self.choiceElements:
        chosen = getattr(self, element.name, None)
        if chosen is None:
            continue
        if issubclass(element.klass, (Atomic, AnyAtomic)):
            # atomic choice: one line with the repr of the value
            file.write("%s%s = %r\n" % (pad, element.name, chosen))
            break
        if isinstance(chosen, element.klass):
            # constructed choice: name, then recurse into its contents
            file.write("%s%s\n" % (pad, element.name))
            chosen.debug_contents(indent+1, file, _ids)
            break
        # wrong type stored under this name; report it and keep scanning
        file.write("%s%s must be a %s" % (pad, element.name, element.klass.__name__))
    else:
        # the loop finished without a break: nothing was chosen
        file.write("%smissing choice of %s" % (pad, self.__class__.__name__))
def dict_contents(self, use_dict=None, as_class=dict):
    """Return the contents of an object as a dict.

    Finds the chosen element (the first choice attribute that is not
    None), maps its value into *use_dict* under the element name, and
    returns *use_dict*.
    """
    if _debug: Choice._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
    # make/extend the dictionary of content
    if use_dict is None:
        use_dict = as_class()
    # look for the chosen element
    for element in self.choiceElements:
        value = getattr(self, element.name, None)
        if value is None:
            continue
        if issubclass(element.klass, Atomic):
            # NOTE(review): AnyAtomic is declared as a subclass of Atomic
            # later in this file, so this branch also catches AnyAtomic
            # element classes and the next branch is unreachable.
            mapped_value = value ### ambiguous
        elif issubclass(element.klass, AnyAtomic):
            mapped_value = value.value ### ambiguous
        elif isinstance(value, element.klass):
            mapped_value = value.dict_contents(as_class=as_class)
        # NOTE(review): if value matched none of the branches above,
        # mapped_value is unbound here and this raises NameError — confirm
        # whether a type guard is intended.
        use_dict.__setitem__(element.name, mapped_value)
        break
    # return what we built/updated
    return use_dict
#
# Any
#
@bacpypes_debugging
class Any:
    """Container for an arbitrary run of tags.

    An Any instance keeps raw, undecoded tags in self.tagList.  Values
    are folded in with cast_in() and interpreted on demand with
    cast_out(), which lets the application defer the choice of datatype
    until it is known.
    """

    def __init__(self, *args):
        # raw tag storage for the encoded content
        self.tagList = TagList()

        # cast in the args
        for arg in args:
            self.cast_in(arg)

    def encode(self, taglist):
        """Append the stored tags to *taglist*."""
        if _debug: Any._debug("encode %r", taglist)

        taglist.extend(self.tagList)

    def decode(self, taglist):
        """Consume tags from *taglist* into this object.

        Tags are consumed until *taglist* is exhausted or an unmatched
        closing tag is seen (it is left in *taglist* for the enclosing
        decoder).  Raises DecodingError if an opening tag is never closed.
        """
        if _debug: Any._debug("decode %r", taglist)

        lvl = 0
        while len(taglist) != 0:
            tag = taglist.Peek()
            if tag.tagClass == Tag.openingTagClass:
                lvl += 1
            elif tag.tagClass == Tag.closingTagClass:
                lvl -= 1
                # closing tag of an enclosing context: stop, leave it there
                if lvl < 0: break

            self.tagList.append(taglist.Pop())

        # make sure everything balances
        if lvl > 0:
            raise DecodingError("mismatched open/close tags")

    def cast_in(self, element):
        """Encode *element* into the internal tag list."""
        if _debug: Any._debug("cast_in %r", element)

        t = TagList()
        if isinstance(element, Atomic):
            # NOTE(review): AnyAtomic subclasses Atomic, so AnyAtomic
            # instances take this branch; AnyAtomic.encode delegates to
            # its wrapped value, so the result is the same either way.
            tag = Tag()
            element.encode(tag)
            t.append(tag)
        elif isinstance(element, AnyAtomic):
            tag = Tag()
            element.value.encode(tag)
            t.append(tag)
        else:
            # constructed data encodes itself into a tag list
            element.encode(t)

        self.tagList.extend(t.tagList)

    def cast_out(self, klass):
        """Interpret the content as a particular class."""
        if _debug: Any._debug("cast_out %r", klass)
        global _sequence_of_classes, _list_of_classes

        # check for a sequence element
        if (klass in _sequence_of_classes) or (klass in _list_of_classes):
            # build a sequence helper
            helper = klass()

            # make a copy of the tag list so this object is not consumed
            t = TagList(self.tagList[:])

            # let it decode itself
            helper.decode(t)

            # make sure everything was consumed
            if len(t) != 0:
                raise DecodingError("incomplete cast")

            # return what was built
            return helper.value

        # check for an array element
        elif klass in _array_of_classes:
            # build a sequence helper
            helper = klass()

            # make a copy of the tag list
            t = TagList(self.tagList[:])

            # let it decode itself
            helper.decode(t)

            # make sure everything was consumed
            if len(t) != 0:
                raise DecodingError("incomplete cast")

            # skip element zero — presumably the array length slot — so the
            # caller gets Python list semantics (TODO confirm Array layout)
            return helper.value[1:]

        elif issubclass(klass, (Atomic, AnyAtomic)):
            # an atomic value must be exactly one tag
            if len(self.tagList) == 0:
                raise DecodingError("missing cast component")
            if len(self.tagList) > 1:
                raise DecodingError("too many cast components")

            if _debug: Any._debug("    - building helper: %r", klass)

            # a helper cooperates between the atomic value and the tag
            helper = klass(self.tagList[0])

            # return the value
            return helper.value

        else:
            if _debug: Any._debug("    - building value: %r", klass)

            # build an element
            value = klass()

            # make a copy of the tag list
            t = TagList(self.tagList[:])

            # let it decode itself
            value.decode(t)

            # make sure everything was consumed
            if len(t) != 0:
                raise DecodingError("incomplete cast")

            # return what was built
            return value

    def is_application_class_null(self):
        """Return True if the content is a single application-class Null tag."""
        if _debug: Any._debug("is_application_class_null")

        return (len(self.tagList) == 1) and (self.tagList[0].tagClass == Tag.applicationTagClass) and (self.tagList[0].tagNumber == Tag.nullAppTag)

    def debug_contents(self, indent=1, file=sys.stdout, _ids=None):
        """Delegate the debug dump to the tag list."""
        self.tagList.debug_contents(indent, file, _ids)

    def dict_contents(self, use_dict=None, as_class=dict):
        """Return the contents of the object as a list of tag dicts.

        Note: despite the *use_dict* parameter (kept for interface
        symmetry with the other dict_contents methods), the result is a
        list with one as_class() mapping per tag.
        """
        if _debug: Any._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)

        # result will be a list
        rslt_list = []

        # loop through the tags
        for tag in self.tagList:
            # build a tag thing
            tag_dict = as_class()

            # save the pieces
            tag_dict.__setitem__('class', tag.tagClass)
            tag_dict.__setitem__('number', tag.tagNumber)
            tag_dict.__setitem__('lvt', tag.tagLVT)
            ### tag_dict.__setitem__('data', '.'.join('%02X' % ord(c) for c in tag.tagData))

            # append it to the list (bug fix: the accumulator was being
            # rebound to the last tag's dict instead of collecting them all)
            rslt_list.append(tag_dict)

        # return what we built
        return rslt_list
#
# AnyAtomic
#
@bacpypes_debugging
class AnyAtomic(Atomic):
    """Wrapper holding a value of any atomic type.

    The wrapped value lives in self.value as an Atomic instance; it may
    be supplied directly, decoded from an application tag, or left None.
    """

    def __init__(self, arg=None):
        if _debug: AnyAtomic._debug("__init__ %r", arg)

        # no value until one is provided
        self.value = None

        if arg is None:
            return
        if isinstance(arg, Atomic):
            self.value = arg
        elif isinstance(arg, Tag):
            self.value = arg.app_to_object()
        else:
            raise TypeError("invalid constructor datatype")

    def encode(self, tag):
        """Delegate encoding to the wrapped atomic value."""
        if _debug: AnyAtomic._debug("encode %r", tag)

        self.value.encode(tag)

    def decode(self, tag):
        """Replace the wrapped value with one decoded from *tag*."""
        if _debug: AnyAtomic._debug("decode %r", tag)

        # only application-class tags can be turned into an atomic value
        if (tag.tagClass != Tag.applicationTagClass):
            raise ValueError("application tag required")

        self.value = tag.app_to_object()

    @classmethod
    def is_valid(cls, arg):
        """Return True if arg is valid value for the class."""
        # any atomic value qualifies, except another wrapper
        return isinstance(arg, Atomic) and not isinstance(arg, AnyAtomic)

    def __str__(self):
        cls_name = self.__class__.__name__
        return "%s(%s)" % (cls_name, str(self.value))

    def __repr__(self):
        desc = self.__module__ + '.' + self.__class__.__name__
        if self.value:
            # include the wrapped value's class and its string form
            desc = desc + "(" + self.value.__class__.__name__ + ")"
            desc = desc + ' ' + str(self.value)
        return '<' + desc + ' instance at 0x%08x' % (id(self),) + '>'
#
# SequenceOfAny
#
@bacpypes_debugging
class SequenceOfAny(Any):
    """An Any whose content is restricted to List values."""

    def cast_in(self, element):
        """encode the element into the internal tag list."""
        if _debug: SequenceOfAny._debug("cast_in %r", element)

        # only lists may be folded in
        if not isinstance(element, List):
            raise EncodingError("%r is not a list" % (element,))

        encoded = TagList()
        element.encode(encoded)
        self.tagList.extend(encoded.tagList)

    def cast_out(self, klass):
        """Interpret the content as a particular class."""
        if _debug: SequenceOfAny._debug("cast_out %r", klass)

        # only list classes may be cast out
        if not issubclass(klass, List):
            raise DecodingError("%r is not a list" % (klass,))

        # decode a working copy of the tags so this object is untouched
        helper = klass()
        work = TagList(self.tagList[:])
        helper.decode(work)

        # every tag must have been consumed
        if len(work) != 0:
            raise DecodingError("incomplete cast")

        return helper.value
|
JoelBender/bacpypes
|
py34/bacpypes/constructeddata.py
|
Python
|
mit
| 54,681
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.