import itertools
import time
import traceback
import jsonschema
from novaclient import exceptions as nova_exc
import six
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils
from rally import consts
from rally import exceptions
LOG = logging.getLogger(__name__)
def get_status(resource, status_attr="status"):
"""Get the status of a given resource object.
The status is returned in upper case. The status is checked for the
standard field names with special cases for Heat and Ceilometer.
:param resource: The resource object or dict.
    :param status_attr: Allows specifying a non-standard status field.
:return: The status or "NONE" if it is not available.
"""
for s_attr in ["stack_status", "state", status_attr]:
status = getattr(resource, s_attr, None)
if isinstance(status, six.string_types):
return status.upper()
# Dict case
if ((isinstance(resource, dict) and status_attr in resource.keys() and
isinstance(resource[status_attr], six.string_types))):
return resource[status_attr].upper()
return "NONE"
class resource_is(object):
def __init__(self, desired_status, status_getter=None):
self.desired_status = desired_status
self.status_getter = status_getter or get_status
def __call__(self, resource):
return self.status_getter(resource) == self.desired_status.upper()
def __str__(self):
return str(self.desired_status)
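# Illustrative usage (doctest-style, not part of the original module):
#     >>> get_status({"status": "active"})
#     'ACTIVE'
#     >>> resource_is("active")({"status": "ACTIVE"})
#     True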
def get_from_manager(error_statuses=None):
error_statuses = error_statuses or ["ERROR"]
    error_statuses = [s.upper() for s in error_statuses]
def _get_from_manager(resource):
# catch client side errors
try:
res = resource.manager.get(resource.id)
except Exception as e:
if getattr(e, "code", getattr(e, "http_status", 400)) == 404:
raise exceptions.GetResourceNotFound(resource=resource)
raise exceptions.GetResourceFailure(resource=resource, err=e)
# catch abnormal status, such as "no valid host" for servers
status = get_status(res)
if status in ("DELETED", "DELETE_COMPLETE"):
raise exceptions.GetResourceNotFound(resource=res)
if status in error_statuses:
raise exceptions.GetResourceErrorStatus(
resource=res, status=status,
fault=getattr(res, "fault", "n/a"))
return res
return _get_from_manager
def manager_list_size(sizes):
def _list(mgr):
return len(mgr.list()) in sizes
return _list
@utils.log_deprecated("Use wait_for_status instead.", "0.1.2", once=True)
def wait_for(resource, is_ready=None, ready_statuses=None,
failure_statuses=None, status_attr="status", update_resource=None,
timeout=60, check_interval=1):
"""Waits for the given resource to come into the one of the given statuses.
The method can be used to check resource for status with a `is_ready`
function or with a list of expected statuses and the status attribute
In case when the is_ready checker is not provided the resource should have
status_attr. It may be an object attribute or a dictionary key. The value
of the attribute is checked against ready statuses list and failure
statuses. In case of a failure the wait exits with an exception. The
resource is updated between iterations with an update_resource call.
:param is_ready: A predicate that should take the resource object and
return True iff it is ready to be returned
:param ready_statuses: List of statuses which mean that the resource is
ready
:param failure_statuses: List of statuses which mean that an error has
occurred while waiting for the resource
:param status_attr: The name of the status attribute of the resource
:param update_resource: Function that should take the resource object
and return an 'updated' resource. If set to
None, no result updating is performed
:param timeout: Timeout in seconds after which a TimeoutException will be
raised
:param check_interval: Interval in seconds between the two consecutive
readiness checks
:returns: The "ready" resource object
"""
if is_ready is not None:
return wait_is_ready(resource=resource, is_ready=is_ready,
update_resource=update_resource, timeout=timeout,
check_interval=check_interval)
else:
return wait_for_status(resource=resource,
ready_statuses=ready_statuses,
failure_statuses=failure_statuses,
status_attr=status_attr,
update_resource=update_resource,
timeout=timeout,
check_interval=check_interval)
@utils.log_deprecated("Use wait_for_status instead.", "0.1.2", once=True)
def wait_is_ready(resource, is_ready, update_resource=None,
timeout=60, check_interval=1):
resource_repr = getattr(resource, "name", repr(resource))
start = time.time()
while True:
if update_resource is not None:
resource = update_resource(resource)
if is_ready(resource):
return resource
time.sleep(check_interval)
if time.time() - start > timeout:
raise exceptions.TimeoutException(
desired_status=str(is_ready),
resource_name=resource_repr,
resource_type=resource.__class__.__name__,
resource_id=getattr(resource, "id", "<no id>"),
resource_status=get_status(resource))
def wait_for_status(resource, ready_statuses, failure_statuses=None,
status_attr="status", update_resource=None,
timeout=60, check_interval=1, check_deletion=False):
resource_repr = getattr(resource, "name", repr(resource))
if not isinstance(ready_statuses, (set, list, tuple)):
raise ValueError("Ready statuses should be supplied as set, list or "
"tuple")
if failure_statuses and not isinstance(failure_statuses,
(set, list, tuple)):
raise ValueError("Failure statuses should be supplied as set, list or "
"tuple")
# make all statuses upper case
ready_statuses = set([s.upper() for s in ready_statuses or []])
failure_statuses = set([s.upper() for s in failure_statuses or []])
if len(ready_statuses & failure_statuses) > 0:
raise ValueError(
"Can't wait for resource's %s status. Ready and Failure"
"statuses conflict." % resource_repr)
if not ready_statuses:
raise ValueError(
"Can't wait for resource's %s status. No ready "
"statuses provided" % resource_repr)
if not update_resource:
raise ValueError(
"Can't wait for resource's %s status. No update method."
% resource_repr)
start = time.time()
latest_status = get_status(resource, status_attr)
latest_status_update = start
while True:
try:
resource = update_resource(resource)
except exceptions.GetResourceNotFound:
if check_deletion:
return
else:
raise
status = get_status(resource, status_attr)
if status != latest_status:
current_time = time.time()
delta = current_time - latest_status_update
LOG.debug(
"Waiting for resource %(resource)s. Status changed: "
"%(latest)s => %(current)s in %(delta)s" %
{"resource": resource_repr, "latest": latest_status,
"current": status, "delta": delta})
latest_status = status
latest_status_update = current_time
if status in ready_statuses:
return resource
if status in failure_statuses:
raise exceptions.GetResourceErrorStatus(
resource=resource,
status=status,
fault="Status in failure list %s" % str(failure_statuses))
time.sleep(check_interval)
if time.time() - start > timeout:
raise exceptions.TimeoutException(
desired_status=ready_statuses,
resource_name=resource_repr,
resource_type=resource.__class__.__name__,
resource_id=getattr(resource, "id", "<no id>"),
resource_status=get_status(resource))
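# Illustrative sketch (the "server" object and its manager are hypothetical,
# not part of this module): a typical caller polls an OpenStack resource with
# get_from_manager() as the update hook until it reaches a ready status.
#
#     server = wait_for_status(server,
#                              ready_statuses=["ACTIVE"],
#                              failure_statuses=["ERROR"],
#                              update_resource=get_from_manager(),
#                              timeout=120,
#                              check_interval=2)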
@utils.log_deprecated("Use wait_for_status instead.", "0.1.2", once=True)
def wait_for_delete(resource, update_resource=None, timeout=60,
check_interval=1):
"""Wait for the full deletion of resource.
:param update_resource: Function that should take the resource object
and return an 'updated' resource, or raise
exception rally.exceptions.GetResourceNotFound
that means that resource is deleted.
:param timeout: Timeout in seconds after which a TimeoutException will be
raised
:param check_interval: Interval in seconds between the two consecutive
readiness checks
"""
start = time.time()
while True:
try:
resource = update_resource(resource)
except exceptions.GetResourceNotFound:
break
time.sleep(check_interval)
if time.time() - start > timeout:
raise exceptions.TimeoutException(
desired_status="deleted",
resource_name=getattr(resource, "name", repr(resource)),
resource_type=resource.__class__.__name__,
resource_id=getattr(resource, "id", "<no id>"),
resource_status=get_status(resource))
def format_exc(exc):
return [exc.__class__.__name__, str(exc), traceback.format_exc()]
def infinite_run_args_generator(args_func):
for i in itertools.count():
yield args_func(i)
def check_service_status(client, service_name):
"""Check if given openstack service is enabled and state is up."""
try:
for service in client.services.list():
if service_name in str(service):
if service.status == "enabled" and service.state == "up":
return True
except nova_exc.NotFound:
LOG.warning(_("Unable to retrieve a list of available services from "
"nova. Pre-Grizzly OpenStack deployment?"))
return False
return False
class ActionBuilder(object):
"""Builder class for mapping and creating action objects.
An action list is an array of single key/value dicts which takes
the form:
[{"action": times}, {"action": times}...]
    Here 'action' is a string which indicates an action to perform and
    'times' is a non-zero positive integer which specifies how many
    times to run the action in sequence.
    This utility builder class will build and return methods which
    wrap the action call the given number of times.
"""
SCHEMA_TEMPLATE = {
"type": "array",
"$schema": consts.JSON_SCHEMA,
"items": {
"type": "object",
"properties": {},
"additionalProperties": False,
"minItems": 0
}
}
ITEM_TEMPLATE = {
"type": "integer",
"minimum": 0,
"exclusiveMinimum": True,
"optional": True
}
def __init__(self, action_keywords):
"""Create a new instance of the builder for the given action keywords.
:param action_keywords: A list of strings which are the keywords this
instance of the builder supports.
"""
self._bindings = {}
self.schema = dict(ActionBuilder.SCHEMA_TEMPLATE)
for kw in action_keywords:
self.schema["items"]["properties"][kw] = (
ActionBuilder.ITEM_TEMPLATE)
def bind_action(self, action_key, action, *args, **kwargs):
"""Bind an action to an action key.
        Static args/kwargs can optionally be bound.
:param action_key: The action keyword to bind the action to.
:param action: A method/function to call for the action.
:param args: (optional) Static positional args to prepend
to all invocations of the action.
:param kwargs: (optional) Static kwargs to prepend to all
invocations of the action.
"""
self.validate([{action_key: 1}])
self._bindings[action_key] = {
"action": action,
"args": args or (),
"kwargs": kwargs or {}
}
def validate(self, actions):
"""Validate the list of action objects against the builder schema.
:param actions: The list of action objects to validate.
"""
jsonschema.validate(actions, self.schema)
def _build(self, func, times, *args, **kwargs):
"""Build the wrapper action call."""
def _f():
for i in range(times):
func(*args, **kwargs)
return _f
def build_actions(self, actions, *args, **kwargs):
"""Build a list of callable actions.
        The list of callable actions is based on the given action object list
        and the actions bound to this builder.
        :param actions: A list of action objects to build callable
        actions for.
        :param args: (optional) Positional args to pass into each
        built action. These will be appended to any args set for the
        action via its binding.
        :param kwargs: (optional) Keyword args to pass into each built
        action. These will be appended to any kwargs set for the action
        via its binding.
"""
self.validate(actions)
bound_actions = []
for action in actions:
action_key = list(action)[0]
times = action.get(action_key)
binding = self._bindings.get(action_key)
dft_kwargs = dict(binding["kwargs"])
dft_kwargs.update(kwargs or {})
bound_actions.append(
self._build(binding["action"], times,
*(binding["args"] + args), **dft_kwargs))
return bound_actions
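# Illustrative sketch (reboot_fn and its arguments are hypothetical): given
#
#     builder = ActionBuilder(["soft_reboot", "hard_reboot"])
#     builder.bind_action("soft_reboot", reboot_fn, "soft")
#     builder.bind_action("hard_reboot", reboot_fn, "hard")
#
# the action list [{"soft_reboot": 2}, {"hard_reboot": 1}] validates against
# the generated schema, and builder.build_actions([...]) returns one
# zero-argument callable per entry; calling it invokes the bound function the
# requested number of times with the binding's static args followed by any
# extra args/kwargs passed to build_actions().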
|
{
"content_hash": "c22d73e91f4d8ba1c02415fdfd4f02e2",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 79,
"avg_line_length": 37.25699745547074,
"alnum_prop": 0.6046305149569731,
"repo_name": "aforalee/RRally",
"id": "3a978def15169f69468f9a5561c295322c601103",
"size": "15272",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "rally/task/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "48863"
},
{
"name": "Python",
"bytes": "2746795"
},
{
"name": "Shell",
"bytes": "43908"
}
],
"symlink_target": ""
}
|
def extractBackstage02PressbooksCom(item):
'''
Parser for 'backstage02.pressbooks.com'
'''
if 'Manga' in item['tags']:
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
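# Illustrative behaviour (item contents are hypothetical): an item tagged
# 'PRC' whose title parses to a volume/chapter is forwarded to
# buildReleaseMessageWithType() with tl_type='translated'; items tagged
# 'Manga', previews, and titles without a volume or chapter yield None, and
# anything not matched by the tagmap returns False.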
|
{
"content_hash": "cae1aff3cc2fa7d70ee14e07ad2238b7",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 104,
"avg_line_length": 27.545454545454547,
"alnum_prop": 0.636963696369637,
"repo_name": "fake-name/ReadableWebProxy",
"id": "f3e5d382382a622f700c251aa270bc55a322e4c8",
"size": "606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractBackstage02PressbooksCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
import sys
import argparse
import pyfastaq
import pymummer
import subprocess
import os
parser = argparse.ArgumentParser(
description = '''Compares FASTA files with blast or nucmer, writes input files for ACT.
Then start ACT. Files from top to bottom in ACT are same as order
listed on command line when this script is run.''',
usage = '%(prog)s [options] <blast|nucmer|promer> <outdir> <file1.fa> <file2.fa> [<file3.fa ...]')
parser.add_argument('--blast_ops', help='blastall options [%(default)s]', default='-p blastn -m 8 -F F -e 0.01 -b 10000 -v 10000')
parser.add_argument('--nucmer_ops', help='nucmer or promer options [promer:--maxmatch. nucmer: --maxmatch --nosimplify]')
parser.add_argument('--no_delta_filter', action='store_true')
parser.add_argument('--no_act', action='store_true', help='Do not start act, just make comparison files etc')
parser.add_argument('--delta_ops', help='delta-filter options [%(default)s]', default='-m')
parser.add_argument('aln_tool', help='blast, nucmer or promer')
parser.add_argument('outdir', help='Output directory (must not already exist)')
parser.add_argument('fa_list', help='List of fasta files', nargs=argparse.REMAINDER)
options = parser.parse_args()
assert len(options.fa_list) > 1
def index_to_union(ops, i):
return os.path.join(ops.outdir, 'infile.' + str(i) + '.union.fa')
def compare_with_blast(qry, ref, ops, outfile):
subprocess.check_output('formatdb -l ' + os.path.join(ops.outdir, '.formatdb.log') + ' -p F -i ' + ref, shell=True)
cmd = ' '.join([
'blastall', ops.blast_ops,
'-d', ref,
'-i', qry,
'-o', outfile
])
subprocess.check_output(cmd, shell=True)
def compare_with_nucmer(qry, ref, ops, outfile):
nucmer_out = outfile + '.nucmer.out'
delta_file = nucmer_out + '.delta'
filtered_file = delta_file + '.filter'
coords_file = filtered_file + '.coords'
if ops.nucmer_ops is None:
if ops.aln_tool == 'promer':
ops.nucmer_ops = '--maxmatch'
else:
ops.nucmer_ops = '--maxmatch --nosimplify'
cmd = ' '.join([
ops.aln_tool,
ops.nucmer_ops,
'-p', nucmer_out,
ref,
qry,
])
print('cmd:', cmd)
pyfastaq.utils.syscall(cmd)
if ops.no_delta_filter:
cmd = 'cp ' + delta_file + ' ' + filtered_file
else:
cmd = ' '.join([
'delta-filter',
ops.delta_ops,
delta_file,
'>', filtered_file,
])
print('cmd:', cmd)
pyfastaq.utils.syscall(cmd)
cmd = ' '.join([
'show-coords -dTlroH',
filtered_file,
'>', coords_file
])
print('cmd:', cmd)
pyfastaq.utils.syscall(cmd)
pyfastaq.utils.syscall('samtools faidx ' + qry)
pyfastaq.utils.syscall('samtools faidx ' + ref)
pymummer.coords_file.convert_to_msp_crunch(coords_file, outfile, qry + '.fai', ref + '.fai')
# check files exist
for i in range(len(options.fa_list)):
if not os.path.exists(options.fa_list[i]):
print('File not found:', options.fa_list[i], file=sys.stderr)
sys.exit(1)
options.fa_list[i] = os.path.abspath(options.fa_list[i])
try:
os.mkdir(options.outdir)
except:
print('Error making output directory', options.outdir)
sys.exit(1)
# make union files
for i in range(len(options.fa_list)):
seq = pyfastaq.sequences.Fasta('union', '')
reader = pyfastaq.sequences.file_reader(options.fa_list[i])
new_seq = []
for s in reader:
new_seq.append(s.seq)
f = pyfastaq.utils.open_file_write(index_to_union(options, i))
seq.seq = ''.join(new_seq)
print(seq, file=f)
pyfastaq.utils.close(f)
act_command = 'act ' + options.fa_list[0]
# run alignments
for i in range(len(options.fa_list)-1):
qry = index_to_union(options, i+1)
ref = index_to_union(options, i)
outfile = 'compare.' + str(i) + '.vs.' + str(i+1)
outfile_abs = os.path.join(options.outdir, outfile)
if options.aln_tool == 'blast':
compare_with_blast(qry, ref, options, outfile_abs)
elif options.aln_tool in ['nucmer', 'promer']:
compare_with_nucmer(qry, ref, options, outfile_abs)
else:
        sys.exit('Unknown alignment tool: ' + options.aln_tool)
act_command += ' ' + outfile + ' ' + options.fa_list[i+1]
# delete temporary union files
for i in range(len(options.fa_list)):
filename = index_to_union(options, i)
os.unlink(filename)
try:
os.unlink(filename + '.fai')
except:
pass
# write ACT script
try:
os.chdir(options.outdir)
except:
print('Error chdir', options.outdir)
sys.exit(1)
act_script = 'start_act.sh'
with open(act_script, 'w') as f:
print('#!/usr/bin/env bash', file=f)
print('set -e', file=f)
print('dir=$(dirname $0)', file=f)
print('cd $dir', file=f)
print(act_command, file=f)
os.chmod(act_script, 0o755)
if not options.no_act:
subprocess.check_output('./' + act_script, shell=True)
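# Example invocation (illustrative file names): comparing three assemblies
# with nucmer and viewing the result in ACT:
#
#     multi_act.py nucmer out_dir assembly1.fa assembly2.fa assembly3.fa
#
# This writes compare.0.vs.1 and compare.1.vs.2 crunch files plus start_act.sh
# into out_dir; ACT then shows the sequences stacked top to bottom in the
# order they were listed on the command line.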
|
{
"content_hash": "6a14ca22586d987c364d5429a5409864",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 130,
"avg_line_length": 31.030864197530864,
"alnum_prop": 0.617863536900736,
"repo_name": "martinghunt/bioinf-scripts",
"id": "9ab6a74d75e664ed0847743fb9d4cdd2dafe19cf",
"size": "5051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/multi_act.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "13289"
},
{
"name": "Python",
"bytes": "47417"
},
{
"name": "R",
"bytes": "2223"
},
{
"name": "Shell",
"bytes": "1364"
}
],
"symlink_target": ""
}
|
import unittest
import sys
from plumbum import local
import re
#WARNING: this unittest should be run within the integration test folder
class TestTimingBackAnnotation(unittest.TestCase):
def extractTiming(self,output):
#patterns for extracting the timing information
patternVpr = re.compile(r'Final critical path: (?P<time>\S+) ns')
patternZuma = re.compile(r'Critical path max delay is: (?P<time>\S+) ps')
#first extract the timing from the output
timingVprStr = patternVpr.search(output).group('time')
        #findall uses the first group here implicitly and returns a list of times
timingZumaStrs = patternZuma.findall(output)
        #vpr uses ns and zuma uses ps
timingVpr = float(timingVprStr)*1000
timingZuma = float(timingZumaStrs[1])
#return the results
return (timingVpr,timingZuma)
def setUp(self):
#get the path of the compile script
self.zumaTestDir = local.path(__file__).parent.parent
self.zumaTestFiles = self.zumaTestDir / "verilog"
self.zumaDir = self.zumaTestDir.parent
compilePath = self.zumaDir / "example/compile.sh"
self.compileScript = local[compilePath]
def test_combinatorial(self):
(returnCode,output,stderr) = self.compileScript[
str(self.zumaTestFiles / "simple.v"),
"--config",
str(self.zumaTestFiles / "zuma_config_timing.py")
].run()
        #first test if the circuits are equivalent.
self.assertEqual(returnCode, 0,"Compilation failed: " + output)
#now check if the timing is the same.
(timingVpr,timingZuma) = self.extractTiming(output)
#now check the timing
#we have sometimes a difference in the lower bits
        self.assertTrue((abs(timingVpr-timingZuma) < 0.1), "Timing differs: zuma: " + str(timingZuma) + " vpr: " + str(timingVpr) + " Output: " + output)
#run the next test
(returnCode,output,stderr) = self.compileScript[
str(self.zumaTestFiles / "comb.v"),
"--config",
str(self.zumaTestFiles / "zuma_config_timing.py")
].run()
        #first test if the circuits are equivalent.
self.assertEqual(returnCode, 0,"Compilation failed: " + output)
#now check if the timing is the same.
(timingVpr,timingZuma) = self.extractTiming(output)
#we have sometimes a difference in the lower bits
        self.assertTrue((abs(timingVpr-timingZuma) < 0.1), "Timing differs: zuma: " + str(timingZuma) + " vpr: " + str(timingVpr) + " Output: " + output)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "fd1cf1853449c2e96e102c686113ee03",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 146,
"avg_line_length": 37.46153846153846,
"alnum_prop": 0.5855578370978781,
"repo_name": "adbrant/zuma-fpga",
"id": "e021272076ce9117143f480b2731ac79c6b309a3",
"size": "2922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/TimingBackAnnotation.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Coq",
"bytes": "5741"
},
{
"name": "OpenEdge ABL",
"bytes": "305"
},
{
"name": "Python",
"bytes": "231487"
},
{
"name": "R",
"bytes": "1187"
},
{
"name": "Shell",
"bytes": "11823"
},
{
"name": "Tcl",
"bytes": "22949"
},
{
"name": "Verilog",
"bytes": "954550"
}
],
"symlink_target": ""
}
|
"""Concrete date/time and related types -- prototype implemented in Python.
See http://www.zope.org/Members/fdrake/DateTimeWiki/FrontPage
See also http://dir.yahoo.com/Reference/calendars/
For a primer on DST, including many current DST rules, see
http://webexhibits.org/daylightsaving/
For more about DST than you ever wanted to know, see
ftp://elsie.nci.nih.gov/pub/
Sources for time zone and DST data: http://www.twinsun.com/tz/tz-link.htm
This was originally copied from the sandbox of the CPython CVS repository.
Thanks to Tim Peters for suggesting using it.
"""
import time as _time
import math as _math
def _cmp(x, y):
return 0 if x == y else 1 if x > y else -1
MINYEAR = 1
MAXYEAR = 9999
_MAXORDINAL = 3652059 # date.max.toordinal()
# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions. Difference: Dates.py calls January 1 of year 0 day
# number 1. The code here calls January 1 of year 1 day number 1. This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations. See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.
_DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_DAYS_BEFORE_MONTH = [None]
dbm = 0
for dim in _DAYS_IN_MONTH[1:]:
_DAYS_BEFORE_MONTH.append(dbm)
dbm += dim
del dbm, dim
def _is_leap(year):
"year -> 1 if leap year, else 0."
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def _days_before_year(year):
"year -> number of days before January 1st of year."
y = year - 1
return y*365 + y//4 - y//100 + y//400
def _days_in_month(year, month):
"year, month -> number of days in that month in that year."
assert 1 <= month <= 12, month
if month == 2 and _is_leap(year):
return 29
return _DAYS_IN_MONTH[month]
def _days_before_month(year, month):
"year, month -> number of days in year preceeding first day of month."
assert 1 <= month <= 12, 'month must be in 1..12'
return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year))
def _ymd2ord(year, month, day):
"year, month, day -> ordinal, considering 01-Jan-0001 as day 1."
assert 1 <= month <= 12, 'month must be in 1..12'
dim = _days_in_month(year, month)
assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
return (_days_before_year(year) +
_days_before_month(year, month) +
day)
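# Illustrative sanity checks (doctest-style, not part of the original module):
#     >>> _is_leap(2000), _is_leap(1900)
#     (True, False)
#     >>> _days_before_month(2016, 3)    # Jan + Feb of a leap year
#     60
#     >>> _ymd2ord(1, 1, 1)              # 01-Jan-0001 is day 1
#     1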
_DI400Y = _days_before_year(401) # number of days in 400 years
_DI100Y = _days_before_year(101) # " " " " 100 "
_DI4Y = _days_before_year(5) # " " " " 4 "
# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1
# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1
# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1
def _ord2ymd(n):
"ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."
# n is a 1-based index, starting at 1-Jan-1. The pattern of leap years
# repeats exactly every 400 years. The basic strategy is to find the
# closest 400-year boundary at or before n, then work with the offset
# from that boundary to n. Life is much clearer if we subtract 1 from
# n first -- then the values of n at 400-year boundaries are exactly
# those divisible by _DI400Y:
#
# D M Y n n-1
# -- --- ---- ---------- ----------------
# 31 Dec -400 -_DI400Y -_DI400Y -1
# 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary
# ...
# 30 Dec 000 -1 -2
# 31 Dec 000 0 -1
# 1 Jan 001 1 0 400-year boundary
# 2 Jan 001 2 1
# 3 Jan 001 3 2
# ...
# 31 Dec 400 _DI400Y _DI400Y -1
# 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary
n -= 1
n400, n = divmod(n, _DI400Y)
year = n400 * 400 + 1 # ..., -399, 1, 401, ...
# Now n is the (non-negative) offset, in days, from January 1 of year, to
# the desired date. Now compute how many 100-year cycles precede n.
# Note that it's possible for n100 to equal 4! In that case 4 full
# 100-year cycles precede the desired day, which implies the desired
# day is December 31 at the end of a 400-year cycle.
n100, n = divmod(n, _DI100Y)
# Now compute how many 4-year cycles precede it.
n4, n = divmod(n, _DI4Y)
# And now how many single years. Again n1 can be 4, and again meaning
# that the desired day is December 31 at the end of the 4-year cycle.
n1, n = divmod(n, 365)
year += n100 * 100 + n4 * 4 + n1
if n1 == 4 or n100 == 4:
assert n == 0
return year-1, 12, 31
# Now the year is correct, and n is the offset from January 1. We find
# the month via an estimate that's either exact or one too large.
leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
assert leapyear == _is_leap(year)
month = (n + 50) >> 5
preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
if preceding > n: # estimate is too large
month -= 1
preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
n -= preceding
assert 0 <= n < _days_in_month(year, month)
# Now the year and month are correct, and n is the offset from the
# start of that month: we're done!
return year, month, n+1
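# Round-trip check (illustrative): _ord2ymd() inverts _ymd2ord().
#     >>> _ord2ymd(_ymd2ord(2016, 2, 29))
#     (2016, 2, 29)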
# Month and day names. For localized versions, see the calendar module.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
wday = (_ymd2ord(y, m, d) + 6) % 7
dnum = _days_before_month(y, m) + d
return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
def _format_time(hh, mm, ss, us):
# Skip trailing microseconds when us==0.
result = "%02d:%02d:%02d" % (hh, mm, ss)
if us:
result += ".%06d" % us
return result
# Correctly substitute for %z and %Z escapes in strftime formats.
def _wrap_strftime(object, format, timetuple):
# Don't call utcoffset() or tzname() unless actually needed.
freplace = None # the string to use for %f
zreplace = None # the string to use for %z
Zreplace = None # the string to use for %Z
# Scan format for %z and %Z escapes, replacing as needed.
newformat = []
push = newformat.append
i, n = 0, len(format)
while i < n:
ch = format[i]
i += 1
if ch == '%':
if i < n:
ch = format[i]
i += 1
if ch == 'f':
if freplace is None:
freplace = '%06d' % getattr(object,
'microsecond', 0)
newformat.append(freplace)
elif ch == 'z':
if zreplace is None:
zreplace = ""
if hasattr(object, "utcoffset"):
offset = object.utcoffset()
if offset is not None:
sign = '+'
if offset.days < 0:
offset = -offset
sign = '-'
h, m = divmod(offset, timedelta(hours=1))
assert not m % timedelta(minutes=1), "whole minute"
m //= timedelta(minutes=1)
zreplace = '%c%02d%02d' % (sign, h, m)
assert '%' not in zreplace
newformat.append(zreplace)
elif ch == 'Z':
if Zreplace is None:
Zreplace = ""
if hasattr(object, "tzname"):
s = object.tzname()
if s is not None:
# strftime is going to have at this: escape %
Zreplace = s.replace('%', '%%')
newformat.append(Zreplace)
else:
push('%')
push(ch)
else:
push('%')
else:
push(ch)
newformat = "".join(newformat)
return _time.strftime(newformat, timetuple)
def _call_tzinfo_method(tzinfo, methname, tzinfoarg):
if tzinfo is None:
return None
return getattr(tzinfo, methname)(tzinfoarg)
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
if name is not None and not isinstance(name, str):
raise TypeError("tzinfo.tzname() must return None or string, "
"not '%s'" % type(name))
# name is the offset-producing method, "utcoffset" or "dst".
# offset is what it returned.
# If offset isn't None or timedelta, raises TypeError.
# If offset is None, returns None.
# Else offset is checked for being in range, and a whole # of minutes.
# If it is, its integer value is returned. Else ValueError is raised.
def _check_utc_offset(name, offset):
assert name in ("utcoffset", "dst")
if offset is None:
return
if not isinstance(offset, timedelta):
raise TypeError("tzinfo.%s() must return None "
"or timedelta, not '%s'" % (name, type(offset)))
if offset % timedelta(minutes=1) or offset.microseconds:
raise ValueError("tzinfo.%s() must return a whole number "
"of minutes, got %s" % (name, offset))
if not -timedelta(1) < offset < timedelta(1):
raise ValueError("%s()=%s, must be must be strictly between"
" -timedelta(hours=24) and timedelta(hours=24)"
% (name, offset))
def _check_date_fields(year, month, day):
if not isinstance(year, int):
raise TypeError('int expected')
if not MINYEAR <= year <= MAXYEAR:
raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
if not 1 <= month <= 12:
raise ValueError('month must be in 1..12', month)
dim = _days_in_month(year, month)
if not 1 <= day <= dim:
raise ValueError('day must be in 1..%d' % dim, day)
def _check_time_fields(hour, minute, second, microsecond):
if not isinstance(hour, int):
raise TypeError('int expected')
if not 0 <= hour <= 23:
raise ValueError('hour must be in 0..23', hour)
if not 0 <= minute <= 59:
raise ValueError('minute must be in 0..59', minute)
if not 0 <= second <= 59:
raise ValueError('second must be in 0..59', second)
if not 0 <= microsecond <= 999999:
raise ValueError('microsecond must be in 0..999999', microsecond)
def _check_tzinfo_arg(tz):
if tz is not None and not isinstance(tz, tzinfo):
raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
def _cmperror(x, y):
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
class timedelta:
"""Represent the difference between two datetime objects.
Supported operators:
- add, subtract timedelta
- unary plus, minus, abs
- compare to timedelta
- multiply, divide by int/long
In addition, datetime supports subtraction of two datetime objects
returning a timedelta, and addition or subtraction of a datetime
and a timedelta giving a datetime.
Representation: (days, seconds, microseconds). Why? Because I
felt like it.
"""
__slots__ = '_days', '_seconds', '_microseconds'
def __new__(cls, days=0, seconds=0, microseconds=0,
milliseconds=0, minutes=0, hours=0, weeks=0):
# Doing this efficiently and accurately in C is going to be difficult
# and error-prone, due to ubiquitous overflow possibilities, and that
# C double doesn't have enough bits of precision to represent
# microseconds over 10K years faithfully. The code here tries to make
# explicit where go-fast assumptions can be relied on, in order to
# guide the C implementation; it's way more convoluted than speed-
# ignoring auto-overflow-to-long idiomatic Python could be.
# XXX Check that all inputs are ints or floats.
# Final values, all integer.
# s and us fit in 32-bit signed ints; d isn't bounded.
d = s = us = 0
# Normalize everything to days, seconds, microseconds.
days += weeks*7
seconds += minutes*60 + hours*3600
microseconds += milliseconds*1000
# Get rid of all fractions, and normalize s and us.
# Take a deep breath <wink>.
if isinstance(days, float):
dayfrac, days = _math.modf(days)
daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
assert daysecondswhole == int(daysecondswhole) # can't overflow
s = int(daysecondswhole)
assert days == int(days)
d = int(days)
else:
daysecondsfrac = 0.0
d = days
assert isinstance(daysecondsfrac, float)
assert abs(daysecondsfrac) <= 1.0
assert isinstance(d, int)
assert abs(s) <= 24 * 3600
# days isn't referenced again before redefinition
if isinstance(seconds, float):
secondsfrac, seconds = _math.modf(seconds)
assert seconds == int(seconds)
seconds = int(seconds)
secondsfrac += daysecondsfrac
assert abs(secondsfrac) <= 2.0
else:
secondsfrac = daysecondsfrac
# daysecondsfrac isn't referenced again
assert isinstance(secondsfrac, float)
assert abs(secondsfrac) <= 2.0
assert isinstance(seconds, int)
days, seconds = divmod(seconds, 24*3600)
d += days
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 2 * 24 * 3600
# seconds isn't referenced again before redefinition
usdouble = secondsfrac * 1e6
assert abs(usdouble) < 2.1e6 # exact value not critical
# secondsfrac isn't referenced again
if isinstance(microseconds, float):
microseconds += usdouble
microseconds = round(microseconds, 0)
seconds, microseconds = divmod(microseconds, 1e6)
assert microseconds == int(microseconds)
assert seconds == int(seconds)
days, seconds = divmod(seconds, 24.*3600.)
assert days == int(days)
assert seconds == int(seconds)
d += int(days)
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 3 * 24 * 3600
else:
seconds, microseconds = divmod(microseconds, 1000000)
days, seconds = divmod(seconds, 24*3600)
d += days
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 3 * 24 * 3600
microseconds = float(microseconds)
microseconds += usdouble
microseconds = round(microseconds, 0)
assert abs(s) <= 3 * 24 * 3600
assert abs(microseconds) < 3.1e6
# Just a little bit of carrying possible for microseconds and seconds.
assert isinstance(microseconds, float)
assert int(microseconds) == microseconds
us = int(microseconds)
seconds, us = divmod(us, 1000000)
        s += seconds  # can't overflow
assert isinstance(s, int)
days, s = divmod(s, 24*3600)
d += days
assert isinstance(d, int)
assert isinstance(s, int) and 0 <= s < 24*3600
assert isinstance(us, int) and 0 <= us < 1000000
self = object.__new__(cls)
self._days = d
self._seconds = s
self._microseconds = us
if abs(d) > 999999999:
raise OverflowError("timedelta # of days is too large: %d" % d)
return self
def __repr__(self):
if self._microseconds:
return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
self._days,
self._seconds,
self._microseconds)
if self._seconds:
return "%s(%d, %d)" % ('datetime.' + self.__class__.__name__,
self._days,
self._seconds)
return "%s(%d)" % ('datetime.' + self.__class__.__name__, self._days)
def __str__(self):
mm, ss = divmod(self._seconds, 60)
hh, mm = divmod(mm, 60)
s = "%d:%02d:%02d" % (hh, mm, ss)
if self._days:
def plural(n):
return n, abs(n) != 1 and "s" or ""
s = ("%d day%s, " % plural(self._days)) + s
if self._microseconds:
s = s + ".%06d" % self._microseconds
return s
def total_seconds(self):
"""Total seconds in the duration."""
return ((self.days * 86400 + self.seconds)*10**6 +
self.microseconds) / 10**6
# Read-only field accessors
@property
def days(self):
"""days"""
return self._days
@property
def seconds(self):
"""seconds"""
return self._seconds
@property
def microseconds(self):
"""microseconds"""
return self._microseconds
def __add__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days + other._days,
self._seconds + other._seconds,
self._microseconds + other._microseconds)
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days - other._days,
self._seconds - other._seconds,
self._microseconds - other._microseconds)
return NotImplemented
def __rsub__(self, other):
if isinstance(other, timedelta):
return -self + other
return NotImplemented
def __neg__(self):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(-self._days,
-self._seconds,
-self._microseconds)
def __pos__(self):
return self
def __abs__(self):
if self._days < 0:
return -self
else:
return self
def __mul__(self, other):
if isinstance(other, int):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days * other,
self._seconds * other,
self._microseconds * other)
if isinstance(other, float):
a, b = other.as_integer_ratio()
return self * a / b
return NotImplemented
__rmul__ = __mul__
def _to_microseconds(self):
return ((self._days * (24*3600) + self._seconds) * 1000000 +
self._microseconds)
def __floordiv__(self, other):
if not isinstance(other, (int, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec // other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, usec // other)
def __truediv__(self, other):
if not isinstance(other, (int, float, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec / other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, usec / other)
if isinstance(other, float):
a, b = other.as_integer_ratio()
return timedelta(0, 0, b * usec / a)
def __mod__(self, other):
if isinstance(other, timedelta):
r = self._to_microseconds() % other._to_microseconds()
return timedelta(0, 0, r)
return NotImplemented
def __divmod__(self, other):
if isinstance(other, timedelta):
q, r = divmod(self._to_microseconds(),
other._to_microseconds())
return q, timedelta(0, 0, r)
return NotImplemented
# Comparisons of timedelta objects with other.
def __eq__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) == 0
else:
return False
def __ne__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) != 0
else:
return True
def __le__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) > 0
else:
_cmperror(self, other)
def _cmp(self, other):
assert isinstance(other, timedelta)
return _cmp(self._getstate(), other._getstate())
def __hash__(self):
return hash(self._getstate())
def __bool__(self):
return (self._days != 0 or
self._seconds != 0 or
self._microseconds != 0)
# Pickle support.
def _getstate(self):
return (self._days, self._seconds, self._microseconds)
def __reduce__(self):
return (self.__class__, self._getstate())
timedelta.min = timedelta(-999999999)
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)
timedelta.resolution = timedelta(microseconds=1)
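# Illustrative normalization (doctest-style): the constructor carries any
# overflow into (days, seconds, microseconds), and the arithmetic operators
# work on the normalized values.
#     >>> timedelta(hours=25)
#     datetime.timedelta(1, 3600)
#     >>> timedelta(minutes=1) // timedelta(seconds=1)
#     60
#     >>> timedelta(milliseconds=1).microseconds
#     1000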
class date:
"""Concrete date type.
Constructors:
__new__()
fromtimestamp()
today()
fromordinal()
Operators:
__repr__, __str__
__cmp__, __hash__
__add__, __radd__, __sub__ (add/radd only with timedelta arg)
Methods:
timetuple()
toordinal()
weekday()
isoweekday(), isocalendar(), isoformat()
ctime()
strftime()
Properties (readonly):
year, month, day
"""
__slots__ = '_year', '_month', '_day'
def __new__(cls, year, month=None, day=None):
"""Constructor.
Arguments:
year, month, day (required, base 1)
"""
if (isinstance(year, bytes) and len(year) == 4 and
1 <= year[2] <= 12 and month is None): # Month is sane
# Pickle support
self = object.__new__(cls)
self.__setstate(year)
return self
_check_date_fields(year, month, day)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
return self
# Additional constructors
@classmethod
def fromtimestamp(cls, t):
"Construct a date from a POSIX timestamp (like time.time())."
y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
return cls(y, m, d)
@classmethod
def today(cls):
"Construct a date from time.time()."
t = _time.time()
return cls.fromtimestamp(t)
@classmethod
def fromordinal(cls, n):
"""Contruct a date from a proleptic Gregorian ordinal.
January 1 of year 1 is day 1. Only the year, month and day are
non-zero in the result.
"""
y, m, d = _ord2ymd(n)
return cls(y, m, d)
# Conversions to string
def __repr__(self):
"""Convert to formal string, for repr().
>>> dt = datetime(2010, 1, 1)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0)'
>>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
"""
return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
self._year,
self._month,
self._day)
# XXX These shouldn't depend on time.localtime(), because that
# clips the usable dates to [1970 .. 2038). At least ctime() is
# easily done without using strftime() -- that's better too because
# strftime("%c", ...) is locale specific.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d 00:00:00 %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day, self._year)
def strftime(self, fmt):
"Format using strftime()."
return _wrap_strftime(self, fmt, self.timetuple())
def __format__(self, fmt):
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
def isoformat(self):
"""Return the date formatted according to ISO.
This is 'YYYY-MM-DD'.
References:
- http://www.w3.org/TR/NOTE-datetime
- http://www.cl.cam.ac.uk/~mgk25/iso-time.html
"""
return "%04d-%02d-%02d" % (self._year, self._month, self._day)
__str__ = isoformat
# Read-only field accessors
@property
def year(self):
"""year (1-9999)"""
return self._year
@property
def month(self):
"""month (1-12)"""
return self._month
@property
def day(self):
"""day (1-31)"""
return self._day
# Standard conversions, __cmp__, __hash__ (and helpers)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
return _build_struct_time(self._year, self._month, self._day,
0, 0, 0, -1)
def toordinal(self):
"""Return proleptic Gregorian ordinal for the year, month and day.
January 1 of year 1 is day 1. Only the year, month and day values
contribute to the result.
"""
return _ymd2ord(self._year, self._month, self._day)
def replace(self, year=None, month=None, day=None):
"""Return a new date with new values for the specified fields."""
if year is None:
year = self._year
if month is None:
month = self._month
if day is None:
day = self._day
_check_date_fields(year, month, day)
return date(year, month, day)
# Comparisons of date objects with other.
def __eq__(self, other):
if isinstance(other, date):
return self._cmp(other) == 0
return NotImplemented
def __ne__(self, other):
if isinstance(other, date):
return self._cmp(other) != 0
return NotImplemented
def __le__(self, other):
if isinstance(other, date):
return self._cmp(other) <= 0
return NotImplemented
def __lt__(self, other):
if isinstance(other, date):
return self._cmp(other) < 0
return NotImplemented
def __ge__(self, other):
if isinstance(other, date):
return self._cmp(other) >= 0
return NotImplemented
def __gt__(self, other):
if isinstance(other, date):
return self._cmp(other) > 0
return NotImplemented
def _cmp(self, other):
assert isinstance(other, date)
y, m, d = self._year, self._month, self._day
y2, m2, d2 = other._year, other._month, other._day
return _cmp((y, m, d), (y2, m2, d2))
def __hash__(self):
"Hash."
return hash(self._getstate())
# Computations
def __add__(self, other):
"Add a date to a timedelta."
if isinstance(other, timedelta):
o = self.toordinal() + other.days
if 0 < o <= _MAXORDINAL:
return date.fromordinal(o)
raise OverflowError("result out of range")
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two dates, or a date and a timedelta."""
if isinstance(other, timedelta):
return self + timedelta(-other.days)
if isinstance(other, date):
days1 = self.toordinal()
days2 = other.toordinal()
return timedelta(days1 - days2)
return NotImplemented
def weekday(self):
"Return day of the week, where Monday == 0 ... Sunday == 6."
return (self.toordinal() + 6) % 7
# Day-of-the-week and week-of-the-year, according to ISO
def isoweekday(self):
"Return day of the week, where Monday == 1 ... Sunday == 7."
# 1-Jan-0001 is a Monday
return self.toordinal() % 7 or 7
def isocalendar(self):
"""Return a 3-tuple containing ISO year, week number, and weekday.
The first ISO week of the year is the (Mon-Sun) week
containing the year's first Thursday; everything else derives
from that.
The first week is 1; Monday is 1 ... Sunday is 7.
ISO calendar algorithm taken from
http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
"""
year = self._year
week1monday = _isoweek1monday(year)
today = _ymd2ord(self._year, self._month, self._day)
# Internally, week and day have origin 0
week, day = divmod(today - week1monday, 7)
if week < 0:
year -= 1
week1monday = _isoweek1monday(year)
week, day = divmod(today - week1monday, 7)
elif week >= 52:
if today >= _isoweek1monday(year+1):
year += 1
week = 0
return year, week+1, day+1
# Pickle support.
def _getstate(self):
yhi, ylo = divmod(self._year, 256)
return bytes([yhi, ylo, self._month, self._day]),
def __setstate(self, string):
if len(string) != 4 or not (1 <= string[2] <= 12):
raise TypeError("not enough arguments")
yhi, ylo, self._month, self._day = string
self._year = yhi * 256 + ylo
def __reduce__(self):
return (self.__class__, self._getstate())
_date_class = date # so functions w/ args named "date" can get at the class
date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
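# Illustrative usage (doctest-style):
#     >>> d = date(2010, 1, 1)
#     >>> d.isoformat()
#     '2010-01-01'
#     >>> d.weekday(), d.isoweekday()
#     (4, 5)
#     >>> (d + timedelta(days=31)).isoformat()
#     '2010-02-01'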
class tzinfo:
"""Abstract base class for time zone info classes.
    Subclasses must override the tzname(), utcoffset() and dst() methods.
"""
__slots__ = ()
def tzname(self, dt):
"datetime -> string name of time zone."
raise NotImplementedError("tzinfo subclass must override tzname()")
def utcoffset(self, dt):
"datetime -> minutes east of UTC (negative for west of UTC)"
raise NotImplementedError("tzinfo subclass must override utcoffset()")
def dst(self, dt):
"""datetime -> DST offset in minutes east of UTC.
Return 0 if DST not in effect. utcoffset() must include the DST
offset.
"""
raise NotImplementedError("tzinfo subclass must override dst()")
def fromutc(self, dt):
"datetime in UTC -> datetime in local time."
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
# See the long comment block at the end of this file for an
# explanation of this algorithm.
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc() requires a non-None dst() result")
delta = dtoff - dtdst
if delta:
dt += delta
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
return dt + dtdst
# Pickle support.
def __reduce__(self):
getinitargs = getattr(self, "__getinitargs__", None)
if getinitargs:
args = getinitargs()
else:
args = ()
getstate = getattr(self, "__getstate__", None)
if getstate:
state = getstate()
else:
state = getattr(self, "__dict__", None) or None
if state is None:
return (self.__class__, args)
else:
return (self.__class__, args, state)
_tzinfo_class = tzinfo
class time:
"""Time with time zone.
Constructors:
__new__()
Operators:
__repr__, __str__
__cmp__, __hash__
Methods:
strftime()
isoformat()
utcoffset()
tzname()
dst()
Properties (readonly):
hour, minute, second, microsecond, tzinfo
"""
def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
"""Constructor.
Arguments:
hour, minute (required)
second, microsecond (default to zero)
tzinfo (default to None)
"""
self = object.__new__(cls)
if isinstance(hour, bytes) and len(hour) == 6:
# Pickle support
self.__setstate(hour, minute or None)
return self
_check_tzinfo_arg(tzinfo)
_check_time_fields(hour, minute, second, microsecond)
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
# Standard conversions, __hash__ (and helpers)
# Comparisons of time objects with other.
def __eq__(self, other):
if isinstance(other, time):
return self._cmp(other, allow_mixed=True) == 0
else:
return False
def __ne__(self, other):
if isinstance(other, time):
return self._cmp(other, allow_mixed=True) != 0
else:
return True
def __le__(self, other):
if isinstance(other, time):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, time):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, time):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, time):
return self._cmp(other) > 0
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, time)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._hour, self._minute, self._second,
self._microsecond),
(other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware times")
myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
return _cmp((myhhmm, self._second, self._microsecond),
(othhmm, other._second, other._microsecond))
def __hash__(self):
"""Hash."""
tzoff = self.utcoffset()
if not tzoff: # zero or None
return hash(self._getstate()[0])
h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
timedelta(hours=1))
assert not m % timedelta(minutes=1), "whole minute"
m //= timedelta(minutes=1)
if 0 <= h < 24:
return hash(time(h, m, self.second, self.microsecond))
return hash((h, m, self.second, self.microsecond))
# Conversion to string
def _tzstr(self, sep=":"):
"""Return formatted timezone offset (+xx:xx) or None."""
off = self.utcoffset()
if off is not None:
if off.days < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, timedelta(hours=1))
assert not mm % timedelta(minutes=1), "whole minute"
mm //= timedelta(minutes=1)
assert 0 <= hh < 24
off = "%s%02d%s%02d" % (sign, hh, sep, mm)
return off
def __repr__(self):
"""Convert to formal string, for repr()."""
if self._microsecond != 0:
s = ", %d, %d" % (self._second, self._microsecond)
elif self._second != 0:
s = ", %d" % self._second
else:
s = ""
s= "%s(%d, %d%s)" % ('datetime.' + self.__class__.__name__,
self._hour, self._minute, s)
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
return s
def isoformat(self):
"""Return the time formatted according to ISO.
This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if
self.microsecond == 0.
"""
s = _format_time(self._hour, self._minute, self._second,
self._microsecond)
tz = self._tzstr()
if tz:
s += tz
return s
__str__ = isoformat
def strftime(self, fmt):
"""Format using strftime(). The date part of the timestamp passed
to underlying strftime should not be used.
"""
# The year must be >= 1000 else Python's strftime implementation
# can raise a bogus exception.
timetuple = (1900, 1, 1,
self._hour, self._minute, self._second,
0, 1, -1)
return _wrap_strftime(self, fmt, timetuple)
def __format__(self, fmt):
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
# Timezone functions
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(None)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(None)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(None)
_check_utc_offset("dst", offset)
return offset
def replace(self, hour=None, minute=None, second=None, microsecond=None,
tzinfo=True):
"""Return a new time with new values for the specified fields."""
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
_check_time_fields(hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
return time(hour, minute, second, microsecond, tzinfo)
def __bool__(self):
if self.second or self.microsecond:
return True
offset = self.utcoffset() or timedelta(0)
return timedelta(hours=self.hour, minutes=self.minute) != offset
# Pickle support.
def _getstate(self):
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
basestate = bytes([self._hour, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
if len(string) != 6 or string[0] >= 24:
raise TypeError("an integer is required")
(self._hour, self._minute, self._second,
us1, us2, us3) = string
self._microsecond = (((us1 << 8) | us2) << 8) | us3
if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
self._tzinfo = tzinfo
else:
raise TypeError("bad tzinfo state arg %r" % tzinfo)
def __reduce__(self):
return (time, self._getstate())
_time_class = time # so functions w/ args named "time" can get at the class
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
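# Illustrative usage (doctest-style, naive time without tzinfo):
#     >>> t = time(13, 30, 15)
#     >>> t.isoformat()
#     '13:30:15'
#     >>> t.replace(minute=0).isoformat()
#     '13:00:15'
#     >>> t.utcoffset() is None
#     True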
class datetime(date):
"""datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
The year, month and day arguments are required. tzinfo may be None, or an
instance of a tzinfo subclass. The remaining arguments may be ints or longs.
"""
__slots__ = date.__slots__ + (
'_hour', '_minute', '_second',
'_microsecond', '_tzinfo')
def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
microsecond=0, tzinfo=None):
if isinstance(year, bytes) and len(year) == 10:
# Pickle support
self = date.__new__(cls, year[:4])
self.__setstate(year, month)
return self
_check_tzinfo_arg(tzinfo)
_check_time_fields(hour, minute, second, microsecond)
self = date.__new__(cls, year, month, day)
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
@classmethod
def fromtimestamp(cls, t, tz=None):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
_check_tzinfo_arg(tz)
converter = _time.localtime if tz is None else _time.gmtime
t, frac = divmod(t, 1.0)
us = int(frac * 1e6)
# If timestamp is less than one microsecond smaller than a
# full second, us can be rounded up to 1000000. In this case,
# roll over to seconds, otherwise, ValueError is raised
# by the constructor.
if us == 1000000:
t += 1
us = 0
y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
result = cls(y, m, d, hh, mm, ss, us, tz)
if tz is not None:
result = tz.fromutc(result)
return result
@classmethod
def utcfromtimestamp(cls, t):
"Construct a UTC datetime from a POSIX timestamp (like time.time())."
t, frac = divmod(t, 1.0)
us = int(frac * 1e6)
# If timestamp is less than one microsecond smaller than a
# full second, us can be rounded up to 1000000. In this case,
# roll over to seconds, otherwise, ValueError is raised
# by the constructor.
if us == 1000000:
t += 1
us = 0
y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
return cls(y, m, d, hh, mm, ss, us)
# XXX This is supposed to do better than we *can* do by using time.time(),
# XXX if the platform supports a more accurate way. The C implementation
# XXX uses gettimeofday on platforms that have it, but that isn't
# XXX available from Python. So now() may return different results
# XXX across the implementations.
@classmethod
def now(cls, tz=None):
"Construct a datetime from time.time() and optional time zone info."
t = _time.time()
return cls.fromtimestamp(t, tz)
@classmethod
def utcnow(cls):
"Construct a UTC datetime from time.time()."
t = _time.time()
return cls.utcfromtimestamp(t)
@classmethod
def combine(cls, date, time):
"Construct a datetime from a given date and a given time."
if not isinstance(date, _date_class):
raise TypeError("date argument must be a date instance")
if not isinstance(time, _time_class):
raise TypeError("time argument must be a time instance")
return cls(date.year, date.month, date.day,
time.hour, time.minute, time.second, time.microsecond,
time.tzinfo)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
dst = self.dst()
if dst is None:
dst = -1
elif dst:
dst = 1
else:
dst = 0
return _build_struct_time(self.year, self.month, self.day,
self.hour, self.minute, self.second,
dst)
def timestamp(self):
"Return POSIX timestamp as float"
if self._tzinfo is None:
return _time.mktime((self.year, self.month, self.day,
self.hour, self.minute, self.second,
-1, -1, -1)) + self.microsecond / 1e6
else:
return (self - _EPOCH).total_seconds()
def utctimetuple(self):
"Return UTC time tuple compatible with time.gmtime()."
offset = self.utcoffset()
if offset:
self -= offset
y, m, d = self.year, self.month, self.day
hh, mm, ss = self.hour, self.minute, self.second
return _build_struct_time(y, m, d, hh, mm, ss, 0)
def date(self):
"Return the date part."
return date(self._year, self._month, self._day)
def time(self):
"Return the time part, with tzinfo None."
return time(self.hour, self.minute, self.second, self.microsecond)
def timetz(self):
"Return the time part, with same tzinfo."
return time(self.hour, self.minute, self.second, self.microsecond,
self._tzinfo)
def replace(self, year=None, month=None, day=None, hour=None,
minute=None, second=None, microsecond=None, tzinfo=True):
"""Return a new datetime with new values for the specified fields."""
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
_check_date_fields(year, month, day)
_check_time_fields(hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
return datetime(year, month, day, hour, minute, second,
microsecond, tzinfo)
def astimezone(self, tz=None):
if tz is None:
if self.tzinfo is None:
raise ValueError("astimezone() requires an aware datetime")
ts = (self - _EPOCH) // timedelta(seconds=1)
localtm = _time.localtime(ts)
local = datetime(*localtm[:6])
try:
# Extract TZ data if available
gmtoff = localtm.tm_gmtoff
zone = localtm.tm_zone
except AttributeError:
# Compute UTC offset and compare with the value implied
# by tm_isdst. If the values match, use the zone name
# implied by tm_isdst.
delta = local - datetime(*_time.gmtime(ts)[:6])
dst = _time.daylight and localtm.tm_isdst > 0
gmtoff = -(_time.altzone if dst else _time.timezone)
if delta == timedelta(seconds=gmtoff):
tz = timezone(delta, _time.tzname[dst])
else:
tz = timezone(delta)
else:
tz = timezone(timedelta(seconds=gmtoff), zone)
elif not isinstance(tz, tzinfo):
raise TypeError("tz argument must be an instance of tzinfo")
mytz = self.tzinfo
if mytz is None:
raise ValueError("astimezone() requires an aware datetime")
if tz is mytz:
return self
# Convert self to UTC, and attach the new time zone object.
myoffset = self.utcoffset()
if myoffset is None:
raise ValueError("astimezone() requires an aware datetime")
utc = (self - myoffset).replace(tzinfo=tz)
# Convert from UTC to tz's local time.
return tz.fromutc(utc)
# Ways to produce a string.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d %02d:%02d:%02d %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day,
self._hour, self._minute, self._second,
self._year)
def isoformat(self, sep='T'):
"""Return the time formatted according to ISO.
This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
"""
s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day,
sep) +
_format_time(self._hour, self._minute, self._second,
self._microsecond))
off = self.utcoffset()
if off is not None:
if off.days < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, timedelta(hours=1))
assert not mm % timedelta(minutes=1), "whole minute"
mm //= timedelta(minutes=1)
s += "%s%02d:%02d" % (sign, hh, mm)
return s
def __repr__(self):
"""Convert to formal string, for repr()."""
L = [self._year, self._month, self._day, # These are never zero
self._hour, self._minute, self._second, self._microsecond]
if L[-1] == 0:
del L[-1]
if L[-1] == 0:
del L[-1]
s = ", ".join(map(str, L))
s = "%s(%s)" % ('datetime.' + self.__class__.__name__, s)
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
return s
def __str__(self):
"Convert to string, for str()."
return self.isoformat(sep=' ')
@classmethod
def strptime(cls, date_string, format):
'string, format -> new datetime parsed from a string (like time.strptime()).'
import _strptime
return _strptime._strptime_datetime(cls, date_string, format)
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(self)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
name = _call_tzinfo_method(self._tzinfo, "tzname", self)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(self)
_check_utc_offset("dst", offset)
return offset
# Comparisons of datetime objects with other.
def __eq__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) == 0
elif not isinstance(other, date):
return NotImplemented
else:
return False
def __ne__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) != 0
elif not isinstance(other, date):
return NotImplemented
else:
return True
def __le__(self, other):
if isinstance(other, datetime):
return self._cmp(other) <= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) < 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, datetime):
return self._cmp(other) >= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) > 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, datetime)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._year, self._month, self._day,
self._hour, self._minute, self._second,
self._microsecond),
(other._year, other._month, other._day,
other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware datetimes")
# XXX What follows could be done more efficiently...
diff = self - other # this will take offsets into account
if diff.days < 0:
return -1
return diff and 1 or 0
def __add__(self, other):
"Add a datetime and a timedelta."
if not isinstance(other, timedelta):
return NotImplemented
delta = timedelta(self.toordinal(),
hours=self._hour,
minutes=self._minute,
seconds=self._second,
microseconds=self._microsecond)
delta += other
hour, rem = divmod(delta.seconds, 3600)
minute, second = divmod(rem, 60)
if 0 < delta.days <= _MAXORDINAL:
return datetime.combine(date.fromordinal(delta.days),
time(hour, minute, second,
delta.microseconds,
tzinfo=self._tzinfo))
raise OverflowError("result out of range")
__radd__ = __add__
def __sub__(self, other):
"Subtract two datetimes, or a datetime and a timedelta."
if not isinstance(other, datetime):
if isinstance(other, timedelta):
return self + -other
return NotImplemented
days1 = self.toordinal()
days2 = other.toordinal()
secs1 = self._second + self._minute * 60 + self._hour * 3600
secs2 = other._second + other._minute * 60 + other._hour * 3600
base = timedelta(days1 - days2,
secs1 - secs2,
self._microsecond - other._microsecond)
if self._tzinfo is other._tzinfo:
return base
myoff = self.utcoffset()
otoff = other.utcoffset()
if myoff == otoff:
return base
if myoff is None or otoff is None:
raise TypeError("cannot mix naive and timezone-aware time")
return base + otoff - myoff
def __hash__(self):
tzoff = self.utcoffset()
if tzoff is None:
return hash(self._getstate()[0])
days = _ymd2ord(self.year, self.month, self.day)
seconds = self.hour * 3600 + self.minute * 60 + self.second
return hash(timedelta(days, seconds, self.microsecond) - tzoff)
# Pickle support.
def _getstate(self):
yhi, ylo = divmod(self._year, 256)
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
basestate = bytes([yhi, ylo, self._month, self._day,
self._hour, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
(yhi, ylo, self._month, self._day, self._hour,
self._minute, self._second, us1, us2, us3) = string
self._year = yhi * 256 + ylo
self._microsecond = (((us1 << 8) | us2) << 8) | us3
if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
self._tzinfo = tzinfo
else:
raise TypeError("bad tzinfo state arg %r" % tzinfo)
def __reduce__(self):
return (self.__class__, self._getstate())
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
def _isoweek1monday(year):
# Helper to calculate the day number of the Monday starting week 1
# XXX This could be done more efficiently
THURSDAY = 3
firstday = _ymd2ord(year, 1, 1)
firstweekday = (firstday + 6) % 7 # See weekday() above
week1monday = firstday - firstweekday
if firstweekday > THURSDAY:
week1monday += 7
return week1monday
class timezone(tzinfo):
__slots__ = '_offset', '_name'
# Sentinel value to disallow None
_Omitted = object()
def __new__(cls, offset, name=_Omitted):
if not isinstance(offset, timedelta):
raise TypeError("offset must be a timedelta")
if name is cls._Omitted:
if not offset:
return cls.utc
name = None
elif not isinstance(name, str):
raise TypeError("name must be a string")
if not cls._minoffset <= offset <= cls._maxoffset:
raise ValueError("offset must be a timedelta"
" strictly between -timedelta(hours=24) and"
" timedelta(hours=24).")
if (offset.microseconds != 0 or
offset.seconds % 60 != 0):
raise ValueError("offset must be a timedelta"
" representing a whole number of minutes")
return cls._create(offset, name)
@classmethod
def _create(cls, offset, name=None):
self = tzinfo.__new__(cls)
self._offset = offset
self._name = name
return self
def __getinitargs__(self):
"""pickle support"""
if self._name is None:
return (self._offset,)
return (self._offset, self._name)
def __eq__(self, other):
if type(other) != timezone:
return False
return self._offset == other._offset
def __hash__(self):
return hash(self._offset)
def __repr__(self):
"""Convert to formal string, for repr().
>>> tz = timezone.utc
>>> repr(tz)
'datetime.timezone.utc'
>>> tz = timezone(timedelta(hours=-5), 'EST')
>>> repr(tz)
"datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
"""
if self is self.utc:
return 'datetime.timezone.utc'
if self._name is None:
return "%s(%r)" % ('datetime.' + self.__class__.__name__,
self._offset)
return "%s(%r, %r)" % ('datetime.' + self.__class__.__name__,
self._offset, self._name)
def __str__(self):
return self.tzname(None)
def utcoffset(self, dt):
if isinstance(dt, datetime) or dt is None:
return self._offset
raise TypeError("utcoffset() argument must be a datetime instance"
" or None")
def tzname(self, dt):
if isinstance(dt, datetime) or dt is None:
if self._name is None:
return self._name_from_offset(self._offset)
return self._name
raise TypeError("tzname() argument must be a datetime instance"
" or None")
def dst(self, dt):
if isinstance(dt, datetime) or dt is None:
return None
raise TypeError("dst() argument must be a datetime instance"
" or None")
def fromutc(self, dt):
if isinstance(dt, datetime):
if dt.tzinfo is not self:
raise ValueError("fromutc: dt.tzinfo "
"is not self")
return dt + self._offset
raise TypeError("fromutc() argument must be a datetime instance"
" or None")
_maxoffset = timedelta(hours=23, minutes=59)
_minoffset = -_maxoffset
@staticmethod
def _name_from_offset(delta):
if delta < timedelta(0):
sign = '-'
delta = -delta
else:
sign = '+'
hours, rest = divmod(delta, timedelta(hours=1))
minutes = rest // timedelta(minutes=1)
return 'UTC{}{:02d}:{:02d}'.format(sign, hours, minutes)
timezone.utc = timezone._create(timedelta(0))
timezone.min = timezone._create(timezone._minoffset)
timezone.max = timezone._create(timezone._maxoffset)
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
"""
Some time zone algebra. For a datetime x, let
x.n = x stripped of its timezone -- its naive time.
x.o = x.utcoffset(), and assuming that doesn't raise an exception or
return None
x.d = x.dst(), and assuming that doesn't raise an exception or
return None
x.s = x's standard offset, x.o - x.d
Now some derived rules, where k is a duration (timedelta).
1. x.o = x.s + x.d
This follows from the definition of x.s.
2. If x and y have the same tzinfo member, x.s = y.s.
This is actually a requirement, an assumption we need to make about
sane tzinfo classes.
3. The naive UTC time corresponding to x is x.n - x.o.
This is again a requirement for a sane tzinfo class.
4. (x+k).s = x.s
This follows from #2, and that adding a timedelta to an aware datetime preserves its tzinfo.
5. (x+k).n = x.n + k
Again follows from how arithmetic is defined.
Now we can explain tz.fromutc(x). Let's assume it's an interesting case
(meaning that the various tzinfo methods exist, and don't blow up or return
None when called).
The function wants to return a datetime y with timezone tz, equivalent to x.
x is already in UTC.
By #3, we want
y.n - y.o = x.n [1]
The algorithm starts by attaching tz to x.n, and calling that y. So
x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
becomes true; in effect, we want to solve [2] for k:
(y+k).n - (y+k).o = x.n [2]
By #1, this is the same as
(y+k).n - ((y+k).s + (y+k).d) = x.n [3]
By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
Substituting that into [3],
x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
k - (y+k).s - (y+k).d = 0; rearranging,
k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
k = y.s - (y+k).d
On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
approximate k by ignoring the (y+k).d term at first. Note that k can't be
very large, since all offset-returning methods return a duration of magnitude
less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
be 0, so ignoring it has no consequence then.
In any case, the new value is
z = y + y.s [4]
It's helpful to step back and look at [4] from a higher level: it's simply
mapping from UTC to tz's standard time.
At this point, if
z.n - z.o = x.n [5]
we have an equivalent time, and are almost done. The insecurity here is
at the start of daylight time. Picture US Eastern for concreteness. The wall
time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
sense then. The docs ask that an Eastern tzinfo class consider such a time to
be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
on the day DST starts. We want to return the 1:MM EST spelling because that's
the only spelling that makes sense on the local wall clock.
In fact, if [5] holds at this point, we do have the standard-time spelling,
but that takes a bit of proof. We first prove a stronger result. What's the
difference between the LHS and RHS of [5]? Let
diff = x.n - (z.n - z.o) [6]
Now
z.n = by [4]
(y + y.s).n = by #5
y.n + y.s = since y.n = x.n
x.n + y.s = since z and y have the same tzinfo member,
y.s = z.s by #2
x.n + z.s
Plugging that back into [6] gives
diff =
x.n - ((x.n + z.s) - z.o) = expanding
x.n - x.n - z.s + z.o = cancelling
- z.s + z.o = by #1
z.d
So diff = z.d.
If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
spelling we wanted in the endcase described above. We're done. Contrarily,
if z.d = 0, then we have a UTC equivalent, and are also done.
If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
add to z (in effect, z is in tz's standard time, and we need to shift the
local clock into tz's daylight time).
Let
z' = z + z.d = z + diff [7]
and we can again ask whether
z'.n - z'.o = x.n [8]
If so, we're done. If not, the tzinfo class is insane, according to the
assumptions we've made. This also requires a bit of proof. As before, let's
compute the difference between the LHS and RHS of [8] (and skipping some of
the justifications for the kinds of substitutions we've done several times
already):
diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
x.n - (z.n + diff - z'.o) = replacing diff via [6]
x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
- z.n + z.n - z.o + z'.o = cancel z.n
- z.o + z'.o = #1 twice
-z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
z'.d - z.d
So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
we've found the UTC-equivalent so are done. In fact, we stop with [7] and
return z', not bothering to compute z'.d.
How could z.d and z'.d differ? z' = z + z.d [7], so merely moving z' by
a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
would have to change the result dst() returns: we start in DST, and moving
a little further into it takes us out of DST.
There isn't a sane case where this can happen. The closest it gets is at
the end of DST, where there's an hour in UTC with no spelling in a hybrid
tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
UTC) because the docs insist on that, but 0:MM is taken as being in daylight
time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
standard time. Since that's what the local clock *does*, we want to map both
UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
in local time, but so it goes -- it's the way the local clock works.
When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
(correctly) concludes that z' is not UTC-equivalent to x.
Because we know z.d said z was in daylight time (else [5] would have held and
we would have stopped then), and we know z.d != z'.d (else [8] would have held
and we have stopped then), and there are only 2 possible values dst() can
return in Eastern, it follows that z'.d must be 0 (which it is in the example,
but the reasoning doesn't depend on the example -- it depends on there being
two possible dst() outcomes, one zero and the other non-zero). Therefore
z' must be in standard time, and is the spelling we want in this case.
Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
concerned (because it takes z' as being in standard time rather than the
daylight time we intend here), but returning it gives the real-life "local
clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
tz.
When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
the 1:MM standard time spelling we want.
So how can this break? One of the assumptions must be violated. Two
possibilities:
1) [2] effectively says that y.s is invariant across all y belonging to a given
time zone. This isn't true if, for political reasons or continental drift,
a region decides to change its base offset from UTC.
2) There may be versions of "double daylight" time where the tail end of
the analysis gives up a step too early. I haven't thought about that
enough to say.
In any case, it's clear that the default fromutc() is strong enough to handle
"almost all" time zones: so long as the standard offset is invariant, it
doesn't matter if daylight time transition points change from year to year, or
if daylight time is skipped in some years; it doesn't matter how large or
small dst() may get within its bounds; and it doesn't even matter if some
perverse time zone returns a negative dst(). So a breaking case must be
pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
"""
try:
from _datetime import *
except ImportError:
pass
else:
# Clean up unused names
del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH,
_DI100Y, _DI400Y, _DI4Y, _MAXORDINAL, _MONTHNAMES,
_build_struct_time, _call_tzinfo_method, _check_date_fields,
_check_time_fields, _check_tzinfo_arg, _check_tzname,
_check_utc_offset, _cmp, _cmperror, _date_class, _days_before_month,
_days_before_year, _days_in_month, _format_time, _is_leap,
_isoweek1monday, _math, _ord2ymd, _time, _time_class, _tzinfo_class,
_wrap_strftime, _ymd2ord)
# XXX Since import * above excludes names that start with _,
# docstring does not get overwritten. In the future, it may be
# appropriate to maintain a single module level docstring and
# remove the following line.
from _datetime import __doc__
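# --- Illustrative sketch (not part of the module): a tiny, self-checking
# example of the fixed-offset conversion described by the time zone algebra
# notes above. For a timezone() instance fromutc() just adds the zone's
# constant offset, so astimezone() maps 12:00 UTC to 07:00 EST. Guarded so it
# only runs when this file is executed directly.
if __name__ == "__main__":
    _est = timezone(timedelta(hours=-5), "EST")
    _noon_utc = datetime(2010, 1, 16, 12, 0, tzinfo=timezone.utc)
    _noon_est = _noon_utc.astimezone(_est)
    assert _noon_est == datetime(2010, 1, 16, 7, 0, tzinfo=_est)
    assert _noon_est.utcoffset() == timedelta(hours=-5)
    print(_noon_est.isoformat())  # 2010-01-16T07:00:00-05:00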
|
{
"content_hash": "c80f49c7361b0c9790154d1216e40e62",
"timestamp": "",
"source": "github",
"line_count": 2147,
"max_line_length": 85,
"avg_line_length": 35.11970190964136,
"alnum_prop": 0.5572000742685871,
"repo_name": "MalloyPower/parsing-python",
"id": "f506e9ab22f9311628746f3eda3dcc8a9e9ac5ad",
"size": "75402",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.3.0/Lib/datetime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
"""
eor.py
Created by Thomas Mangin on 2010-01-16.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
# from struct import unpack
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
from exabgp.bgp.message.message import Message
from exabgp.bgp.message.update.attribute import Attributes
from exabgp.bgp.message.update.nlri import NLRI as _NLRI
# =================================================================== End-Of-RIB
# not technically a different message type but easier to treat as one
class EOR (Message):
ID = Message.CODE.UPDATE
TYPE = chr(Message.CODE.UPDATE)
class NLRI (_NLRI):
PREFIX = '\x00\x00\x00\x07\x90\x0F\x00\x03'
EOR = True
nexthop = None
def __init__ (self, afi, safi, action):
_NLRI.__init__(self,afi,safi,action)
self.action = action
def pack (self, negotiated=None):
if self.afi == AFI.ipv4 and self.safi == SAFI.unicast:
return '\x00\x00\x00\x00'
return self.PREFIX + self.afi.pack() + self.safi.pack()
def __repr__ (self):
return self.extensive()
def extensive (self):
return 'eor %d/%d (%s %s)' % (self.afi,self.safi,self.afi,self.safi)
def json (self):
return '"eor": { "afi" : "%s", "safi" : "%s" }' % (self.afi,self.safi)
def __init__ (self, afi, safi, action=None):
Message.__init__(self)
self.nlris = [EOR.NLRI(afi,safi,action),]
self.attributes = Attributes()
def message (self,negotiated=None):
return self._message(
self.nlris[0].pack()
)
def __repr__ (self):
return 'EOR'
@classmethod
def unpack_message (cls, data, negotiated):
header_length = len(EOR.NLRI.PREFIX)
return cls(AFI.unpack(data[header_length:header_length+2]),SAFI.unpack(data[header_length+2]))
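# Usage sketch (illustrative; the Message._message() wrapper that frames the
# body is defined in the parent class and not shown here):
#
#     EOR.NLRI(AFI.ipv4, SAFI.unicast, None).pack()  # '\x00\x00\x00\x00', the empty UPDATE body
#     EOR.NLRI(AFI.ipv6, SAFI.unicast, None).pack()  # PREFIX (an MP_UNREACH_NLRI header) + afi + safi
#     EOR(AFI.ipv4, SAFI.unicast).message()          # the EOR framed as a full UPDATE message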
|
{
"content_hash": "cb6faca3303d850885302fa7fd9cd3de",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 96,
"avg_line_length": 26.96875,
"alnum_prop": 0.6546929316338355,
"repo_name": "blablacar/exabgp",
"id": "6f10a78c9c382be02032f9d9737d2ce34afe77e5",
"size": "1744",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/exabgp/bgp/message/update/eor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "1516"
},
{
"name": "Python",
"bytes": "1191461"
},
{
"name": "Shell",
"bytes": "17891"
}
],
"symlink_target": ""
}
|
from keystone.common.validation import parameter_types
# Schema for Identity v2 API
_role_properties_v2 = {
'name': parameter_types.name,
'id': parameter_types.id_string,
'description': parameter_types.description
}
role_create_v2 = {
'type': 'object',
'properties': _role_properties_v2,
'required': ['name'],
'additionalProperties': True
}
# Schema for Identity v3 API
_role_properties = {
'name': parameter_types.name
}
role_create = {
'type': 'object',
'properties': _role_properties,
'required': ['name'],
'additionalProperties': True
}
role_update = {
'type': 'object',
'properties': _role_properties,
'minProperties': 1,
'additionalProperties': True
}
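# Minimal sketch (illustrative, assumes the jsonschema package is installed):
# these dicts are plain JSON Schema fragments, so they can be exercised
# directly with jsonschema.validate().
if __name__ == '__main__':
    import jsonschema
    jsonschema.validate({'name': 'admin'}, role_create)      # passes
    try:
        jsonschema.validate({}, role_create)                 # 'name' is required
    except jsonschema.ValidationError as exc:
        print('rejected: %s' % exc.message)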
|
{
"content_hash": "c3d2f72a5a145ec6e2ab2e8b8ac3b31e",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 54,
"avg_line_length": 20.25,
"alnum_prop": 0.6474622770919067,
"repo_name": "rajalokan/keystone",
"id": "f93ba3ca49d931ed254400700038820d859baeb0",
"size": "1270",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/assignment/schema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "3865941"
},
{
"name": "Shell",
"bytes": "4861"
}
],
"symlink_target": ""
}
|
from airflow.models import BaseOperator
from airflow.hooks.S3_hook import S3Hook
from airflow.contrib.hooks.ssh_hook import SSHHook
from tempfile import NamedTemporaryFile
from urllib.parse import urlparse
from airflow.utils.decorators import apply_defaults
class SFTPToS3Operator(BaseOperator):
"""
This operator enables transferring files from an SFTP server to
Amazon S3.
:param sftp_conn_id: The sftp connection id. The name or identifier for
establishing a connection to the SFTP server.
:type sftp_conn_id: string
:param sftp_path: The sftp remote path. This is the specified file path
for downloading the file from the SFTP server.
:type sftp_path: string
:param s3_conn_id: The s3 connection id. The name or identifier for
establishing a connection to S3
:type s3_conn_id: string
:param s3_bucket: The targeted s3 bucket. This is the S3 bucket to which
the file is uploaded.
:type s3_bucket: string
:param s3_key: The targeted s3 key. This is the specified path for
uploading the file to S3.
:type s3_key: string
"""
template_fields = ('s3_key', 'sftp_path')
@apply_defaults
def __init__(self,
s3_bucket,
s3_key,
sftp_path,
sftp_conn_id='ssh_default',
s3_conn_id='aws_default',
*args,
**kwargs):
super(SFTPToS3Operator, self).__init__(*args, **kwargs)
self.sftp_conn_id = sftp_conn_id
self.sftp_path = sftp_path
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.s3_conn_id = s3_conn_id
@staticmethod
def get_s3_key(s3_key):
"""This parses the correct format for S3 keys
regardless of how the S3 url is passed."""
parsed_s3_key = urlparse(s3_key)
return parsed_s3_key.path.lstrip('/')
def execute(self, context):
self.s3_key = self.get_s3_key(self.s3_key)
ssh_hook = SSHHook(ssh_conn_id=self.sftp_conn_id)
s3_hook = S3Hook(self.s3_conn_id)
sftp_client = ssh_hook.get_conn().open_sftp()
with NamedTemporaryFile("w") as f:
sftp_client.get(self.sftp_path, f.name)
s3_hook.load_file(
filename=f.name,
key=self.s3_key,
bucket_name=self.s3_bucket,
replace=True
)
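# Usage sketch (connection ids, paths and schedule below are illustrative
# assumptions, not part of this module):
#
#     from airflow import DAG
#     from datetime import datetime
#
#     with DAG('sftp_to_s3_example', start_date=datetime(2018, 1, 1),
#              schedule_interval='@daily') as dag:
#         transfer = SFTPToS3Operator(
#             task_id='transfer_report',
#             sftp_conn_id='ssh_default',
#             sftp_path='/incoming/report.csv',
#             s3_conn_id='aws_default',
#             s3_bucket='example-bucket',
#             s3_key='s3://example-bucket/reports/report.csv',  # bare keys are accepted too
#         )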
|
{
"content_hash": "4a127313cf85cc1a5671ff9f3bf95390",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 76,
"avg_line_length": 33.986111111111114,
"alnum_prop": 0.6109521863506334,
"repo_name": "fenglu-g/incubator-airflow",
"id": "cefc838cf12d1aed626d7117d395e29a517e5fe4",
"size": "3259",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/contrib/operators/sftp_to_s3_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "3634"
},
{
"name": "HTML",
"bytes": "129454"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5852162"
},
{
"name": "Shell",
"bytes": "41793"
}
],
"symlink_target": ""
}
|
from JumpScale import j
from EventHandler import EventHandler
from ErrorConditionHandler import ErrorConditionHandler
j.errorconditionhandler=ErrorConditionHandler()
j.events=EventHandler()
|
{
"content_hash": "894eda45022d75aefd90b8dfef66179c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 55,
"avg_line_length": 31.833333333333332,
"alnum_prop": 0.8795811518324608,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "c21e55a577792d4f9f1a9e8497c6b8ad01d55fc1",
"size": "191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/core/errorhandling/__init__.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
}
|
import unittest
import sys
from werkzeug.test import Client
from werkzeug.wrappers import Response
from profilomatic_analysis.wsgi_utils import not_found, returns_json
class NotFoundTest(unittest.TestCase):
def test_not_found(self):
client = Client(not_found, Response)
response = client.get('/hello')
self.assertEqual(404, response.status_code)
self.assertIn(b'/hello', response.data)
class ReturnsJsonTest(unittest.TestCase):
def test_success_skipped_start_response(self):
@returns_json
def app(environ, start_response):
return {'success': True}
client = Client(app, Response)
response = client.get('/')
self.assertEqual(200, response.status_code)
self.assertEqual(b'{"success": true}', response.data)
def test_success(self):
@returns_json
def app(environ, start_response):
start_response('404 Not Found', [('X-Hello', 'World')])
return {'found': False}
client = Client(app, Response)
response = client.get('/')
self.assertEqual(404, response.status_code)
self.assertEqual(b'{"found": false}', response.data)
self.assertEqual('World', response.headers['X-Hello'])
def test_failure(self):
@returns_json
def app(environ, start_response):
start_response('200 OK', [])
try:
raise ZeroDivisionError()
except:
start_response('503 Service Unavailable', [], sys.exc_info())
client = Client(app, Response)
with self.assertRaises(ZeroDivisionError):
client.get('/')
|
{
"content_hash": "2660d3e43efcd1fddc9a20176bb41d24",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 77,
"avg_line_length": 35.40425531914894,
"alnum_prop": 0.6201923076923077,
"repo_name": "jamespic/eliot-profiler-analysis",
"id": "c2cd0f655e80df5855c8a2af69436a42e50fbf19",
"size": "1664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_wsgi_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1108"
},
{
"name": "HTML",
"bytes": "311"
},
{
"name": "JavaScript",
"bytes": "37363"
},
{
"name": "Python",
"bytes": "42565"
}
],
"symlink_target": ""
}
|
import Skype4Py
import time
import re
class SkypeBot(object):
def __init__(self):
self.skype = Skype4Py.Skype(Events=self)
#self.skype.FriendlyName = "Skype Bot"
self.skype.Attach()
def AttachmentStatus(self, status):
if status == Skype4Py.apiAttachAvailable:
self.skype.Attach()
def MessageStatus(self, msg, status):
if status == Skype4Py.cmsReceived:
if msg.Chat.Type in (Skype4Py.chatTypeDialog, Skype4Py.chatTypeLegacyDialog):
for regexp, target in self.commands.items():
match = re.match(regexp, msg.Body, re.IGNORECASE)
if match:
msg.MarkAsSeen()
reply = target(self, *match.groups())
if reply:
msg.Chat.SendMessage(reply)
break
def cmd_userstatus(self, status):
if status:
try:
self.skype.ChangeUserStatus(status)
except Skype4Py.SkypeError, e:
return str(e)
return 'Current status: %s' % self.skype.CurrentUserStatus
def cmd_credit(self):
return self.skype.CurrentUserProfile.BalanceToText
commands = {
"@userstatus *(.*)": cmd_userstatus,
"@credit$": cmd_credit
}
if __name__ == "__main__":
bot = SkypeBot()
while True:
time.sleep(1.0)
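# Extension sketch (the command below is hypothetical): MessageStatus() matches
# msg.Body against each regexp in `commands` and calls the mapped method with
# the captured groups, so adding a command only needs a new method plus a dict
# entry inside the class, e.g.
#
#     def cmd_echo(self, text):
#         return text
#
#     commands = {
#         "@userstatus *(.*)": cmd_userstatus,
#         "@credit$": cmd_credit,
#         "@echo +(.*)": cmd_echo,
#     }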
|
{
"content_hash": "31a753ef235d69f39fa58b2b60a7e1a2",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 83,
"avg_line_length": 26.25,
"alnum_prop": 0.6246031746031746,
"repo_name": "davsebamse/skype4py",
"id": "458bb68eb6ec400919a3d98487b2cd0b49547964",
"size": "1283",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "SkypeBot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "702992"
}
],
"symlink_target": ""
}
|
import json
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import get_model
from django.http import (HttpResponse, HttpResponseNotFound,
HttpResponseBadRequest, HttpResponseForbidden)
from django.views.decorators.clickjacking import xframe_options_sameorigin
from django.views.decorators.http import require_POST
from tower import ugettext as _
from kitsune.access.decorators import login_required
from kitsune.upload.models import ImageAttachment
from kitsune.upload.utils import upload_imageattachment, FileTooLargeError
ALLOWED_MODELS = ['questions.Question', 'questions.Answer', 'auth.User']
@login_required
@require_POST
@xframe_options_sameorigin
def up_image_async(request, model_name, object_pk):
"""Upload all images in request.FILES."""
# Verify the model against our white-list
if model_name not in ALLOWED_MODELS:
message = _('Model not allowed.')
return HttpResponseBadRequest(
json.dumps({'status': 'error', 'message': message}))
# Get the model
m = get_model(*model_name.split('.'))
# Then look up the object by pk
try:
obj = m.objects.get(pk=object_pk)
except ObjectDoesNotExist:
message = _('Object does not exist.')
return HttpResponseNotFound(
json.dumps({'status': 'error', 'message': message}))
try:
file_info = upload_imageattachment(request, obj)
except FileTooLargeError as e:
return HttpResponseBadRequest(
json.dumps({'status': 'error', 'message': e.args[0]}))
if isinstance(file_info, dict) and 'thumbnail_url' in file_info:
return HttpResponse(
json.dumps({'status': 'success', 'file': file_info}))
message = _('Invalid or no image received.')
return HttpResponseBadRequest(
json.dumps({'status': 'error', 'message': message,
'errors': file_info}))
@require_POST
@xframe_options_sameorigin
def del_image_async(request, image_id):
"""Delete an image given its object id."""
user = request.user
if not user.is_authenticated():
message = _('You are not logged in.')
return HttpResponseForbidden(
json.dumps({'status': 'error', 'message': message}))
try:
image = ImageAttachment.objects.get(pk=image_id)
except ImageAttachment.DoesNotExist:
message = _('The requested image could not be found.')
return HttpResponseNotFound(
json.dumps({'status': 'error', 'message': message}))
if not ((user == image.creator) or
(user.has_perm('upload.delete_imageattachment'))):
message = _('You do not have permission to do that.')
return HttpResponseForbidden(
json.dumps({'status': 'error', 'message': message}))
image.file.delete()
if image.thumbnail:
image.thumbnail.delete()
image.delete()
return HttpResponse(json.dumps({'status': 'success'}))
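# Wiring sketch (the URL patterns below are illustrative, not kitsune's actual
# urlconf): model_name/object_pk and image_id reach the views as captured
# groups from django.conf.urls.url() patterns such as
#
#     url(r'^up/(?P<model_name>[\w\.]+)/(?P<object_pk>\d+)$',
#         up_image_async, name='upload.up_image_async'),
#     url(r'^del/(?P<image_id>\d+)$',
#         del_image_async, name='upload.del_image_async'),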
|
{
"content_hash": "5693dffeddb4776bc93fe8c6a0406440",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 74,
"avg_line_length": 34.206896551724135,
"alnum_prop": 0.6629704301075269,
"repo_name": "iDTLabssl/kitsune",
"id": "fdf3fcf5a333c5e6e2c0f510548d7cf9967d7c5c",
"size": "2976",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "kitsune/upload/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "282192"
},
{
"name": "HTML",
"bytes": "625414"
},
{
"name": "JavaScript",
"bytes": "728519"
},
{
"name": "Python",
"bytes": "2717442"
},
{
"name": "Shell",
"bytes": "10281"
},
{
"name": "Smarty",
"bytes": "2062"
}
],
"symlink_target": ""
}
|
import csv
from urllib.request import Request, urlopen
import urllib.error
import re
from sys import argv
from bs4 import BeautifulSoup
from dateutil import parser
import scrape_util
default_sale, base_url, prefix = scrape_util.get_market(argv)
base_url = base_url[:-1]
strip_char = ';,. \n\t$'
def get_sale_head(line):
head_string = line.pop().replace('hd','').strip(strip_char)
try:
sale_head = int(head_string)
return sale_head
except ValueError:
return None
def get_sale_date(line):
date_string = line[0].replace('Sale', '').strip(strip_char)
sale_date = parser.parse(date_string)
return sale_date
def is_sale(line, io_name):
right_columns = line[0] and all(line[2:])
has_price = '$' in ''.join(line[2:])
extra_price = '$' in line[1]
return (right_columns and has_price and not extra_price)
def get_sale_location(string):
if ',' in string:
sale_location = string.split(',')
else:
match = re.search(r'(.*?)(' + scrape_util.state + ')', string)
if match:
sale_location = [match.group(1), match.group(2)]
else:
sale_location = [string]
return sale_location
def get_sale(word, cattle):
sale_location = get_sale_location(word[0])
sale = {
'consignor_city': sale_location.pop(0).strip(strip_char).title(),
'cattle_cattle': cattle,
'cattle_head': word[1].replace('hd', '').strip(strip_char),
'cattle_avg_weight': word[2].strip(strip_char),
}
if sale_location:
sale['consignor_state'] = sale_location.pop().strip(strip_char)
try:
cattle_head = int(sale['cattle_head'])
except ValueError:
sale['cattle_cattle'] = ' '.join([sale['cattle_cattle'], sale['cattle_head']])
sale['cattle_head'] = ''
price_word = re.sub(r'\([^\)]*\)?', '', word[3])
match = re.search(r'/?(by the )?h[ea]{2}d', price_word, re.IGNORECASE)
if match:
price = 'cattle_price'
else:
price = 'cattle_price_cwt'
sale[price] = re.search(r'[0-9,]+', price_word).group(0).replace(',', '')
sale = {k:v for k,v in sale.items() if v}
return sale
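# Illustrative sketch (the row below is hypothetical, not taken from a real
# report): with cattle = 'Steers', a line such as
#     ['Billings, MT', '12 hd', '560', '$145']
# yields
#     {'consignor_city': 'Billings', 'consignor_state': 'MT',
#      'cattle_cattle': 'Steers', 'cattle_head': '12',
#      'cattle_avg_weight': '560', 'cattle_price_cwt': '145'}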
def main():
# get URLs for all reports
request = Request(
base_url,
headers = scrape_util.url_header,
)
with urlopen(request) as io:
soup = BeautifulSoup(io.read(), 'lxml')
google_doc = soup.iframe
report = [google_doc]
# Identify existing reports
archive = scrape_util.ArchiveFolder(argv, prefix)
# write csv file for each historical report
for this_report in report:
# query this_report for sale data
url = this_report['src']
request = Request(
url,
headers = scrape_util.url_header,
)
try:
with urlopen(request) as io:
soup = BeautifulSoup(io.read(), 'lxml')
except urllib.error.HTTPError:
print('HTTP error: {}'.format(url))
continue
line = [
[td.get_text().replace('\xa0', ' ') for td in tr.find_all('td')]
for tr in soup.find_all('tr')
]
sale_date = get_sale_date(line[0])
# skip if already archived
io_name = archive.new_csv(sale_date)
if not io_name:
continue
# sale defaults
sale_head = get_sale_head(line.pop(0))
this_default_sale = default_sale.copy()
this_default_sale.update({
'sale_year': sale_date.year,
'sale_month': sale_date.month,
'sale_day': sale_date.day,
'sale_head': sale_head,
})
# open csv file and write header
with io_name.open('w', encoding='utf-8') as io:
writer = csv.DictWriter(io, scrape_util.header, lineterminator='\n')
writer.writeheader()
# extract & write sale dictionary
for this_line in line:
if is_sale(this_line, io_name.name):
sale = this_default_sale.copy()
sale.update(get_sale(this_line, cattle))
if sale != this_default_sale:
writer.writerow(sale)
else:
cattle = this_line[0].strip(strip_char)
if __name__ == '__main__':
main()
|
{
"content_hash": "f8db7a46b1a8f02edc68263c28c0cee4",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 86,
"avg_line_length": 28.12258064516129,
"alnum_prop": 0.5574673090158293,
"repo_name": "bansallab/roundup",
"id": "f3e34a286bfd799b84519599f390f7ed7b3921db",
"size": "4359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_30_scrape.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "414972"
},
{
"name": "Shell",
"bytes": "870"
}
],
"symlink_target": ""
}
|
"""
ACE object handling according to WebDAV ACP specification.
"""
from webdav.acp.Principal import Principal
from webdav.acp.GrantDeny import GrantDeny
from webdav import Constants
from webdav.Connection import WebdavError
__version__ = "$LastChangedRevision: 13 $"
class ACE(object):
"""
This class provides functionality for handling ACEs
@ivar principal: A principal (user or group)
@type principal: L{Principal} object
@ivar invert: Flag indicating whether ACE should invert the principal.
@type invert: C{bool}
@ivar grantDenies: Grant or deny clauses for privileges
@type grantDenies: C{list} of L{GrantDeny} objects
@ivar protected: Flag indicating whether ACE is protected.
@type protected: C{bool}
@ivar inherited: URL indicating the source from where the ACE is inherited.
@type inherited: C{string}
"""
# restrict instance variables
__slots__ = ('principal', 'invert', 'grantDenies', 'protected', 'inherited')
def __init__(self, domroot=None, principal=None, grantDenies=None):
"""
Constructor should be called with either no parameters (create blank ACE),
one parameter (a DOM tree or principal), or two parameters (principal and
sequence of GrantDenies).
@param domroot: A DOM tree (default: None).
@type domroot: L{webdav.WebdavResponse.Element} object
@param principal: A principal (user or group), (default: None).
@type principal: L{Principal} object
@param grantDenies: Grant and deny clauses for privileges (default: None).
@type grantDenies: sequence of L{GrantDeny} objects
@raise WebdavError: When non-valid parameters are passed a L{WebdavError} is raised.
"""
self.principal = Principal()
self.protected = None
self.inherited = None
self.invert = None
self.grantDenies = []
if domroot:
self.principal = Principal(domroot=domroot.find(Constants.TAG_PRINCIPAL, Constants.NS_DAV))
self.inherited = domroot.find(Constants.TAG_INHERITED, Constants.NS_DAV)
if self.inherited:
self.inherited = self.inherited.children[0].textof()
if domroot.find(Constants.TAG_PROTECTED, Constants.NS_DAV):
self.protected = 1
for child in domroot.children:
if child.ns == Constants.NS_DAV \
and (child.name == Constants.TAG_GRANT or child.name == Constants.TAG_DENY):
self.grantDenies.append(GrantDeny(domroot=child))
elif isinstance(principal, Principal):
newPrincipal = Principal()
newPrincipal.copy(principal)
self.principal = newPrincipal
if (isinstance(grantDenies, list) or isinstance(grantDenies, tuple)):
self.addGrantDenies(grantDenies)
elif domroot == None and grantDenies == None:
# no param ==> blank ACE
pass
else:
# This shouldn't happen, someone screwed up with the params ...
raise WebdavError('non-valid parameters handed to ACE constructor')
def __cmp__(self, other):
if not isinstance(other, ACE):
return 1
if self.principal == other.principal \
and self.invert == other.invert \
and self.protected == other.protected \
and self.inherited == other.inherited:
equal = 1
for grantDeny in self.grantDenies:
inList = 0
for otherGrantDeny in other.grantDenies:
if grantDeny == otherGrantDeny:
inList = 1
if inList == 0:
equal = 0
return not equal
else:
return 1
def __repr__(self):
repr = '<class ACE: '
if self.invert:
repr += 'inverted principal, '
if self.principal:
repr += 'principal: %s, ' % (self.principal)
if self.protected:
repr += 'protected, '
if self.inherited:
repr += 'inherited href: %s, ' % (self.inherited)
first = 1
repr += 'grantDenies: ['
for grantDeny in self.grantDenies:
if first:
repr += '%s' % grantDeny
first = 0
else:
repr += ', %s' % grantDeny
return '%s]>' % (repr)
def copy(self, other):
'''Copy an ACE object.
@param other: Another ACE to copy.
@type other: L{ACE} object
@raise WebdavError: When an object that is not an L{ACE} is passed
a L{WebdavError} is raised.
'''
if not isinstance(other, ACE):
raise WebdavError('Non-ACE object passed to copy method: %s.' % other.__class__)
self.invert = other.invert
self.protected = other.protected
self.inherited = other.inherited
self.principal = Principal()
if other.principal:
self.principal.copy(other.principal)
if other.grantDenies:
self.addGrantDenies(other.grantDenies)
def isValid(self):
"""
Returns true/false (1/0) whether the necessary props
principal and grantDenies are set and whether the ACE contains exactly one
grant or deny clause.
@return: Validity of ACE.
@rtype: C{bool}
"""
return self.principal and len(self.grantDenies) == 1
def isGrant(self):
'''
Returns true/false (1/0) if ACE contains only grant clauses.
@return: Value whether the ACE is of grant type.
@rtype: C{bool}
'''
if self.isMixed() or len(self.grantDenies) < 1:
return 0
else:
return self.grantDenies[0].isGrant()
def isDeny(self):
'''
Returns true/false (1/0) if ACE contains only deny clauses.
@return: Value whether the ACE is of deny type.
@rtype: C{bool}
'''
if self.isMixed() or len(self.grantDenies) < 1:
return 0
else:
return self.grantDenies[0].isDeny()
def isMixed(self):
'''
Returns true/false (1/0) if ACE contains both types (grant and deny) of clauses.
@return: Value whether the ACE is of mixed (grant and deny) type.
@rtype: C{bool}
'''
mixed = 0
if len(self.grantDenies):
first = self.grantDenies[0].grantDeny
for grantDeny in self.grantDenies:
if grantDeny.grantDeny != first:
mixed = 1
return mixed
def toXML(self, defaultNameSpace=None):
"""
Returns ACE content as a string of valid XML as described in WebDAV ACP.
@param defaultNameSpace: Name space (default: None).
@type defaultNameSpace: C(string)
"""
assert self.isValid(), "ACE is not initialized or does not contain valid content!"
ACE = 'D:' + Constants.TAG_ACE
res = self.principal.toXML(self.invert)
for grantDeny in self.grantDenies:
res += grantDeny.toXML()
if self.protected:
res += '<D:protected/>'
if self.inherited:
res += '<D:inherited><D:href>%s</D:href></D:inherited>' % (self.inherited)
return '<%s>%s</%s>' % (ACE, res, ACE)
def setPrincipal(self, principal):
'''
Sets the passed principal on the ACE.
@param principal: A principal.
@type principal: L{Principal} object
'''
self.principal = Principal()
self.principal.copy(principal)
def setInherited(self, href):
'''
Sets the passed URL on the ACE to denote from where it is inherited.
@param href: A URL.
@type href: C{string}
'''
self.inherited = href
def addGrantDeny(self, grantDeny):
'''
Adds the passed GrantDeny object to list if it's not in it, yet.
@param grantDeny: A grant or deny clause.
@type grantDeny: L{GrantDeny} object
'''
# only add it if it's not in the list, yet ...
inList = 0
for element in self.grantDenies:
if element == grantDeny:
inList = 1
if not inList:
newGrantDeny = GrantDeny()
newGrantDeny.copy(grantDeny)
self.grantDenies.append(newGrantDeny)
def addGrantDenies(self, grantDenies):
'''Adds the list of passed grant/deny objects to list.
@param grantDenies: Grant or deny clauses.
@type grantDenies: sequence of L{GrantDeny} objects
'''
map(lambda grantDeny: self.addGrantDeny(grantDeny), grantDenies)
def delGrantDeny(self, grantDeny):
'''Deletes the passed GrantDeny object from list.
@param grantDeny: A grant or deny clause.
@type grantDeny: L{GrantDeny} object
@raise WebdavError: A L{WebdavError} is raised if the clause to be
deleted is not present.
'''
# only delete it if it is actually in the list ...
count = 0
index = 0
for element in self.grantDenies:
count += 1
if element == grantDeny:
index = count
if index:
self.grantDenies.pop(index - 1)
else:
raise WebdavError('GrantDeny to be deleted not in list: %s.' % grantDeny)
def delGrantDenies(self, grantDenies):
'''Deletes the list of passed grant/deny objects from list.
@param grantDenies: Grant or deny clauses.
@type grantDenies: sequence of L{GrantDeny} objects
'''
map(lambda grantDeny: self.delGrantDeny(grantDeny), grantDenies)
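# Usage sketch (illustrative; how the Principal and GrantDeny instances are
# populated depends on those classes, which are not shown here):
#
#     some_principal = ...   # a webdav.acp.Principal identifying a user or group
#     some_grant = ...       # a webdav.acp.GrantDeny carrying the privileges
#     ace = ACE(principal=some_principal, grantDenies=[some_grant])
#     ace.isValid()          # true once a principal and exactly one clause are set
#     ace.toXML()            # serialises to a WebDAV ACP <D:ace> element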
|
{
"content_hash": "6a2edaa2212197897239b06de17d1b96",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 105,
"avg_line_length": 36.02517985611511,
"alnum_prop": 0.5717423864203695,
"repo_name": "antont/tundra",
"id": "fc22ea2f63193ea9e3abece23752b164449ff662",
"size": "10610",
"binary": false,
"copies": "1",
"ref": "refs/heads/tundra2",
"path": "src/Application/PythonScriptModule/pymodules_old/lib/webdav/acp/Ace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "110345"
},
{
"name": "C#",
"bytes": "76173"
},
{
"name": "C++",
"bytes": "4959154"
},
{
"name": "CoffeeScript",
"bytes": "2229"
},
{
"name": "JavaScript",
"bytes": "316308"
},
{
"name": "Objective-C",
"bytes": "222359"
},
{
"name": "Python",
"bytes": "999850"
},
{
"name": "Shell",
"bytes": "8224"
},
{
"name": "TypeScript",
"bytes": "230019"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.mail import send_mail
class Command(BaseCommand):
"""send mail test
"""
def message(self, message):
self.stdout.write("%s\n" % message.encode("ascii", "replace"))
def handle(self, *args, **options):
now = datetime.now()
body="""
Hi, this is the testing message
send on %s
""" % str(now)
self.message(body)
send_mail('Testing mail - %s' % str(now), body,
settings.DEFAULT_FROM_EMAIL, args)
|
{
"content_hash": "e8561d3570d33faf11c30ab392935eb8",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 70,
"avg_line_length": 22.48148148148148,
"alnum_prop": 0.6293245469522241,
"repo_name": "GeographicaGS/moocng",
"id": "1b6e09402068114aa5bf77bf224bba0f39868b47",
"size": "1175",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moocng/portal/management/commands/mailtest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "162701"
},
{
"name": "HTML",
"bytes": "362912"
},
{
"name": "JavaScript",
"bytes": "1911286"
},
{
"name": "Python",
"bytes": "2723710"
},
{
"name": "Shell",
"bytes": "24842"
}
],
"symlink_target": ""
}
|
"""
Copyright 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import logging
import os
import re
import sys
import threading
import json
import time
from datetime import date
from naarad.metrics.metric import Metric
import naarad.utils
import naarad.naarad_constants as CONSTANTS
logger = logging.getLogger('naarad.metrics.linkedin_android_rum_metric')
class LinkedInAndroidRumMetric(Metric):
"""
Class for LinkedIn Android RUM logs, deriving from class Metric
Note that this is for LinkedIn only
"""
clock_format = '%Y-%m-%d %H:%M:%S'
val_types = ('launch_time', 'nus_update_time')
def __init__(self, metric_type, infile_list, hostname, aggr_metrics, outdir, resource_path, label, ts_start, ts_end, rule_strings,
important_sub_metrics, anomaly_detection_metrics, **other_options):
Metric.__init__(self, metric_type, infile_list, hostname, aggr_metrics, outdir, resource_path, label, ts_start, ts_end, rule_strings,
important_sub_metrics, anomaly_detection_metrics)
self.sub_metrics = self.val_types
if not self.important_sub_metrics:
self.important_sub_metrics = CONSTANTS.important_sub_metrics_import['LINKEDINANDROIDRUM']
self.sub_metric_description = {
"launch_time": "the time taken to launch the client application",
"nus_update_time": "the time taken to update NUS list after launch"
}
# get start time stamp, launch time duration, and nus update time duration
def get_times(self, native):
"""
get start time stamp, launch time duration, and nus update time duration from JSON object native
:param native: JSON object holding the native timing entries
:return: LONG event time stamp, LONG launch time, and LONG nus update time
"""
start_time = 0
end_time = 0
launch_time = 0
nus_update_time = 0
for item in native:
if item[CONSTANTS.LIA_TIMING_NAME] == CONSTANTS.LIA_APP_ON_CREATE and item[CONSTANTS.LIA_START] is not None:
start_time = item[CONSTANTS.LIA_START][CONSTANTS.LIA_LONG]
if item[CONSTANTS.LIA_TIMING_NAME] == CONSTANTS.LIA_NUS_UPDATE:
if item[CONSTANTS.LIA_TIMING_VALUE] is not None:
nus_update_time = item[CONSTANTS.LIA_TIMING_VALUE][CONSTANTS.LIA_LONG]
if item[CONSTANTS.LIA_START] is not None:
end_time = item[CONSTANTS.LIA_START][CONSTANTS.LIA_LONG]
if start_time == 0 or end_time == 0:
time_stamp = 0
launch_time = 0
else:
time_stamp = start_time
launch_time = end_time - start_time
return (time_stamp, launch_time, nus_update_time)
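# Illustrative sketch (numbers are hypothetical): if the native timings hold an
# "app on create" entry whose start is 1000 and an "nus update" entry whose
# start is 1500 with a timing value of 300, get_times() returns
# (1000, 500, 300), i.e. (event timestamp, launch duration, nus update duration).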
# parse Android RUM logs
def parse(self):
# check if outdir exists, if not, create it
if not os.path.isdir(self.outdir):
os.makedirs(self.outdir)
if not os.path.isdir(self.resource_directory):
os.makedirs(self.resource_directory)
results = {}
ts = None
# set output csv
launch_time_file = self.get_csv('launch_time')
nus_update_time_file = self.get_csv('nus_update_time')
for input_file in self.infile_list:
# get Android RUM input data: for each line, generate (timestamp, launch_time, nus_update_time)
with open(input_file, 'r') as inf:
for line in inf:
try:
data = json.loads(line)
except ValueError:
logger.warn("Invalid JSON Object at line: %s", line)
if data[CONSTANTS.LIA_NATIVE_TIMINGS] is not None:
native = data[CONSTANTS.LIA_NATIVE_TIMINGS][CONSTANTS.LIA_ARRAY]
time_stamp, launch_time, nus_update_time = self.get_times(native)
if launch_time != 0 and nus_update_time != 0:
results[time_stamp] = (str(launch_time), str(nus_update_time))
# Writing launch time and nus update time stats
with open(launch_time_file, 'w') as launchtimef:
with open(nus_update_time_file, 'w') as nusupdatetimef:
for ts in sorted(results.iterkeys()):
launchtimef.write(naarad.utils.get_standardized_timestamp(ts, 'epoch_ms') + ',' + results[ts][0] + '\n')
nusupdatetimef.write(naarad.utils.get_standardized_timestamp(ts, 'epoch_ms') + ',' + results[ts][1] + '\n')
self.csv_files.append(launch_time_file)
self.csv_files.append(nus_update_time_file)
return True
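# Hedged usage sketch: the argument values below are illustrative placeholders,
# not taken from a real naarad configuration; the constructor signature follows
# Metric.__init__ as used above.
if __name__ == '__main__':
  rum_metric = LinkedInAndroidRumMetric(
      'LINKEDINANDROIDRUM', ['rum_android.log'], 'localhost', [], '/tmp/naarad_out',
      'resources', 'android-rum', None, None, {}, [], [])
  rum_metric.parse()  # writes the launch_time and nus_update_time CSVs to outdir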
|
{
"content_hash": "d20ecde35be8d3871e9fecc61e9b590d",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 137,
"avg_line_length": 40.82051282051282,
"alnum_prop": 0.684463986599665,
"repo_name": "kilink/naarad",
"id": "99af901ec4414d1a54e9a08de141f9aa195c0c7b",
"size": "4791",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/naarad/metrics/linkedin_android_rum_metric.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "83289"
},
{
"name": "CSS",
"bytes": "4420"
},
{
"name": "HTML",
"bytes": "41597"
},
{
"name": "JavaScript",
"bytes": "42346"
},
{
"name": "Python",
"bytes": "382090"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from unittest.mock import Mock
from Orange.data import ContinuousVariable, DiscreteVariable, Domain
from Orange.widgets.data.contexthandlers import \
SelectAttributesDomainContextHandler
from Orange.widgets.settings import ContextSetting
from Orange.widgets.utils import vartype
Continuous = vartype(ContinuousVariable())
Discrete = vartype(DiscreteVariable())
class TestSelectAttributesDomainContextHandler(TestCase):
def setUp(self):
self.domain = Domain(
attributes=[ContinuousVariable('c1'),
DiscreteVariable('d1', values='abc'),
DiscreteVariable('d2', values='def')],
class_vars=[DiscreteVariable('d3', values='ghi')],
metas=[ContinuousVariable('c2'),
DiscreteVariable('d4', values='jkl')]
)
self.args = (self.domain,
{'c1': Continuous, 'd1': Discrete,
'd2': Discrete, 'd3': Discrete},
{'c2': Continuous, 'd4': Discrete, })
self.handler = SelectAttributesDomainContextHandler(metas_in_res=True)
self.handler.read_defaults = lambda: None
def test_open_context(self):
self.handler.bind(SimpleWidget)
context = Mock(
attributes=self.args[1], metas=self.args[2], values=dict(
domain_role_hints=({('d1', Discrete): ('available', 0),
('d2', Discrete): ('meta', 0),
('c1', Continuous): ('attribute', 0),
('d3', Discrete): ('attribute', 1),
('d4', Discrete): ('attribute', 2),
('c2', Continuous): ('class', 0)}, -2),
with_metas=[('d1', Discrete), ('d2', Discrete)]
))
self.handler.global_contexts = \
[Mock(values={}), context, Mock(values={})]
widget = SimpleWidget()
self.handler.initialize(widget)
self.handler.open_context(widget, self.args[0])
self.assertEqual(widget.domain_role_hints,
{('d1', Discrete): ('available', 0),
('d2', Discrete): ('meta', 0),
('c1', Continuous): ('attribute', 0),
('d3', Discrete): ('attribute', 1),
('d4', Discrete): ('attribute', 2),
('c2', Continuous): ('class', 0)})
def test_open_context_with_imperfect_match(self):
self.handler.bind(SimpleWidget)
context = Mock(values=dict(
domain_role_hints=({('d1', Discrete): ('available', 0),
('d2', Discrete): ('meta', 0),
('c1', Continuous): ('attribute', 0),
('d6', Discrete): ('attribute', 1),
('d7', Discrete): ('attribute', 2),
('c2', Continuous): ('class', 0)}, -2)
))
self.handler.global_contexts = \
[Mock(values={}), context, Mock(values={})]
widget = SimpleWidget()
self.handler.initialize(widget)
self.handler.open_context(widget, self.args[0])
self.assertEqual(widget.domain_role_hints,
{('d1', Discrete): ('available', 0),
('d2', Discrete): ('meta', 0),
('c1', Continuous): ('attribute', 0),
('c2', Continuous): ('class', 0)})
def test_open_context_with_no_match(self):
self.handler.bind(SimpleWidget)
context = Mock(values=dict(
domain_role_hints=({('d1', Discrete): ('available', 0),
('d2', Discrete): ('meta', 0),
('c1', Continuous): ('attribute', 0),
('d3', Discrete): ('attribute', 1),
('d4', Discrete): ('attribute', 2),
('c2', Continuous): ('class', 0)}, -2),
required=('g1', Continuous),
))
self.handler.global_contexts = [context]
widget = SimpleWidget()
self.handler.initialize(widget)
self.handler.open_context(widget, self.args[0])
self.assertEqual(widget.domain_role_hints, {})
class SimpleWidget:
domain_role_hints = ContextSetting({}, exclude_metas=False)
required = ContextSetting("", required=ContextSetting.REQUIRED)
def retrieveSpecificSettings(self):
pass
def storeSpecificSettings(self):
pass
|
{
"content_hash": "c06ffb93d57d471a1db5b930de46520c",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 78,
"avg_line_length": 43.55140186915888,
"alnum_prop": 0.5023605150214592,
"repo_name": "marinkaz/orange3",
"id": "dacb0bddb7c503203b5ef4eacf458dec7de41f18",
"size": "4660",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "Orange/widgets/tests/test_owselectcolumns.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "1746"
},
{
"name": "JavaScript",
"bytes": "3025"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "19900"
},
{
"name": "Python",
"bytes": "4007069"
},
{
"name": "Shell",
"bytes": "39117"
}
],
"symlink_target": ""
}
|
from sqlalchemy import Column, Integer, String, Date, Boolean, DateTime
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(String)
password = Column(String)
creation_date = Column(Date)
apikey = Column(String)
encryption_enabled = Column(Boolean)
theme_name = Column(String)
class File(Base):
__tablename__ = 'files'
id = Column(Integer, primary_key=True)
owner = Column(Integer)
name = Column(String)
extension = Column(String)
upload_date = Column(DateTime)
encrypted = Column(Boolean)
local_name = Column(String)
class Paste(Base):
__tablename__ = 'pastes'
id = Column(Integer, primary_key=True)
owner = Column(Integer)
name = Column(String)
upload_date = Column(DateTime)
content_json = Column(String)
is_code = Column(Boolean)
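# Minimal usage sketch (the SQLite URL is an assumption for illustration; these
# legacy models are normally consumed by the migration code rather than used
# directly):
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('sqlite:///lmda_old.db')
    Base.metadata.create_all(engine)         # create the legacy tables
    Session = sessionmaker(bind=engine)
    session = Session()
    print(session.query(User).count())       # e.g. count existing users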
|
{
"content_hash": "692917d947839ce5f9c708e8f8aecde6",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 71,
"avg_line_length": 24.71794871794872,
"alnum_prop": 0.6732365145228216,
"repo_name": "mstojcevich/Lambda-Python",
"id": "d1c26afde939cb52a5f9d577ef030d0f850b2dee",
"size": "964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lmda/migrate/oldmodels.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22896"
},
{
"name": "CoffeeScript",
"bytes": "21296"
},
{
"name": "HTML",
"bytes": "13359"
},
{
"name": "JavaScript",
"bytes": "361546"
},
{
"name": "Python",
"bytes": "45396"
},
{
"name": "Shell",
"bytes": "83"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.patches import _Style, FancyArrowPatch
from matplotlib.transforms import IdentityTransform
from matplotlib.path import Path
import numpy as np
class _FancyAxislineStyle:
class SimpleArrow(FancyArrowPatch):
"""
The artist class that will be returned for SimpleArrow style.
"""
_ARROW_STYLE = "->"
def __init__(self, axis_artist, line_path, transform,
line_mutation_scale):
self._axis_artist = axis_artist
self._line_transform = transform
self._line_path = line_path
self._line_mutation_scale = line_mutation_scale
FancyArrowPatch.__init__(self,
path=self._line_path,
arrowstyle=self._ARROW_STYLE,
arrow_transmuter=None,
patchA=None,
patchB=None,
shrinkA=0.,
shrinkB=0.,
mutation_scale=line_mutation_scale,
mutation_aspect=None,
transform=IdentityTransform(),
)
def set_line_mutation_scale(self, scale):
self.set_mutation_scale(scale*self._line_mutation_scale)
def _extend_path(self, path, mutation_size=10):
"""
            Extend the path to make room for drawing the arrow.
"""
from matplotlib.bezier import get_cos_sin
x0, y0 = path.vertices[-2]
x1, y1 = path.vertices[-1]
cost, sint = get_cos_sin(x0, y0, x1, y1)
d = mutation_size * 1.
x2, y2 = x1 + cost*d, y1+sint*d
if path.codes is None:
_path = Path(np.concatenate([path.vertices, [[x2, y2]]]))
else:
_path = Path(np.concatenate([path.vertices, [[x2, y2]]]),
np.concatenate([path.codes, [Path.LINETO]]))
return _path
def set_path(self, path):
self._line_path = path
def draw(self, renderer):
"""
Draw the axis line.
            1) transform the path to display coordinates.
            2) extend the path to make room for the arrow.
            3) update the path of the FancyArrowPatch.
            4) draw.
"""
            path_in_disp = self._line_transform.transform_path(self._line_path)
            mutation_size = self.get_mutation_scale()
            extended_path = self._extend_path(path_in_disp,
                                              mutation_size=mutation_size)
            self._path_original = extended_path
FancyArrowPatch.draw(self, renderer)
class FilledArrow(SimpleArrow):
"""
        The artist class that will be returned for FilledArrow style.
"""
_ARROW_STYLE = "-|>"
class AxislineStyle(_Style):
"""
:class:`AxislineStyle` is a container class which defines style classes
for AxisArtists.
    An instance of any axisline style class is a callable object,
whose call signature is ::
__call__(self, axis_artist, path, transform)
    When called, this should return an mpl artist with the following
    methods implemented. ::
def set_path(self, path):
# set the path for axisline.
def set_line_mutation_scale(self, scale):
# set the scale
def draw(self, renderer):
# draw
"""
_style_list = {}
class _Base(object):
# The derived classes are required to be able to be initialized
        # w/o arguments, i.e., all their arguments (except self) must have
        # default values.
def __init__(self):
"""
initialization.
"""
super(AxislineStyle._Base, self).__init__()
def __call__(self, axis_artist, transform):
"""
Given the AxisArtist instance, and transform for the path
(set_path method), return the mpl artist for drawing the axis line.
"""
return self.new_line(axis_artist, transform)
class SimpleArrow(_Base):
"""
A simple arrow.
"""
ArrowAxisClass = _FancyAxislineStyle.SimpleArrow
def __init__(self, size=1):
"""
*size*
size of the arrow as a fraction of the ticklabel size.
"""
self.size = size
super(AxislineStyle.SimpleArrow, self).__init__()
def new_line(self, axis_artist, transform):
linepath = Path([(0,0), (0, 1)])
axisline = self.ArrowAxisClass(axis_artist, linepath, transform,
line_mutation_scale=self.size)
return axisline
_style_list["->"] = SimpleArrow
class FilledArrow(SimpleArrow):
ArrowAxisClass = _FancyAxislineStyle.FilledArrow
_style_list["-|>"] = FilledArrow
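# Hedged usage sketch: with an mpl_toolkits.axisartist Axes, a style from
# _style_list is normally selected by name through the AxisArtist helper
# (method name taken from the axisartist examples, shown here as an assumption):
#   ax.axis["xzero"].set_axisline_style("-|>", size=1.5)
#   ax.axis["xzero"].set_visible(True)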
|
{
"content_hash": "ebb9eb7b8530de935e22594e536efee7",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 79,
"avg_line_length": 31.410714285714285,
"alnum_prop": 0.5209399279893879,
"repo_name": "uglyboxer/linear_neuron",
"id": "d87530444a6590171d144dee89260f0c81c4ed1b",
"size": "5277",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "net-p3/lib/python3.5/site-packages/mpl_toolkits/axisartist/axisline_style.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "317983"
},
{
"name": "C++",
"bytes": "5695"
},
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "Groff",
"bytes": "17679"
},
{
"name": "HTML",
"bytes": "4000"
},
{
"name": "JavaScript",
"bytes": "24260"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "24433064"
},
{
"name": "Shell",
"bytes": "3791"
}
],
"symlink_target": ""
}
|
import sys
import os
from paddle.trainer_config_helpers import *
def seq_to_seq_data(data_dir,
is_generating,
dict_size=30000,
train_list='train.list',
test_list='test.list',
gen_list='gen.list',
gen_result='gen_result'):
"""
Predefined seqToseq train data provider for application
is_generating: whether this config is used for generating
dict_size: word count of dictionary
train_list: a text file containing a list of training data
test_list: a text file containing a list of testing data
gen_list: a text file containing a list of generating data
gen_result: a text file containing generating result
"""
src_lang_dict = os.path.join(data_dir, 'src.dict')
trg_lang_dict = os.path.join(data_dir, 'trg.dict')
src_dict = dict()
for line_count, line in enumerate(open(src_lang_dict, "r")):
src_dict[line.strip()] = line_count
trg_dict = dict()
for line_count, line in enumerate(open(trg_lang_dict, "r")):
trg_dict[line.strip()] = line_count
if is_generating:
train_list = None
test_list = os.path.join(data_dir, gen_list)
trg_dict = None
else:
train_list = os.path.join(data_dir, train_list)
test_list = os.path.join(data_dir,test_list)
define_py_data_sources2(train_list, test_list,
module = "dataprovider",
obj = "process",
args = {"src_dict": src_dict,
"trg_dict": trg_dict})
return {"src_dict_path": src_lang_dict, "trg_dict_path": trg_lang_dict,
"gen_result": gen_result}
def gru_encoder_decoder(data_conf,
is_generating,
word_vector_dim=512,
encoder_size=512,
decoder_size=512,
beam_size=3,
max_length=250):
"""
A wrapper for an attention version of GRU Encoder-Decoder network
is_generating: whether this config is used for generating
encoder_size: dimension of hidden unit in GRU Encoder network
decoder_size: dimension of hidden unit in GRU Decoder network
word_vector_dim: dimension of word vector
beam_size: expand width in beam search
max_length: a stop condition of sequence generation
"""
for k, v in data_conf.iteritems():
globals()[k] = v
source_dict_dim = len(open(src_dict_path, "r").readlines())
target_dict_dim = len(open(trg_dict_path, "r").readlines())
gen_trans_file = gen_result
src_word_id = data_layer(name='source_language_word', size=source_dict_dim)
src_embedding = embedding_layer(
input=src_word_id,
size=word_vector_dim,
param_attr=ParamAttr(name='_source_language_embedding'))
src_forward = simple_gru(input=src_embedding, size=encoder_size)
src_backward = simple_gru(input=src_embedding,
size=encoder_size,
reverse=True)
encoded_vector = concat_layer(input=[src_forward, src_backward])
with mixed_layer(size=decoder_size) as encoded_proj:
encoded_proj += full_matrix_projection(input=encoded_vector)
backward_first = first_seq(input=src_backward)
with mixed_layer(size=decoder_size,
act=TanhActivation(), ) as decoder_boot:
decoder_boot += full_matrix_projection(input=backward_first)
def gru_decoder_with_attention(enc_vec, enc_proj, current_word):
decoder_mem = memory(name='gru_decoder',
size=decoder_size,
boot_layer=decoder_boot)
context = simple_attention(encoded_sequence=enc_vec,
encoded_proj=enc_proj,
decoder_state=decoder_mem, )
with mixed_layer(size=decoder_size * 3) as decoder_inputs:
decoder_inputs += full_matrix_projection(input=context)
decoder_inputs += full_matrix_projection(input=current_word)
gru_step = gru_step_layer(name='gru_decoder',
input=decoder_inputs,
output_mem=decoder_mem,
size=decoder_size)
with mixed_layer(size=target_dict_dim,
bias_attr=True,
act=SoftmaxActivation()) as out:
out += full_matrix_projection(input=gru_step)
return out
decoder_group_name = "decoder_group"
group_inputs=[StaticInput(input=encoded_vector,is_seq=True),
StaticInput(input=encoded_proj,is_seq=True)]
if not is_generating:
trg_embedding = embedding_layer(
input=data_layer(name='target_language_word',
size=target_dict_dim),
size=word_vector_dim,
param_attr=ParamAttr(name='_target_language_embedding'))
group_inputs.append(trg_embedding)
        # For a decoder equipped with an attention mechanism, the target
        # embedding (the ground truth) is the data input during training,
        # while the encoded source sequence is accessed as an unbounded memory.
        # Here, StaticInput defines a read-only memory
        # for the recurrent_group.
decoder = recurrent_group(name=decoder_group_name,
step=gru_decoder_with_attention,
input=group_inputs)
lbl = data_layer(name='target_language_next_word',
size=target_dict_dim)
cost = classification_cost(input=decoder, label=lbl)
outputs(cost)
else:
        # In generation, the decoder predicts the next target word based on
        # the encoded source sequence and the last generated target word.
        # The encoded source sequence (the encoder's output) must be specified
        # by StaticInput, which is a read-only memory.
        # The embedding of the last generated word is automatically retrieved
        # by GeneratedInput, which is initialized with a start mark such as <s>
        # and must be included in generation.
trg_embedding = GeneratedInput(
size=target_dict_dim,
embedding_name='_target_language_embedding',
embedding_size=word_vector_dim)
group_inputs.append(trg_embedding)
beam_gen = beam_search(name=decoder_group_name,
step=gru_decoder_with_attention,
input=group_inputs,
bos_id=0,
eos_id=1,
beam_size=beam_size,
max_length=max_length)
seqtext_printer_evaluator(input=beam_gen,
id_input=data_layer(name="sent_id", size=1),
dict_file=trg_dict_path,
result_file=gen_trans_file)
outputs(beam_gen)
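# Hedged usage sketch: a trainer config would typically chain the two helpers
# above; the data directory below is a placeholder, not a path from this repo.
#   data_conf = seq_to_seq_data(data_dir='./data/pre-wmt14', is_generating=False)
#   gru_encoder_decoder(data_conf, is_generating=False)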
|
{
"content_hash": "6b4b8de6b2372d2a157b395ec674a51b",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 79,
"avg_line_length": 42.74251497005988,
"alnum_prop": 0.5701877276548053,
"repo_name": "zuowang/Paddle",
"id": "edd6ad3f739b6cefc24d235be55c7a8f541e1ab7",
"size": "7767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/seqToseq/seqToseq_net.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "210539"
},
{
"name": "C++",
"bytes": "2694261"
},
{
"name": "CMake",
"bytes": "90483"
},
{
"name": "Cuda",
"bytes": "409839"
},
{
"name": "M4",
"bytes": "39963"
},
{
"name": "Perl",
"bytes": "11412"
},
{
"name": "Python",
"bytes": "826795"
},
{
"name": "Shell",
"bytes": "59063"
}
],
"symlink_target": ""
}
|
import myhdl as hdl
def clock_gen(clk_s, Ticks):
interval = hdl.delay(Ticks)
@hdl.always(interval)
def clk():
clk_s.next = not clk_s
return clk
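# Minimal simulation sketch (assumes the standard myhdl Signal/Simulation API):
if __name__ == '__main__':
    clk = hdl.Signal(bool(0))
    clk_driver = clock_gen(clk, Ticks=10)   # toggle every 10 simulation ticks
    hdl.Simulation(clk_driver).run(100)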
|
{
"content_hash": "f84a22fa0f353e2a5e6083ba21a821d5",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 31,
"avg_line_length": 15.636363636363637,
"alnum_prop": 0.6046511627906976,
"repo_name": "mutalabs/instar",
"id": "e47ca7f08313f1a6fb8453bb9899ef7b3d82750a",
"size": "172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "instar/resources/resources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1290"
},
{
"name": "Python",
"bytes": "5681"
}
],
"symlink_target": ""
}
|
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, validate
from streamlink.stream import RTMPStream, HLSStream
STREAMS_URL = "https://piczel.tv:3000/streams/{0}?&page=1&sfw=false&live_only=true"
HLS_URL = "https://5810b93fdf674.streamlock.net:1936/live/{0}/playlist.m3u8"
RTMP_URL = "rtmp://piczel.tv:1935/live/{0}"
_url_re = re.compile(r"https://piczel.tv/watch/(\w+)")
_streams_schema = validate.Schema(
{
"type": validate.text,
"data": [
{
"id": int,
"live": bool,
"slug": validate.text
}
]
}
)
class Piczel(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_streams(self):
match = _url_re.match(self.url)
if not match:
return
channel_name = match.group(1)
res = http.get(STREAMS_URL.format(channel_name))
streams = http.json(res, schema=_streams_schema)
if streams["type"] not in ("multi", "stream"):
return
for stream in streams["data"]:
if stream["slug"] != channel_name:
continue
if not stream["live"]:
return
streams = {}
try:
streams.update(HLSStream.parse_variant_playlist(self.session, HLS_URL.format(stream["id"])))
except IOError as e:
# fix for hosted offline streams
if "404 Client Error" in str(e):
return
raise
streams["rtmp"] = RTMPStream(self.session, {
"rtmp": RTMP_URL.format(stream["id"]),
"pageUrl": self.url,
"live": True
})
return streams
return
__plugin__ = Piczel
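# Hedged usage note: once the plugin is picked up by streamlink's URL matching,
# a typical CLI invocation is:
#   streamlink "https://piczel.tv/watch/<channel>" best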
|
{
"content_hash": "22d0d235dbfa5507dbb5ca47e5b3add8",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 108,
"avg_line_length": 25.75,
"alnum_prop": 0.5275080906148867,
"repo_name": "mmetak/streamlink",
"id": "e6bac79b74b7fb2aa2001d4cac613938cbc5f0e7",
"size": "1854",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/piczel.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "932019"
},
{
"name": "Shell",
"bytes": "16668"
}
],
"symlink_target": ""
}
|
"""
Django settings for pokepy project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'emu6)69ar_*h1(rwqmxa6o@2#o+3)lvvxq8^6n+t+d5vck+cw0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pokepy',
'django_extensions',
'pykemon',
'simplejson',
'requests',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'pokepy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pokepy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
{
"content_hash": "d0d5a527f3aac86e2654008b461e63f1",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 71,
"avg_line_length": 25.457943925233646,
"alnum_prop": 0.6868575624082232,
"repo_name": "locolan/pokepy",
"id": "8193c5aa11bf4e7bdc1465a082fd932296209ca8",
"size": "2724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pokepy/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1242"
},
{
"name": "Python",
"bytes": "17308"
}
],
"symlink_target": ""
}
|
"""This code example gets all creative sets.
To create creative sets, run create_creative_sets.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
creative_set_service = client.GetService('CreativeSetService',
version='v201411')
# Create a filter statement.
statement = dfp.FilterStatement()
# Get creative sets by statement.
while True:
response = creative_set_service.getCreativeSetsByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for creative_set in response['results']:
print ('Creative set with ID \'%s\' and name \'%s\' was found.'
% (creative_set['id'], creative_set['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
{
"content_hash": "a765723a26aa9629714164d4dee4c4ca",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 77,
"avg_line_length": 31.093023255813954,
"alnum_prop": 0.6843679880329095,
"repo_name": "losnikitos/googleads-python-lib",
"id": "09467cc504cb59c913291c7221a214570d8d2bec",
"size": "1955",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dfp/v201411/creative_set_service/get_all_creative_sets.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
}
|
import time
from six.moves import range as xrange
from tempest.common.utils import data_utils
from tempest.lib import exceptions
from tempest import test
from monasca_tempest_tests.tests.api import base
from monasca_tempest_tests.tests.api import constants
from monasca_tempest_tests.tests.api import helpers
class TestMetrics(base.BaseMonascaTest):
@classmethod
def resource_setup(cls):
super(TestMetrics, cls).resource_setup()
@classmethod
def resource_cleanup(cls):
super(TestMetrics, cls).resource_cleanup()
@test.attr(type='gate')
def test_create_metric(self):
name = data_utils.rand_name('name')
key = data_utils.rand_name('key')
value = data_utils.rand_name('value')
timestamp = int(round(time.time() * 1000))
time_iso = helpers.timestamp_to_iso(timestamp)
end_timestamp = int(round((time.time() + 3600 * 24) * 1000))
end_time_iso = helpers.timestamp_to_iso(end_timestamp)
value_meta_key = data_utils.rand_name('value_meta_key')
value_meta_value = data_utils.rand_name('value_meta_value')
metric = helpers.create_metric(name=name,
dimensions={key: value},
timestamp=timestamp,
value=1.23,
value_meta={
value_meta_key: value_meta_value
})
resp, response_body = self.monasca_client.create_metrics(metric)
self.assertEqual(204, resp.status)
query_param = '?name=' + name + '&start_time=' + time_iso + \
'&end_time=' + end_time_iso
for i in xrange(constants.MAX_RETRIES):
resp, response_body = self.monasca_client.\
list_measurements(query_param)
self.assertEqual(200, resp.status)
elements = response_body['elements']
for element in elements:
if str(element['name']) == name:
self._verify_list_measurements_element(element, key, value)
measurement = element['measurements'][0]
self._verify_list_measurements_measurement(
measurement, metric, value_meta_key, value_meta_value)
return
time.sleep(constants.RETRY_WAIT_SECS)
if i == constants.MAX_RETRIES - 1:
error_msg = "Failed test_create_metric: " \
"timeout on waiting for metrics: at least " \
"one metric is needed. Current number of " \
"metrics = 0"
self.fail(error_msg)
@test.attr(type='gate')
def test_create_metrics(self):
name = data_utils.rand_name('name')
key = data_utils.rand_name('key')
value = data_utils.rand_name('value')
timestamp = int(round(time.time() * 1000))
time_iso = helpers.timestamp_to_iso(timestamp)
end_timestamp = int(round(timestamp + 3600 * 24 * 1000))
end_time_iso = helpers.timestamp_to_iso(end_timestamp)
value_meta_key1 = data_utils.rand_name('meta_key')
value_meta_value1 = data_utils.rand_name('meta_value')
value_meta_key2 = data_utils.rand_name('value_meta_key')
value_meta_value2 = data_utils.rand_name('value_meta_value')
metrics = [
helpers.create_metric(name=name,
dimensions={key: value},
timestamp=timestamp,
value=1.23,
value_meta={
value_meta_key1: value_meta_value1
}),
helpers.create_metric(name=name,
dimensions={key: value},
timestamp=timestamp + 6000,
value=4.56,
value_meta={
value_meta_key2: value_meta_value2
})
]
resp, response_body = self.monasca_client.create_metrics(metrics)
self.assertEqual(204, resp.status)
query_param = '?name=' + name + '&start_time=' + str(time_iso) + \
'&end_time=' + str(end_time_iso)
for i in xrange(constants.MAX_RETRIES):
resp, response_body = self.monasca_client.\
list_measurements(query_param)
self.assertEqual(200, resp.status)
elements = response_body['elements']
for element in elements:
if str(element['name']) == name \
and len(element['measurements']) == 2:
self._verify_list_measurements_element(element, key, value)
first_measurement = element['measurements'][0]
second_measurement = element['measurements'][1]
self._verify_list_measurements_measurement(
first_measurement, metrics[0], value_meta_key1,
value_meta_value1)
self._verify_list_measurements_measurement(
second_measurement, metrics[1], value_meta_key2,
value_meta_value2)
return
time.sleep(constants.RETRY_WAIT_SECS)
if i == constants.MAX_RETRIES - 1:
error_msg = "Failed test_create_metrics: " \
"timeout on waiting for metrics: at least " \
"one metric is needed. Current number of " \
"metrics = 0"
self.fail(error_msg)
@test.attr(type='gate')
@test.attr(type=['negative'])
def test_create_metric_with_no_name(self):
metric = helpers.create_metric(name=None)
self.assertRaises(exceptions.UnprocessableEntity,
self.monasca_client.create_metrics,
metric)
@test.attr(type='gate')
@test.attr(type=['negative'])
def test_create_metric_with_empty_name(self):
metric = helpers.create_metric(name='')
self.assertRaises(exceptions.UnprocessableEntity,
self.monasca_client.create_metrics,
metric)
@test.attr(type='gate')
@test.attr(type=['negative'])
def test_create_metric_with_empty_value_in_dimensions(self):
name = data_utils.rand_name('name')
metric = helpers.create_metric(name=name,
dimensions={'key': ''})
self.assertRaises(exceptions.UnprocessableEntity,
self.monasca_client.create_metrics,
metric)
@test.attr(type='gate')
@test.attr(type=['negative'])
def test_create_metric_with_empty_key_in_dimensions(self):
name = data_utils.rand_name('name')
metric = helpers.create_metric(name=name,
dimensions={'': 'value'})
self.assertRaises(exceptions.UnprocessableEntity,
self.monasca_client.create_metrics,
metric)
@test.attr(type='gate')
def test_create_metric_with_no_dimensions(self):
name = data_utils.rand_name('name')
timestamp = int(round(time.time() * 1000))
time_iso = helpers.timestamp_to_iso(timestamp)
end_timestamp = int(round(timestamp + 3600 * 24 * 1000))
end_time_iso = helpers.timestamp_to_iso(end_timestamp)
value_meta_key = data_utils.rand_name('value_meta_key')
value_meta_value = data_utils.rand_name('value_meta_value')
metric = helpers.create_metric(name=name,
dimensions=None,
timestamp=timestamp,
value=1.23,
value_meta={
value_meta_key: value_meta_value})
resp, response_body = self.monasca_client.create_metrics(metric)
self.assertEqual(204, resp.status)
query_param = '?name=' + str(name) + '&start_time=' + str(time_iso) \
+ '&end_time=' + str(end_time_iso)
for i in xrange(constants.MAX_RETRIES):
resp, response_body = self.monasca_client.\
list_measurements(query_param)
self.assertEqual(200, resp.status)
elements = response_body['elements']
for element in elements:
if str(element['name']) == name:
self._verify_list_measurements_element(
element, test_key=None, test_value=None)
if len(element['measurements']) > 0:
measurement = element['measurements'][0]
self._verify_list_measurements_measurement(
measurement, metric, value_meta_key,
value_meta_value)
return
time.sleep(constants.RETRY_WAIT_SECS)
if i == constants.MAX_RETRIES - 1:
error_msg = "Failed test_create_metric_with_no_dimensions: " \
"timeout on waiting for metrics: at least " \
"one metric is needed. Current number of " \
"metrics = 0"
self.fail(error_msg)
@test.attr(type='gate')
@test.attr(type=['negative'])
def test_create_metric_with_no_timestamp(self):
metric = helpers.create_metric()
metric['timestamp'] = None
self.assertRaises(exceptions.UnprocessableEntity,
self.monasca_client.create_metrics,
metric)
@test.attr(type='gate')
@test.attr(type=['negative'])
def test_create_metric_no_value(self):
timestamp = int(round(time.time() * 1000))
metric = helpers.create_metric(timestamp=timestamp,
value=None)
self.assertRaises(exceptions.UnprocessableEntity,
self.monasca_client.create_metrics,
metric)
@test.attr(type='gate')
@test.attr(type=['negative'])
def test_create_metric_with_name_exceeds_max_length(self):
long_name = "x" * (constants.MAX_METRIC_NAME_LENGTH + 1)
metric = helpers.create_metric(long_name)
self.assertRaises(exceptions.UnprocessableEntity,
self.monasca_client.create_metrics,
metric)
@test.attr(type='gate')
@test.attr(type=['negative'])
def test_create_metric_with_invalid_chars_in_name(self):
for invalid_char in constants.INVALID_CHARS:
metric = helpers.create_metric(invalid_char)
self.assertRaises(exceptions.UnprocessableEntity,
self.monasca_client.create_metrics,
metric)
@test.attr(type='gate')
@test.attr(type=['negative'])
def test_create_metric_with_invalid_chars_in_dimensions(self):
for invalid_char in constants.INVALID_CHARS:
metric = helpers.create_metric('name-1', {'key-1': invalid_char})
self.assertRaises(exceptions.UnprocessableEntity,
self.monasca_client.create_metrics,
metric)
for invalid_char in constants.INVALID_CHARS:
metric = helpers.create_metric('name-1', {invalid_char: 'value-1'})
self.assertRaises(exceptions.UnprocessableEntity,
self.monasca_client.create_metrics,
metric)
@test.attr(type='gate')
@test.attr(type=['negative'])
def test_create_metric_dimension_key_exceeds_max_length(self):
long_key = "x" * (constants.MAX_DIMENSION_KEY_LENGTH + 1)
metric = helpers.create_metric('name-1', {long_key: 'value-1'})
self.assertRaises(exceptions.UnprocessableEntity,
self.monasca_client.create_metrics,
metric)
@test.attr(type='gate')
@test.attr(type=['negative'])
def test_create_metric_dimension_value_exceeds_max_length(self):
long_value = "x" * (constants.MAX_DIMENSION_VALUE_LENGTH + 1)
metric = helpers.create_metric('name-1', {'key-1': long_value})
self.assertRaises(exceptions.UnprocessableEntity,
self.monasca_client.create_metrics,
metric)
@test.attr(type='gate')
@test.attr(type=['negative'])
def test_create_metric_with_value_meta_name_exceeds_max_length(self):
long_value_meta_name = "x" * (constants.MAX_VALUE_META_NAME_LENGTH + 1)
value_meta_dict = {long_value_meta_name: "value_meta_value"}
metric = helpers.create_metric(name='name', value_meta=value_meta_dict)
self.assertRaises(exceptions.UnprocessableEntity,
self.monasca_client.create_metrics,
metric)
@test.attr(type='gate')
@test.attr(type=['negative'])
def test_create_metric_with_value_meta_exceeds_max_length(self):
value_meta_name = "x"
long_value_meta_value = "y" * constants.MAX_VALUE_META_TOTAL_LENGTH
value_meta_dict = {value_meta_name: long_value_meta_value}
metric = helpers.create_metric(name='name', value_meta=value_meta_dict)
self.assertRaises(exceptions.UnprocessableEntity,
self.monasca_client.create_metrics,
metric)
@test.attr(type='gate')
def test_list_metrics(self):
resp, response_body = self.monasca_client.list_metrics()
self.assertEqual(200, resp.status)
self.assertTrue(set(['links', 'elements']) == set(response_body))
elements = response_body['elements']
element = elements[0]
self._verify_list_metrics_element(element, test_key=None,
test_value=None, test_name=None)
self.assertTrue(set(['id', 'name', 'dimensions']) == set(element))
@test.attr(type='gate')
def test_list_metrics_with_dimensions(self):
name = data_utils.rand_name('name')
key = data_utils.rand_name('key')
value = data_utils.rand_name('value')
metric = helpers.create_metric(name=name, dimensions={key: value})
resp, response_body = self.monasca_client.create_metrics(metric)
self.assertEqual(204, resp.status)
query_param = '?dimensions=' + key + ':' + value
for i in xrange(constants.MAX_RETRIES):
resp, response_body = self.monasca_client.list_metrics(query_param)
self.assertEqual(200, resp.status)
elements = response_body['elements']
for element in elements:
if str(element['dimensions'][key]) == value:
self._verify_list_metrics_element(element, test_name=name)
return
time.sleep(constants.RETRY_WAIT_SECS)
if i == constants.MAX_RETRIES - 1:
error_msg = "Failed test_list_metrics_with_dimensions: " \
"timeout on waiting for metrics: at least " \
"one metric is needed. Current number of " \
"metrics = 0"
self.fail(error_msg)
@test.attr(type='gate')
def test_list_metrics_dimension_query_multi_value_with_diff_names(self):
metrics, name, key_service, values = \
self._create_metrics_with_different_dimensions(same_name=False)
metric_dimensions = self._get_metric_dimensions(
key_service, values, same_metric_name=False)
query_param = '?dimensions=' + key_service + ':' + values[0] + '|' +\
values[1]
self._verify_dimensions(query_param, metric_dimensions)
@test.attr(type='gate')
def test_list_metrics_dimension_query_no_value_with_diff_names(self):
metrics, name, key_service, values = \
self._create_metrics_with_different_dimensions(same_name=False)
metric_dimensions = self._get_metric_dimensions(
key_service, values, same_metric_name=False)
query_param = '?dimensions=' + key_service
self._verify_dimensions(query_param, metric_dimensions)
@test.attr(type='gate')
def test_list_metrics_dimension_query_multi_value_with_same_name(self):
# Skip the test for now due to InfluxDB Inconsistency
return
metrics, name, key_service, values = \
self._create_metrics_with_different_dimensions(same_name=True)
metric_dimensions = self._get_metric_dimensions(
key_service, values, same_metric_name=True)
query_param = '?name=' + name + '&dimensions=' + key_service + ':' +\
values[0] + '|' + values[1]
self._verify_dimensions(query_param, metric_dimensions)
@test.attr(type='gate')
def test_list_metrics_dimension_query_no_value_with_same_name(self):
# Skip the test for now due to InfluxDB Inconsistency
return
metrics, name, key_service, values = \
self._create_metrics_with_different_dimensions(same_name=True)
metric_dimensions = self._get_metric_dimensions(
key_service, values, same_metric_name=True)
query_param = '?name=' + name + '&dimensions=' + key_service
self._verify_dimensions(query_param, metric_dimensions)
@test.attr(type='gate')
def test_list_metrics_with_name(self):
name = data_utils.rand_name('name')
key = data_utils.rand_name('key')
value = data_utils.rand_name('value')
metric = helpers.create_metric(name=name,
dimensions={key: value})
resp, response_body = self.monasca_client.create_metrics(metric)
self.assertEqual(204, resp.status)
query_param = '?name=' + str(name)
for i in xrange(constants.MAX_RETRIES):
resp, response_body = self.monasca_client.list_metrics(query_param)
self.assertEqual(200, resp.status)
elements = response_body['elements']
for element in elements:
if str(element['name']) == name:
self._verify_list_metrics_element(element, test_key=key,
test_value=value)
return
time.sleep(constants.RETRY_WAIT_SECS)
if i == constants.MAX_RETRIES - 1:
error_msg = "Failed test_list_metrics_with_name: " \
"timeout on waiting for metrics: at least " \
"one metric is needed. Current number of " \
"metrics = 0"
self.fail(error_msg)
@test.attr(type='gate')
def test_list_metrics_with_tenant(self):
name = data_utils.rand_name('name')
key = data_utils.rand_name('key')
value = data_utils.rand_name('value')
tenant = self.tenants_client.create_tenant(
name=data_utils.rand_name('test_tenant'))['tenant']
# Delete the tenant at the end of the test
self.addCleanup(self.tenants_client.delete_tenant, tenant['id'])
metric = helpers.create_metric(name=name,
dimensions={key: value})
resp, response_body = self.monasca_client.create_metrics(
metric, tenant_id=tenant['id'])
self.assertEqual(204, resp.status)
query_param = '?tenant_id=' + str(tenant['id'])
for i in xrange(constants.MAX_RETRIES):
resp, response_body = self.monasca_client.list_metrics(query_param)
self.assertEqual(200, resp.status)
elements = response_body['elements']
for element in elements:
if str(element['name']) == name:
self._verify_list_metrics_element(element, test_key=key,
test_value=value)
return
time.sleep(constants.RETRY_WAIT_SECS)
if i == constants.MAX_RETRIES - 1:
error_msg = "Failed test_list_metrics_with_tenant: " \
"timeout on waiting for metrics: at least " \
"one metric is needed. Current number of " \
"metrics = 0"
self.fail(error_msg)
@test.attr(type='gate')
def test_list_metrics_with_offset_limit(self):
name = data_utils.rand_name()
key1 = data_utils.rand_name()
key2 = data_utils.rand_name()
metrics = [
helpers.create_metric(name=name, dimensions={
key1: 'value-1', key2: 'value-1'}),
helpers.create_metric(name=name, dimensions={
key1: 'value-2', key2: 'value-2'}),
helpers.create_metric(name=name, dimensions={
key1: 'value-3', key2: 'value-3'}),
helpers.create_metric(name=name, dimensions={
key1: 'value-4', key2: 'value-4'})
]
self.monasca_client.create_metrics(metrics)
query_param = '?name=' + name
for i in xrange(constants.MAX_RETRIES):
resp, response_body = self.monasca_client.list_metrics(query_param)
elements = response_body['elements']
if elements and len(elements) == 4:
break
time.sleep(constants.RETRY_WAIT_SECS)
if i == constants.MAX_RETRIES - 1:
error_msg = ("Failed test_list_metrics_with_offset_limit: "
"timeout on waiting for metrics: 4 metrics "
"are needed. Current number of elements = "
"{}").format(len(elements))
self.fail(error_msg)
first_element = elements[0]
query_parms = '?name=' + name + '&limit=4'
resp, response_body = self.monasca_client.list_metrics(query_parms)
self.assertEqual(200, resp.status)
elements = response_body['elements']
self.assertEqual(4, len(elements))
self.assertEqual(first_element, elements[0])
for metric_index in xrange(len(elements) - 1):
metric = elements[metric_index]
max_limit = 3 - metric_index
for limit in xrange(1, max_limit):
first_index = metric_index + 1
last_index = first_index + limit
expected_elements = elements[first_index:last_index]
query_parms = '?name=' + name + '&offset=' + \
str(metric['id']) + '&limit=' + \
str(limit)
resp, response_body = self.\
monasca_client.list_metrics(query_parms)
self.assertEqual(200, resp.status)
new_elements = response_body['elements']
self.assertEqual(limit, len(new_elements))
for i in xrange(len(expected_elements)):
self.assertEqual(expected_elements[i], new_elements[i])
def _verify_list_measurements_element(self, element, test_key, test_value):
self.assertEqual(set(element),
set(['columns', 'dimensions', 'id', 'measurements',
'name']))
self.assertEqual(set(element['columns']),
set(['timestamp', 'value', 'value_meta']))
self.assertTrue(str(element['id']) is not None)
if test_key is not None and test_value is not None:
self.assertEqual(str(element['dimensions'][test_key]), test_value)
def _verify_list_measurements_measurement(self, measurement,
test_metric, test_vm_key,
test_vm_value):
        # Timestamps stored in InfluxDB sometimes differ by one millisecond
        # from the value stored by the persister. Accept timestamps within a
        # one-millisecond range so the test still passes.
time_iso_millis = helpers.timestamp_to_iso_millis(
test_metric['timestamp'] + 0)
time_iso_millis_plus = helpers.timestamp_to_iso_millis(
test_metric['timestamp'] + 1)
time_iso_millis_minus = helpers.timestamp_to_iso_millis(
test_metric['timestamp'] - 1)
if str(measurement[0]) != time_iso_millis and str(measurement[0]) != \
time_iso_millis_plus and str(measurement[0]) != \
time_iso_millis_minus:
error_msg = ("Mismatch Error: None of {}, {}, {} matches {}").\
format(time_iso_millis, time_iso_millis_plus,
time_iso_millis_minus, str(measurement[0]))
self.fail(error_msg)
self.assertEqual(measurement[1], test_metric['value'])
if test_vm_key is not None and test_vm_value is not None:
self.assertEqual(str(measurement[2][test_vm_key]), test_vm_value)
def _verify_list_metrics_element(self, element, test_key=None,
test_value=None, test_name=None):
self.assertTrue(type(element['id']) is unicode)
self.assertTrue(type(element['name']) is unicode)
self.assertTrue(type(element['dimensions']) is dict)
self.assertEqual(set(element), set(['dimensions', 'id', 'name']))
self.assertTrue(str(element['id']) is not None)
if test_key is not None and test_value is not None:
self.assertEqual(str(element['dimensions'][test_key]), test_value)
if test_name is not None:
self.assertEqual(str(element['name']), test_name)
@test.attr(type='gate')
def test_list_metrics_with_time_args(self):
name = data_utils.rand_name('name')
key = data_utils.rand_name('key')
value_org = data_utils.rand_name('value')
now = int(round(time.time() * 1000))
#
        # Build start and end time args before and after the measurement.
#
start_iso = helpers.timestamp_to_iso(now - 1000)
end_timestamp = int(round(now + 1000))
end_iso = helpers.timestamp_to_iso(end_timestamp)
metric = helpers.create_metric(name=name,
dimensions={key: value_org},
timestamp=now)
self.monasca_client.create_metrics(metric)
for timer in xrange(constants.MAX_RETRIES):
query_parms = '?name=' + name + '&start_time=' + start_iso + '&end_time=' + end_iso
resp, response_body = self.monasca_client.list_metrics(query_parms)
self.assertEqual(200, resp.status)
elements = response_body['elements']
if elements:
dimensions = elements[0]
dimension = dimensions['dimensions']
value = dimension[unicode(key)]
self.assertEqual(value_org, str(value))
break
else:
time.sleep(constants.RETRY_WAIT_SECS)
if timer == constants.MAX_RETRIES - 1:
skip_msg = "Skipped test_list_metrics_with_time_args: " \
"timeout on waiting for metrics: at least one " \
"metric is needed. Current number of metrics " \
"= 0"
raise self.skipException(skip_msg)
@staticmethod
def _get_metric_dimensions(key_service, values, same_metric_name):
if same_metric_name:
metric_dimensions = [{key_service: values[0], 'key3': ''},
{key_service: values[1], 'key3': ''},
{key_service: '', 'key3': 'value3'}]
else:
metric_dimensions = [{key_service: values[0]},
{key_service: values[1]},
{'key3': 'value3'}]
return metric_dimensions
def _verify_dimensions(self, query_param, metric_dimensions):
for i in xrange(constants.MAX_RETRIES):
resp, response_body = self.monasca_client.list_metrics(query_param)
self.assertEqual(200, resp.status)
elements = response_body['elements']
if len(elements) == 2:
dimension_sets = []
for element in elements:
dimension_sets.append(element['dimensions'])
self.assertIn(metric_dimensions[0], dimension_sets)
self.assertIn(metric_dimensions[1], dimension_sets)
self.assertNotIn(metric_dimensions[2], dimension_sets)
return
time.sleep(constants.RETRY_WAIT_SECS)
if i == constants.MAX_RETRIES - 1:
error_msg = "Timeout on waiting for metrics: at least " \
"2 metrics are needed. Current number of " \
"metrics = {}".format(len(elements))
self.fail(error_msg)
def _create_metrics_with_different_dimensions(self, same_name=True):
name1 = data_utils.rand_name('name1')
name2 = name1 if same_name else data_utils.rand_name('name2')
name3 = name1 if same_name else data_utils.rand_name('name3')
key_service = data_utils.rand_name('service')
values = [data_utils.rand_name('value1'),
data_utils.rand_name('value2')]
metrics = [helpers.create_metric(name1, {key_service: values[0]}),
helpers.create_metric(name2, {key_service: values[1]}),
helpers.create_metric(name3, {'key3': 'value3'})]
resp, response_body = self.monasca_client.create_metrics(metrics)
self.assertEqual(204, resp.status)
return metrics, name1, key_service, values
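# Hedged usage note: these gate tests are normally run through tempest's test
# runner, e.g. (invocation assumed from standard tempest usage, not from this
# repo's docs):
#   tempest run --regex monasca_tempest_tests.tests.api.test_metrics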
|
{
"content_hash": "36804165ecb6d5c7dbe5510d4d3a9d4a",
"timestamp": "",
"source": "github",
"line_count": 618,
"max_line_length": 95,
"avg_line_length": 48.90614886731392,
"alnum_prop": 0.5491662255161461,
"repo_name": "sapcc/monasca-api",
"id": "123a7609a72bca96e009ff1e5ea599e22766c2a7",
"size": "30988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monasca_tempest_tests/tests/api/test_metrics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "4305"
},
{
"name": "Python",
"bytes": "865299"
},
{
"name": "Ruby",
"bytes": "4534"
},
{
"name": "Shell",
"bytes": "72248"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
import logging
from restclients_core.exceptions import (
DataFailureException, InvalidNetID, InvalidRegID)
from uw_gws import GWS_DAO
from sis_provisioner.models import get_now
logger = logging.getLogger(__name__)
def get_dt_from_now(duration):
return get_now() - timedelta(minutes=duration)
def changed_since_str(duration):
return get_dt_from_now(duration).strftime("%Y-%m-%d %H:%M:%S")
def is_using_file_dao():
return GWS_DAO().get_implementation().is_mock()
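# Minimal usage sketch (assumes get_now() returns the current UTC datetime):
#   changed_since_str(60)   # e.g. "2023-01-01 11:00:00" when "now" is 12:00:00
#   is_using_file_dao()     # True when the GWS client is backed by mock files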
|
{
"content_hash": "59807a985c3037c40f214eef5bbc064d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 66,
"avg_line_length": 24.571428571428573,
"alnum_prop": 0.7344961240310077,
"repo_name": "uw-it-aca/bridge-sis-provisioner",
"id": "b2e88d9b7cfe9ebb85b3da8421e5dfea51d4e41d",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sis_provisioner/dao/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "653"
},
{
"name": "Python",
"bytes": "187059"
},
{
"name": "Shell",
"bytes": "744"
}
],
"symlink_target": ""
}
|
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sitecheck', '0001_initial'),
('tlssite', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='sitecheck',
name='site',
field=models.ForeignKey(related_name='checks', to='tlssite.Site', on_delete=models.PROTECT),
preserve_default=True,
),
]
|
{
"content_hash": "c20faa234f5a4c08f06e5e2287726975",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 104,
"avg_line_length": 25.166666666666668,
"alnum_prop": 0.5783664459161147,
"repo_name": "tykling/tlsscout",
"id": "b4bb9d86989a0e91df09d899ff016e8d768e5014",
"size": "479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sitecheck/migrations/0002_sitecheck_site.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52"
},
{
"name": "HTML",
"bytes": "47251"
},
{
"name": "Python",
"bytes": "79561"
}
],
"symlink_target": ""
}
|
import os
from app import create_app, db
from app.models import User, Role, Permission
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
app = create_app(os.getenv('ZEEN_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role, Permission=Permission)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Runs the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run()
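# Hedged usage note: with the commands registered above, typical invocations are
#   python manage.py shell        # shell preloaded with app, db and the models
#   python manage.py db migrate   # Flask-Migrate commands exposed via 'db'
#   python manage.py test         # runs the unittest discovery defined above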
|
{
"content_hash": "f330b7e1554a8edc6cfde86779050829",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 76,
"avg_line_length": 25.620689655172413,
"alnum_prop": 0.7200538358008075,
"repo_name": "drinkyouroj/zeen",
"id": "81c459165dea266c268578e12542ea530e020995",
"size": "765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7948"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "35873"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from wikipendium.wiki.models import Article, ArticleContent, User
from wikipendium.wiki.forms import NewArticleForm
from django.core.exceptions import ValidationError
import datetime
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
class ArticleTest(TestCase):
def setUp(self):
self.u1 = User(username='u1')
self.u2 = User(username='u2')
self.u1.save()
self.u2.save()
self.article1 = Article(slug=u'TDT4100')
self.article1.save()
self.article2 = Article(slug=u'TIØ4258')
self.article2.save()
self.ac1 = ArticleContent(article=self.article1,
updated=datetime.datetime(2012, 1, 1))
self.ac2 = ArticleContent(article=self.article1,
updated=datetime.datetime(2013, 1, 1),
title='per')
self.ac3 = ArticleContent(article=self.article2,
updated=datetime.datetime(2001, 3, 7))
self.ac4 = ArticleContent(article=self.article2,
updated=datetime.datetime(2001, 3, 8),
lang='nb')
self.ac1.edited_by = self.u1
self.ac2.edited_by = self.u2
self.ac3.edited_by = self.u1
self.ac4.edited_by = self.u2
self.ac1.save()
self.ac2.save()
self.ac3.save()
self.ac4.save()
def test_slug_should_uppercase_when_saved(self):
article = Article()
article.slug = u'lowercase'
article.save()
self.assertEqual(article.slug, u'LOWERCASE')
def test_slug_should_strip_whitespace_when_saved(self):
article = Article()
article.slug = u' PADDED\v\t \n'
article.save()
self.assertEqual(article.slug, u'PADDED')
    def test_slug_cannot_contain_slashes(self):
        article = Article()
        article.slug = u'TDT/4100'
        with self.assertRaises(ValidationError):
            article.save()
def test_get_contributors(self):
self.assertEquals(self.article1.get_contributors(),
set([self.u1, self.u2]))
def test_get_newest_content(self):
self.assertEquals(self.article1.get_newest_content(), self.ac2)
def test_get_available_languages(self):
self.assertEquals(self.article1.get_available_languages(),
[('English', self.ac2)])
result = self.article2.get_available_languages()
expected_result = [('English', self.ac3),
('Norwegian', self.ac4)]
self.assertEquals(expected_result, result)
def test_get_absolute_url(self):
self.assertEquals(self.article1.get_absolute_url(), '/TDT4100_per')
def test_get_all_article_content(self):
result = Article.get_all_article_content()
expected_result = [[self.ac2], [self.ac3, self.ac4]]
self.assertEquals(expected_result, result)
def test_get_all_newest_contents(self):
result = Article.get_all_newest_contents()
expected_result = [self.ac2, self.ac4]
self.assertEquals(expected_result, result)
def test_get_all_newest_contents_all_languages(self):
result = Article.get_all_newest_contents_all_languages()
expected_result = [self.ac4, self.ac3, self.ac2]
self.assertEquals(expected_result, result)
def test___str__(self):
self.assertEquals('TIØ4258', str(self.article2))
def test_get_sorted_contents(self):
result = list(self.article1.get_sorted_contents())
expected_result = [self.ac2, self.ac1]
self.assertEquals(expected_result, result)
result = list(self.article2.get_sorted_contents())
expected_result = [self.ac3]
self.assertEquals(expected_result, result)
result = list(self.article2.get_sorted_contents(lang='nb'))
expected_result = [self.ac4]
self.assertEquals(expected_result, result)
def test_get_available_language_codes(self):
result = self.article2.get_available_language_codes()
expected_result = ['en', 'nb']
self.assertEquals(expected_result, result)
def test_get_slug(self):
self.assertEquals(u'TIØ4258', self.article2.get_slug())
class ArticleContentTest(TestCase):
def setUp(self):
self.article1 = Article(slug=u'TDT4100')
self.article1.save()
self.ac1 = ArticleContent(article=self.article1,
updated=datetime.datetime(2012, 1, 1),
title='Cooking and baking',
lang='fr',
content='# Title')
self.ac2 = ArticleContent(article=self.article1,
updated=datetime.datetime(2014, 1, 1),
title='Cooking and baking',
lang='fr')
self.ac1.save()
self.ac2.save()
def test_title_cannot_contain_slashes(self):
ac = ArticleContent()
ac.title = 'asdf/sdfi'
ac.article = self.article1
with self.assertRaises(ValidationError):
ac.save()
def test_get_full_title(self):
self.assertEquals('TDT4100: Cooking and baking',
self.ac1.get_full_title())
def test_get_last_descendant(self):
self.assertEquals(self.ac2,
self.ac1.get_last_descendant())
def test_get_absolute_url(self):
self.assertEquals('/TDT4100_Cooking_and_baking/fr/',
self.ac1.get_absolute_url())
def test_get_edit_url(self):
self.assertEquals('/TDT4100_Cooking_and_baking/fr/edit/',
self.ac1.get_edit_url())
def test_get_add_language_url(self):
self.assertEquals('/TDT4100/add_language/',
self.ac1.get_add_language_url())
def test_get_history_url(self):
self.assertEquals('/TDT4100_Cooking_and_baking/fr/history/',
self.ac1.get_history_url())
def test_get_history_single_url(self):
self.assertEquals('/TDT4100_Cooking_and_baking/fr/history/%s/' % (
self.ac1.pk), self.ac1.get_history_single_url())
def test_get_html_content(self):
self.assertTrue('<h1>' in self.ac1.get_html_content()['html'])
def test___str__(self):
self.assertEquals('[1] Cooking and baking', str(self.ac1))
def test_line_endings(self):
form = NewArticleForm({
'slug': u'lineendings',
'title': u'New article',
'lang': u'en',
'content': u'Test\r\nnext line.',
})
self.assertTrue(form.is_valid())
ac = form.save(commit=False)
self.assertEquals(u'Test\nnext line.',
ac.content)
|
{
"content_hash": "53373a1b9125f7e7a011163bb56f7bd6",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 75,
"avg_line_length": 37.10362694300518,
"alnum_prop": 0.577154028766932,
"repo_name": "stianjensen/wikipendium.no",
"id": "5f9cf91923467ec75f14c3f7860dd2a0ddb68cf3",
"size": "7189",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "wikipendium/wiki/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "74159"
},
{
"name": "HTML",
"bytes": "47651"
},
{
"name": "JavaScript",
"bytes": "79776"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "Python",
"bytes": "72147"
}
],
"symlink_target": ""
}
|
import unittest
from datetime import datetime
from .fixtures import fixture_data, Witness, Witnesses, Account
from graphenecommon import exceptions
class Testcases(unittest.TestCase):
def setUp(self):
fixture_data()
def test_Witness(self):
w = Witness("1.6.1")
self.assertIsInstance(w.account, Account)
self.assertEqual(w.account["id"], "1.2.101")
Witness(w)
"""
def test_nonexist(self):
with self.assertRaises(exceptions.AccountDoesNotExistsException):
Witness("foobar")
def test_Witnesss(self):
ws = Witnesses()
self.assertEqual(len(ws), 2)
def test_Witnesss2(self):
ws = Witnesses("init0")
self.assertEqual(len(ws), 1)
"""
|
{
"content_hash": "fddfffd97cab866177851e25f389cf55",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 73,
"avg_line_length": 25.93103448275862,
"alnum_prop": 0.6382978723404256,
"repo_name": "xeroc/python-graphenelib",
"id": "44f56c1566a2a57058af997dedd69fd7ca375ab1",
"size": "776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_witness.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "872"
},
{
"name": "Python",
"bytes": "922435"
}
],
"symlink_target": ""
}
|
from django.utils.translation import trans_real, activate
def test_mkt_locale_not_in_django():
"""
We load gettext catalogs in this order:
django/locale/django.po
locale/django.po
If Django doesn't have a locale, it returns the en-us catalog as a
fallback. But then we take that catalog and merge in our messages.po.
That's no good because we just mixed some other locale into en-us.
This test will be invalid once Django gets an mn locale.
"""
activate('mn')
en = trans_real._translations['en-US']
mn = trans_real._translations['mn']
assert en != mn
assert en._catalog != mn._catalog
|
{
"content_hash": "6d5bd2fbd70e24300cc5f2123cd88c57",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 74,
"avg_line_length": 32.7,
"alnum_prop": 0.6743119266055045,
"repo_name": "diox/zamboni",
"id": "8369602408515c88e753134fcf847f3d242df7d9",
"size": "654",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "mkt/site/tests/test_locales.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "354315"
},
{
"name": "HTML",
"bytes": "2333064"
},
{
"name": "JavaScript",
"bytes": "529996"
},
{
"name": "Makefile",
"bytes": "4281"
},
{
"name": "Python",
"bytes": "4535722"
},
{
"name": "Shell",
"bytes": "11147"
},
{
"name": "Smarty",
"bytes": "1159"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import os
import sys
from .log import logger
__all__ = ["home", "files", "rm"]
def home(path: str | list) -> str:
if sys.platform == "win32":
_home = os.environ["USERPROFILE"]
elif sys.platform == "linux":
_home = os.environ["HOME"]
else:
raise NotImplementedError("platform %s", sys.platform)
if isinstance(path, list):
return os.path.join(_home, *path)
return os.path.join(_home, path)
def files(path: str | list) -> str:
if isinstance(path, list):
return os.path.join(os.getcwd(), "files", *path)
return os.path.join(os.getcwd(), "files", path)
def rm(path: str):
try:
os.remove(path)
except FileNotFoundError:
logger.debug("failed to remove file at %s", path, exc_info=True)
|
{
"content_hash": "c09cb3add21949f0f4159cbceba3ff91",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 72,
"avg_line_length": 24,
"alnum_prop": 0.6102941176470589,
"repo_name": "mlcdf/dotfiles",
"id": "5432f6b1294f2625d3a3a2f86b5f567c2b504358",
"size": "816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/suzy/fs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1409"
},
{
"name": "HTML",
"bytes": "372"
},
{
"name": "Jinja",
"bytes": "5399"
},
{
"name": "Python",
"bytes": "18798"
},
{
"name": "Shell",
"bytes": "1539"
},
{
"name": "Vim script",
"bytes": "532"
}
],
"symlink_target": ""
}
|
"""
test_faz
----------------------------------
Tests for `faz` module.
"""
import os
import glob
import time
import unittest
from faz import main, parser
from faz.task import Task, TaskFailedException
from faz.graph import CircularDependencyException
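# The FILE* strings below are faz build-file fixtures. Judging from the
# fixtures and the parser tests further down, a comment line of the form
# "# outputs <- inputs :options" declares a task, "name=value" lines define
# variables expanded as $name, and "#include: path" pulls in another file
# (interpretation inferred from the fixtures themselves, not from faz docs).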
FILE = """file=file999
# Using bash as the interpreter
# file21, file22, $file <-
touch file21 file22
echo "Output from the first task"
echo $file
touch $file
# file3, file4 <- file2*, $file :force
touch file3 file4
echo "Output from the last task"
"""
FILE1 = """
# Using bash as the interpreter
# file1, file2 <-
touch file1 file2
# file3, file4 <- file1, file2
echo "Hellow world! 1" > file3
echo "Hellow world! 1" > file4
# file5, file6 <- file3, file4
echo "Hellow world! 2" > file5
echo "Hellow world! 2" > file6
# file7, file8 <- file5, file6
echo "Hellow world! 3" > file7
echo "Hellow world! 3" > file8
# Now using python as the interpreter
# file9, file10, file11 <- file5, file3 :python, force
import sys
a = [[range(3)], [range(4, 7)], [range(7, 10)]]
f = open("file11", "w")
for line in a:
f.write(" ".join([str(i) for i in line]))
f.close()
open("file9", "w").write("Hello from python\\n")
open("file10", "w").write("Hello from python\\n")
# file22, file33 <- file1, file11 :ruby
File.open("file22", 'w') { |file| file.write("Hi Ruby22!") }
File.open("file33", 'w') { |file| file.write("Hi Ruby33!") }
"""
FILE2 = """
# Using bash as the interpreter
# file1, file2 <-
touch file3 file4
touch file1 file2
# file3, file4 <- file1, file2
echo "Hellow world! 1" > file3
echo "Hellow world! 1" > file4
# file5, file6 <- file3, file4
echo "Hellow world! 2" > file5
echo "Hellow world! 2" > file6
# file7, file8 <- file5, file6
echo "Hellow world! 3" > file7
echo "Hellow world! 3" > file8
"""
FILE3 = """
# Using bash as the interpreter
# file1, file2 <-
touch file3 file4
"""
FILE4 = """
# Using bash as the interpreter
# file3, file4 <- file1, file2
touch file3 file4
"""
FILE5 = """
# Using bash as the interpreter
# file1, file2 <-
touch file5
touch file1 file2
# file3, file4 <- file1, file2
touch file3 file4
# file5 <- file3, file4
touch file5
"""
FILE6 = """
# Using bash as the interpreter
# file21, file22 <-
touch file21 file22
# file3, file4 <- file2*
touch file3 file4
"""
FILE7 = """
test = 1
a = 2
b = 3
"""
FILE8 = """
file=asdf
# Using bash as the interpreter
# file21, file22, $file <-
touch file21 file22
touch $file
# file3, file4 <- file2*, $file
touch file3 file4
# file5, file6 <- file3, file4
touch $[OUTPUT1]
touch $[OUTPUT2]
"""
FILE9 = """
# Using bash as the interpreter
# file21, file22 <- file3
touch file21 file22
# file3, file4 <- file22, file21
touch file3 file4
"""
FILE10 = """
#include: file1.txt
#include: file2.txt
# file3, file4 <- file1, file2
touch file3 file4
"""
FILE11 = """
# Using bash as the interpreter
# data/file1, data/file2 <-
mkdir data
touch data/file1 data/file2
# file3, file4 <- data/file2, data/file1
touch file3 file4
"""
class TestFaz(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
main.faz(FILE1)
def tearDown(self):
for fname in glob.glob("file*"):
os.unlink(fname)
class TestMain(unittest.TestCase):
def setUp(self):
f = open("fazfile", "w")
f.write(FILE1)
f.close()
def test_something(self):
main.main(arguments=[])
def tearDown(self):
for fname in glob.glob("file*"):
os.unlink(fname)
os.unlink("fazfile")
class TestInputFileDoesNotExist(unittest.TestCase):
def setUp(self):
pass
@unittest.expectedFailure
def test_something(self):
main.main(arguments=["nonexistent_file"])
def tearDown(self):
pass
class TestMainDebug(unittest.TestCase):
def setUp(self):
f = open("fazfile", "w")
f.write(FILE1)
f.close()
def test_something(self):
main.main(arguments=["-d"])
def tearDown(self):
for fname in glob.glob("file*"):
os.unlink(fname)
os.unlink("fazfile")
class TestMissingInput(unittest.TestCase):
def setUp(self):
pass
@unittest.expectedFailure
def test_something(self):
main.faz()
def tearDown(self):
pass
class TestMissingInputs(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
main.faz(FILE2)
def tearDown(self):
for fname in glob.glob("file*"):
os.unlink(fname)
class TestFazFileInDir(unittest.TestCase):
def setUp(self):
for fname in glob.glob(".faz/tmp*"):
os.remove(fname)
os.rmdir(".faz")
f = open(".faz", "w")
f.close()
@unittest.expectedFailure
def test_something(self):
main.faz(FILE1)
def tearDown(self):
os.unlink(".faz")
class TestOutputsNotCreated(unittest.TestCase):
def setUp(self):
pass
@unittest.expectedFailure
def test_something(self):
main.faz(FILE3)
def tearDown(self):
pass
class TestInputsDoNotExist(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
main.faz(FILE4)
def tearDown(self):
pass
class TestOutputsAreOlderThanInputs(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
main.faz(FILE5)
def tearDown(self):
pass
class TestWildcardInName(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
main.faz(FILE6)
def tearDown(self):
pass
class TestParser(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
tasks = parser.parse_input_file(FILE1)
self.failUnlessEqual(6, len(tasks))
def tearDown(self):
pass
class TestEnvironment(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
env = parser.create_environment(FILE7.splitlines())
self.failUnlessEqual(env["test"], "1")
self.failUnlessEqual(env["a"], "2")
self.failUnlessEqual(env["b"], "3")
def tearDown(self):
pass
class TestVariableExpansion(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
main.faz(FILE8)
def tearDown(self):
for fname in glob.glob("file*"):
os.unlink(fname)
os.unlink('asdf')
class TestCircularDependencyException(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
with self.assertRaises(CircularDependencyException):
main.faz(FILE9)
def tearDown(self):
pass
class TestTaskMethods(unittest.TestCase):
def setUp(self):
self.filenames = ["file1",
"file2",
"file3",
"file_1",
"file_2",
"file_3",
"file__1",
"file__2"]
self.should_not_be_present = ["file4",
"file5",
"file6",
"file7",
"file8",
"file9"]
for filename in self.should_not_be_present:
if os.path.exists(filename) and os.path.isfile(filename):
os.unlink(filename)
for filename in self.filenames:
with open(filename, "w") as f:
f.close()
self.task = Task(["file[0-3]", "file_*"],
["file[4-6]"],
["touch file4\n", "touch file5\n", "touch file6\n", "echo $[test_var]\n", "echo $test_var\n"],
["force"],
{"test_var": "test_var_value"})
def test_task(self):
self.task()
def test_code_variable_expansion(self):
self.task.expand_variables()
self.assertTrue(any([line for line in self.task.code if "test_var_value" in line]))
def test_outputs_do_not_exist(self):
task = Task(["file[0-3]", "file_*"],
["file99", "file234"],
["touch file4\n", "touch file5\n", "touch file6\n"],
["force"],
{"test_var": "test_var_value"})
with self.assertRaises(TaskFailedException):
task()
def test_return_code_is_not_0(self):
task = Task(["file[0-3]", "file_*"],
["file99", "file234"],
["touch file4\n",
"touch file5\n",
"touch file6\n",
"ls non_existant_dir\n"],
["force"],
{"test_var": "test_var_value"})
with self.assertRaises(TaskFailedException):
task()
def test_use_the_force(self):
f = open("file22", "w")
f.close()
time.sleep(0.1)
f = open("file33", "w")
f.close()
self.assertTrue(os.path.getmtime("file33") > os.path.getmtime("file22"))
task = Task(["file22"],
["file33"],
["touch file33\n"],
["force"],
{"test_var": "test_var_value"})
result = self.task.dependencies_are_newer(["file33"], ["file22"])
self.assertFalse(result)
self.assertTrue(task.inputs == ["file22"])
self.assertTrue(task.outputs == ["file33"])
self.assertTrue(task.code == ["touch file33\n"])
self.assertTrue(task.options == ["force"])
self.assertTrue(task.interpreter == "bash")
self.assertTrue(task.force)
task()
os.unlink("file22")
os.unlink("file33")
def test_files_exist(self):
self.assertTrue(self.task.files_exist(["file1", "file2", "file3"]))
def test_filename_shell_expansion(self):
results = self.task.expand_filenames(["file[0-3]", "file_?", "file__*"])
for result, filename in zip(results, self.filenames):
self.assertEqual(result, filename)
def test_filename_variable_expansion(self):
results = self.task.expand_filenames(["$test_var"])
self.assertEqual(results[0], "test_var_value")
    def test_nonexistent_file(self):
results = self.task.expand_filenames(["file[4-9]"])
self.assertEqual(results[0], "NONEXISTENT")
def test_dependencies_are_newer(self):
for filename in ["old_file1", "old_file2"]:
with open(filename, "w") as f:
f.close()
time.sleep(0.1)
for filename in ["new_file1", "new_file2"]:
with open(filename, "w") as f:
f.close()
result = self.task.dependencies_are_newer(["old_file1", "old_file2"],
["new_file1", "new_file2"])
self.assertTrue(result)
[os.unlink(filename) for filename in ["old_file1", "old_file2"]]
[os.unlink(filename) for filename in ["new_file1", "new_file2"]]
def test_dependencies_are_older(self):
for filename in ["new_file1", "new_file2"]:
with open(filename, "w") as f:
f.close()
time.sleep(0.1)
for filename in ["old_file1", "old_file2"]:
with open(filename, "w") as f:
f.close()
result = self.task.dependencies_are_newer(["old_file1", "old_file2"],
["new_file1", "new_file2"])
self.assertFalse(result)
[os.unlink(filename) for filename in ["old_file1", "old_file2"]]
[os.unlink(filename) for filename in ["new_file1", "new_file2"]]
def tearDown(self):
for filename in self.filenames:
os.unlink(filename)
class TestIncludeMechanism(unittest.TestCase):
def setUp(self):
with open("file1.txt", "w") as f:
f.write("# file1 <- \ntouch file1\n")
f.close()
with open("file2.txt", "w") as f:
f.write("# file2 <- \ntouch file2\n")
f.close()
def test_includes(self):
main.faz(FILE10)
self.assertTrue(os.path.isfile("file3"))
self.assertTrue(os.path.isfile("file4"))
def tearDown(self):
for fname in ["file1", "file2", "file3", "file4", "file1.txt", "file2.txt"]:
os.unlink(fname)
class TestAbsPath(unittest.TestCase):
def setUp(self):
pass
def test_abspath(self):
main.faz(FILE11)
self.assertTrue(os.path.isdir(os.path.abspath("data")))
self.assertTrue(os.path.isfile("file3"))
self.assertTrue(os.path.isfile("file4"))
def tearDown(self):
for fname in ["data/file1", "data/file2", "file3", "file4"]:
os.unlink(os.path.abspath(fname))
os.rmdir('data')
if __name__ == '__main__':
unittest.main(verbosity=3)
|
{
"content_hash": "00a30e5f8dc76382a52dbfdc563899e8",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 119,
"avg_line_length": 23.838532110091744,
"alnum_prop": 0.5604987684729064,
"repo_name": "hmartiniano/faz",
"id": "288523773b4238bf2c0a323ceb753c7f05dec872",
"size": "13039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_faz.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1598"
},
{
"name": "Python",
"bytes": "31759"
}
],
"symlink_target": ""
}
|
import argparse
import psycopg2
import re
class MacrobaseArgAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
arg = re.sub('_', '.', self.dest)
_attr_to_set = 'macrobase_args' if self.const is None else self.const
macrobase_args = getattr(namespace, _attr_to_set, {})
macrobase_args[arg] = values
setattr(namespace, _attr_to_set, macrobase_args)
def add_macrobase_args(parser):
parser.add_argument('--macrobase-loader-db-url', action=MacrobaseArgAction)
parser.add_argument('--macrobase-loader-db-user', action=MacrobaseArgAction)
parser.add_argument('--macrobase-loader-db-password',
action=MacrobaseArgAction)
parser.add_argument('--macrobase-analysis-kde-bandwidthMultiplier',
type=float,
action=MacrobaseArgAction)
parser.add_argument('--macrobase-analysis-transformType',
action=MacrobaseArgAction)
parser.add_argument('--macrobase-analysis-treeKde-accuracy', type=float,
action=MacrobaseArgAction)
parser.add_argument('--macrobase-analysis-treeKde-leafCapacity', type=int,
action=MacrobaseArgAction)
def add_db_args(parser):
parser.add_argument('--db-user', default='postgres')
parser.add_argument('--db-name', default='postgres')
parser.add_argument('--db-password')
parser.add_argument('--db-host', default='localhost')
parser.add_argument('--db-port', type=int)
def set_db_connection(args):
def _parse_arg(**kwarg):
[(key, value)] = kwarg.items()
if value:
return "{key}='{value}'".format(key=key, value=value)
return ""
args.db_connection = psycopg2.connect(" ".join([
_parse_arg(dbname=args.db_name),
_parse_arg(port=args.db_port),
_parse_arg(user=args.db_user),
_parse_arg(password=args.db_password),
_parse_arg(host=args.db_host)]))
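# Illustrative usage sketch (argument values here are hypothetical):
#   parser = argparse.ArgumentParser()
#   add_db_args(parser)
#   args = parser.parse_args(['--db-host', 'localhost', '--db-user', 'postgres'])
#   set_db_connection(args)
#   cursor = args.db_connection.cursor()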
def add_plot_limit_args(parser):
parser.add_argument('--y-limits', nargs=2, type=float)
parser.add_argument('--xscale')
parser.add_argument('--x-limits', nargs=2, type=float)
parser.add_argument('--xmax', type=float)
parser.add_argument('--xmin', type=float)
parser.add_argument('--yscale')
parser.add_argument('--xlabel')
parser.add_argument('--ylabel')
parser.add_argument('--ymin', type=float)
parser.add_argument('--ymax', type=float)
parser.add_argument('--title')
def set_ax_limits(ax, args):
if args.xmin:
ax.set_xlim(xmin=args.xmin)
if args.xmax:
ax.set_xlim(xmax=args.xmax)
if args.x_limits:
print 'setting ax limits', args.x_limits
ax.set_xlim(args.x_limits)
print ax.axis()
if args.y_limits:
ax.set_ylim(args.y_limits)
def set_plot_limits(plt, args):
print 'setting plot limits'
if args.xlabel:
plt.xlabel(args.xlabel)
if args.ylabel:
plt.ylabel(args.ylabel)
if args.xmin:
plt.xlim(xmin=args.xmin)
if args.xmax:
plt.xlim(xmax=args.xmax)
if args.ymin:
plt.ylim(ymin=args.ymin)
if args.ymax:
plt.ylim(ymax=args.ymax)
if args.xscale:
plt.xscale(args.xscale)
if args.yscale:
plt.yscale(args.yscale)
if args.x_limits:
plt.xlim(args.x_limits)
if args.y_limits:
plt.ylim(args.y_limits)
if args.title:
plt.title(args.title)
|
{
"content_hash": "9a55b8bc6222ded208b3801296412e48",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 78,
"avg_line_length": 31.46153846153846,
"alnum_prop": 0.6659535452322738,
"repo_name": "stanford-futuredata/macrobase",
"id": "e77ae6c3cb1194f23c3fea86cd9ec26826da656d",
"size": "3272",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/py_analysis/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "16996"
},
{
"name": "CSS",
"bytes": "985"
},
{
"name": "Dockerfile",
"bytes": "881"
},
{
"name": "HTML",
"bytes": "20671"
},
{
"name": "Java",
"bytes": "1530639"
},
{
"name": "JavaScript",
"bytes": "30435"
},
{
"name": "Jupyter Notebook",
"bytes": "72425"
},
{
"name": "Python",
"bytes": "148127"
},
{
"name": "Shell",
"bytes": "2268"
},
{
"name": "TSQL",
"bytes": "7215"
}
],
"symlink_target": ""
}
|
import threading
import serial
import time
import distutils.util
import math
from numpy import interp
import statistics
import config
class Arduino(threading.Thread):
"""docstring for Arduino"""
daemon = True
previously_sent = None
actuations_per_second = 15
time_between_ems = 30
def __init__(self):
super(Arduino, self).__init__()
self.channels = {
'ems1': {
'min_max': [20, 100],
'type': 'digipot',
'prefix': 1000,
'last_value': 0,
'ems_on_off': False,
'name': 'A1',
'color': 'green',
'serial_open': 'a',
'serial_close': 'b'
},
'ems2': {
'min_max': [20, 100],
'type': 'digipot',
'prefix': 2000,
'last_value': 0,
'ems_on_off': False,
'name': 'B1',
'color': 'red',
'serial_open': 'f',
'serial_close': 'g'
},
'ems3': {
'min_max': [20, 100],
'type': 'digipot',
'prefix': 3000,
'last_value': 0,
'ems_on_off': False,
'name': 'A2',
'color': 'blue',
'serial_open': 'c',
'serial_close': 'e'
},
'ems4': {
'min_max': [20, 100],
'type': 'digipot',
'prefix': 4000,
'last_value': 0,
'ems_on_off': False,
'name': 'B2',
'color': 'blue',
'serial_open': 'h',
'serial_close': 'i'
}
}
# 'ems3': {
# 'min_max': [20, 100],
# 'type': 'digipot',
# 'prefix': 3000,
# 'last_value': 0,
# 'ems_on_off': False,
# 'name': 'EMS3',
# 'color': 'violet',
# 'serial_open': 'b',
# 'serial_close': 'n'
# },
#'ems3': {'min_max': [20, 100], 'type': 'digipot', 'prefix': 3000, 'last_value': 0, 'ems_on_off': False, 'name': 'EMS3', 'color': 'orange'}
#'relay1': {'type': 'relay', 'state': False, 'serial_open': 'o', 'serial_close': 'c'}
self.subscribers = []
self.stop = True
self.last_sent_ems = 0
self.list_with_ems_strength = {}
self.stop_gesture = False
self.study_no_ems = False
self.arduino_value_callback = None
try:
self.ser = serial.Serial(port=config.EMS_SERIAL, baudrate=19200, timeout=0.05, writeTimeout=0)
self.no_serial = False
except:
self.no_serial = True
try:
self.ser_capacitive = serial.Serial(port=config.CAPACITIVE_SERIAL, baudrate=19200, timeout=0, writeTimeout=0)
self.no_serial_cap = False
except:
self.no_serial_cap = True
print("failed getting cap arduino...")
def stop_all(self):
self.send_value("s")
def open_all_channels(self):
for channel in self.channels.keys():
self.change_relay_state(channel, True)
def close_all_channels(self):
for channel in self.channels.keys():
self.change_relay_state(channel, False)
def perform_gesture(self, gesture, duration, ignore_channels=False):
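# Resamples each per-channel gesture curve to roughly actuations_per_second
# points over the requested duration, opens the relay channels involved,
# streams the values via send_ems_strength, and finally zeroes and closes
# the channels unless ignore_channels is set.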
#self.stop_gesture = False
sampled_gestures = []
for ges, val in gesture.items():
new_value = val[::int(math.ceil(len(val)/self.actuations_per_second/(duration/1000)))]
sampled_gestures.append([new_value, ges])
samples = dict()
channels = {}
for index, sampled_gesture in enumerate(sampled_gestures):
for idx, cord in enumerate(sampled_gesture[0]):
if not idx in samples:
samples[idx] = []
channels[sampled_gesture[1]] = True
samples[idx].append([int(interp(cord, [0, 100], self.channels[sampled_gesture[1]]['min_max'])), sampled_gesture[1]])
samples[idx].append([int(cord), sampled_gesture[1]])
for channel in channels:
self.change_relay_state(channel, True)
for index, val in samples.items():
final_list = {}
for thing in val:
final_list[thing[1]] = thing[0]
if not self.stop_gesture:
self.send_ems_strength(final_list)
time.sleep(1/self.actuations_per_second)
else:
break
if not ignore_channels:
stop_ems = {}
for channel in self.channels.keys():
stop_ems[channel] = 0
self.send_ems_strength(stop_ems, True)
for channel in channels:
self.change_relay_state(channel, False)
self.stop_all()
def change_relay_state(self, channel, state):
if state:
self.send_value(self.channels[channel]['serial_open'])
else:
self.send_value(self.channels[channel]['serial_close'])
self.channels[channel]['state'] = state
def calibration(self, message):
if message[1] == "reset":
self.send_value("r")
if message[1] == "ems_min_max":
if message[2] in self.channels:
self.channels[message[2]]['min_max'] = [int(message[3]), int(message[4])]
if message[1] == "ems_on_off":
self.change_relay_state(message[2], distutils.util.strtobool(message[3]))
if message[1] == "ems_value":
if message[3] and message[3].isdigit() and int(message[3]) >= 0 and int(message[3]) <= 100:
self.send_ems_strength({message[2]: int(message[3])})
else:
raise ValueError
if message[1] == "relay":
self.change_relay_state(message[2], distutils.util.strtobool(message[3]))
def send_ems_strength(self, values, force=False):
final_list = []
too_short = False
if time.time() - self.last_sent_ems < self.time_between_ems/1000 and force is not True:
too_short = True
for channel, val in sorted(values.items()):
if channel in self.channels:
new_val = int(val)
if new_val < self.channels[channel]['min_max'][0] and new_val < self.channels[channel]['min_max'][1]:
new_val = self.channels[channel]['min_max'][0]
if new_val > self.channels[channel]['min_max'][1] and new_val > self.channels[channel]['min_max'][0]:
new_val = self.channels[channel]['min_max'][1]
if not channel in self.list_with_ems_strength:
self.list_with_ems_strength[channel] = []
self.list_with_ems_strength[channel].append(int(new_val))
if not too_short:
final_list.append(str(self.channels[channel]['prefix'] + round(100 - statistics.mean(self.list_with_ems_strength[channel]))))
#final_list.append(str((self.channels[channel]['prefix']) + int(interp(val, [0,100], self.channels[channel]['min_max'][::-1]))))
else:
raise IndexError
if not too_short:
#print(final_list)
self.send_value("$" + "%$".join(final_list) + "%")
self.list_with_ems_strength = {}
self.last_sent_ems = time.time()
def send_value(self, value):
if value != self.previously_sent and not self.no_serial and not self.study_no_ems:
self.ser.write(bytes(value, "UTF-8"))
self.previously_sent = value
print(value)
def subscribe(self, callback):
self.subscribers.append(callback)
def run(self):
"""docstring for run"""
while True:
if not self.no_serial:
#print(self.ser.readline(1))
data = self.ser.readline(1024)
if data:
if self.arduino_value_callback != None:
self.arduino_value_callback(data.decode("utf-8").replace('\n', '').replace('\r', ''))
if not self.no_serial_cap:
data = self.ser_capacitive.readline(1)
if data and data != bytes("\n", "utf-8") and data != bytes("\r", "utf-8") and not self.stop:
for subscriber in self.subscribers:
subscriber(data.decode("utf-8").replace('\n', '').replace('\r', ''))
time.sleep(0.01)
|
{
"content_hash": "c62bf411924c5ae401cc4b82e189b828",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 157,
"avg_line_length": 37.02542372881356,
"alnum_prop": 0.49576562142366676,
"repo_name": "PedroLopes/affordance",
"id": "0486513973b0d9671dd4400f62a9186f4582d598",
"size": "8738",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "affordance/arduino.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "5415"
},
{
"name": "C#",
"bytes": "67526"
},
{
"name": "CSS",
"bytes": "17615"
},
{
"name": "CoffeeScript",
"bytes": "25443"
},
{
"name": "HTML",
"bytes": "15388"
},
{
"name": "JavaScript",
"bytes": "18462"
},
{
"name": "Python",
"bytes": "71031"
}
],
"symlink_target": ""
}
|
"""Support gathering ted5000 information."""
from datetime import timedelta
import logging
import requests
import voluptuous as vol
import xmltodict
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT, POWER_WATT
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "ted"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=10)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=80): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
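# Example YAML configuration implied by the schema above (the host value
# is hypothetical):
#
# sensor:
#   - platform: ted5000
#     host: 192.168.0.10
#     port: 80
#     name: ted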
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Ted5000 sensor."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
url = f"http://{host}:{port}/api/LiveData.xml"
gateway = Ted5000Gateway(url)
    # Get MTU information to create the sensors.
gateway.update()
dev = []
for mtu in gateway.data:
dev.append(Ted5000Sensor(gateway, name, mtu, POWER_WATT))
dev.append(Ted5000Sensor(gateway, name, mtu, "V"))
add_entities(dev)
return True
class Ted5000Sensor(Entity):
"""Implementation of a Ted5000 sensor."""
def __init__(self, gateway, name, mtu, unit):
"""Initialize the sensor."""
units = {POWER_WATT: "power", "V": "voltage"}
self._gateway = gateway
self._name = "{} mtu{} {}".format(name, mtu, units[unit])
self._mtu = mtu
self._unit = unit
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit
@property
def state(self):
"""Return the state of the resources."""
try:
return self._gateway.data[self._mtu][self._unit]
except KeyError:
pass
def update(self):
"""Get the latest data from REST API."""
self._gateway.update()
class Ted5000Gateway:
"""The class for handling the data retrieval."""
def __init__(self, url):
"""Initialize the data object."""
self.url = url
self.data = dict()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from the Ted5000 XML API."""
try:
request = requests.get(self.url, timeout=10)
except requests.exceptions.RequestException as err:
_LOGGER.error("No connection to endpoint: %s", err)
else:
doc = xmltodict.parse(request.text)
mtus = int(doc["LiveData"]["System"]["NumberMTU"])
for mtu in range(1, mtus + 1):
power = int(doc["LiveData"]["Power"]["MTU%d" % mtu]["PowerNow"])
voltage = int(doc["LiveData"]["Voltage"]["MTU%d" % mtu]["VoltageNow"])
self.data[mtu] = {POWER_WATT: power, "V": voltage / 10}
|
{
"content_hash": "ed6b8f71a06abc43155bce492ae81c62",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 86,
"avg_line_length": 28.792792792792792,
"alnum_prop": 0.6182728410513142,
"repo_name": "leppa/home-assistant",
"id": "e0025a050c307d6d2f89c37f3152b825e129dd41",
"size": "3196",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ted5000/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18957740"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
"""Test the connection module."""
import datetime
import os
import sys
import time
import unittest
import warnings
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from bson.son import SON
from bson.tz_util import utc
from pymongo.connection import (Connection,
_parse_uri)
from pymongo.database import Database
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure,
InvalidName,
InvalidURI,
OperationFailure)
from test import version
def get_connection(*args, **kwargs):
host = os.environ.get("DB_IP", "localhost")
port = int(os.environ.get("DB_PORT", 27017))
return Connection(host, port, *args, **kwargs)
class TestConnection(unittest.TestCase):
def setUp(self):
self.host = os.environ.get("DB_IP", "localhost")
self.port = int(os.environ.get("DB_PORT", 27017))
def test_types(self):
self.assertRaises(TypeError, Connection, 1)
self.assertRaises(TypeError, Connection, 1.14)
self.assertRaises(TypeError, Connection, "localhost", "27017")
self.assertRaises(TypeError, Connection, "localhost", 1.14)
self.assertRaises(TypeError, Connection, "localhost", [])
self.assertRaises(ConfigurationError, Connection, [])
def test_constants(self):
Connection.HOST = self.host
Connection.PORT = self.port
self.assert_(Connection())
Connection.HOST = "somedomainthatdoesntexist.org"
Connection.PORT = 123456789
self.assertRaises(ConnectionFailure, Connection)
self.assert_(Connection(self.host, self.port))
Connection.HOST = self.host
Connection.PORT = self.port
self.assert_(Connection())
def test_connect(self):
self.assertRaises(ConnectionFailure, Connection,
"somedomainthatdoesntexist.org")
self.assertRaises(ConnectionFailure, Connection, self.host, 123456789)
self.assert_(Connection(self.host, self.port))
def test_host_w_port(self):
self.assert_(Connection("%s:%d" % (self.host, self.port)))
self.assertRaises(ConnectionFailure, Connection,
"%s:1234567" % self.host, self.port)
def test_repr(self):
self.assertEqual(repr(Connection(self.host, self.port)),
"Connection('%s', %s)" % (self.host, self.port))
def test_getters(self):
self.assertEqual(Connection(self.host, self.port).host, self.host)
self.assertEqual(Connection(self.host, self.port).port, self.port)
self.assertEqual(set([(self.host, self.port)]), Connection(self.host, self.port).nodes)
def test_get_db(self):
connection = Connection(self.host, self.port)
def make_db(base, name):
return base[name]
self.assertRaises(InvalidName, make_db, connection, "")
self.assertRaises(InvalidName, make_db, connection, "te$t")
self.assertRaises(InvalidName, make_db, connection, "te.t")
self.assertRaises(InvalidName, make_db, connection, "te\\t")
self.assertRaises(InvalidName, make_db, connection, "te/t")
self.assertRaises(InvalidName, make_db, connection, "te st")
self.assert_(isinstance(connection.test, Database))
self.assertEqual(connection.test, connection["test"])
self.assertEqual(connection.test, Database(connection, "test"))
def test_database_names(self):
connection = Connection(self.host, self.port)
connection.pymongo_test.test.save({"dummy": u"object"})
connection.pymongo_test_mike.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assert_("pymongo_test" in dbs)
self.assert_("pymongo_test_mike" in dbs)
def test_drop_database(self):
connection = Connection(self.host, self.port)
self.assertRaises(TypeError, connection.drop_database, 5)
self.assertRaises(TypeError, connection.drop_database, None)
connection.pymongo_test.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assert_("pymongo_test" in dbs)
connection.drop_database("pymongo_test")
dbs = connection.database_names()
self.assert_("pymongo_test" not in dbs)
connection.pymongo_test.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assert_("pymongo_test" in dbs)
connection.drop_database(connection.pymongo_test)
dbs = connection.database_names()
self.assert_("pymongo_test" not in dbs)
def test_copy_db(self):
c = Connection(self.host, self.port)
self.assertRaises(TypeError, c.copy_database, 4, "foo")
self.assertRaises(TypeError, c.copy_database, "foo", 4)
self.assertRaises(InvalidName, c.copy_database, "foo", "$foo")
c.pymongo_test.test.drop()
c.drop_database("pymongo_test1")
c.drop_database("pymongo_test2")
c.pymongo_test.test.insert({"foo": "bar"})
self.assertFalse("pymongo_test1" in c.database_names())
self.assertFalse("pymongo_test2" in c.database_names())
c.copy_database("pymongo_test", "pymongo_test1")
self.assert_("pymongo_test1" in c.database_names())
self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
c.copy_database("pymongo_test", "pymongo_test2",
"%s:%s" % (self.host, self.port))
self.assert_("pymongo_test2" in c.database_names())
self.assertEqual("bar", c.pymongo_test2.test.find_one()["foo"])
if version.at_least(c, (1, 3, 3, 1)):
c.drop_database("pymongo_test1")
c.pymongo_test.add_user("mike", "password")
self.assertRaises(OperationFailure, c.copy_database,
"pymongo_test", "pymongo_test1",
username="foo", password="bar")
self.assertFalse("pymongo_test1" in c.database_names())
self.assertRaises(OperationFailure, c.copy_database,
"pymongo_test", "pymongo_test1",
username="mike", password="bar")
self.assertFalse("pymongo_test1" in c.database_names())
c.copy_database("pymongo_test", "pymongo_test1",
username="mike", password="password")
self.assert_("pymongo_test1" in c.database_names())
self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
def test_iteration(self):
connection = Connection(self.host, self.port)
def iterate():
[a for a in connection]
self.assertRaises(TypeError, iterate)
# TODO this test is probably very dependent on the machine its running on
# due to timing issues, but I want to get something in here.
def test_low_network_timeout(self):
c = None
i = 0
n = 10
while c is None and i < n:
try:
c = Connection(self.host, self.port, network_timeout=0.0001)
except AutoReconnect:
i += 1
if i == n:
raise SkipTest()
coll = c.pymongo_test.test
for _ in range(1000):
try:
coll.find_one()
except AutoReconnect:
pass
except AssertionError:
self.fail()
def test_disconnect(self):
c = Connection(self.host, self.port)
coll = c.foo.bar
c.disconnect()
c.disconnect()
coll.count()
c.disconnect()
c.disconnect()
coll.count()
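    # The assertions below suggest _parse_uri returns a 6-tuple of
    # (nodes, database, username, password, collection, options).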
def test_parse_uri(self):
self.assertEqual(([("localhost", 27017)], None, None, None, None, {}),
_parse_uri("localhost", 27017))
self.assertEqual(([("localhost", 27018)], None, None, None, None, {}),
_parse_uri("localhost", 27018))
self.assertRaises(InvalidURI, _parse_uri,
"http://foobar.com", 27017)
self.assertRaises(InvalidURI, _parse_uri,
"http://foo@foobar.com", 27017)
self.assertEqual(([("localhost", 27017)], None, None, None, None, {}),
_parse_uri("mongodb://localhost", 27017))
self.assertEqual(([("localhost", 27017)], None, "fred", "foobar", None, {}),
_parse_uri("mongodb://fred:foobar@localhost",
27017))
self.assertEqual(([("localhost", 27017)], "baz", "fred", "foobar", None, {}),
_parse_uri("mongodb://fred:foobar@localhost/baz",
27017))
self.assertEqual(([("example1.com", 27017), ("example2.com", 27017)],
None, None, None, None, {}),
_parse_uri("mongodb://example1.com:27017,example2.com:27017",
27018))
self.assertEqual(([("localhost", 27017),
("localhost", 27018),
("localhost", 27019)], None, None, None, None, {}),
_parse_uri("mongodb://localhost,localhost:27018,localhost:27019",
27017))
self.assertEqual(([("localhost", 27018)], None, None, None, None, {}),
_parse_uri("localhost:27018", 27017))
self.assertEqual(([("localhost", 27017)], "foo", None, None, None, {}),
_parse_uri("localhost/foo", 27017))
self.assertEqual(([("localhost", 27017)], None, None, None, None, {}),
_parse_uri("localhost/", 27017))
self.assertEqual(([("localhost", 27017)], "test", None, None, "yield_historical.in", {}),
_parse_uri("mongodb://localhost/test.yield_historical.in", 27017))
self.assertEqual(([("localhost", 27017)], "test", "fred", "foobar", "yield_historical.in", {}),
_parse_uri("mongodb://fred:foobar@localhost/test.yield_historical.in",
27017))
self.assertEqual(([("example1.com", 27017), ("example2.com", 27017)],
"test", None, None, "yield_historical.in", {}),
_parse_uri("mongodb://example1.com:27017,example2.com:27017/test.yield_historical.in",
27017))
self.assertEqual(([("localhost", 27017)], "test", "fred", "foobar", "yield_historical.in", {'slaveok': 'true'}),
_parse_uri("mongodb://fred:foobar@localhost/test.yield_historical.in?slaveok=true",
27017))
def test_from_uri(self):
c = Connection(self.host, self.port)
self.assertRaises(InvalidURI, Connection, "mongodb://localhost/baz")
self.assertEqual(c, Connection("mongodb://%s:%s" %
(self.host, self.port)))
c.admin.system.users.remove({})
c.pymongo_test.system.users.remove({})
c.admin.add_user("admin", "pass")
c.pymongo_test.add_user("user", "pass")
self.assertRaises(ConfigurationError, Connection,
"mongodb://foo:bar@%s:%s" % (self.host, self.port))
self.assertRaises(ConfigurationError, Connection,
"mongodb://admin:bar@%s:%s" % (self.host, self.port))
self.assertRaises(ConfigurationError, Connection,
"mongodb://user:pass@%s:%s" % (self.host, self.port))
Connection("mongodb://admin:pass@%s:%s" % (self.host, self.port))
self.assertRaises(ConfigurationError, Connection,
"mongodb://admin:pass@%s:%s/pymongo_test" %
(self.host, self.port))
self.assertRaises(ConfigurationError, Connection,
"mongodb://user:foo@%s:%s/pymongo_test" %
(self.host, self.port))
Connection("mongodb://user:pass@%s:%s/pymongo_test" %
(self.host, self.port))
self.assert_(Connection("mongodb://%s:%s" %
(self.host, self.port),
slave_okay=True).slave_okay)
self.assert_(Connection("mongodb://%s:%s/?slaveok=true;w=2" %
(self.host, self.port)).slave_okay)
def test_fork(self):
"""Test using a connection before and after a fork.
"""
if sys.platform == "win32":
raise SkipTest()
try:
from multiprocessing import Process, Pipe
except ImportError:
raise SkipTest()
db = Connection(self.host, self.port).pymongo_test
# Failure occurs if the connection is used before the fork
db.test.find_one()
db.connection.end_request()
def loop(pipe):
while True:
try:
db.test.insert({"a": "b"}, safe=True)
for _ in db.test.find():
pass
except:
pipe.send(True)
os._exit(1)
cp1, cc1 = Pipe()
cp2, cc2 = Pipe()
p1 = Process(target=loop, args=(cc1,))
p2 = Process(target=loop, args=(cc2,))
p1.start()
p2.start()
p1.join(1)
p2.join(1)
p1.terminate()
p2.terminate()
p1.join()
p2.join()
cc1.close()
cc2.close()
# recv will only have data if the subprocess failed
try:
cp1.recv()
self.fail()
except EOFError:
pass
try:
cp2.recv()
self.fail()
except EOFError:
pass
def test_document_class(self):
c = Connection(self.host, self.port)
db = c.pymongo_test
db.test.insert({"x": 1})
self.assertEqual(dict, c.document_class)
self.assert_(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c.document_class = SON
self.assertEqual(SON, c.document_class)
self.assert_(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c = Connection(self.host, self.port, document_class=SON)
db = c.pymongo_test
self.assertEqual(SON, c.document_class)
self.assert_(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c.document_class = dict
self.assertEqual(dict, c.document_class)
self.assert_(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
def test_network_timeout(self):
no_timeout = Connection(self.host, self.port)
timeout = Connection(self.host, self.port, network_timeout=0.1)
no_timeout.pymongo_test.drop_collection("test")
no_timeout.pymongo_test.test.insert({"x": 1}, safe=True)
where_func = """function (doc) {
var d = new Date().getTime() + 200;
var x = new Date().getTime();
while (x < d) {
x = new Date().getTime();
}
return true;
}"""
def get_x(db):
return db.test.find().where(where_func).next()["x"]
self.assertEqual(1, get_x(no_timeout.pymongo_test))
self.assertRaises(ConnectionFailure, get_x, timeout.pymongo_test)
def get_x_timeout(db, t):
return db.test.find(network_timeout=t).where(where_func).next()["x"]
self.assertEqual(1, get_x_timeout(timeout.pymongo_test, None))
self.assertRaises(ConnectionFailure, get_x_timeout,
no_timeout.pymongo_test, 0.1)
def test_tz_aware(self):
aware = Connection(self.host, self.port, tz_aware=True)
naive = Connection(self.host, self.port)
aware.pymongo_test.drop_collection("test")
now = datetime.datetime.utcnow()
aware.pymongo_test.test.insert({"x": now}, safe=True)
self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None),
naive.pymongo_test.test.find_one()["x"])
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "f68fd0fe85cfac5ec14a2eb68151ac5c",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 120,
"avg_line_length": 38.109339407744876,
"alnum_prop": 0.5581589958158996,
"repo_name": "reedobrien/mongo-python-driver",
"id": "70f2a5f59ce4fe72dea4ffa190abe01147591050",
"size": "17307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "98411"
},
{
"name": "Python",
"bytes": "469880"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, unicode_literals)
import io
import logging
from binascii import b2a_hex
from .wsgidav import util
from .wsgidav.dav_error import DAVError, HTTP_FORBIDDEN
from .wsgidav.dav_provider import DAVProvider, DAVCollection, DAVNonCollection
logger = logging.getLogger(__name__)
class ArchiveResource(DAVCollection):
def __init__(self, path, environ, archive_name):
super(ArchiveResource, self).__init__(path, environ)
self.name = archive_name
def getDisplayName(self):
return self.name
def getEtag(self):
return self.name
def supportEtag(self):
return True
def delete(self):
"""Remove this resource or collection (recursive).
See DAVResource.delete()
"""
logger.debug("Delete archive: %s", self.name)
def getMemberNames(self):
logger.warning("getMemverNames called in ArchiveResource.")
return []
class RepositoryRoot(DAVCollection):
def __init__(self, path, environ, repository):
super(RepositoryRoot, self).__init__(path, environ)
self._repository = repository
self.name = b'Repository'
# Getter methods for standard live properties
def getCreationDate(self):
return self._repository.created_at
def getDisplayName(self):
return self.name
def getDirectoryInfo(self):
return None
def getEtag(self):
return b2a_hex(self._repository.psha)
def getLastModified(self):
return self._repository.modified_at
def getMemberNames(self):
"""Return list of direct collection member names (utf-8 encoded).
See DAVCollection.getMemberNames()
"""
name_list = []
for name in self._repository.repository_names():
name_list.append(name)
logger.info("getMemberNames in repo_provider: %r", name_list)
return name_list
def getMember(self, name):
"""Return direct collection member (DAVResource or derived).
See DAVCollection.getMember()
"""
if not self._repository.has_repository(name):
return None
path = util.joinUri(self.path, name)
return ArchiveResource(path + b'/', self.environ, name)
def createCollection(self, name):
"""Create a new collection as member of self.
See DAVResource.createCollection()
"""
self._repository.create_repository(name)
def delete(self):
"""Remove this resource or collection (recursive).
See DAVResource.delete()
"""
raise DAVError(HTTP_FORBIDDEN)
class RepositoryProvider(DAVProvider):
def __init__(self, repository):
super(RepositoryProvider, self).__init__()
self.repository = repository
def _split_path(self, path):
path = path.strip(b'/')
if b'/' in path:
archive_name, rest = path.split(b'/', 1)
else:
archive_name = path
rest = b'/'
return archive_name, rest
def __repr__(self):
return "RepositoryProvider"
def isReadOnly(self):
return False
def exists(self, path, environ):
path = path.strip(b'/')
return self.repository.has_repository(path)
def getResourceInst(self, path, environ):
"""Return info dictionary for path.
See DAVProvider.getResourceInst()
"""
self._count_getResourceInst += 1
logger.info("Get resource by path: '%s'", path)
assert path == b'' or path == b'/'
return RepositoryRoot(b'', environ, self.repository)
|
{
"content_hash": "5aacf4241b28d7a4a2177dd9ae74f916",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 78,
"avg_line_length": 26.554744525547445,
"alnum_prop": 0.6261682242990654,
"repo_name": "eavatar/avax.webdav",
"id": "9e3c2a3c6ad4181af2095273af68fa16049c30b9",
"size": "3662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avax/webdav/repo_provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "697720"
}
],
"symlink_target": ""
}
|
"""Script to ensure a configuration file exists."""
import argparse
import os
from homeassistant.core import HomeAssistant
import homeassistant.config as config_util
# mypy: allow-untyped-calls, allow-untyped-defs
def run(args):
"""Handle ensure config commandline script."""
parser = argparse.ArgumentParser(
description=(
"Ensure a Home Assistant config exists, " "creates one if necessary."
)
)
parser.add_argument(
"-c",
"--config",
metavar="path_to_config_dir",
default=config_util.get_default_config_dir(),
help="Directory that contains the Home Assistant configuration",
)
parser.add_argument("--script", choices=["ensure_config"])
args = parser.parse_args()
config_dir = os.path.join(os.getcwd(), args.config)
# Test if configuration directory exists
if not os.path.isdir(config_dir):
print("Creating directory", config_dir)
os.makedirs(config_dir)
hass = HomeAssistant()
config_path = hass.loop.run_until_complete(async_run(hass, config_dir))
print("Configuration file:", config_path)
return 0
async def async_run(hass, config_dir):
"""Make sure config exists."""
path = await config_util.async_ensure_config_exists(hass, config_dir)
await hass.async_stop(force=True)
return path
|
{
"content_hash": "88b91aaa0279abfa75606c2fc1b086a6",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 81,
"avg_line_length": 28.851063829787233,
"alnum_prop": 0.668141592920354,
"repo_name": "joopert/home-assistant",
"id": "c5cf69283e6f85b2227fb9e7772e374b007a2f48",
"size": "1356",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/scripts/ensure_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18670593"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
import functools
from .. import lists
from .. import documents
from .. import results
def _read_notes(note_type, element, body_reader):
def read_notes_xml_element(element):
note_elements = lists.filter(
_is_note_element,
element.find_children("w:" + note_type),
)
return results.combine(lists.map(_read_note_element, note_elements))
def _is_note_element(element):
return element.attributes.get("w:type") not in ["continuationSeparator", "separator"]
def _read_note_element(element):
return body_reader.read_all(element.children).map(lambda body:
documents.note(
note_type=note_type,
note_id=element.attributes["w:id"],
body=body
))
return read_notes_xml_element(element)
read_footnotes_xml_element = functools.partial(_read_notes, "footnote")
read_endnotes_xml_element = functools.partial(_read_notes, "endnote")
|
{
"content_hash": "0a1b35347c7bf3731a1c2ff7d6cc2785",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 93,
"avg_line_length": 30.59375,
"alnum_prop": 0.634320735444331,
"repo_name": "mwilliamson/python-mammoth",
"id": "5e4ea4e92443565463213f35e1d12f1d1bd00569",
"size": "979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mammoth/docx/notes_xml.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1060"
},
{
"name": "Python",
"bytes": "306218"
}
],
"symlink_target": ""
}
|
import sys
import os
import mmap
import subprocess
from array import array
import logging
import logging.handlers
from datetime import datetime
from ConfigParser import SafeConfigParser, NoOptionError
try:
import cPickle as pickle
except:
import pickle
# This is necessary because a MutationObject will
# be unserialized from the server.
# It needs to exist in this namespace
from helpers.common import MutationObject
from helpers.utils import Utils
from helpers.crash_analysis import CrashAnalysis
from helpers.rpc_client import BrundleFuzzRpcClient
from helpers.fileops import FileOperations
from helpers.aesthetics import Aesthetics
# Some nice named constants
CAUSED_CRASH = 3
class BrundleFuzzClient(object):
def __init__(self):
self.debug = False
self.root_dir = os.path.dirname(os.path.abspath(__file__))
self.mutations_dir = os.path.join(self.root_dir, 'mutations')
self.hangs_dir = os.path.join(self.root_dir, 'hangs')
self.cfg = self._initialize_config()
self.ml = self._initialize_logging()
self.mo = None
# Shared memory
self.shm = None
self.shm_size = 0
self.bitmap_size = 65536
# PIN command line
self.cmd_l = []
# Setup helpers
self.ae = Aesthetics(self)
self.utils = Utils(self)
self.fileops = FileOperations(self)
self.crash_analysis = CrashAnalysis(self)
self.rpc_client = BrundleFuzzRpcClient(self)
self._initialize_shared_memory()
self._initialize_pin_cmd()
def _initialize_config(self):
"""
This config will be shared with helper
modules via the parent attribute
"""
cfg = SafeConfigParser()
cfg.read('config.ini')
return cfg
def _initialize_logging(self):
"""
Printing to console is dirty
"""
main_logger = logging.getLogger('main')
log_filename = os.path.join('logs', 'log.txt')
main_logger.setLevel(logging.DEBUG)
# 5 rotating logs of 1 MB each
handler = logging.handlers.RotatingFileHandler(
log_filename,
maxBytes = 1024 * 1024,
backupCount = 1
)
main_logger.addHandler(handler)
return main_logger
def _initialize_shared_memory(self):
"""
This is the IPC channel between us (Python)
and the PinTool (C/C++)
"""
s_uint32 = self.utils.get_size_uint32()
shm_name = "Local\\NaFlSharedMemory"
self.shm_size = self.bitmap_size * s_uint32 # architecture dependent :)
self.shm = mmap.mmap(0,
self.shm_size,
shm_name,
access = mmap.ACCESS_WRITE)
if not self.shm:
# Oops!
self.ml.info('[!] Could not create the shared memory region')
self.ml.info('[!] Aborting...')
sys.exit(1)
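# Layout note (inferred from _run_under_pin and _fuzzing_loop below): the
# PinTool fills this region with a bitmap of 65536 C ulongs, and flags a
# crash by writing 0x41414141 / 0x42424242 into the first two slots.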
def _initialize_pin_cmd(self):
"""
Initializes fuzzing parameters with
information stored in a config file
"""
self.cmd_l.append(self.cfg.get('pin_info', 'pin_bat'))
self.cmd_l.append('-t')
self.cmd_l.append(self.cfg.get('pin_info', 'pintool'))
self.cmd_l.append('-timer')
self.cmd_l.append(self.cfg.get('pin_info', 'timeout'))
self.cmd_l.append('-module')
self.cmd_l.append(self.cfg.get('target_info', 'module').lower())
self.cmd_l.append('--')
self.cmd_l.append(self.cfg.get('target_info', 'filename'))
# Parse the cmd options
try:
_options = self.cfg.get('target_info', 'cmd_options')
for _cmd in _options.split():
self.cmd_l.append(_cmd)
except NoOptionError:
self.ml.info('[.] No command line options found.')
self.debug = self.cfg.getboolean('runtime', 'debug')
def _run_under_pin(self, input_filename):
"""
Runs the given file under PIN and
gets the bitmap representing execution
@returns: current execution bitmap
"""
self.cmd_l.append(input_filename)
subprocess.call(self.cmd_l, shell = False)
self.cmd_l.pop() # remove the filename from cmd :)
# The PinTool has written its feedback into
# the shared memory. Time to read it.
self.shm.seek(0) # file-like interface
# This coerces somehow the bitmap to an array of ulong's
curr_bitmap = array('L', self.shm.read(self.shm_size)) # C ulong (4 bytes)
return curr_bitmap
def _fuzzing_loop(self):
"""
Fuzzing Loop.
This loops (maybe indefinitely) creating several
fuzzing processes
"""
iteration_nr = 0
while True:
# subprocess.call() is blocking, exactly what I need :)
# Execution continues when this subprocess returns, either:
# * instrumented process exits
# * instrumented process crashes
# * timeout expires (implemented in PinTool)
if iteration_nr % 10 == 0:
self.ae.m_info("* Iteration #%d" % iteration_nr)
self.ae.m_info("* PLACEHOLDER. PERIODIC MAINTENANCE PROCESSES")
iteration_nr += 1
continue
# Mutation objects are read from the queue
smo = self.rpc_client.poll_mutation_queue()
self.mo = pickle.loads(smo)
if self.mo:
input_filename = self.mo.filename
data = self.mo.data
input_path_filename = os.path.join(self.mutations_dir, input_filename)
with open(input_path_filename, 'wb') as f:
f.write(data)
# Run with the newly created file under PIN
curr_bitmap = self._run_under_pin(input_path_filename)
else:
self.ae.m_alert("Problem getting MutationObject from server")
self.ae.m_alert("Continuing...")
continue
#####################################################
# Check if this was a crash on client side
# This way I can analyze it inmediately
#####################################################
if curr_bitmap[0] == 0x41414141 \
and curr_bitmap[1] == 0x42424242:
# Restore these first bytes to more appropriate values
curr_bitmap[0] = 0
curr_bitmap[1] = 0
self.ml.info('**** CRASH ****' * 4)
self.ml.info(input_filename)
self.mo.priority = CAUSED_CRASH
# Analyzes the crash (and saves it, if determined interesting)
# This sets the MutationObject crash_data attribute
cmd = [self.cfg.get('target_info', 'filename'), input_filename]
self.crash_analysis.analyze_crash(cmd)
# The bitmap regarding the current execution
self.mo.arr = curr_bitmap
# Delete the temporary file from disk
if os.path.exists(input_path_filename):
os.remove(input_path_filename)
# Information is sent back to the server
self.rpc_client.send_evaluation(self.mo)
iteration_nr += 1
def run(self):
"""
This prepares the run and starts the fuzzing loop
"""
victim_filename = self.cfg.get('target_info', 'filename')
self.ml.info("")
self.ml.info("=" * 80)
self.ml.info("Fuzzing initiated from the command line.")
self.ml.info("Started fuzzing: %s" % victim_filename)
self.ml.info("Timestamp: %s" % str(datetime.now()))
try:
self._fuzzing_loop() # never returns
except KeyboardInterrupt:
self.ae.m_alert("""
============================================
=== ===
=== Fuzzing cancelled by user (Ctrl + C) ===
=== Exiting... ===
=== ===
============================================
""")
self.rpc_client.connection.close()
sys.exit(1)
def main():
"""
This must be kept to the bare minimum
"""
bf = BrundleFuzzClient()
bf.run()
if __name__ == '__main__':
main()
|
{
"content_hash": "141babdbb972a8faa7f602e878970f24",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 86,
"avg_line_length": 32.142857142857146,
"alnum_prop": 0.5430409356725147,
"repo_name": "carlosgprado/BrundleFuzz",
"id": "cd9408bb2044385ea443e88c7c05458c4d785cf1",
"size": "8808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client_windows/BrundleFuzzClient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "673"
},
{
"name": "C++",
"bytes": "18662"
},
{
"name": "CSS",
"bytes": "6516"
},
{
"name": "Groff",
"bytes": "12"
},
{
"name": "HTML",
"bytes": "7703"
},
{
"name": "JavaScript",
"bytes": "3006"
},
{
"name": "Python",
"bytes": "120670"
},
{
"name": "Shell",
"bytes": "425"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os
import subprocess
base_dir = os.path.join(os.path.dirname(__file__), '..')
os.chdir(base_dir)
p = subprocess.Popen("python setup.py sdist".split(),
stdout=subprocess.PIPE)
out, err = p.communicate()
# "python setup.py sdist" reports every packaged file as "hard linking <file> -> <dir>";
# collect those paths to learn which files actually made it into the sdist.
data = out.decode('utf-8').split('\n')
data = [l for l in data if l.startswith('hard linking')]
data = [l.replace('hard linking ', '') for l in data]
data = ['./' + l.split(' ->')[0] for l in data]
ignore_exts = ['.pyc', '.so', '.o', '#', '~']
ignore_dirs = ['./dist', './tools', './doc']
ignore_files = ['./TODO.md', './README.md',
'./run_script_in_container.py', './.gitignore',
'./.travis.yml']
missing = []
for root, dirs, files in os.walk('./'):
for d in ignore_dirs:
if root.startswith(d):
break
    else:  # no ignored directory prefix matched this root
if root.startswith('./.'):
continue
for fn in files:
for ext in ignore_exts:
if fn.endswith(ext):
break
else:
fn = os.path.join(root, fn)
if not (fn in data or fn in ignore_files):
missing.append(fn)
if missing:
print('Missing from source distribution:\n')
for m in missing:
print(' ', m)
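# Optional follow-up sketch (assumes this check gates a CI job, which is an
# assumption rather than something the script states): exit nonzero when files
# are missing so the build fails.
import sys
sys.exit(1 if missing else 0)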
|
{
"content_hash": "e8509d17033a349b3b0c4ac790fcaf87",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 63,
"avg_line_length": 26.9375,
"alnum_prop": 0.5212683681361175,
"repo_name": "mltsp/mltsp",
"id": "df9dfc792ea40fbefc1e871196b0c314a088071b",
"size": "1316",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/check_sdist.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "14826"
},
{
"name": "CSS",
"bytes": "133353"
},
{
"name": "HTML",
"bytes": "2366019"
},
{
"name": "JavaScript",
"bytes": "2508582"
},
{
"name": "Makefile",
"bytes": "2695"
},
{
"name": "Nginx",
"bytes": "789"
},
{
"name": "PHP",
"bytes": "53129"
},
{
"name": "Python",
"bytes": "520802"
},
{
"name": "Shell",
"bytes": "1694"
}
],
"symlink_target": ""
}
|
import importlib
import os
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("fairseq.model_parallel.models." + model_name)
|
{
"content_hash": "49086aa6cd13fd094cc36848dedfe87c",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 87,
"avg_line_length": 34.86666666666667,
"alnum_prop": 0.6462715105162524,
"repo_name": "pytorch/fairseq",
"id": "3532479e52a0e1f1ba204c6f5d51c71c98ee5df0",
"size": "701",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "fairseq/model_parallel/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "21106"
},
{
"name": "Cuda",
"bytes": "38166"
},
{
"name": "Cython",
"bytes": "13294"
},
{
"name": "Lua",
"bytes": "4210"
},
{
"name": "Python",
"bytes": "3699357"
},
{
"name": "Shell",
"bytes": "2182"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from app.openapi_server.models.base_model_ import Model
from openapi_server import util
class Link(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, _class: str=None, href: str=None): # noqa: E501
"""Link - a model defined in Swagger
:param _class: The _class of this Link. # noqa: E501
:type _class: str
:param href: The href of this Link. # noqa: E501
:type href: str
"""
self.swagger_types = {
'_class': str,
'href': str
}
self.attribute_map = {
'_class': '_class',
'href': 'href'
}
self.__class = _class
self._href = href
@classmethod
def from_dict(cls, dikt) -> 'Link':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Link of this Link. # noqa: E501
:rtype: Link
"""
return util.deserialize_model(dikt, cls)
@property
def _class(self) -> str:
"""Gets the _class of this Link.
:return: The _class of this Link.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class: str):
"""Sets the _class of this Link.
:param _class: The _class of this Link.
:type _class: str
"""
self.__class = _class
@property
def href(self) -> str:
"""Gets the href of this Link.
:return: The href of this Link.
:rtype: str
"""
return self._href
@href.setter
def href(self, href: str):
"""Sets the href of this Link.
:param href: The href of this Link.
:type href: str
"""
self._href = href
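if __name__ == "__main__":
    # Minimal usage sketch with illustrative values (the _class string below is
    # an assumption): construct a Link and read it back through its properties.
    link = Link(_class="hudson.model.FreeStyleProject", href="/job/example/")
    print(link.href)  # -> /job/example/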
|
{
"content_hash": "94477f94be5fa2cf54f775f1d63627c6",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 80,
"avg_line_length": 22.568181818181817,
"alnum_prop": 0.5332326283987915,
"repo_name": "cliffano/swaggy-jenkins",
"id": "7f3f0c56a3c121e8c4a8e0e4a3b0233ea45568e4",
"size": "2003",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "clients/python-blueplanet/generated/app/openapi_server/models/link.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "569823"
},
{
"name": "Apex",
"bytes": "741346"
},
{
"name": "Batchfile",
"bytes": "14792"
},
{
"name": "C",
"bytes": "971274"
},
{
"name": "C#",
"bytes": "5131336"
},
{
"name": "C++",
"bytes": "7799032"
},
{
"name": "CMake",
"bytes": "20609"
},
{
"name": "CSS",
"bytes": "4873"
},
{
"name": "Clojure",
"bytes": "129018"
},
{
"name": "Crystal",
"bytes": "864941"
},
{
"name": "Dart",
"bytes": "876777"
},
{
"name": "Dockerfile",
"bytes": "7385"
},
{
"name": "Eiffel",
"bytes": "424642"
},
{
"name": "Elixir",
"bytes": "139252"
},
{
"name": "Elm",
"bytes": "187067"
},
{
"name": "Emacs Lisp",
"bytes": "191"
},
{
"name": "Erlang",
"bytes": "373074"
},
{
"name": "F#",
"bytes": "556012"
},
{
"name": "Gherkin",
"bytes": "951"
},
{
"name": "Go",
"bytes": "345227"
},
{
"name": "Groovy",
"bytes": "89524"
},
{
"name": "HTML",
"bytes": "2367424"
},
{
"name": "Haskell",
"bytes": "680841"
},
{
"name": "Java",
"bytes": "12164874"
},
{
"name": "JavaScript",
"bytes": "1959006"
},
{
"name": "Kotlin",
"bytes": "1280953"
},
{
"name": "Lua",
"bytes": "322316"
},
{
"name": "Makefile",
"bytes": "11882"
},
{
"name": "Nim",
"bytes": "65818"
},
{
"name": "OCaml",
"bytes": "94665"
},
{
"name": "Objective-C",
"bytes": "464903"
},
{
"name": "PHP",
"bytes": "4383673"
},
{
"name": "Perl",
"bytes": "743304"
},
{
"name": "PowerShell",
"bytes": "678274"
},
{
"name": "Python",
"bytes": "5529523"
},
{
"name": "QMake",
"bytes": "6915"
},
{
"name": "R",
"bytes": "840841"
},
{
"name": "Raku",
"bytes": "10945"
},
{
"name": "Ruby",
"bytes": "328360"
},
{
"name": "Rust",
"bytes": "1735375"
},
{
"name": "Scala",
"bytes": "1387368"
},
{
"name": "Shell",
"bytes": "407167"
},
{
"name": "Swift",
"bytes": "342562"
},
{
"name": "TypeScript",
"bytes": "3060093"
}
],
"symlink_target": ""
}
|
import numpy as np
from PIL import Image
data = np.fromfile('data/float32.dat', dtype=np.float32)
data = data.reshape((360,720))
Image.fromarray(data*10**7).show()
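# A hedged alternative to the fixed 10**7 factor above (assumes the grid holds
# small-magnitude floats): rescale by the observed range before display.
lo, hi = float(data.min()), float(data.max())
scaled = (data - lo) / (hi - lo) if hi > lo else data * 0.0
Image.fromarray((scaled * 255).astype(np.uint8)).show()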
|
{
"content_hash": "b95a4929bcde2a9d5440736b901be836",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 56,
"avg_line_length": 27.5,
"alnum_prop": 0.7333333333333333,
"repo_name": "jsheedy/velotronheavyindustries.com",
"id": "ee005637e6a7388aa2bfb8b3664c054e428a1d5d",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intro-to-d3-grid-map/bin/load_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "398"
},
{
"name": "CSS",
"bytes": "2980"
},
{
"name": "HTML",
"bytes": "88205"
},
{
"name": "JavaScript",
"bytes": "1293"
}
],
"symlink_target": ""
}
|
"""Lookup operations."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_lookup_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
# TODO(yleon): Remove this function.
@deprecated("2017-03-02", "Use `tf.tables_initializer` instead.")
def initialize_all_tables(name="init_all_tables"):
"""Returns an Op that initializes all tables of the default graph.
Args:
name: Optional name for the initialization op.
Returns:
An Op that initializes all tables. Note that if there are
    no tables the returned Op is a NoOp.
"""
return tables_initializer(name)
def tables_initializer(name="init_all_tables"):
"""Returns an Op that initializes all tables of the default graph.
Args:
name: Optional name for the initialization op.
Returns:
An Op that initializes all tables. Note that if there are
    no tables the returned Op is a NoOp.
"""
initializers = ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS)
if initializers:
return control_flow_ops.group(*initializers, name=name)
return control_flow_ops.no_op(name=name)
def _check_table_dtypes(table, key_dtype, value_dtype):
"""Check that the given key_dtype and value_dtype matches the table dtypes.
Args:
table: The table to check types against to.
key_dtype: The key data type to check.
value_dtype: The value data type to check.
Raises:
TypeError: when 'key_dtype' or 'value_dtype' doesn't match the table data
types.
"""
if key_dtype != table.key_dtype:
raise TypeError("Invalid key dtype, expected %s but got %s." %
(table.key_dtype, key_dtype))
if value_dtype != table.value_dtype:
raise TypeError("Invalid value dtype, expected %s but got %s." %
(table.value_dtype, value_dtype))
class LookupInterface(object):
"""Represent a lookup table that persists across different steps."""
def __init__(self, key_dtype, value_dtype, name):
"""Construct a lookup table interface.
Args:
key_dtype: The table key type.
value_dtype: The table value type.
name: A name for the operation (optional).
"""
self._key_dtype = dtypes.as_dtype(key_dtype)
self._value_dtype = dtypes.as_dtype(value_dtype)
self._name = name
@property
def key_dtype(self):
"""The table key dtype."""
return self._key_dtype
@property
def value_dtype(self):
"""The table value dtype."""
return self._value_dtype
@property
def name(self):
"""The name of the table."""
return self._name
@property
def init(self):
"""The table initialization op."""
raise NotImplementedError
def size(self, name=None):
"""Compute the number of elements in this table."""
raise NotImplementedError
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values."""
raise NotImplementedError
class InitializableLookupTableBase(LookupInterface):
"""Initializable lookup table interface.
  An initializable lookup table persists across different steps.
"""
def __init__(self, table_ref, default_value, initializer):
"""Construct a table object from a table reference.
    It requires a table initializer object (a subclass of `TableInitializerBase`).
    It provides the table key and value types, as well as the op to initialize
    the table. The caller is responsible for executing the initialization op.
Args:
table_ref: The table reference, i.e. the output of the lookup table ops.
default_value: The value to use if a key is missing in the table.
initializer: The table initializer to use.
"""
super(InitializableLookupTableBase,
self).__init__(initializer.key_dtype, initializer.value_dtype,
table_ref.op.name.split("/")[-1])
self._table_ref = table_ref
self._default_value = ops.convert_to_tensor(
default_value, dtype=self._value_dtype)
self._default_value.get_shape().merge_with(tensor_shape.scalar())
self._init = initializer.initialize(self)
@property
def table_ref(self):
"""Get the underlying table reference."""
return self._table_ref
@property
def default_value(self):
"""The default value of the table."""
return self._default_value
@property
def init(self):
"""The table initialization op."""
return self._init
def size(self, name=None):
"""Compute the number of elements in this table.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this table.
"""
with ops.name_scope(name, "%s_Size" % self._name,
[self._table_ref]) as scope:
# pylint: disable=protected-access
return gen_lookup_ops._lookup_table_size_v2(self._table_ref, name=scope)
# pylint: enable=protected-access
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values.
The `default_value` is used for keys not present in the table.
Args:
keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
name: A name for the operation (optional).
Returns:
A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.
Raises:
TypeError: when `keys` or `default_value` doesn't match the table data
types.
"""
key_tensor = keys
if isinstance(keys, sparse_tensor.SparseTensor):
key_tensor = keys.values
if keys.dtype != self._key_dtype:
raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
(self._key_dtype, keys.dtype))
with ops.name_scope(name, "%s_Lookup" % self._name,
(self._table_ref, key_tensor,
self._default_value)) as scope:
# pylint: disable=protected-access
values = gen_lookup_ops._lookup_table_find_v2(
self._table_ref, key_tensor, self._default_value, name=scope)
# pylint: enable=protected-access
values.set_shape(key_tensor.get_shape())
if isinstance(keys, sparse_tensor.SparseTensor):
return sparse_tensor.SparseTensor(keys.indices, values, keys.dense_shape)
else:
return values
class HashTable(InitializableLookupTableBase):
"""A generic hash table implementation.
Example usage:
```python
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values), -1)
  out = table.lookup(input_tensor)
  table.init.run()
  print(out.eval())
```
"""
def __init__(self, initializer, default_value, shared_name=None, name=None):
"""Creates a non-initialized `HashTable` object.
Creates a table, the type of its keys and values are specified by the
initializer.
Before using the table you will have to initialize it. After initialization
the table will be immutable.
Args:
initializer: The table initializer to use. See `HashTable` kernel for
supported key and value types.
default_value: The value to use if a key is missing in the table.
shared_name: If non-empty, this table will be shared under
the given name across multiple sessions.
name: A name for the operation (optional).
Returns:
A `HashTable` object.
"""
with ops.name_scope(name, "hash_table", (initializer,
default_value)) as scope:
# pylint: disable=protected-access
table_ref = gen_lookup_ops._hash_table_v2(
shared_name=shared_name,
key_dtype=initializer.key_dtype,
value_dtype=initializer.value_dtype,
name=scope)
# pylint: enable=protected-access
super(HashTable, self).__init__(table_ref, default_value, initializer)
class TableInitializerBase(object):
"""Base class for lookup table initializers."""
def __init__(self, key_dtype, value_dtype):
"""Construct a table initializer object.
Args:
key_dtype: Type of the table keys.
value_dtype: Type of the table values.
"""
self._key_dtype = dtypes.as_dtype(key_dtype)
self._value_dtype = dtypes.as_dtype(value_dtype)
@property
def key_dtype(self):
"""The expected table key dtype."""
return self._key_dtype
@property
def value_dtype(self):
"""The expected table value dtype."""
return self._value_dtype
def initialize(self, table):
"""Returns the table initialization op."""
raise NotImplementedError
class KeyValueTensorInitializer(TableInitializerBase):
"""Table initializers given `keys` and `values` tensors."""
def __init__(self, keys, values, key_dtype=None, value_dtype=None, name=None):
"""Constructs a table initializer object based on keys and values tensors.
Args:
keys: The tensor for the keys.
values: The tensor for the values.
key_dtype: The `keys` data type. Used when `keys` is a python array.
value_dtype: The `values` data type. Used when `values` is a python array.
name: A name for the operation (optional).
"""
with ops.name_scope(name, "key_value_init", [keys, values]) as scope:
self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name="keys")
self._values = ops.convert_to_tensor(
values, dtype=value_dtype, name="values")
self._name = scope
super(KeyValueTensorInitializer, self).__init__(self._keys.dtype,
self._values.dtype)
def initialize(self, table):
"""Initializes the given `table` with `keys` and `values` tensors.
Args:
table: The table to initialize.
Returns:
The operation that initializes the table.
Raises:
TypeError: when the keys and values data types do not match the table
key and value data types.
"""
_check_table_dtypes(table, self._keys.dtype, self._values.dtype)
with ops.name_scope(
self._name, values=(table.table_ref, self._keys,
self._values)) as scope:
# pylint: disable=protected-access
init_op = gen_lookup_ops._initialize_table_v2(
table.table_ref, self._keys, self._values, name=scope)
# pylint: enable=protected-access
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
return init_op
class TextFileIndex(object):
WHOLE_LINE = -2
LINE_NUMBER = -1
class TextFileInitializer(TableInitializerBase):
"""Table initializers from a text file.
This initializer assigns one entry in the table for each line in the file.
The key and value type of the table to initialize is given by `key_dtype` and
`value_dtype`.
The key and value content to get from each line is specified by
the `key_index` and `value_index`.
* `TextFileIndex.LINE_NUMBER` means use the line number starting from zero,
expects data type int64.
* `TextFileIndex.WHOLE_LINE` means use the whole line content, expects data
type string.
* A value `>=0` means use the index (starting at zero) of the split line based
on `delimiter`.
For example if we have a file with the following content:
```
emerson 10
lake 20
palmer 30
```
The following snippet initializes a table with the first column as keys and
second column as values:
* `emerson -> 10`
* `lake -> 20`
* `palmer -> 30`
```python
table = tf.contrib.lookup.HashTable(tf.contrib.lookup.TextFileInitializer(
"test.txt", tf.string, 0, tf.int64, 1, delimiter=" "), -1)
...
table.init.run()
```
Similarly to initialize the whole line as keys and the line number as values.
* `emerson 10 -> 0`
* `lake 20 -> 1`
* `palmer 30 -> 2`
```python
table = tf.contrib.lookup.HashTable(tf.contrib.lookup.TextFileInitializer(
"test.txt", tf.string, tf.contrib.lookup.TextFileIndex.WHOLE_LINE,
tf.int64, tf.contrib.lookup.TextFileIndex.LINE_NUMBER, delimiter=" "), -1)
...
table.init.run()
```
"""
def __init__(self,
filename,
key_dtype,
key_index,
value_dtype,
value_index,
vocab_size=None,
delimiter="\t",
name=None):
"""Constructs a table initializer object to populate from a text file.
It generates one key-value pair per line. The type of table key and
value are specified by `key_dtype` and `value_dtype`, respectively.
Similarly the content of the key and value are specified by the key_index
and value_index.
- TextFileIndex.LINE_NUMBER means use the line number starting from zero,
expects data type int64.
- TextFileIndex.WHOLE_LINE means use the whole line content, expects data
type string.
- A value >=0 means use the index (starting at zero) of the split line based
on `delimiter`.
Args:
filename: The filename of the text file to be used for initialization.
The path must be accessible from wherever the graph is initialized
(eg. trainer or eval workers). The filename may be a scalar `Tensor`.
key_dtype: The `key` data type.
key_index: the index that represents information of a line to get the
table 'key' values from.
value_dtype: The `value` data type.
value_index: the index that represents information of a line to get the
        table 'value' values from.
vocab_size: The number of elements in the file, if known.
delimiter: The delimiter to separate fields in a line.
name: A name for the operation (optional).
Raises:
ValueError: when the filename is empty, or when the table key and value
data types do not match the expected data types.
"""
if not isinstance(filename, ops.Tensor) and not filename:
raise ValueError("Filename required for %s." % name)
key_dtype = dtypes.as_dtype(key_dtype)
value_dtype = dtypes.as_dtype(value_dtype)
if key_index < -2:
raise ValueError("Invalid key index %s." % (key_index))
if key_index == TextFileIndex.LINE_NUMBER and key_dtype != dtypes.int64:
raise ValueError("Signature mismatch. Keys must be dtype %s, got %s." %
(dtypes.int64, key_dtype))
if ((key_index == TextFileIndex.WHOLE_LINE) and
(not key_dtype.is_integer) and (key_dtype != dtypes.string)):
raise ValueError(
"Signature mismatch. Keys must be integer or string, got %s." %
key_dtype)
if value_index < -2:
raise ValueError("Invalid value index %s." % (value_index))
if value_index == TextFileIndex.LINE_NUMBER and value_dtype != dtypes.int64:
raise ValueError("Signature mismatch. Values must be dtype %s, got %s." %
(dtypes.int64, value_dtype))
if value_index == TextFileIndex.WHOLE_LINE and value_dtype != dtypes.string:
raise ValueError("Signature mismatch. Values must be dtype %s, got %s." %
(dtypes.string, value_dtype))
if (vocab_size is not None) and (vocab_size <= 0):
raise ValueError("Invalid vocab_size %s." % vocab_size)
self._filename = filename
self._key_index = key_index
self._value_index = value_index
self._vocab_size = vocab_size
self._delimiter = delimiter
self._name = name
super(TextFileInitializer, self).__init__(key_dtype, value_dtype)
def initialize(self, table):
"""Initializes the table from a text file.
Args:
table: The table to be initialized.
Returns:
The operation that initializes the table.
Raises:
TypeError: when the keys and values data types do not match the table
key and value data types.
"""
_check_table_dtypes(table, self.key_dtype, self.value_dtype)
with ops.name_scope(self._name, "text_file_init",
(table.table_ref,)) as scope:
filename = ops.convert_to_tensor(
self._filename, dtypes.string, name="asset_filepath")
# pylint: disable=protected-access
init_op = gen_lookup_ops._initialize_table_from_text_file_v2(
table.table_ref,
filename,
self._key_index,
self._value_index,
-1 if self._vocab_size is None else self._vocab_size,
self._delimiter,
name=scope)
# pylint: enable=protected-access
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
# If the filename tensor is anything other than a string constant (e.g., if
# it is a placeholder) then it does not make sense to track it as an asset.
if constant_op.is_constant(filename):
ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename)
return init_op
class TextFileStringTableInitializer(TextFileInitializer):
"""Table initializer for `int64` IDs to string tables from a text file."""
def __init__(self,
filename,
key_column_index=TextFileIndex.LINE_NUMBER,
value_column_index=TextFileIndex.WHOLE_LINE,
vocab_size=None,
delimiter="\t",
name="text_file_string_table_init"):
"""Constructs an initializer for an id-to-string table from a text file.
It populates a table that its key and value types are int64 and string,
respectively. It generates one key-value pair per line.
The content of the key and value are specified by `key_column_index`
and `value_column_index`.
- TextFileIndex.LINE_NUMBER means use the line number starting from zero,
expects data type int64.
- TextFileIndex.WHOLE_LINE means use the whole line content, expects data
type string.
- A value >=0 means use the index (starting at zero) of the split line based
on `delimiter`.
Args:
filename: The filename of the text file to be used for initialization.
The path must be accessible from wherever the graph is initialized
(eg. trainer or eval workers). The filename may be a scalar `Tensor`.
      key_column_index: The column index from the text file to get the keys
        from. The default is to use the line number, starting from zero.
      value_column_index: The column index from the text file to get the
        values from. The default is to use the whole line content.
vocab_size: The number of elements in the file, if known.
delimiter: The delimiter to separate fields in a line.
name: Optional name for the op.
Raises:
TypeError: when the filename is empty, or when the table key and value
data types do not match the expected data types.
"""
super(TextFileStringTableInitializer, self).__init__(
filename,
dtypes.int64,
key_column_index,
dtypes.string,
value_column_index,
vocab_size=vocab_size,
delimiter=delimiter,
name=name)
class TextFileIdTableInitializer(TextFileInitializer):
"""Table initializer for string to `int64` IDs tables from a text file."""
def __init__(self,
filename,
key_column_index=TextFileIndex.WHOLE_LINE,
value_column_index=TextFileIndex.LINE_NUMBER,
vocab_size=None,
delimiter="\t",
name="text_file_id_table_init",
key_dtype=dtypes.string):
"""Constructs an initializer for an string-to-id table from a text file.
It populates a table that its key and value types are string and int64,
respectively. It generates one key-value pair per line.
The content of the key and value are specified by the key_index
and value_index.
- TextFileIndex.LINE_NUMBER means use the line number starting from zero,
expects data type int64.
- TextFileIndex.WHOLE_LINE means use the whole line content, expects data
type string.
- A value >=0 means use the index (starting at zero) of the split line based
on `delimiter`.
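    For example, a minimal sketch (assuming "vocab.txt" holds one token per
    line, so each token maps to its zero-based line number):
    ```python
    init = tf.contrib.lookup.TextFileIdTableInitializer("vocab.txt")
    table = tf.contrib.lookup.HashTable(init, default_value=-1)
    ```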
Args:
filename: The filename of the text file to be used for initialization.
The path must be accessible from wherever the graph is initialized
(eg. trainer or eval workers). The filename may be a scalar `Tensor`.
      key_column_index: The column index from the text file to get the `key`
        values from. The default is to use the whole line content.
      value_column_index: The column index from the text file to get the `value`
        values from. The default is to use the line number, starting from zero.
vocab_size: The number of elements in the file, if known.
delimiter: The delimiter to separate fields in a line.
name: Optional name for the op.
key_dtype: The `key` data type.
Raises:
TypeError: when the filename is empty, or when the table key and value
data types do not match the expected data types.
"""
super(TextFileIdTableInitializer, self).__init__(
filename,
key_dtype,
key_column_index,
dtypes.int64,
value_column_index,
vocab_size=vocab_size,
delimiter=delimiter,
name=name)
class HasherSpec(collections.namedtuple("HasherSpec", ["hasher", "key"])):
"""A structure for the spec of the hashing function to use for hash buckets.
`hasher` is the name of the hashing function to use (eg. "fasthash",
"stronghash").
`key` is optional and specify the key to use for the hash function if
supported, currently only used by a strong hash.
Fields:
hasher: The hasher name to use.
key: The key to be used by the hashing function, if required.
"""
__slots__ = ()
FastHashSpec = HasherSpec("fasthash", None) # pylint: disable=invalid-name
class StrongHashSpec(HasherSpec):
"""A structure to specify a key of the strong keyed hash spec.
The strong hash requires a `key`, which is a list of 2 unsigned integer
numbers. These should be non-zero; random numbers generated from random.org
would be a fine choice.
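  For example, a hedged sketch of supplying a keyed hasher to one of the table
  constructors in this module (the two key values below are placeholders):
  ```python
  hasher = tf.contrib.lookup.StrongHashSpec(key=[0x79D978FD, 0x2A2B4E8C])
  table = tf.contrib.lookup.index_table_from_file(
      "vocab.txt", num_oov_buckets=5, hasher_spec=hasher)
  ```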
Fields:
key: The key to be used by the keyed hashing function.
"""
__slots__ = ()
def __new__(cls, key):
if len(key) != 2:
raise ValueError("key must have size 2, got %s." % len(key))
if not isinstance(key[0], compat.integral_types) or not isinstance(
key[1], compat.integral_types):
raise TypeError("Invalid key %s. Must be unsigned integer values." % key)
return super(cls, StrongHashSpec).__new__(cls, "stronghash", key)
def _as_string(tensor):
if dtypes.string == tensor.dtype.base_dtype:
return tensor
return string_ops.as_string(tensor)
class IdTableWithHashBuckets(LookupInterface):
"""String to Id table wrapper that assigns out-of-vocabulary keys to buckets.
For example, if an instance of `IdTableWithHashBuckets` is initialized with a
string-to-id table that maps:
- emerson -> 0
- lake -> 1
- palmer -> 2
  The `IdTableWithHashBuckets` object will perform the following mapping:
- emerson -> 0
- lake -> 1
- palmer -> 2
- <other term> -> bucket id between 3 and 3 + num_oov_buckets - 1, calculated
by: hash(<term>) % num_oov_buckets + vocab_size
If input_tensor is ["emerson", "lake", "palmer", "king", "crimson"],
the lookup result is [0, 1, 2, 4, 7]
If `table` is None, only out-of-vocabulary buckets are used.
Example usage:
```python
num_oov_buckets = 3
  input_tensor = tf.constant(["emerson", "lake", "palmer", "king", "crimson"])
table = tf.IdTableWithHashBuckets(
tf.HashTable(tf.TextFileIdTableInitializer(filename), default_value),
num_oov_buckets)
  out = table.lookup(input_tensor)
  table.init.run()
  print(out.eval())
```
The hash function used for generating out-of-vocabulary buckets ID is handled
by `hasher_spec`.
"""
def __init__(self,
table,
num_oov_buckets,
hasher_spec=FastHashSpec,
name=None,
key_dtype=None):
"""Construct a `IdTableWithHashBuckets` object.
Args:
table: Table that maps `tf.string` or `tf.int64` keys to `tf.int64` ids.
num_oov_buckets: Number of buckets to use for out-of-vocabulary keys.
hasher_spec: A `HasherSpec` to specify the hash function to use for
assignation of out-of-vocabulary buckets (optional).
name: A name for the operation (optional).
key_dtype: Data type of keys passed to `lookup`. Defaults to
`table.key_dtype` if `table` is specified, otherwise `tf.string`.
Must be string or integer, and must be castable to `table.key_dtype`.
Raises:
ValueError: when `table` in None and `num_oov_buckets` is not positive.
TypeError: when `hasher_spec` is invalid.
"""
# If a name ends with a '/' it is a "name scope", remove all trailing '/'
# characters to use as table name.
if name:
name = name.rstrip("/")
if table:
if key_dtype is None:
key_dtype = table.key_dtype
supported_table_key_dtypes = (dtypes.int64, dtypes.string)
if table.key_dtype not in supported_table_key_dtypes:
raise TypeError("Invalid key dtype, expected one of %s, but got %s." %
(supported_table_key_dtypes, key_dtype))
if table.key_dtype.is_integer != key_dtype.is_integer:
raise TypeError("Invalid key dtype, expected %s but got %s." %
("integer" if key_dtype.is_integer else "non-integer",
table.key_dtype))
if table.value_dtype != dtypes.int64:
raise TypeError("Invalid value dtype, expected %s but got %s." %
(dtypes.int64, table.value_dtype))
self._table = table
name = name or self._table.name
else:
if num_oov_buckets <= 0:
raise ValueError("oov_buckets must be > 0 if no table is supplied.")
key_dtype = dtypes.string if key_dtype is None else key_dtype
self._table = None
name = name or "hash_bucket"
if (not key_dtype.is_integer) and (dtypes.string != key_dtype):
raise TypeError(
"Invalid key_dtype, expected integer or string, got %s." % key_dtype)
self._num_oov_buckets = num_oov_buckets
if not isinstance(hasher_spec, HasherSpec):
raise TypeError(
"hasher_spec must be of type HasherSpec, got %s" % hasher_spec)
self._hasher_spec = hasher_spec
super(IdTableWithHashBuckets, self).__init__(key_dtype, dtypes.int64,
name.split("/")[-1])
@property
def init(self):
"""The table initialization op."""
if self._table:
return self._table.init
with ops.name_scope(None, "init"):
return control_flow_ops.no_op()
def size(self, name=None):
"""Compute the number of elements in this table."""
with ops.name_scope(name, "%s_Size" % self.name) as scope:
if self._table:
tsize = self._table.size(scope)
else:
tsize = ops.convert_to_tensor(0, dtype=dtypes.int64)
return tsize + self._num_oov_buckets
def _get_string_to_hash_bucket_fn(self, hasher_spec):
"""Returns the string_to_hash_bucket op to use based on `hasher_spec`."""
if not isinstance(hasher_spec, HasherSpec):
raise TypeError("hasher_spec must be of type HasherSpec %s" % hasher_spec)
if hasher_spec.hasher == "fasthash":
return string_ops.string_to_hash_bucket_fast
if hasher_spec.hasher == "legacy":
return string_ops.string_to_hash_bucket
if hasher_spec.hasher == "stronghash":
return functools.partial(
string_ops.string_to_hash_bucket_strong, key=hasher_spec.key)
raise ValueError("Unknown hasher %s" % hasher_spec.hasher)
def lookup(self, keys, name=None):
"""Looks up `keys` in the table, outputs the corresponding values.
It assigns out-of-vocabulary keys to buckets based in their hashes.
Args:
keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
name: Optional name for the op.
Returns:
A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.
Raises:
TypeError: when `keys` doesn't match the table key data type.
"""
if keys.dtype != self._key_dtype:
raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
(self._key_dtype, keys.dtype))
values = keys
if isinstance(keys, sparse_tensor.SparseTensor):
values = keys.values
if self._table and (self._table.key_dtype.base_dtype == dtypes.int64):
values = math_ops.to_int64(values)
if self._num_oov_buckets == 0:
ids = self._table.lookup(values, name=name)
else:
# TODO(yleon): Consider moving this functionality to its own kernel.
with ops.name_scope(name, "%s_Lookup" % self.name) as scope:
str_to_hash_bucket = self._get_string_to_hash_bucket_fn(
self._hasher_spec)
buckets = str_to_hash_bucket(
_as_string(values),
num_buckets=self._num_oov_buckets,
name="hash_bucket")
if self._table:
ids = self._table.lookup(values)
buckets = math_ops.add(buckets, self._table.size())
is_id_non_default = math_ops.not_equal(ids, self._table.default_value)
ids = array_ops.where(is_id_non_default, ids, buckets, name=scope)
else:
ids = buckets
if isinstance(keys, sparse_tensor.SparseTensor):
return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)
return ids
def index_table_from_file(vocabulary_file=None,
num_oov_buckets=0,
vocab_size=None,
default_value=-1,
hasher_spec=FastHashSpec,
key_dtype=dtypes.string,
name=None):
"""Returns a lookup table that converts a string tensor into int64 IDs.
This operation constructs a lookup table to convert tensor of strings into
int64 IDs. The mapping can be initialized from a vocabulary file specified in
`vocabulary_file`, where the whole line is the key and the zero-based line
number is the ID.
Any lookup of an out-of-vocabulary token will return a bucket ID based on its
hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
`default_value`.
The bucket ID range is
`[vocabulary size, vocabulary size + num_oov_buckets - 1]`.
The underlying table must be initialized by calling
  `tf.tables_initializer().run()` or `table.init.run()` once.
Sample Usages:
If we have a vocabulary file "test.txt" with the following content:
```
emerson
lake
palmer
```
```python
features = tf.constant(["emerson", "lake", "and", "palmer"])
table = tf.contrib.lookup.index_table_from_file(
vocabulary_file="test.txt", num_oov_buckets=1)
ids = table.lookup(features)
...
tf.tables_initializer().run()
ids.eval() ==> [0, 1, 3, 2] # where 3 is the out-of-vocabulary bucket
```
Args:
vocabulary_file: The vocabulary filename, may be a constant scalar `Tensor`.
num_oov_buckets: The number of out-of-vocabulary buckets.
vocab_size: Number of the elements in the vocabulary, if known.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
hasher_spec: A `HasherSpec` to specify the hash function to use for
assignation of out-of-vocabulary buckets.
key_dtype: The `key` data type.
name: A name for this op (optional).
Returns:
The lookup table to map a `key_dtype` `Tensor` to index `int64` `Tensor`.
Raises:
ValueError: If `vocabulary_file` is not set.
ValueError: If `num_oov_buckets` is negative or `vocab_size` is not greater
than zero.
"""
if vocabulary_file is None or (
isinstance(vocabulary_file, str) and not vocabulary_file):
raise ValueError("vocabulary_file must be specified and must not be empty.")
if num_oov_buckets < 0:
raise ValueError("num_oov_buckets must be greater or equal than 0, got %d."
% num_oov_buckets)
if vocab_size is not None and vocab_size < 1:
raise ValueError("vocab_size must be greater than 0, got %d." % vocab_size)
if (not key_dtype.is_integer) and (dtypes.string != key_dtype.base_dtype):
raise TypeError("Only integer and string keys are supported.")
with ops.name_scope(name, "string_to_index") as feat_to_id_scope:
table = None
shared_name = ""
with ops.name_scope(None, "hash_table") as hash_table_scope:
if vocab_size:
# Keep the shared_name:
# <table_type>_<filename>_<vocab_size>_<key_index>_<value_index>
shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size,
TextFileIndex.WHOLE_LINE,
TextFileIndex.LINE_NUMBER)
else:
# Keep the shared_name
# <table_type>_<filename>_<key_index>_<value_index>
shared_name = "hash_table_%s_%s_%s" % (vocabulary_file,
TextFileIndex.WHOLE_LINE,
TextFileIndex.LINE_NUMBER)
init = TextFileIdTableInitializer(
vocabulary_file,
vocab_size=vocab_size,
key_dtype=dtypes.int64 if key_dtype.is_integer else key_dtype,
name="table_init")
table = HashTable(
init, default_value, shared_name=shared_name, name=hash_table_scope)
if num_oov_buckets:
table = IdTableWithHashBuckets(
table,
num_oov_buckets=num_oov_buckets,
hasher_spec=hasher_spec,
name=feat_to_id_scope,
key_dtype=key_dtype)
return table
def index_table_from_tensor(vocabulary_list,
num_oov_buckets=0,
default_value=-1,
hasher_spec=FastHashSpec,
dtype=dtypes.string,
name=None):
"""Returns a lookup table that converts a string tensor into int64 IDs.
This operation constructs a lookup table to convert tensor of strings into
int64 IDs. The mapping can be initialized from a string `vocabulary_list` 1-D
tensor where each element is a key and corresponding index within the tensor
is the value.
Any lookup of an out-of-vocabulary token will return a bucket ID based on its
hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
`default_value`.
The bucket ID range is `[mapping size, mapping size + num_oov_buckets - 1]`.
The underlying table must be initialized by calling
  `tf.tables_initializer().run()` or `table.init.run()` once.
  Elements in `vocabulary_list` cannot have duplicates; otherwise, executing the
  table initializer op will throw a `FailedPreconditionError`.
Sample Usages:
```python
vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
table = tf.contrib.lookup.index_table_from_tensor(
      vocabulary_list=vocabulary_list, num_oov_buckets=1, default_value=-1)
features = tf.constant(["emerson", "lake", "and", "palmer"])
ids = table.lookup(features)
...
tf.tables_initializer().run()
ids.eval() ==> [0, 1, 4, 2]
```
Args:
vocabulary_list: A 1-D `Tensor` that specifies the mapping of keys to
      indices. The type of this object must be castable to `dtype`.
num_oov_buckets: The number of out-of-vocabulary buckets.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
hasher_spec: A `HasherSpec` to specify the hash function to use for
assignment of out-of-vocabulary buckets.
dtype: The type of values passed to `lookup`. Only string and integers are
supported.
name: A name for this op (optional).
Returns:
The lookup table to map an input `Tensor` to index `int64` `Tensor`.
Raises:
    ValueError: If `vocabulary_list` is invalid.
ValueError: If `num_oov_buckets` is negative.
"""
if vocabulary_list is None:
raise ValueError("vocabulary_list must be specified.")
if num_oov_buckets < 0:
raise ValueError("num_oov_buckets must be greater or equal than 0, got %d."
% num_oov_buckets)
if (not dtype.is_integer) and (dtypes.string != dtype.base_dtype):
raise TypeError("Only integer and string keys are supported.")
with ops.name_scope(name, "string_to_index") as feat_to_id_scope:
keys = ops.convert_to_tensor(vocabulary_list)
if keys.dtype.is_integer != dtype.is_integer:
raise ValueError("Expected %s, got %s." %
("integer"
if dtype.is_integer else "non-integer", keys.dtype))
if (not dtype.is_integer) and (keys.dtype.base_dtype != dtype):
raise ValueError("Expected %s, got %s." % (dtype, keys.dtype))
num_elements = array_ops.size(keys)
values = math_ops.to_int64(math_ops.range(num_elements))
shared_name = ""
with ops.name_scope(None, "hash_table") as hash_table_scope:
table_keys = math_ops.to_int64(keys) if keys.dtype.is_integer else keys
init = KeyValueTensorInitializer(
table_keys,
values,
table_keys.dtype.base_dtype,
dtypes.int64,
name="table_init")
table = HashTable(
init, default_value, shared_name=shared_name, name=hash_table_scope)
if num_oov_buckets:
table = IdTableWithHashBuckets(
table,
num_oov_buckets=num_oov_buckets,
hasher_spec=hasher_spec,
name=feat_to_id_scope,
key_dtype=dtype)
return table
def index_to_string_table_from_file(vocabulary_file,
vocab_size=None,
default_value="UNK",
name=None):
"""Returns a lookup table that maps a `Tensor` of indices into strings.
This operation constructs a lookup table to map int64 indices into string
values. The table is initialized from a vocabulary file specified in
`vocabulary_file`, where the whole line is the value and the
zero-based line number is the index.
Any input which does not have a corresponding index in the vocabulary file
(an out-of-vocabulary entry) is assigned the `default_value`
The underlying table must be initialized by calling
  `tf.tables_initializer().run()` or `table.init.run()` once.
Sample Usages:
If we have a vocabulary file "test.txt" with the following content:
```
emerson
lake
palmer
```
```python
indices = tf.constant([1, 5], tf.int64)
table = tf.contrib.lookup.index_to_string_table_from_file(
vocabulary_file="test.txt", default_value="UNKNOWN")
values = table.lookup(indices)
...
tf.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
vocabulary_file: The vocabulary filename.
vocab_size: Number of the elements in the vocabulary, if known.
default_value: The value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
    The lookup table mapping an `int64` index `Tensor` to its associated
    string values.
Raises:
ValueError: when `vocabulary_file` is empty.
ValueError: when `vocab_size` is invalid.
"""
if not vocabulary_file:
raise ValueError("vocabulary_file must be specified.")
if vocab_size is not None and vocab_size < 1:
raise ValueError("vocab_size must be greater than 0, got %d." % vocab_size)
with ops.name_scope(name, "index_to_string") as scope:
shared_name = ""
if vocab_size:
# Keep a shared_name
# <table_type>_<filename>_<vocab_size>_<key_index>_<value_index>
shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size,
TextFileIndex.LINE_NUMBER,
TextFileIndex.WHOLE_LINE)
else:
# Keep a shared_name <table_type>_<filename>_<key_index>_<value_index>
shared_name = "hash_table_%s_%s_%s" % (vocabulary_file,
TextFileIndex.LINE_NUMBER,
TextFileIndex.WHOLE_LINE)
init = TextFileStringTableInitializer(
vocabulary_file, vocab_size=vocab_size, name="table_init")
    # TODO(yleon): Use a more efficient structure.
return HashTable(init, default_value, shared_name=shared_name, name=scope)
def index_to_string_table_from_tensor(vocabulary_list,
default_value="UNK",
name=None):
"""Returns a lookup table that maps a `Tensor` of indices into strings.
This operation constructs a lookup table to map int64 indices into string
  values. The mapping is initialized from a string `vocabulary_list` 1-D
  `Tensor` where each element is a value and the corresponding index within
  the tensor is the key.
  Any input which does not have a corresponding index in `vocabulary_list`
  (an out-of-vocabulary entry) is assigned the `default_value`.
The underlying table must be initialized by calling
  `tf.tables_initializer().run()` or `table.init.run()` once.
  Elements in `vocabulary_list` cannot have duplicates; otherwise, executing the
  table initializer op will throw a `FailedPreconditionError`.
Sample Usages:
```python
vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
indices = tf.constant([1, 5], tf.int64)
table = tf.contrib.lookup.index_to_string_table_from_tensor(
vocabulary_list, default_value="UNKNOWN")
values = table.lookup(indices)
...
tf.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
vocabulary_list: A 1-D string `Tensor` that specifies the strings to map
from indices.
default_value: The value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
    The lookup table mapping an `int64` index `Tensor` to its associated
    string values.
Raises:
ValueError: when `vocabulary_list` is not set.
"""
if vocabulary_list is None:
raise ValueError("vocabulary_list must be specified.")
with ops.name_scope(name, "index_to_string") as scope:
vocabulary_list = ops.convert_to_tensor(vocabulary_list, dtypes.string)
num_elements = array_ops.size(vocabulary_list)
keys = math_ops.to_int64(math_ops.range(num_elements))
shared_name = ""
init = KeyValueTensorInitializer(
keys, vocabulary_list, dtypes.int64, dtypes.string, name="table_init")
    # TODO(yleon): Use a more efficient structure.
return HashTable(init, default_value, shared_name=shared_name, name=scope)
ops.NotDifferentiable("LookupTableFind")
ops.NotDifferentiable("LookupTableFindV2")
ops.NotDifferentiable("LookupTableInsert")
ops.NotDifferentiable("LookupTableInsertV2")
ops.NotDifferentiable("LookupTableSize")
ops.NotDifferentiable("LookupTableSizeV2")
ops.NotDifferentiable("HashTable")
ops.NotDifferentiable("HashTableV2")
ops.NotDifferentiable("InitializeTable")
ops.NotDifferentiable("InitializeTableV2")
ops.NotDifferentiable("InitializeTableFromTextFile")
ops.NotDifferentiable("InitializeTableFromTextFileV2")
ops.NotDifferentiable("MutableDenseHashTable")
ops.NotDifferentiable("MutableDenseHashTableV2")
ops.NotDifferentiable("MutableHashTable")
ops.NotDifferentiable("MutableHashTableV2")
ops.NotDifferentiable("MutableHashTableOfTensors")
ops.NotDifferentiable("MutableHashTableOfTensorsV2")
|
{
"content_hash": "b1cc1e12b7cc2009bf9b649b8b3f0e03",
"timestamp": "",
"source": "github",
"line_count": 1207,
"max_line_length": 80,
"avg_line_length": 36.6106048053024,
"alnum_prop": 0.6564077032745705,
"repo_name": "yufengg/tensorflow",
"id": "dade0535892492cd60f511674f42e9158c5e72a7",
"size": "44877",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/lookup_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "194748"
},
{
"name": "C++",
"bytes": "26944576"
},
{
"name": "CMake",
"bytes": "174857"
},
{
"name": "Go",
"bytes": "908627"
},
{
"name": "Java",
"bytes": "323449"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37293"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Protocol Buffer",
"bytes": "249901"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "22872500"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "336333"
}
],
"symlink_target": ""
}
|
"""
Join parts of URLs by ensuring that parts are separated by exactly one slash.
Because, unfortunately, urllib.parse.urljoin does not simply concatenate URL parts.
"""
from functools import reduce
def _join_slash(a, b):
return a.rstrip('/') + '/' + b.lstrip('/')
def urljoin(*args):
return reduce(_join_slash, args) if args else ''
if __name__ == '__main__':
parts = ['https://foo-bar.quux.net', '/foo', 'bar', '/bat/', '/quux/']
url = urljoin(*parts)
print('url=', url)
url = urljoin('https://quux.com/', '/path', 'to/file///', '//here/')
print('url=', url)
url = urljoin()
print('url=', url)
url = urljoin('//','beware', 'of/this///')
print('url=', url)
url = urljoin('/leading', 'and/', '/trailing/', 'slash/')
print('url=', url)
|
{
"content_hash": "2aebf21b94b733cccae0b6aa72ff01af",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 25.193548387096776,
"alnum_prop": 0.5697823303457106,
"repo_name": "cbare/Etudes",
"id": "87e4ed69c54d2a806bb25581e8049898650f070a",
"size": "781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/urljoin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Haskell",
"bytes": "47480"
},
{
"name": "Jupyter Notebook",
"bytes": "2763925"
},
{
"name": "Python",
"bytes": "106346"
},
{
"name": "R",
"bytes": "2756"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth.views import LogoutView
from django.urls import path, re_path
from . import views
urlpatterns = static('/compiled/', document_root=settings.BUILD_ROOT) + [
path('go/<path:path>', views.redirector, name='redirector'),
    path('logout', LogoutView.as_view(next_page='/')),
    path('logout/', LogoutView.as_view(next_page='/')),
re_path('^', views.index, name='index'),
]
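# Illustrative sketch of a view compatible with the 'go/<path:path>' route
# above; the project's real views.redirector may differ, and the target URL
# below is a placeholder assumption.
def _example_redirector(request, path):
    from django.http import HttpResponseRedirect
    return HttpResponseRedirect('https://example.com/%s' % path)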
|
{
"content_hash": "5400533683bc4335886bf11bbd8688fe",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 73,
"avg_line_length": 32.785714285714285,
"alnum_prop": 0.6797385620915033,
"repo_name": "citizenlabsgr/voter-engagement",
"id": "b56cab4e38c9bae2fa8b56055eb7585e52cf2cc7",
"size": "459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/core/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "652"
},
{
"name": "HTML",
"bytes": "1028"
},
{
"name": "JavaScript",
"bytes": "6368"
},
{
"name": "Makefile",
"bytes": "3666"
},
{
"name": "Python",
"bytes": "54548"
},
{
"name": "Shell",
"bytes": "995"
},
{
"name": "TypeScript",
"bytes": "86686"
}
],
"symlink_target": ""
}
|
from datetime import date
from workalendar.tests import GenericCalendarTest
from workalendar.america import UnitedStates
from workalendar.america import Brazil, BrazilSaoPauloState
from workalendar.america import BrazilSaoPauloCity
from workalendar.america import Mexico, Chile, Panama
class UnitedStatesTest(GenericCalendarTest):
cal_class = UnitedStates
def test_year_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 1, 1), holidays) # new year
        self.assertIn(date(2013, 7, 4), holidays) # Independence Day
        self.assertIn(date(2013, 11, 11), holidays) # Veterans Day (Armistice)
self.assertIn(date(2013, 12, 25), holidays) # Christmas
# Variable days
self.assertIn(date(2013, 1, 21), holidays) # Martin Luther King
self.assertIn(date(2013, 2, 18), holidays) # Washington's bday
self.assertIn(date(2013, 5, 27), holidays) # Memorial day
self.assertIn(date(2013, 9, 2), holidays) # Labour day
        self.assertIn(date(2013, 10, 14), holidays) # Columbus Day
        self.assertIn(date(2013, 11, 28), holidays) # Thanksgiving
def test_presidential_year(self):
self.assertTrue(UnitedStates.is_presidential_year(2012))
self.assertFalse(UnitedStates.is_presidential_year(2013))
self.assertFalse(UnitedStates.is_presidential_year(2014))
self.assertFalse(UnitedStates.is_presidential_year(2015))
self.assertTrue(UnitedStates.is_presidential_year(2016))
def test_inauguration_day(self):
holidays = self.cal.holidays_set(2008)
self.assertNotIn(date(2008, 1, 20), holidays)
holidays = self.cal.holidays_set(2009)
self.assertIn(date(2009, 1, 20), holidays)
# case when inauguration day is a sunday
holidays = self.cal.holidays_set(1985)
self.assertNotIn(date(1985, 1, 20), holidays)
self.assertIn(date(1985, 1, 21), holidays)
class BrazilTest(GenericCalendarTest):
cal_class = Brazil
def test_year_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 1, 1), holidays) # new year
self.assertIn(date(2013, 4, 21), holidays) # Tiradentes
self.assertIn(date(2013, 5, 1), holidays) # Dia do trabalhador
self.assertIn(date(2013, 9, 7), holidays) # Dia da Independência
self.assertIn(date(2013, 10, 12), holidays) # Nossa Senhora Aparecida
self.assertIn(date(2013, 11, 2), holidays) # Finados
self.assertIn(date(2013, 11, 15), holidays) # Proclamação da República
self.assertIn(date(2013, 12, 25), holidays) # Natal
class SaoPauloStateTest(BrazilTest):
cal_class = BrazilSaoPauloState
def test_regional_2013(self):
holidays = self.cal.holidays_set(2013)
# Revolução Constitucionalista de 1932
self.assertIn(date(2013, 7, 9), holidays)
class SaoPauloCityTest(SaoPauloStateTest):
cal_class = BrazilSaoPauloCity
def test_city_2013(self):
holidays = self.cal.holidays_set(2013)
# Aniversário da Cidade de São Paulo
self.assertIn(date(2013, 1, 25), holidays)
self.assertIn(date(2013, 2, 12), holidays) # Carnaval
self.assertIn(date(2013, 11, 20), holidays) # Dia da Consciência Negra
self.assertIn(date(2013, 3, 29), holidays) # Sexta-feira da Paixão
self.assertIn(date(2013, 3, 31), holidays) # Páscoa
self.assertIn(date(2013, 5, 30), holidays) # Corpus Christi
class ChileTest(GenericCalendarTest):
cal_class = Chile
def test_holidays_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 1, 1), holidays)
self.assertIn(date(2013, 3, 29), holidays)
self.assertIn(date(2013, 3, 30), holidays)
self.assertIn(date(2013, 5, 1), holidays)
self.assertIn(date(2013, 5, 21), holidays)
self.assertIn(date(2013, 6, 29), holidays)
self.assertIn(date(2013, 7, 16), holidays)
self.assertIn(date(2013, 8, 15), holidays)
self.assertIn(date(2013, 9, 18), holidays)
self.assertIn(date(2013, 9, 19), holidays)
self.assertIn(date(2013, 9, 20), holidays)
self.assertIn(date(2013, 10, 12), holidays)
self.assertIn(date(2013, 10, 31), holidays)
self.assertIn(date(2013, 11, 1), holidays)
self.assertIn(date(2013, 12, 8), holidays)
self.assertIn(date(2013, 12, 25), holidays)
self.assertIn(date(2013, 12, 31), holidays)
def test_reformation_day(self):
holidays = self.cal.holidays_set(2012)
self.assertNotIn(date(2012, 10, 31), holidays)
self.assertIn(date(2012, 11, 2), holidays)
#
holidays = self.cal.holidays_set(2017)
self.assertNotIn(date(2017, 10, 31), holidays)
self.assertIn(date(2017, 10, 27), holidays)
class MexicoTest(GenericCalendarTest):
cal_class = Mexico
def test_holidays_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 1, 1), holidays)
self.assertIn(date(2013, 2, 4), holidays) # Constitution day
self.assertIn(date(2013, 3, 18), holidays) # Benito Juárez's birthday
self.assertIn(date(2013, 5, 1), holidays) # Labour day
self.assertIn(date(2013, 9, 16), holidays) # Independence day
self.assertIn(date(2013, 11, 18), holidays) # Revolution day
self.assertIn(date(2013, 12, 25), holidays) # XMas
def test_shift_to_monday(self):
holidays = self.cal.holidays_set(2017)
# New year on Sunday -> shift
self.assertIn(date(2017, 1, 2), holidays)
holidays = self.cal.holidays_set(2016)
# XMas on sunday -> shift to monday
self.assertIn(date(2016, 12, 26), holidays)
# Same for Labour day
self.assertIn(date(2016, 5, 2), holidays)
def test_shift_to_friday(self):
holidays = self.cal.holidays_set(2021)
# January 1st 2022 is a saturday, so we shift to friday
self.assertIn(date(2021, 12, 31), holidays)
# Same for Labour day
self.assertIn(date(2021, 4, 30), holidays)
holidays = self.cal.holidays_set(2021)
        # December 25th, 2021 is a saturday, so we shift to friday
self.assertIn(date(2021, 12, 24), holidays)
class PanamaTest(GenericCalendarTest):
cal_class = Panama
def test_holidays_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 1, 1), holidays)
self.assertIn(date(2013, 1, 9), holidays) # Martyrs day
self.assertIn(date(2013, 2, 12), holidays) # carnival tuesday
self.assertIn(date(2013, 3, 29), holidays) # good friday
self.assertIn(date(2013, 3, 30), holidays) # easter saturday
self.assertIn(date(2013, 3, 31), holidays) # easter sunday
self.assertIn(date(2013, 5, 1), holidays) # labour day
self.assertIn(date(2013, 11, 3), holidays) # independence day
self.assertIn(date(2013, 11, 5), holidays) # colon day
# Shout in Villa de los Santos
self.assertIn(date(2013, 11, 10), holidays)
self.assertIn(date(2013, 12, 2), holidays) # Independence from spain
self.assertIn(date(2013, 12, 8), holidays) # mother day
self.assertIn(date(2013, 12, 25), holidays) # XMas
|
{
"content_hash": "1cb403a10a07f18111b12b4c371c8c31",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 79,
"avg_line_length": 43.92814371257485,
"alnum_prop": 0.6499454743729552,
"repo_name": "ChrisStevens/workalendar",
"id": "e63fbfda106ea555da6f7e20b7a5bdd0330db5c6",
"size": "7372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workalendar/tests/test_america.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "134041"
}
],
"symlink_target": ""
}
|
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ComponentConfig(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'node_placement': 'V1NodePlacement',
'replicas': 'int'
}
attribute_map = {
'node_placement': 'nodePlacement',
'replicas': 'replicas'
}
def __init__(self, node_placement=None, replicas=None):
"""
V1ComponentConfig - a model defined in Swagger
"""
self._node_placement = None
self._replicas = None
if node_placement is not None:
self.node_placement = node_placement
if replicas is not None:
self.replicas = replicas
@property
def node_placement(self):
"""
Gets the node_placement of this V1ComponentConfig.
nodePlacement describes scheduling configuration for specific KubeVirt components
:return: The node_placement of this V1ComponentConfig.
:rtype: V1NodePlacement
"""
return self._node_placement
@node_placement.setter
def node_placement(self, node_placement):
"""
Sets the node_placement of this V1ComponentConfig.
nodePlacement describes scheduling configuration for specific KubeVirt components
:param node_placement: The node_placement of this V1ComponentConfig.
:type: V1NodePlacement
"""
self._node_placement = node_placement
@property
def replicas(self):
"""
Gets the replicas of this V1ComponentConfig.
replicas indicates how many replicas should be created for each KubeVirt infrastructure component (like virt-api or virt-controller). Defaults to 2. WARNING: this is an advanced feature that prevents auto-scaling for core kubevirt components. Please use with caution!
:return: The replicas of this V1ComponentConfig.
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""
Sets the replicas of this V1ComponentConfig.
replicas indicates how many replicas should be created for each KubeVirt infrastructure component (like virt-api or virt-controller). Defaults to 2. WARNING: this is an advanced feature that prevents auto-scaling for core kubevirt components. Please use with caution!
:param replicas: The replicas of this V1ComponentConfig.
:type: int
"""
self._replicas = replicas
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ComponentConfig):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
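# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated model): constructing the config and
# round-tripping it through to_dict(); the values below are illustrative only.
if __name__ == "__main__":
    config = V1ComponentConfig(replicas=2)
    print(config.replicas)   # -> 2
    print(config.to_dict())  # -> {'node_placement': None, 'replicas': 2}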
|
{
"content_hash": "96297c2ed68ce7182369c0fd21945e57",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 275,
"avg_line_length": 29.980132450331126,
"alnum_prop": 0.5886900817318312,
"repo_name": "kubevirt/client-python",
"id": "0cc370924591767b9aedfef218c2c58e92505e1b",
"size": "4544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubevirt/models/v1_component_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4224980"
},
{
"name": "Shell",
"bytes": "2209"
}
],
"symlink_target": ""
}
|
import os, os.path, sys, re
from fnmatch import fnmatch
from setuptools import setup
MODULE_NAME = 'django_mako_plus'
IGNORE_PATTERNS = [
'.*',
'__pycache__',
'*.pyc',
'__dmpcache__',
'node_modules',
'.vscode',
'.DS_Store',
]
def is_ignore(fn):
for pat in IGNORE_PATTERNS:
if fnmatch(fn, pat):
return True
return False
# I can't import the version file the normal way because it loads
# __init__.py, which then imports the DMP engine.
with open('django_mako_plus/version.py') as f:
    match = re.search(r"__version__\s=\s'(\d+\.\d+\.\d+)'", f.read())
if not match:
print('Cannot determine the DMP version. Aborting setup.py.')
sys.exit(1)
VERSION = match.group(1)
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3 :: Only',
'Framework :: Django',
'Framework :: Django :: 1.11',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.9',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development',
]
install_requires = [
'django >= 1.9.0',
'mako >= 1.0.0',
]
if len(sys.argv) >= 2 and sys.argv[1] == 'sdist':
    # remove the __pycache__ directories since the ones in project_template seem to stick around
    os.system(r'find . -name "__pycache__" -type d -exec rm -r "{}" \+')
# Compile the list of packages available
packages = []
def walk(parent):
for fname in os.listdir(parent):
fpath = os.path.join(parent, fname)
# skip hidden/cache files
if is_ignore(fname) or fname in ( 'app_template', 'project_template' ):
continue
# if a directory, walk it
elif os.path.isdir(fpath):
walk(fpath)
# if an __init__.py file, add the directory to the packages
elif fname == '__init__.py':
packages.append(os.path.dirname(fpath))
walk(MODULE_NAME)
data_files = []
# add the readme/license
data_files.extend([
('', [ 'readme.md' ]),
('', [ 'readme.txt' ]),
('', [ 'license.txt' ]),
])
# add the extra directories
# empty directories within app_template/ will cause problems with distutils, so be sure each directory has at least one file
package_data_files = []
def walk2(parent):
for fname in os.listdir(parent):
fpath = os.path.join(parent, fname)
if is_ignore(fname):
pass # ignore this one
elif os.path.isdir(fpath):
walk2(fpath)
else:
package_data_files.append(os.path.relpath(fpath, MODULE_NAME))
walk2(os.path.join(MODULE_NAME, 'app_template'))
walk2(os.path.join(MODULE_NAME, 'project_template'))
walk2(os.path.join(MODULE_NAME, 'webroot'))
# read the long description if sdist
description = 'Django+Mako: Routing by Convention, Python-Centric Template Language'
long_description = description
if len(sys.argv) > 1 and sys.argv[1] == 'sdist':
long_description = open('readme.txt').read()
# run the setup
setup(
name='django-mako-plus',
description=description,
long_description=long_description,
version=VERSION,
author='Conan Albrecht',
author_email='doconix@gmail.com',
url="http://doconix.github.io/django-mako-plus/",
download_url="https://github.com/doconix/django-mako-plus/archive/master.zip",
packages=packages,
package_data = {
MODULE_NAME: package_data_files,
},
entry_points={
'console_scripts': [
'django_mako_plus = django_mako_plus.__main__:main'
]
},
install_requires=install_requires,
classifiers=CLASSIFIERS,
license='Apache 2.0',
)
|
{
"content_hash": "0e42b174f1d9a474333834beb192b472",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 124,
"avg_line_length": 30.12295081967213,
"alnum_prop": 0.6312925170068027,
"repo_name": "doconix/django-mako-plus",
"id": "d46ee989d55d3550fe3584077706199806c1035a",
"size": "3675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4041"
},
{
"name": "HTML",
"bytes": "2797"
},
{
"name": "JavaScript",
"bytes": "13359"
},
{
"name": "Makefile",
"bytes": "681"
},
{
"name": "Python",
"bytes": "263803"
}
],
"symlink_target": ""
}
|
from cli import base
from core import data, container
from cement.core.controller import CementBaseController, expose
class PackageController(CementBaseController):
class Meta:
label = 'package'
stacked_on = 'base'
@expose(help='Package the AppDir into an AppImage.')
def package(self):
data_obj = data.Data()
container_name = data_obj.get_path_hash()
container_obj = container.Container(container_name)
for line in container_obj.execute('AppImageAssistant.AppImage /mnt/appimager/build /mnt/appimager/out/' + data_obj.get_name()):
print(line, end="")
|
{
"content_hash": "ff8ecacc7b0e2f74b35838b78ce6b3b6",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 135,
"avg_line_length": 36.88235294117647,
"alnum_prop": 0.6858054226475279,
"repo_name": "eloquentstore/appimager",
"id": "facf8826d7e79670f981d0de04541a5e10f3f02c",
"size": "627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cli/package.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23298"
},
{
"name": "Shell",
"bytes": "1071"
}
],
"symlink_target": ""
}
|
'''
From the original motif scanning, selects those events within a particular window of a gene and outputs the
tgm, geneids and tf ids for final analysis
'''
__author__='sara jc gosline'
__email__='sgosline@mit.edu'
import re,os,sys
from optparse import OptionParser
from collections import defaultdict
import numpy as np
progdir=os.path.dirname(sys.argv[0])
def build_annotated_tgm(closest_gene_output,distance_to_tss,logistic_score_output,fasta_file,motif_ids,makeWindow=True,tgm_file='',do_pkl=True):
'''
Takes existing tgm, and maps to gene names and TF ids within a specific window
'''
from chipsequtil import Fasta
##get fasta file events, since these are columns in the logistic_score matrix
seq_ids=Fasta.load(fasta_file,key_func=lambda x: x)
##need to get sequence mids in the order they are processed
##in the file, this is the index into the score_output file
##. ASSUMES GALAXY-formatted FASTA!!!!
seq_mids=[] ##list of FASTA regions, in their appropriate order in the file
filtered_events={}##gene name of closest gene to event within window
for k in seq_ids.keys():
vals=k.split(';')
if len(vals)==1:
vals=k.split()
if ':' in vals[0]: #bed tools used
chr,range=vals[0].split(':')
low,high=range.split('-')
mid=str(int(low)+((int(high)-int(low))/2))
seq_mids.append(chr+':'+mid)
elif 'random' not in vals[0]: #galaxy tools used
genome,chr,low,high,strand=vals[0].split('_')
mid=str(int(low)+((int(high)-int(low))/2))
seq_mids.append(chr+':'+mid)
if len(vals)==3:
filtered_events[chr+':'+mid]=vals[2]
print 'Found %d events, of which %d have gene names'%(len(seq_mids),len(filtered_events))
##this next section relies on xls
##filter events that are within distance from closest_gene_output to get gene mapping
##
filtered_fc={}##FC of events within window, in case we want to use in the future
event_indexes=[] ##
# ###open the closest_gene_output and determine
# try:
# cgo=open(closest_gene_output,'rU').readlines()
# except:
# print "Error opening file:", sys.exc_info()[0]
# print "Check to make sure file exists at %s"%(closest_gene_output)
# raise
# inds=cgo[0].strip().split('\t')
# for row in cgo[1:]:
# arr=row.strip().split('\t')
# if 'geneSymbol' in inds: #this is true if we used an xref file
# gene=arr[inds.index('geneSymbol')]
# # mid=arr[2]+':'+str(int(arr[3])+(int(arr[4])-int(arr[3]))/2)
# else: #otherwise we just gene id
# gene=arr[inds.index('knownGeneID')]
# #position mapping is different
# if 'Position' in inds: #this is for GPS
# mid='chr'+arr[inds.index('Position')]
# elif 'chrom' in inds: #this is for BED
# mid=arr[inds.index('chrom')]+':'+str(int(arr[inds.index('chromStart')])+(int(arr[inds.index('chromEnd')])-int(arr[inds.index('chromStart')]))/2)
# else: #this is for MACS
# mid=arr[inds.index('chr')]+':'+str(int(arr[inds.index('start')])+(int(arr[inds.index('end')])-int(arr[inds.index('start')]))/2)
# #print gene,mid
# dist=arr[inds.index('dist from feature')]
# try:
# sv=arr[inds.index('score')]
# except:
# try:
# sv=arr[inds.index('IPvsCTR')]
# except:
# fc=0.0
# if sv!='':
# fc=float(sv)
# else:
# next
# #check absolute distance if we're doing a window, or negative distance if we're looking upstream
# if distance_to_tss=='' or (makeWindow and np.absolute(int(dist))<int(distance_to_tss)) or int(dist)>(-1*int(distance_to_tss)):
# # filtered_events[mid]=gene #(this was out of if clause, should it be there?) 1/2
# if mid in seq_mids:
# event_indexes.append(seq_mids.index(mid))##index into fasta file value/maps to array
# ##UPDATE: moved these to within if clause - so that unrelated scores are not included
# filtered_events[mid]=gene ##gene name of event
# filtered_fc[mid]=float(fc) ##fc value of event
# # filtered_fc[mid]=float(fc) #see above, 2/2
# print 'Got '+str(len(filtered_events))+' per-gene events within '+distance_to_tss+' bp window out of '+str(len(cgo))
# print 'These map to '+str(len(event_indexes))+' regions in the FASTA file'
##get gene ids, or just use mid of sequence region
gene_names=[t for t in set(filtered_events.values())]
print gene_names[0:10]
#get gene ids for all matrices list loaded in
mi_files=motif_ids.split(',')
if len(mi_files)>0:
#open first motif name file that contains names for each element in TAMO file
all_tf_names=[a.strip() for a in open(mi_files[0],'rU').readlines()]
if len(mi_files)>1:
            #if we have additional files, check to see if names already exist
            for i,f in enumerate(mi_files):
                if i==0:
                    continue
try:
#open file and read in extra ids
newfs=[a.strip() for a in open(f,'rU').readlines()]
except:
print "Error opening file:", sys.exc_info()[0]
print "Check to make sure file exists at %s"%(f)
raise
if len(newfs)==len(all_tf_names):
#combine existing tf names with these with . delimiter....
all_tf_names=['.'.join((a,b)) for a,b in zip(all_tf_names,newfs)]
##now go through and clean up TF names
cleaned_tf_names=[]
for i,a in enumerate(all_tf_names):
tfn=set([b for b in a.split('.') if '$' not in b and b!=''])
if(len(tfn)==0):
tfn=a.split('.')
# else:
# print 'Replacing %s with %s'%(a,'.'.join(tfn))
cleaned_tf_names.append('.'.join(tfn))
all_tf_names=cleaned_tf_names
#print len(cleaned_tf_names)
##now actually map events to scores
##load motif matrix scanning output that maps matrices to regions
print 'Loading complete motif score file...'
event_scores=np.loadtxt(logistic_score_output)
print '\t...Loaded!'
    #create new tgm matrix with appropriate file name
    newmat=np.zeros((len(all_tf_names),len(gene_names)),dtype='float') ##tf x gene score matrix
if makeWindow:
distance_to_tss=distance_to_tss+'_bpWindow'
else:
distance_to_tss=distance_to_tss+'_bpUpstream'
if tgm_file=='':
tgm_file=re.sub('.txt','_'+distance_to_tss+'.tgm',os.path.basename(logistic_score_output))
if do_pkl:
pkl_file=re.sub('.tgm','.pkl',tgm_file)
else:
pkl_file=''
##sort event indexes from seq_mids that are in the filtered_events file
event_indexes.sort()
#populate matrix with greatest score attributed to that gene/tf combo
for ind,arr in enumerate(event_scores):
##name of matrix/motif
mat=all_tf_names[ind]
#tfnames=[mat]
##here we enumerate which sequences were mapped to a gene within the window
for k,val in enumerate(seq_mids):#k in event_indexes:
#here we want the event midpoint for the index
# val=seq_mids[k]
#get score for that index
score=arr[k]
#now map it to closest gene for that midpoint
cg=filtered_events[val]
fc=1.0 ##update this if we want to normalize score by fold change
            score=float(score)*float(fc) ##this should do nothing since fc is generally 1
#if len(tfnames)==1:
curscore=newmat[all_tf_names.index(mat),gene_names.index(cg)]
##updated to include maximum score!!
if np.abs(score)>np.abs(curscore):
newmat[all_tf_names.index(mat),gene_names.index(cg)]=score
#else:
# for t in tfnames:
# curscore=newmat[all_tf_names.index(t),gene_names.index(cg)]
# ##updated to include maximum score!!
# if np.abs(float(score))>np.abs(curscore):
# newmat[all_tf_names.index(t),gene_names.index(cg)]=float(score)
###save these intermediate files for debugging purposes
np.savetxt(tgm_file,newmat)
gin=re.sub('.tgm','_geneids.txt',tgm_file)
tin=re.sub('.tgm','_tfids.txt',tgm_file)
try:
open(gin,'w').writelines([g+'\n' for g in gene_names])
open(tin,'w').writelines([t+'\n' for t in all_tf_names])
except:
print "Error opening file:", sys.exc_info()[0]
print "Check to make sure file exists at %s"%(closest_gene_output)
raise
if pkl_file!='':
zipcmd='python '+os.path.join(progdir,'zipTgms.py')+' '+tgm_file+' '+tin+' '+gin+' --pkl='+pkl_file
print 'Compressing matrix file into pkl'
print zipcmd
os.system(zipcmd)
return pkl_file
else:
return tgm_file
def main():
'''
main method
'''
usage='usage: %prog [options] motif_scanning_file closest_gene_output fasta_file'
parser=OptionParser(usage=usage)
parser.add_option('--distance-to-gene',dest='distance',type='string',default='10000',help='Max distance allowed between event and closest gene')
parser.add_option('--motif-id-list',dest='motif_ids',type='string',default='',help='Comma-delimited list of files containing TF names that best map to each motif')
parser.add_option('--utilpath',default=os.path.join(progdir,'../src/'),dest='addpath',help='Destination of chipsequtil library')
parser.add_option('--outfile',default='',dest='outfile',help='Predefined output file, otherwise will create one automatically')
parser.add_option('--noPkl',default=True,action='store_false',dest='do_pkl',help='Set this flag if outfile is not in pkl form')
opts,args=parser.parse_args()
if len(args)!=3:
print usage
exit('Not enough arguments')
logistic_score_output,closest_gene_output,fasta_file=args
##do the path management
sys.path.insert(0,opts.addpath)
#sys.path.insert(0,opts.addpath+'chipsequtil')
# print sys.path
# print opts.do_pkl
res=build_annotated_tgm(closest_gene_output,opts.distance,logistic_score_output,fasta_file,opts.motif_ids,tgm_file=opts.outfile,do_pkl=opts.do_pkl)
if __name__=='__main__':
main()
|
{
"content_hash": "adeb91c5221f6797c5abd8b01c57ddfd",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 167,
"avg_line_length": 41.38996138996139,
"alnum_prop": 0.5949626865671642,
"repo_name": "agitter/OmicsIntegrator",
"id": "c00553e4b96b29ca101500fb979ff9ae359f4b94",
"size": "10721",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scripts/get_window_binding_matrix.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "2783"
},
{
"name": "Makefile",
"bytes": "1554"
},
{
"name": "Python",
"bytes": "365832"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import pickle
from datetime import timedelta
import pendulum
def test_pickle():
it = pendulum.duration(days=3, seconds=2456, microseconds=123456)
s = pickle.dumps(it)
it2 = pickle.loads(s)
assert it == it2
def test_comparison_to_timedelta():
duration = pendulum.duration(days=3)
assert duration < timedelta(days=4)
|
{
"content_hash": "e39f884b6318f3c9125125989a72356e",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 69,
"avg_line_length": 18.238095238095237,
"alnum_prop": 0.7049608355091384,
"repo_name": "sdispater/pendulum",
"id": "a97bbde6bbf525b7c4e36e01fc5c35053743128b",
"size": "383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/duration/test_behavior.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "62000"
},
{
"name": "Makefile",
"bytes": "1324"
},
{
"name": "Meson",
"bytes": "511"
},
{
"name": "Python",
"bytes": "682924"
},
{
"name": "Shell",
"bytes": "604"
}
],
"symlink_target": ""
}
|
import requests
import datetime
import dateutil
import dateutil.parser
import logging
from plugins.packagetracker.provider import Package
__author__ = "tigge"
class DHLPackage(Package):
API_URL = "https://api-eu.dhl.com/track/shipments?trackingNumber="
apikey = None
@classmethod
def get_type(cls):
return "DHL"
@classmethod
def set_apikey(cls, id):
cls.apikey = id
@staticmethod
def create_event(event):
e = DHLPackage.Event()
e.datetime = dateutil.parser.parse(event["timestamp"])
e.description = (
f"{event['location']['address']['addressLocality']}: {event['description']}"
)
return e
@classmethod
def is_package(cls, package_id):
data = cls._get_data(package_id)
if "shipments" in data and len(data["shipments"]) > 0:
return True
return False
@classmethod
def _get_url(cls, package_id):
return DHLPackage.API_URL + package_id
@classmethod
def _get_data(cls, package_id):
try:
return requests.get(
DHLPackage._get_url(package_id),
headers={
"Accept": "application/json",
"DHL-API-Key": DHLPackage.apikey,
},
).json()
except ValueError as e:
logging.exception("Exception while getting package")
return {}
def update(self):
data = self._get_data(self.id)
try:
for dhl_shipment in data["shipments"]:
self.consignor = dhl_shipment["origin"]["address"]["addressLocality"]
self.consignee = dhl_shipment["destination"]["address"][
"addressLocality"
]
last_updated = self.last_updated
for dhl_event in dhl_shipment["events"]:
event = self.create_event(dhl_event)
if event.datetime > last_updated:
last_updated = event.datetime
if event.datetime > self.last_updated:
self.on_event(event)
self.last_updated = last_updated
except Exception as e:
logging.exception("Exception while updating package")
logging.debug("Data: %r", data)
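# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the provider): the API key and the
# tracking number below are hypothetical placeholders; a real key and a live
# network connection are needed for the lookup to succeed.
if __name__ == "__main__":
    DHLPackage.set_apikey("my-dhl-api-key")
    if DHLPackage.is_package("00340434161094042557"):
        print("DHL recognises this tracking number")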
|
{
"content_hash": "e37b652cd6e80742f767b4b04309559a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 88,
"avg_line_length": 27.06896551724138,
"alnum_prop": 0.5494692144373673,
"repo_name": "Tigge/platinumshrimp",
"id": "ed1d4ede55694b92c9c2b11f57d5d275ce627587",
"size": "2355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/packagetracker/provider_dhl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96471"
}
],
"symlink_target": ""
}
|
from date_conventions import *
class OvernightIndexSwap:
''' We define the product by its:
- startDate
- endDate
- floatingLegNominal: the nominal used to compute the flows of the floating leg:
if positive the flows are received, negative paid
- fixedLegDates: the union of the start and end dates of all of the flows paid by the fixed leg (each flow has two
dates needed to computed the accrual period, the end date of the i-th flow coincides with the start
date of the i+1-th flow)
- fixedRate: the coupon paid/received in the fixed leg
        - fixedLegNominal: the nominal used to compute the flows of the fixed leg:
if positive the flows are received, negative paid
'''
def __init__(self, startDate, endDate, floatingLegNominal, fixedLegDates, fixedRate, fixedLegNominal):
# we want opposite signs for the two nominals: if one leg is paid, the other is received
        # if this is not the case, raise an error that will stop the program
        if floatingLegNominal * fixedLegNominal > 0:
            raise ValueError("Nominals must have opposite signs")
# store the input variables
self.startDate = startDate
self.endDate = endDate
self.fixedRate = fixedRate
self.fixedLegDates = fixedLegDates
self.floatingLegNominal = floatingLegNominal
self.fixedLegNominal = fixedLegNominal
# With this method we compute the value of the floating leg at the observation date of the discount curve
def npv_floating_leg(self, discountCurve):
# this formula comes from the fact that for OIS the evaluation method is still the same of
# the "old" world with just one single curve for forward rate estimation and flow discounting
floatingleg_npv = discountCurve.df(self.startDate) - discountCurve.df(self.endDate)
# We multiply the result for the nominal before returning it
return floatingleg_npv * self.floatingLegNominal
def npv_fixed_leg(self, discountCurve):
# we now evaluate the fixed leg
fixed_npv = 0
for i in range(len(self.fixedLegDates) - 1):
startPeriod = self.fixedLegDates[i]
endPeriod = self.fixedLegDates[i+1]
tau = dc_act360(startPeriod, endPeriod)
df = discountCurve.df(endPeriod)
fixed_npv = fixed_npv + df * tau * self.fixedRate
# We multiply the result for the nominal before returning it
return fixed_npv * self.fixedLegNominal
def npv(self, discountCurve):
# the npv is the sum of the floating and fixed leg values (taken with their sign)
floatingleg_npv = self.npv_floating_leg(discountCurve)
fixed_npv = self.npv_fixed_leg(discountCurve)
# we sum them (the nominal have opposite sign)
npv = fixed_npv + floatingleg_npv
return npv
# This function just makes life easier; it allows to create a standard OIS with less
# parameters because it uses some common conventions:
# - startDate: the start date of the swap
# - maturity: the maturity of the swap expressed as a number of months (2 years: 24 months)
# - fixedTenor: the frequency of the fixed leg expressed in months: semi-annual payments -> fixedTenor = 6
# Market convention is 12 months
# - nominal: the absolute value nominal of the swap (1 is 1 Eur for example)
# - swapType: a string that can be "receiver" (it means that the fixed rate is received) or payer
def buildOIS(startDate, maturity, fixedTenor, fixedRate, nominal = 1, swapType = "receiver"):
endDate = startDate + relativedelta(months = maturity)
fixedLegDates = dates_generator(fixedTenor, startDate, endDate)
if swapType == "receiver":
fixedLegNominal = nominal
floatingLegNominal = - nominal
elif swapType == "payer":
fixedLegNominal = - nominal
floatingLegNominal = nominal
else:
raise "SwapType not supported"
ois = OvernightIndexSwap(startDate, endDate, floatingLegNominal, fixedLegDates, fixedRate, fixedLegNominal)
return ois
from ir_curves import DiscountCurve
if __name__ == '__main__':
obsdate = date(2010,1,1)
pillars = [date(2011,1,1), date(2012,1,1)]
dfs = [0.9, 0.8]
dc = DiscountCurve(obsdate, pillars, dfs)
# we build an Overnight Index Swap with 1 year maturity and strike 8%
startSwap = date(2010,2,1)
maturity = 12
ois = buildOIS(startSwap, maturity, 12, 0.08)
print "Swap NPV:", ois.npv(dc)
|
{
"content_hash": "d647f5aba7f075d918b2d94ffd00f28a",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 120,
"avg_line_length": 47.578947368421055,
"alnum_prop": 0.6856194690265487,
"repo_name": "gabberthomson/fm_finpy",
"id": "79a11c17fd1550cdecc00ee90a776955e411489f",
"size": "4520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ois_products.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40348"
}
],
"symlink_target": ""
}
|
"""``lldp.py``
"""
import re
from .linux import lldp
class Tlv(object):
@staticmethod
def get_simple_tlv_row(row_name, value):
        # zero length values return an empty list, convert to ''
return {row_name: value[0] if value else ''}
@staticmethod
def get_tlv_from_list(tlvs, predicate):
# chain all the tlvs together
"""
Args:
tlvs(list): list tlvs
predicate(function): predicate function
Returns:
list
"""
return next(val for t, val in tlvs if predicate(t))
@staticmethod
def get_local_port_tlv_row(tlv):
"""
Args:
tlv(list): list or dict of the port TLV
Returns:
dict: remMan style dict
"""
row = {}
# have to use elif because of endswith substring matching
for subtype, value in tlv:
if subtype.endswith("MAC"):
row['PortIdSubtype'] = \
lldp.PortIdSubTypes.MAC_ADDRESS
elif subtype.endswith("IPv4"):
row['PortIdSubtype'] = \
lldp.PortIdSubTypes.NETWORK_ADDRESS
elif subtype.endswith("IPv6"):
row['PortIdSubtype'] = \
lldp.PortIdSubTypes.NETWORK_ADDRESS
elif subtype.startswith("Network Address Type"):
row['PortIdSubtype'] = \
lldp.PortIdSubTypes.NETWORK_ADDRESS
row['PortId'] = value
elif subtype == 'Interface Alias':
row['PortIdSubtype'] = \
lldp.PortIdSubTypes.INTERFACE_ALIAS
elif subtype == 'Port Component':
row['PortIdSubtype'] = \
lldp.PortIdSubTypes.PORT_COMPONENT
elif subtype == 'Ifname':
row['PortIdSubtype'] = \
lldp.PortIdSubTypes.INTERFACE_NAME
elif subtype == 'Local':
row['PortIdSubtype'] = \
lldp.PortIdSubTypes.LOCALLY_ASSIGNED
elif subtype == 'Agent Circuit ID':
row['PortIdSubtype'] = \
lldp.PortIdSubTypes.AGENT_CIRCUIT_ID
elif subtype == 'Bad Port ID':
# use None to indicate invalid
row['PortIdSubtype'] = None
row['PortId'] = value
return row
@staticmethod
def get_local_chassis_tlv_row(tlv):
"""
Args:
tlv(list): list or dict of the chassis TLV
Returns:
dict: row style dict
"""
row = {}
for subtype, value in tlv:
if subtype.endswith("MAC"):
row['ChassisIdSubtype'] = \
lldp.ChassisIdSubTypes.MAC_ADDRESS
elif subtype.endswith("IPv4"):
row['ChassisIdSubtype'] = \
lldp.ChassisIdSubTypes.NETWORK_ADDRESS
elif subtype.endswith("IPv6"):
row['ChassisIdSubtype'] = \
lldp.ChassisIdSubTypes.NETWORK_ADDRESS
elif subtype.startswith("Network Address Type"):
row['ChassisIdSubtype'] = \
lldp.ChassisIdSubTypes.NETWORK_ADDRESS
row['ChassisId'] = value
elif subtype == 'Chassis Component':
row['ChassisIdIfIdSubtype'] = \
lldp.ChassisIdSubTypes.CHASSIS_COMPONENT
elif subtype == 'IfAlias':
row['ChassisIdIfIdSubtype'] = \
lldp.ChassisIdSubTypes.INTERFACE_ALIAS
elif subtype == 'Port Component':
row['ChassisIdIfIdSubtype'] = \
lldp.ChassisIdSubTypes.PORT_COMPONENT
elif subtype == 'Ifname':
row['ChassisIdIfIdSubtype'] = \
lldp.ChassisIdSubTypes.INTERFACE_NAME
elif subtype == 'Local':
row['ChassisIdIfIdSubtype'] = \
lldp.ChassisIdSubTypes.LOCALLY_ASSIGNED
elif subtype == 'Bad Chassis ID':
# use None to indicate invalid
row['ChassisIdIfIdSubtype'] = None
row['ChassisId'] = value
return row
@staticmethod
def get_local_cap_tlv_row(tlv):
"""
Args:
tlv(list): list or dict of the port TLV
Returns:
dict: row style dict
"""
row = {}
# have to use elif because of endswith substring matching
for subtype, value in tlv:
cap_strings = (s.strip() for s in value.split(','))
sys_cap = sum(lldp.SYS_CAPABILITIES[c] for c in cap_strings)
if subtype == "System capabilities":
row['SysCapSupported'] = sys_cap
elif subtype == "Enabled capabilities":
row['SysCapEnabled'] = sys_cap
return row
@staticmethod
def get_port_tlv_row(tlv):
"""
Args:
tlv(list): list or dict of the port TLV
Returns:
dict: row style dict
"""
row = {}
# have to use elif because of endswith substring matching
for subtype, value in tlv:
if subtype.endswith("MAC"):
row['remPortIdSubtype'] = \
lldp.PortIdSubTypes.MAC_ADDRESS
elif subtype.endswith("IPv4"):
row['remPortIdSubtype'] = \
lldp.PortIdSubTypes.NETWORK_ADDRESS
elif subtype.endswith("IPv6"):
row['remPortIdSubtype'] = \
lldp.PortIdSubTypes.NETWORK_ADDRESS
elif subtype.startswith("Network Address Type"):
row['remPortIdSubtype'] = \
lldp.PortIdSubTypes.NETWORK_ADDRESS
row['remPortId'] = value
elif subtype == 'Interface Alias':
row['remPortIdSubtype'] = \
lldp.PortIdSubTypes.INTERFACE_ALIAS
elif subtype == 'Port Component':
row['remPortIdSubtype'] = \
lldp.PortIdSubTypes.PORT_COMPONENT
elif subtype == 'Ifname':
row['remPortIdSubtype'] = \
lldp.PortIdSubTypes.INTERFACE_NAME
elif subtype == 'Local':
row['remPortIdSubtype'] = \
lldp.PortIdSubTypes.LOCALLY_ASSIGNED
elif subtype == 'Agent Circuit ID':
row['remPortIdSubtype'] = \
lldp.PortIdSubTypes.AGENT_CIRCUIT_ID
elif subtype == 'Bad Port ID':
# use None to indicate invalid
row['remPortIdSubtype'] = None
row['remPortId'] = value
return row
@staticmethod
def get_sys_cap_tlv_row(tlv):
"""
Args:
tlv(list): list or dict of the port TLV
Returns:
dict: row style dict
"""
row = {}
# have to use elif because of endswith substring matching
for subtype, value in tlv:
cap_strings = (s.strip() for s in value.split(','))
sys_cap = sum(lldp.SYS_CAPABILITIES[c] for c in cap_strings)
if subtype == "System capabilities":
row['remSysCapSupported'] = sys_cap
elif subtype == "Enabled capabilities":
row['remSysCapEnabled'] = sys_cap
return row
@staticmethod
def get_chassis_tlv_row(tlv):
"""
Args:
tlv(list): list or dict of the chassis TLV
Returns:
dict: remMan style dict
"""
row = {}
for subtype, value in tlv:
if subtype.endswith("MAC"):
row['remChassisIdSubtype'] = \
lldp.ChassisIdSubTypes.MAC_ADDRESS
elif subtype.endswith("IPv4"):
row['remChassisIdSubtype'] = \
lldp.ChassisIdSubTypes.NETWORK_ADDRESS
elif subtype.endswith("IPv6"):
row['remChassisIdSubtype'] = \
lldp.ChassisIdSubTypes.NETWORK_ADDRESS
elif subtype.startswith("Network Address Type"):
row['remChassisIdSubtype'] = \
lldp.ChassisIdSubTypes.NETWORK_ADDRESS
row['remChassisId'] = value
elif subtype == 'Chassis Component':
row['remChassisIdIfIdSubtype'] = \
lldp.ChassisIdSubTypes.CHASSIS_COMPONENT
elif subtype == 'IfAlias':
row['remChassisIdIfIdSubtype'] = \
lldp.ChassisIdSubTypes.INTERFACE_ALIAS
elif subtype == 'Port Component':
row['remChassisIdIfIdSubtype'] = \
lldp.ChassisIdSubTypes.PORT_COMPONENT
elif subtype == 'Ifname':
row['remChassisIdIfIdSubtype'] = \
lldp.ChassisIdSubTypes.INTERFACE_NAME
elif subtype == 'Local':
row['remChassisIdIfIdSubtype'] = \
lldp.ChassisIdSubTypes.LOCALLY_ASSIGNED
elif subtype == 'Bad Chassis ID':
# use None to indicate invalid
row['remChassisIdIfIdSubtype'] = None
row['remChassisId'] = value
return row
@staticmethod
def get_mgmt_row(tlv):
"""
Args:
tlv(list): list or dict of the mgmt TLV sub-tlvs
Returns:
dict: remMan style dict
"""
row = {
# default '' for OID because it might not be present
'remManAddrOID': '',
}
for sub_tlv, value in tlv:
if sub_tlv.endswith("OID"):
row['remManAddrOID'] = value
if sub_tlv.endswith("MAC"):
row['remManAddrSubtype'] = lldp.ManAddrSubTypes.ALL802
row['remManAddr'] = value
elif sub_tlv.endswith("IPv4"):
row['remManAddrSubtype'] = lldp.ManAddrSubTypes.IPV4
row['remManAddr'] = value
elif sub_tlv.endswith("IPv6"):
row['remManAddrSubtype'] = lldp.ManAddrSubTypes.IPV6
row['remManAddr'] = value
elif sub_tlv.startswith("Network Address Type"):
# convert to int for test cases
subtype = int(re.search(r'Network Address Type (\d+)',
sub_tlv).group(1))
row['remManAddrSubtype'] = subtype
row['remManAddr'] = value
elif sub_tlv == 'Ifindex':
row['remManAddrIfSubtype'] = \
lldp.ManAddrIfSubTypes.IFINDEX
row['remManAddrIfId'] = int(value)
elif sub_tlv == 'System port number':
row['remManAddrIfSubtype'] = \
lldp.ManAddrIfSubTypes.SYS_PORT_NUM
row['remManAddrIfId'] = int(value)
elif sub_tlv == 'Unknown interface subtype':
row['remManAddrIfSubtype'] = \
lldp.ManAddrIfSubTypes.UNKNOWN
row['remManAddrIfId'] = int(value)
elif sub_tlv == 'Bad interface numbering subtype':
# use None to indicate invalid
row['remManAddrIfSubtype'] = None
row['remManAddrIfId'] = int(value)
return row
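# ---------------------------------------------------------------------------
# Usage sketch (illustrative): feeding a hypothetical management-address TLV,
# given as (subtype, value) pairs, through the parser above. Because of the
# relative import at the top of this module it only runs in package context
# (e.g. via "python -m").
if __name__ == "__main__":
    mgmt_tlv = [("MgmtIP/IPv4", "192.0.2.10"), ("Ifindex", "3")]
    row = Tlv.get_mgmt_row(mgmt_tlv)
    # row maps to the remMan-style keys, e.g.
    # {'remManAddrOID': '', 'remManAddrSubtype': lldp.ManAddrSubTypes.IPV4,
    #  'remManAddr': '192.0.2.10',
    #  'remManAddrIfSubtype': lldp.ManAddrIfSubTypes.IFINDEX, 'remManAddrIfId': 3}
    print(row)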
|
{
"content_hash": "c16abe6428f0569ecd92c5823b774f9c",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 72,
"avg_line_length": 36.060317460317464,
"alnum_prop": 0.5141297649440972,
"repo_name": "taf3/taf",
"id": "de53019b35231a3ad546ef155e0809439e8253da",
"size": "11953",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "taf/testlib/lldp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6745"
},
{
"name": "Dockerfile",
"bytes": "4185"
},
{
"name": "JavaScript",
"bytes": "1771"
},
{
"name": "Python",
"bytes": "3859799"
},
{
"name": "Shell",
"bytes": "3146"
},
{
"name": "Tcl",
"bytes": "68098"
},
{
"name": "XSLT",
"bytes": "41538"
}
],
"symlink_target": ""
}
|
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def main():
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(['tests'])
sys.exit(bool(failures))
if __name__ == '__main__':
main()
|
{
"content_hash": "a2ffe06f1085cd136cab25bbb86f53e8",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 59,
"avg_line_length": 21.555555555555557,
"alnum_prop": 0.6752577319587629,
"repo_name": "algolia/algoliasearch-django",
"id": "4318d948382c030031c94eb6e54e9dfac3ec2cd8",
"size": "411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "242"
},
{
"name": "Python",
"bytes": "82891"
}
],
"symlink_target": ""
}
|
"""Glow generative model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.models.research import glow_init_hook
from tensor2tensor.models.research import glow_ops
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
arg_scope = tf.contrib.framework.arg_scope
add_arg_scope = tf.contrib.framework.add_arg_scope
GLOW_DECODE_HPARAMS = ("identity_output=True,log_results=False,"
"decode_in_memory=True,display_decoded_images=True")
@registry.register_hparams
def glow_hparams():
"""Glow Hparams."""
hparams = common_hparams.basic_params1()
hparams.clip_grad_norm = None
hparams.weight_decay = 0.0
hparams.learning_rate_constant = 3e-4
hparams.batch_size = 32
# can be prev_level, prev_step or normal.
# see: glow_ops.merge_level_and_latent_dist
hparams.add_hparam("level_scale", "prev_level")
hparams.add_hparam("n_levels", 3)
hparams.add_hparam("n_bits_x", 8)
hparams.add_hparam("depth", 32)
# Coupling layer, additive or affine.
hparams.add_hparam("coupling", "affine")
hparams.add_hparam("coupling_width", 512)
hparams.add_hparam("top_prior", "single_conv")
# init_batch_size denotes the number of examples used for data-dependent
# initialization. A higher init_batch_size is required for training
# stability especially when hparams.batch_size is low.
hparams.add_hparam("init_batch_size", 256)
hparams.add_hparam("temperature", 1.0)
return hparams
@registry.register_model
class Glow(t2t_model.T2TModel):
"""Glow generative model.
Reference: https://arxiv.org/abs/1807.03039"""
def init_preprocess(self, features):
"""Preprocessing as per the input modality."""
return features
def preprocess(self, x):
"""Normalize x.
Args:
x: 4-D Tensor.
Returns:
x: Scaled such that x lies in-between -0.5 and 0.5
"""
n_bits_x = self.hparams.n_bits_x
n_bins = 2**n_bits_x
x = tf.cast(x, dtype=tf.float32)
if n_bits_x < 8:
x = tf.floor(x / 2 ** (8 - n_bits_x))
x = x / n_bins - 0.5
return x
@property
def temperature(self):
if self.is_predicting:
return self.hparams.temperature
return 1.0
def scale(self, x):
"""Scale x from -0.5 - 0.5 to 0 - 255."""
x = tf.where(tf.is_nan(x), tf.ones_like(x), x)
x = tf.where(tf.is_inf(x), tf.ones_like(x), x)
x = tf.clip_by_value(x, -0.5, 0.5)
x += 0.5
x = x * 2**self.hparams.n_bits_x
return tf.cast(tf.clip_by_value(x, 0, 255), dtype=tf.uint8)
@property
def is_training(self):
return self.hparams.mode == tf.estimator.ModeKeys.TRAIN
def infer(self, features, *args, **kwargs): # pylint: disable=arguments-differ
del args, kwargs
x = features["inputs"]
batch_size = common_layers.shape_list(x)[0]
features["targets"] = tf.zeros(shape=(batch_size, 1, 1, 1))
_, _ = self(features) # pylint: disable=not-callable
ops = [glow_ops.get_variable_ddi, glow_ops.actnorm]
var_scope = tf.variable_scope("glow/body", reuse=True)
# If eps=None, images are sampled from the prior.
with arg_scope(ops, init=False), var_scope:
predictions, _, _, _ = glow_ops.encoder_decoder(
"codec", self.z_sample, self.hparams, eps=None, reverse=True,
temperature=self.temperature)
return self.scale(predictions)
def create_init_batch(self, features):
"""Returns a batch of size "hparams.init_batch_size" for initialization.
Args:
features: input features.
Returns:
init_features: initialization features.
"""
train_dataset = self.hparams.problem.dataset(
tf.estimator.ModeKeys.TRAIN, hparams=self.hparams)
train_dataset = train_dataset.batch(self.hparams.init_batch_size)
train_dataset = self.init_preprocess(train_dataset)
return train_dataset.make_one_shot_iterator().get_next()
@staticmethod
def train_hooks(hook_context):
del hook_context
return [glow_init_hook.GlowInitHook()]
def top_prior(self):
"""Objective based on the prior over latent z.
Returns:
dist: instance of tfp.distributions.Normal, prior distribution.
"""
return glow_ops.top_prior(
"top_prior", self.z_top_shape, learn_prior=self.hparams.top_prior,
temperature=self.temperature)
def body(self, features):
exp_coupling = ["affine", "additive"]
if self.hparams.coupling not in exp_coupling:
raise ValueError("Expected hparams.coupling to be in %s, got %s" %
(exp_coupling, self.hparams.coupling))
if self.is_training:
init_features = self.create_init_batch(features)
init_op = self.objective_tower(init_features, init=True)
init_op = tf.Print(
init_op, [init_op], message="Triggering data-dependent init.",
first_n=20)
tf.add_to_collection("glow_init_op", init_op)
train_op = self.objective_tower(features, init=False)
return tf.zeros_like(features["targets"]), {"training": train_op}
def objective_tower(self, features, init=True):
"""Objective in terms of bits-per-pixel.
Args:
features: dict of tensors with "features" and "targets" keys.
init: Whether or not to run data-dependent init.
Returns:
objective: float, bits-per-pixel.
"""
x = features["inputs"]
    # Scale x such that the pixels lie in-between -0.5 and 0.5
x = self.preprocess(x)
x, objective = glow_ops.uniform_binning_correction(x)
# The arg_scope call ensures that the actnorm parameters are set such that
# the per-channel output activations have zero mean and unit variance
# ONLY during the first step. After that the parameters are learned
# through optimisation.
ops = [glow_ops.get_variable_ddi, glow_ops.actnorm]
with arg_scope(ops, init=init):
self.z, encoder_objective, self.eps, _, _ = glow_ops.encoder_decoder(
"codec", x, self.hparams, eps=None, reverse=False)
objective += encoder_objective
self.z_top_shape = common_layers.shape_list(self.z)
prior_dist = self.top_prior()
prior_objective = tf.reduce_sum(
prior_dist.log_prob(self.z), axis=[1, 2, 3])
self.z_sample = prior_dist.sample()
objective += prior_objective
# bits per pixel
_, h, w, c = common_layers.shape_list(x)
objective = -objective / (np.log(2) * h * w * c)
return objective
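# ---------------------------------------------------------------------------
# Illustrative only (not part of the model): converting a hypothetical total
# log-likelihood in nats into bits per pixel, mirroring the last lines of
# objective_tower above.
if __name__ == "__main__":
  log_likelihood_nats = -6500.0  # hypothetical value for a single image
  h, w, c = 32, 32, 3            # hypothetical image shape
  bits_per_pixel = -log_likelihood_nats / (np.log(2) * h * w * c)
  print(bits_per_pixel)          # roughly 3.05 bits per pixel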
|
{
"content_hash": "1aa61859ac77f4aa0cc6d6caa0234023",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 81,
"avg_line_length": 34.49738219895288,
"alnum_prop": 0.6691455456063136,
"repo_name": "mlperf/training_results_v0.5",
"id": "c3c01192b85f2158d97776844e528ebe7c2f76cf",
"size": "7195",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/models/research/glow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5720"
},
{
"name": "C++",
"bytes": "1288180"
},
{
"name": "CMake",
"bytes": "40880"
},
{
"name": "CSS",
"bytes": "32420"
},
{
"name": "Cuda",
"bytes": "1362093"
},
{
"name": "Dockerfile",
"bytes": "19488"
},
{
"name": "Go",
"bytes": "1088660"
},
{
"name": "HTML",
"bytes": "19756888"
},
{
"name": "Java",
"bytes": "45405"
},
{
"name": "JavaScript",
"bytes": "302838"
},
{
"name": "Jupyter Notebook",
"bytes": "9104667"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "Makefile",
"bytes": "3652"
},
{
"name": "Python",
"bytes": "31508548"
},
{
"name": "Scala",
"bytes": "106211"
},
{
"name": "Shell",
"bytes": "409745"
}
],
"symlink_target": ""
}
|
##====================DICE========================
import random
die = [4, 6, 8, 10, 20]
print(die)
#prompt for die
print("Choose a Dice")
userdie = int(input())
while userdie not in die:
print("Choose a correct die")
userdie = int(input())
print()
print("How many times would you like to roll the die?")
roll = int(input())
while roll > 50:
print("try a smaller number")
roll = int(input())
print()
for event in range(roll):
    dyRoll = random.randint(1, userdie)
print(f"You rolled a {dyRoll}")
|
{
"content_hash": "f4933f9cda727c01c8df24ee48011570",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 55,
"avg_line_length": 17.4375,
"alnum_prop": 0.5555555555555556,
"repo_name": "nirarin7/PythonPractice",
"id": "72fcc70f05cee0cafa8f7bdcd4e53573026c3b6b",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "die1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17956"
}
],
"symlink_target": ""
}
|
"""Plotting functions.
"""
from __future__ import division, absolute_import
import numpy as np
import pylab as pl
import seaborn as sb
sb.set(font_scale=0.7, style='white')
def recovery_performance(mixed, cost_func, true_bias, estimated_signal, true_signal, estimated_bias):
"""
Print recovery performance statistics.
Parameters
----------
mixed : numpy.ndarray, shape=(n_samples, n_features)
Corrupted signal to be cleaned.
cost_func : func
Objective function for evaluation of current bias matrix estimate.
true_bias : numpy.ndarray, shape=(n_samples, n_features)
True bias matrix.
estimated_signal : numpy.ndarray, shape=(n_samples, n_features)
Estimated signal matrix.
true_signal : numpy.ndarray, shape=(n_samples, n_features)
True signal matrix.
estimated_bias: numpy.ndarray, shape=(n_samples, n_features)
Estimated bias matrix.
Returns
-------
d : dict
Performance metrics.
"""
d = {}
error_cost_func_true_bias = cost_func(true_bias)
error_cost_func_estimated_bias = cost_func(estimated_bias)
d['Error cost function (true bias)'] = error_cost_func_true_bias
d['Error cost function (estimated bias)'] = error_cost_func_estimated_bias
divisor = np.sum(~np.isnan(mixed))
d['Number of valid values in corrupted signal'] = divisor
mean_absolute_error_true_signal = np.nansum(
np.absolute(true_signal - (mixed - true_bias))) / divisor
mean_absolute_error_estimated_signal = np.nansum(
np.absolute(true_signal - estimated_signal)) / divisor
d['Mean absolute error (true_signal)'] = mean_absolute_error_true_signal
d['Mean absolute error (estimated_signal)'] = mean_absolute_error_estimated_signal
mean_absolute_error_zeros = np.nansum(
np.absolute(true_signal - mixed)) / divisor
d['Mean absolute error (zeros)'] = mean_absolute_error_zeros
ratio_estimated_signal_to_zeros = mean_absolute_error_estimated_signal / \
mean_absolute_error_zeros
d['Ratio mean absolute error (estimated signal / zeros)'] = ratio_estimated_signal_to_zeros
return d
def show_absolute(signal, kind, unshuffled=False, unshuffle=False, map_backward=None, vmin=-4, vmax=4):
"""
Plot the absolute values of the given signal matrix.
Parameters
----------
signal : numpy.ndarray, shape=(n_samples, n_features)
True signal matrix.
kind : str, values=('Bias', 'Signal')
Type of absolute value matrix to be shown (used as annotation on plot).
unshuffled : bool
If the input data is unshuffled.
unshuffle : bool
If to unshuffle the input data.
    map_backward : dict, keys=('feature', 'sample'), values=dict
        Map from new annotation to old annotation.
vmin : int
Minimum absolute value on color scale.
vmax : int
Maximum absolute value on color scale.
"""
cmap = sb.diverging_palette(
250, 15, s=75, l=40, as_cmap=True, center="dark")
indices_x = np.arange(signal.shape[0], dtype=int)
indices_y = np.arange(signal.shape[1], dtype=int)
fig = pl.figure(figsize=(7 * (signal.shape[1] / signal.shape[0]), 7))
ax = fig.add_subplot(111)
if unshuffle:
ax.set_title('{} (unshuffled)'.format(kind))
indices_x = np.asarray([map_backward['sample'][i] for i in indices_x])
indices_y = np.asarray([map_backward['feature'][i] for i in indices_y])
signal = signal[indices_x]
signal = signal[:, indices_y]
if unshuffled:
ax.set_title('{} (unshuffled)'.format(kind))
indices_x = np.asarray([map_backward['sample'][i] for i in indices_x])
indices_y = np.asarray([map_backward['feature'][i] for i in indices_y])
else:
ax.set_title('{}'.format(kind))
ax_seaborn = sb.heatmap(signal, vmin=vmin, vmax=vmax, cmap=cmap, ax=ax, cbar_kws={
'shrink': 0.5}, xticklabels=indices_y, yticklabels=indices_x)
ax.tick_params(axis='both', which='both', length=0)
ax.set_xlabel('Features')
ax.set_ylabel('Samples')
def show_dependences(signal, pairs, space, n_pairs=5, n_points=50):
"""
Plot the signal dependences for a subset of correlated pairs.
Parameters
----------
signal : numpy.ndarray, shape=(n_samples, n_features)
True signal matrix.
pairs : dict, keys=('feature', 'sample'), values=numpy.ndarray, shape=(n, 2)
Correlated pair indices.
space : str, values=('feature', 'sample')
Feature or sample space.
n_pairs : int
Number of correlated pairs to show.
n_points : int
Number of data point to show.
"""
cmap = sb.diverging_palette(250, 15, s=75, l=40, n=10, center="dark")
if space == 'feature':
shape = signal.T.shape
if space == 'sample':
shape = signal.shape
pairs = pairs[space]
for n, i in enumerate(np.random.choice(np.arange(len(pairs), dtype=int), min(n_pairs, len(pairs)), replace=False)):
j = np.random.choice(np.arange(shape[1], dtype=int), min(
n_points, shape[1]), replace=False)
if space == 'sample':
grid = sb.jointplot(signal[np.atleast_2d(pairs[i][1]), j], signal[np.atleast_2d(
pairs[i][0]), j], ylim=(-4, 4), xlim=(-4, 4), alpha=0.6, size=5, stat_func=None, color='black')
grid.set_axis_labels('Sample {}'.format(
pairs[i][1]), 'Sample {}'.format(pairs[i][0]))
if space == 'feature':
grid = sb.jointplot(signal[j[:, None], pairs[i][1]], signal[j[:, None], pairs[i][0]], ylim=(
-4, 4), xlim=(-4, 4), alpha=0.6, size=5, stat_func=None, color='black')
grid.set_axis_labels('Feature {}'.format(
pairs[i][1]), 'Feature {}'.format(pairs[i][0]))
pl.setp(grid.ax_marg_y.patches, color=cmap[2])
pl.setp(grid.ax_marg_x.patches, color=cmap[-2])
def show_recovery(mixed, guess_X, true_signal, estimated_signal, true_pairs, estimated_pairs, true_stds, estimated_stds, true_directions, estimated_directions, n_pairs=5, n_points=50):
"""
Plot the signal dependences for a subset of correlated pairs overlayed with the estimated and true values.
Parameters
----------
mixed : numpy.ndarray, shape=(n_samples, n_features)
Corrupted signal to be cleaned.
guess_X : numpy.ndarray, shape=(n_samples, n_features)
Initial guess used for the final solution.
true_signal : numpy.ndarray, shape=(n_samples, n_features)
True signal matrix.
estimated_signal : numpy.ndarray, shape=(n_samples, n_features)
Estimated signal matrix.
true_pairs : numpy.ndarray, shape=(n, 2)
True correlated pairs.
estimated_pairs : numpy.ndarray, shape=(n, 2)
Estimated correlated pairs.
true_stds : numpy.ndarray, len=n
True standard deviations.
estimated_stds : numpy.ndarray, len=n
Estimated standard deviations.
true_directions : numpy.ndarray, len=n
True directions.
estimated_directions : numpy.ndarray, len=n
Estimated directions.
n_pairs : int
Number of correlated pairs to show.
n_points : int
Number of data point to show.
"""
def pair_index(pairs, pair):
index = np.where(np.all(pairs == pair, axis=1))
try:
index = index[0][0]
except IndexError:
index = None
return index
fig = pl.figure(figsize=(5, 5 * n_pairs))
pairs = np.vstack([true_pairs, estimated_pairs])
np.random.shuffle(pairs)
    for i in range(n_pairs):
        ax = fig.add_subplot(n_pairs, 1, i + 1)
        for j in range(n_points):
ax.plot(mixed[pairs[i][1]][j], mixed[pairs[i][0]]
[j], 'o', color='red', alpha=0.6)
ax.plot(estimated_signal[pairs[i][1]][j],
estimated_signal[pairs[i][0]][j], 'D', color='blue', alpha=0.6)
ax.plot(guess_X[pairs[i][1]][j], guess_X[pairs[i][0]]
[j], 'o', color='brown', alpha=0.6)
ax.plot(true_signal[pairs[i][1]][j], true_signal[pairs[i]
[0]][j], 'o', color='green', alpha=0.6)
ax.plot([true_signal[pairs[i][1]][j], mixed[pairs[i][1]][j]], [
true_signal[pairs[i][0]][j], mixed[pairs[i][0]][j]], '-', color='red', alpha=0.6)
ax.plot([true_signal[pairs[i][1]][j], estimated_signal[pairs[i][1]][j]], [
true_signal[pairs[i][0]][j], estimated_signal[pairs[i][0]][j]], '-', color='blue', alpha=0.6)
if pairs[i] in true_pairs:
direction = true_directions[pair_index(true_pairs, pairs[i])]
std_b, std_a = true_stds[pair_index(true_pairs, pairs[i])]
std_b = std_b * -1 * direction
m = -std_b / float(std_a)
ax.plot(list(ax.get_xlim()), [
m * p + 0.0 for p in ax.get_xlim()], '-', color='orange', alpha=0.6)
if pairs[i] in estimated_pairs:
direction = estimated_directions[pair_index(
estimated_pairs, pairs[i])]
std_b, std_a = estimated_stds[pair_index(
estimated_pairs, pairs[i])]
std_b = std_b * -1 * direction
m = -std_b / float(std_a)
ax.plot(list(ax.get_xlim()), [
m * p + 0.0 for p in ax.get_xlim()], '--', color='black', alpha=0.6)
sb.despine()
ax.set_xlabel('Sample {}'.format(pairs[i][1]))
ax.set_ylabel('Sample {}'.format(pairs[i][0]))
ax.set_ylim(-4, 4)
ax.set_xlim(-4, 4)
def show_independences(signal, pairs, space, n_pairs=5, n_points=50):
"""
Plot the signal dependences for a subset of uncorrelated pairs.
Parameters
----------
signal : numpy.ndarray, shape=(n_samples, n_features)
True signal matrix.
pairs : dict, keys=('feature', 'sample'), values=numpy.ndarray, shape=(n, 2)
Correlated pair indices.
space : str, values=('feature', 'sample')
Feature or sample space.
n_pairs : int
Number of correlated pairs to show.
n_points : int
Number of data point to show.
"""
if space == 'feature':
shape = signal.T.shape
if space == 'sample':
shape = signal.shape
true_pairs = set()
for pair in pairs[space]:
true_pairs.add((pair[0], pair[1]))
true_pairs.add((pair[1], pair[0]))
all_pairs = set()
    for i in range(shape[0]):
for j in range(shape[0]):
all_pairs.add((i, j))
all_pairs.add((j, i))
identical = set([(i, i) for i in range(shape[0])])
non_pairs = all_pairs - true_pairs - identical
pairs = {space: np.asarray(list(non_pairs), dtype=int)}
show_dependences(signal, pairs, space, n_pairs=n_pairs, n_points=n_points)
def show_dependence_structure(correlations, space, unshuffled=False, map_backward=None):
"""
Plot a correlation matrix.
Parameters
----------
correlations : dict, keys=('feature', 'sample'), values=numpy.ndarray, shape=(n_samples, n_samples) or (n_features, n_features)
Correlation matrix.
space : str, values=('feature', 'sample')
Feature or sample space.
unshuffled : bool
If the input data is unshuffled.
    map_backward : dict, keys=('feature', 'sample'), values=dict
        Map from new annotation to old annotation.
"""
cmap = sb.diverging_palette(
250, 15, s=75, l=40, as_cmap=True, center="dark")
indices = np.arange(correlations[space].shape[0], dtype=int)
if space == 'feature':
size = 7 * (correlations['feature'].shape[0] /
correlations['sample'].shape[0])
if space == 'sample':
size = 7
fig = pl.figure(figsize=(size, size))
ax = fig.add_subplot(111)
if unshuffled:
ax.set_title('Correlations (unshuffled)')
indices = np.asarray([map_backward[space][i] for i in indices])
else:
ax.set_title('Correlations')
sb.heatmap(correlations[space], cmap=cmap, vmin=-1, vmax=1, square=True,
ax=ax, cbar_kws={'shrink': 0.5}, xticklabels=indices, yticklabels=indices)
if space == 'feature':
ax.set_xlabel('Features')
ax.set_ylabel('Features')
if space == 'sample':
ax.set_xlabel('Samples')
ax.set_ylabel('Samples')
def show_threshold(correlations, threshold, space):
"""
Plot the number of estimated pairs at a particular correlation threshold.
Parameters
----------
correlations : dict, keys=('feature', 'sample'), values=numpy.ndarray, shape=(n_samples, n_samples) or (n_features, n_features)
Correlation matrix.
threshold : float
Correlation threshold at which to cut-off (used to draw vertical line)
space : str, values=('feature', 'sample')
Feature or sample space.
"""
cmap = sb.diverging_palette(250, 15, s=75, l=40, n=10, center="dark")
fig = pl.figure(figsize=(5, 5))
ax = fig.add_subplot(111)
trimmed = np.trim_zeros(
np.sort(np.tril(np.absolute(correlations[space]), -1).ravel()))
ax.set_xlabel('Threshold')
ax.set_ylabel('# pairs')
x = trimmed
y = np.arange(1, len(trimmed) + 1)
ax.plot(x, y[::-1], '-', alpha=0.8, color='black')
ax.axvline(threshold, min(x), max(x), linestyle='dashed', color=cmap[2])
sb.despine()
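# ---------------------------------------------------------------------------
# Usage sketch (illustrative): random data only to show the expected input
# shape of the plotting helpers above; 'sample'-space correlations are a
# square samples-by-samples matrix.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.randn(20, 50)                      # 20 samples x 50 features
    correlations = {'sample': np.corrcoef(X)}  # shape (20, 20)
    show_threshold(correlations, threshold=0.5, space='sample')
    pl.show()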
|
{
"content_hash": "a65c3f8932cdef7553051884dcd63a82",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 184,
"avg_line_length": 41.34662576687116,
"alnum_prop": 0.5987832925291193,
"repo_name": "a378ec99/bcn",
"id": "f328a4d7eb96af18ec75a580ac3c91b56537178d",
"size": "13479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bcn/utils/visualization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "866054"
},
{
"name": "Python",
"bytes": "119177"
}
],
"symlink_target": ""
}
|
from typing import Optional
from appium.options.common.supports_capabilities import SupportsCapabilities
VERBOSITY = 'verbosity'
class VerbosityOption(SupportsCapabilities):
@property
def verbosity(self) -> Optional[str]:
"""
The verbosity level of driver logging.
"""
return self.get_capability(VERBOSITY)
@verbosity.setter
def verbosity(self, value: str) -> None:
"""
The verbosity level of driver logging.
By default, minimum verbosity is applied.
Either 'debug' or 'trace'.
"""
self.set_capability(VERBOSITY, value)
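# ---------------------------------------------------------------------------
# Usage sketch (illustrative): VerbosityOption is a mixin; the tiny stand-in
# below only supplies the capability store that a concrete options class would
# normally provide, assuming SupportsCapabilities requires nothing beyond
# get_capability/set_capability.
if __name__ == '__main__':
    class _DemoOptions(VerbosityOption):
        def __init__(self) -> None:
            self._caps = {}
        def get_capability(self, name):
            return self._caps.get(name)
        def set_capability(self, name, value):
            self._caps[name] = value
            return self
    options = _DemoOptions()
    options.verbosity = 'debug'
    print(options.verbosity)  # -> 'debug'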
|
{
"content_hash": "07df304dd0ec51f878d518e37b3d4ff8",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 76,
"avg_line_length": 27,
"alnum_prop": 0.6570048309178744,
"repo_name": "appium/python-client",
"id": "9888ddf2d68c35a71ff60443c80bb122eeb99d8c",
"size": "1409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appium/options/gecko/verbosity_option.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "835"
},
{
"name": "Python",
"bytes": "801497"
},
{
"name": "Shell",
"bytes": "3195"
}
],
"symlink_target": ""
}
|
"""
wakatime-cli
~~~~~~~~~~~~
Command-line entry point.
:copyright: (c) 2013 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
import wakatime
if __name__ == '__main__':
sys.exit(wakatime.main(sys.argv))
|
{
"content_hash": "ed03421876fdaff313ecf5d2148d3cef",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 62,
"avg_line_length": 19.36842105263158,
"alnum_prop": 0.6277173913043478,
"repo_name": "AppVentus/AvTime-client",
"id": "8552075a5f144b602917afffea98c42b8265df29",
"size": "392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/wakatime/wakatime-cli.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "104443"
},
{
"name": "Python",
"bytes": "5029709"
}
],
"symlink_target": ""
}
|
import os
import time
import datetime
import json
# parse inputs
print os.environ["TEXT"]
|
{
"content_hash": "e2f9d1bb695ddefbc772aed09f26094c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 24,
"avg_line_length": 11.375,
"alnum_prop": 0.7802197802197802,
"repo_name": "graboskyc/BasicCloudShellRobot",
"id": "84591dc698f0ab8316b37d6c81607aafc8e34e2a",
"size": "91",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CSScripts/Repeat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21029"
},
{
"name": "RobotFramework",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy.orm import exc as sa_exc
from neutron.common import exceptions as exc
from neutron.db import api as db_api
from neutron.db import model_base
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_tunnel
LOG = log.getLogger(__name__)
VXLAN_UDP_PORT = 4789
MAX_VXLAN_VNI = 16777215
vxlan_opts = [
cfg.ListOpt('vni_ranges',
default=[],
help=_("Comma-separated list of <vni_min>:<vni_max> tuples "
"enumerating ranges of VXLAN VNI IDs that are "
"available for tenant network allocation")),
cfg.StrOpt('vxlan_group',
help=_("Multicast group for VXLAN. If unset, disables VXLAN "
"multicast mode.")),
]
cfg.CONF.register_opts(vxlan_opts, "ml2_type_vxlan")
class VxlanAllocation(model_base.BASEV2):
__tablename__ = 'ml2_vxlan_allocations'
vxlan_vni = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False, default=False)
class VxlanEndpoints(model_base.BASEV2):
"""Represents tunnel endpoint in RPC mode."""
__tablename__ = 'ml2_vxlan_endpoints'
ip_address = sa.Column(sa.String(64), primary_key=True)
udp_port = sa.Column(sa.Integer, primary_key=True, nullable=False,
autoincrement=False)
def __repr__(self):
return "<VxlanTunnelEndpoint(%s)>" % self.ip_address
class VxlanTypeDriver(type_tunnel.TunnelTypeDriver):
def get_type(self):
return p_const.TYPE_VXLAN
def initialize(self):
self.vxlan_vni_ranges = []
self._parse_tunnel_ranges(
cfg.CONF.ml2_type_vxlan.vni_ranges,
self.vxlan_vni_ranges,
p_const.TYPE_VXLAN
)
self._sync_vxlan_allocations()
def reserve_provider_segment(self, session, segment):
segmentation_id = segment.get(api.SEGMENTATION_ID)
with session.begin(subtransactions=True):
try:
alloc = (session.query(VxlanAllocation).
filter_by(vxlan_vni=segmentation_id).
with_lockmode('update').
one())
if alloc.allocated:
raise exc.TunnelIdInUse(tunnel_id=segmentation_id)
LOG.debug(_("Reserving specific vxlan tunnel %s from pool"),
segmentation_id)
alloc.allocated = True
except sa_exc.NoResultFound:
LOG.debug(_("Reserving specific vxlan tunnel %s outside pool"),
segmentation_id)
alloc = VxlanAllocation(vxlan_vni=segmentation_id)
alloc.allocated = True
session.add(alloc)
def allocate_tenant_segment(self, session):
with session.begin(subtransactions=True):
alloc = (session.query(VxlanAllocation).
filter_by(allocated=False).
with_lockmode('update').
first())
if alloc:
LOG.debug(_("Allocating vxlan tunnel vni %(vxlan_vni)s"),
{'vxlan_vni': alloc.vxlan_vni})
alloc.allocated = True
return {api.NETWORK_TYPE: p_const.TYPE_VXLAN,
api.PHYSICAL_NETWORK: None,
api.SEGMENTATION_ID: alloc.vxlan_vni}
def release_segment(self, session, segment):
vxlan_vni = segment[api.SEGMENTATION_ID]
with session.begin(subtransactions=True):
try:
alloc = (session.query(VxlanAllocation).
filter_by(vxlan_vni=vxlan_vni).
with_lockmode('update').
one())
alloc.allocated = False
for low, high in self.vxlan_vni_ranges:
if low <= vxlan_vni <= high:
LOG.debug(_("Releasing vxlan tunnel %s to pool"),
vxlan_vni)
break
else:
session.delete(alloc)
LOG.debug(_("Releasing vxlan tunnel %s outside pool"),
vxlan_vni)
except sa_exc.NoResultFound:
LOG.warning(_("vxlan_vni %s not found"), vxlan_vni)
def _sync_vxlan_allocations(self):
"""
Synchronize vxlan_allocations table with configured tunnel ranges.
"""
# determine current configured allocatable vnis
vxlan_vnis = set()
for tun_min, tun_max in self.vxlan_vni_ranges:
if tun_max + 1 - tun_min > MAX_VXLAN_VNI:
LOG.error(_("Skipping unreasonable VXLAN VNI range "
"%(tun_min)s:%(tun_max)s"),
{'tun_min': tun_min, 'tun_max': tun_max})
else:
vxlan_vnis |= set(xrange(tun_min, tun_max + 1))
session = db_api.get_session()
with session.begin(subtransactions=True):
# remove from table unallocated tunnels not currently allocatable
allocs = session.query(VxlanAllocation).with_lockmode("update")
for alloc in allocs:
try:
# see if tunnel is allocatable
vxlan_vnis.remove(alloc.vxlan_vni)
except KeyError:
# it's not allocatable, so check if its allocated
if not alloc.allocated:
# it's not, so remove it from table
LOG.debug(_("Removing tunnel %s from pool"),
alloc.vxlan_vni)
session.delete(alloc)
# add missing allocatable tunnels to table
for vxlan_vni in sorted(vxlan_vnis):
alloc = VxlanAllocation(vxlan_vni=vxlan_vni)
session.add(alloc)
def get_vxlan_allocation(self, session, vxlan_vni):
with session.begin(subtransactions=True):
return session.query(VxlanAllocation).filter_by(
vxlan_vni=vxlan_vni).first()
def get_endpoints(self):
"""Get every vxlan endpoints from database."""
LOG.debug(_("get_vxlan_endpoints() called"))
session = db_api.get_session()
with session.begin(subtransactions=True):
vxlan_endpoints = session.query(VxlanEndpoints)
return [{'ip_address': vxlan_endpoint.ip_address,
'udp_port': vxlan_endpoint.udp_port}
for vxlan_endpoint in vxlan_endpoints]
def add_endpoint(self, ip, udp_port=VXLAN_UDP_PORT):
LOG.debug(_("add_vxlan_endpoint() called for ip %s"), ip)
session = db_api.get_session()
with session.begin(subtransactions=True):
try:
vxlan_endpoint = (session.query(VxlanEndpoints).
filter_by(ip_address=ip).
with_lockmode('update').one())
except sa_exc.NoResultFound:
vxlan_endpoint = VxlanEndpoints(ip_address=ip,
udp_port=udp_port)
session.add(vxlan_endpoint)
return vxlan_endpoint
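# Illustrative sketch (not part of the original driver): how a vni_ranges
# setting expands into the allocatable VNI set that _sync_vxlan_allocations()
# reconciles against the ml2_vxlan_allocations table. Values are examples only.
#
# cfg.CONF.set_override('vni_ranges', ['1000:1004', '2000:2001'],
#                       group='ml2_type_vxlan')
# # After initialize(), vxlan_vni_ranges == [(1000, 1004), (2000, 2001)] and the
# # allocatable set is {1000, 1001, 1002, 1003, 1004, 2000, 2001}; rows for
# # these VNIs are added, while unallocated rows outside the ranges are removed.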
|
{
"content_hash": "bcc5da96e6a99f94a9cf408886940116",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 79,
"avg_line_length": 40.20855614973262,
"alnum_prop": 0.5498071552068095,
"repo_name": "subramani95/neutron",
"id": "3e5d4756791f875b8e98588c99d5f5fafa6ea3f1",
"size": "8204",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/plugins/ml2/drivers/type_vxlan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""Test the flow classes."""
import asyncio
from unittest.mock import patch
import pytest
import voluptuous as vol
from homeassistant import config_entries, data_entry_flow
from homeassistant.core import HomeAssistant
from homeassistant.util.decorator import Registry
from tests.common import async_capture_events
@pytest.fixture
def manager():
"""Return a flow manager."""
handlers = Registry()
entries = []
class FlowManager(data_entry_flow.FlowManager):
"""Test flow manager."""
async def async_create_flow(self, handler_key, *, context, data):
"""Test create flow."""
handler = handlers.get(handler_key)
if handler is None:
raise data_entry_flow.UnknownHandler
flow = handler()
flow.init_step = context.get("init_step", "init")
return flow
async def async_finish_flow(self, flow, result):
"""Test finish flow."""
if result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
result["source"] = flow.context.get("source")
entries.append(result)
return result
mgr = FlowManager(None)
mgr.mock_created_entries = entries
mgr.mock_reg_handler = handlers.register
return mgr
async def test_configure_reuses_handler_instance(manager):
"""Test that we reuse instances."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
handle_count = 0
async def async_step_init(self, user_input=None):
self.handle_count += 1
return self.async_show_form(
errors={"base": str(self.handle_count)}, step_id="init"
)
form = await manager.async_init("test")
assert form["errors"]["base"] == "1"
form = await manager.async_configure(form["flow_id"])
assert form["errors"]["base"] == "2"
assert manager.async_progress() == [
{
"flow_id": form["flow_id"],
"handler": "test",
"step_id": "init",
"context": {},
}
]
assert len(manager.mock_created_entries) == 0
async def test_configure_two_steps(manager):
"""Test that we reuse instances."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 1
async def async_step_first(self, user_input=None):
if user_input is not None:
self.init_data = user_input
return await self.async_step_second()
return self.async_show_form(step_id="first", data_schema=vol.Schema([str]))
async def async_step_second(self, user_input=None):
if user_input is not None:
return self.async_create_entry(
title="Test Entry", data=self.init_data + user_input
)
return self.async_show_form(step_id="second", data_schema=vol.Schema([str]))
form = await manager.async_init("test", context={"init_step": "first"})
with pytest.raises(vol.Invalid):
form = await manager.async_configure(form["flow_id"], "INCORRECT-DATA")
form = await manager.async_configure(form["flow_id"], ["INIT-DATA"])
form = await manager.async_configure(form["flow_id"], ["SECOND-DATA"])
assert form["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(manager.async_progress()) == 0
assert len(manager.mock_created_entries) == 1
result = manager.mock_created_entries[0]
assert result["handler"] == "test"
assert result["data"] == ["INIT-DATA", "SECOND-DATA"]
async def test_show_form(manager):
"""Test that we can show a form."""
schema = vol.Schema({vol.Required("username"): str, vol.Required("password"): str})
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
async def async_step_init(self, user_input=None):
return self.async_show_form(
step_id="init",
data_schema=schema,
errors={"username": "Should be unique."},
)
form = await manager.async_init("test")
assert form["type"] == data_entry_flow.RESULT_TYPE_FORM
assert form["data_schema"] is schema
assert form["errors"] == {"username": "Should be unique."}
async def test_abort_removes_instance(manager):
"""Test that abort removes the flow from progress."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
is_new = True
async def async_step_init(self, user_input=None):
old = self.is_new
self.is_new = False
return self.async_abort(reason=str(old))
form = await manager.async_init("test")
assert form["reason"] == "True"
assert len(manager.async_progress()) == 0
assert len(manager.mock_created_entries) == 0
form = await manager.async_init("test")
assert form["reason"] == "True"
assert len(manager.async_progress()) == 0
assert len(manager.mock_created_entries) == 0
async def test_create_saves_data(manager):
"""Test creating a config entry."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 5
async def async_step_init(self, user_input=None):
return self.async_create_entry(title="Test Title", data="Test Data")
await manager.async_init("test")
assert len(manager.async_progress()) == 0
assert len(manager.mock_created_entries) == 1
entry = manager.mock_created_entries[0]
assert entry["version"] == 5
assert entry["handler"] == "test"
assert entry["title"] == "Test Title"
assert entry["data"] == "Test Data"
assert entry["source"] is None
async def test_discovery_init_flow(manager):
"""Test a flow initialized by discovery."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 5
async def async_step_init(self, info):
return self.async_create_entry(title=info["id"], data=info)
data = {"id": "hello", "token": "secret"}
await manager.async_init(
"test", context={"source": config_entries.SOURCE_DISCOVERY}, data=data
)
assert len(manager.async_progress()) == 0
assert len(manager.mock_created_entries) == 1
entry = manager.mock_created_entries[0]
assert entry["version"] == 5
assert entry["handler"] == "test"
assert entry["title"] == "hello"
assert entry["data"] == data
assert entry["source"] == config_entries.SOURCE_DISCOVERY
async def test_finish_callback_change_result_type(hass):
"""Test finish callback can change result type."""
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 1
async def async_step_init(self, input):
"""Return init form with one input field 'count'."""
if input is not None:
return self.async_create_entry(title="init", data=input)
return self.async_show_form(
step_id="init", data_schema=vol.Schema({"count": int})
)
class FlowManager(data_entry_flow.FlowManager):
async def async_create_flow(self, handler_name, *, context, data):
"""Create a test flow."""
return TestFlow()
async def async_finish_flow(self, flow, result):
"""Redirect to init form if count <= 1."""
if result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
if result["data"] is None or result["data"].get("count", 0) <= 1:
return flow.async_show_form(
step_id="init", data_schema=vol.Schema({"count": int})
)
else:
result["result"] = result["data"]["count"]
return result
manager = FlowManager(hass)
result = await manager.async_init("test")
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await manager.async_configure(result["flow_id"], {"count": 0})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
assert "result" not in result
result = await manager.async_configure(result["flow_id"], {"count": 2})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"] == 2
async def test_external_step(hass, manager):
"""Test external step logic."""
manager.hass = hass
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 5
data = None
async def async_step_init(self, user_input=None):
if not user_input:
return self.async_external_step(
step_id="init", url="https://example.com"
)
self.data = user_input
return self.async_external_step_done(next_step_id="finish")
async def async_step_finish(self, user_input=None):
return self.async_create_entry(title=self.data["title"], data=self.data)
events = async_capture_events(
hass, data_entry_flow.EVENT_DATA_ENTRY_FLOW_PROGRESSED
)
result = await manager.async_init("test")
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert len(manager.async_progress()) == 1
assert len(manager.async_progress_by_handler("test")) == 1
assert manager.async_get(result["flow_id"])["handler"] == "test"
# Mimic external step
# Called by integrations: `hass.config_entries.flow.async_configure(…)`
result = await manager.async_configure(result["flow_id"], {"title": "Hello"})
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP_DONE
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data == {
"handler": "test",
"flow_id": result["flow_id"],
"refresh": True,
}
    # Frontend refreshes the flow
result = await manager.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "Hello"
async def test_show_progress(hass, manager):
"""Test show progress logic."""
manager.hass = hass
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 5
data = None
task_one_done = False
async def async_step_init(self, user_input=None):
if not user_input:
if not self.task_one_done:
self.task_one_done = True
progress_action = "task_one"
else:
progress_action = "task_two"
return self.async_show_progress(
step_id="init",
progress_action=progress_action,
)
self.data = user_input
return self.async_show_progress_done(next_step_id="finish")
async def async_step_finish(self, user_input=None):
return self.async_create_entry(title=self.data["title"], data=self.data)
events = async_capture_events(
hass, data_entry_flow.EVENT_DATA_ENTRY_FLOW_PROGRESSED
)
result = await manager.async_init("test")
assert result["type"] == data_entry_flow.RESULT_TYPE_SHOW_PROGRESS
assert result["progress_action"] == "task_one"
assert len(manager.async_progress()) == 1
assert len(manager.async_progress_by_handler("test")) == 1
assert manager.async_get(result["flow_id"])["handler"] == "test"
# Mimic task one done and moving to task two
# Called by integrations: `hass.config_entries.flow.async_configure(…)`
result = await manager.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_SHOW_PROGRESS
assert result["progress_action"] == "task_two"
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data == {
"handler": "test",
"flow_id": result["flow_id"],
"refresh": True,
}
# Mimic task two done and continuing step
# Called by integrations: `hass.config_entries.flow.async_configure(…)`
result = await manager.async_configure(result["flow_id"], {"title": "Hello"})
assert result["type"] == data_entry_flow.RESULT_TYPE_SHOW_PROGRESS_DONE
await hass.async_block_till_done()
assert len(events) == 2
assert events[1].data == {
"handler": "test",
"flow_id": result["flow_id"],
"refresh": True,
}
# Frontend refreshes the flow
result = await manager.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "Hello"
async def test_abort_flow_exception(manager):
"""Test that the AbortFlow exception works."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
async def async_step_init(self, user_input=None):
raise data_entry_flow.AbortFlow("mock-reason", {"placeholder": "yo"})
form = await manager.async_init("test")
assert form["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert form["reason"] == "mock-reason"
assert form["description_placeholders"] == {"placeholder": "yo"}
async def test_initializing_flows_canceled_on_shutdown(hass, manager):
"""Test that initializing flows are canceled on shutdown."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
async def async_step_init(self, user_input=None):
await asyncio.sleep(1)
task = asyncio.create_task(manager.async_init("test"))
await hass.async_block_till_done()
await manager.async_shutdown()
with pytest.raises(asyncio.exceptions.CancelledError):
await task
async def test_init_unknown_flow(manager):
"""Test that UnknownFlow is raised when async_create_flow returns None."""
with pytest.raises(data_entry_flow.UnknownFlow), patch.object(
manager, "async_create_flow", return_value=None
):
await manager.async_init("test")
async def test_async_get_unknown_flow(manager):
"""Test that UnknownFlow is raised when async_get is called with a flow_id that does not exist."""
with pytest.raises(data_entry_flow.UnknownFlow):
await manager.async_get("does_not_exist")
async def test_async_has_matching_flow(
hass: HomeAssistant, manager: data_entry_flow.FlowManager
):
"""Test we can check for matching flows."""
manager.hass = hass
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 5
async def async_step_init(self, user_input=None):
return self.async_show_progress(
step_id="init",
progress_action="task_one",
)
result = await manager.async_init(
"test",
context={"source": config_entries.SOURCE_HOMEKIT},
data={"properties": {"id": "aa:bb:cc:dd:ee:ff"}},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_SHOW_PROGRESS
assert result["progress_action"] == "task_one"
assert len(manager.async_progress()) == 1
assert len(manager.async_progress_by_handler("test")) == 1
assert manager.async_get(result["flow_id"])["handler"] == "test"
assert (
manager.async_has_matching_flow(
"test",
{"source": config_entries.SOURCE_HOMEKIT},
{"properties": {"id": "aa:bb:cc:dd:ee:ff"}},
)
is True
)
assert (
manager.async_has_matching_flow(
"test",
{"source": config_entries.SOURCE_SSDP},
{"properties": {"id": "aa:bb:cc:dd:ee:ff"}},
)
is False
)
assert (
manager.async_has_matching_flow(
"other",
{"source": config_entries.SOURCE_HOMEKIT},
{"properties": {"id": "aa:bb:cc:dd:ee:ff"}},
)
is False
)
async def test_move_to_unknown_step_raises_and_removes_from_in_progress(manager):
"""Test that moving to an unknown step raises and removes the flow from in progress."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 1
with pytest.raises(data_entry_flow.UnknownStep):
await manager.async_init("test", context={"init_step": "does_not_exist"})
assert manager.async_progress() == []
async def test_configure_raises_unknown_flow_if_not_in_progress(manager):
"""Test configure raises UnknownFlow if the flow is not in progress."""
with pytest.raises(data_entry_flow.UnknownFlow):
await manager.async_configure("wrong_flow_id")
async def test_abort_raises_unknown_flow_if_not_in_progress(manager):
"""Test abort raises UnknownFlow if the flow is not in progress."""
with pytest.raises(data_entry_flow.UnknownFlow):
await manager.async_abort("wrong_flow_id")
@pytest.mark.parametrize(
"menu_options",
(["target1", "target2"], {"target1": "Target 1", "target2": "Target 2"}),
)
async def test_show_menu(hass, manager, menu_options):
"""Test show menu."""
manager.hass = hass
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 5
data = None
task_one_done = False
async def async_step_init(self, user_input=None):
return self.async_show_menu(
step_id="init",
menu_options=menu_options,
description_placeholders={"name": "Paulus"},
)
async def async_step_target1(self, user_input=None):
return self.async_show_form(step_id="target1")
async def async_step_target2(self, user_input=None):
return self.async_show_form(step_id="target2")
result = await manager.async_init("test")
assert result["type"] == data_entry_flow.RESULT_TYPE_MENU
assert result["menu_options"] == menu_options
assert result["description_placeholders"] == {"name": "Paulus"}
assert len(manager.async_progress()) == 1
assert len(manager.async_progress_by_handler("test")) == 1
assert manager.async_get(result["flow_id"])["handler"] == "test"
# Mimic picking a step
result = await manager.async_configure(
result["flow_id"], {"next_step_id": "target1"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "target1"
|
{
"content_hash": "e5b42d919b5bc187e4ff9260c1f3dbde",
"timestamp": "",
"source": "github",
"line_count": 532,
"max_line_length": 102,
"avg_line_length": 34.87218045112782,
"alnum_prop": 0.6177231565329884,
"repo_name": "toddeye/home-assistant",
"id": "18d5469a1621cb644242c58e9171bafadcdc055c",
"size": "18558",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/test_data_entry_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""
==========================
kikola.contrib.basicsearch
==========================
Application to lightweight search over models, existed in your project.
Installation
============
1. Add ``kikola.contrib.basicsearch`` to your project's ``settings``
``INSTALLED_APPS`` var.
2. Set up ``SEARCH_MODELS`` var in your project's ``settings`` module. (see
default config for ``SEARCH_MODELS`` below_)
3. Include ``kikola.contrib.basicsearch.urls`` in your project's
``ROOT_URLCONF`` module::
from django.conf.urls.defaults import *
urlpatterns = patterns('',
(r'^search/', include('kikola.contrib.basicsearch.urls')),
)
4. Go to search url and enjoy :)
.. _below: `SEARCH_MODELS`_
Configuration
=============
You can customize the ``basicsearch`` application with the following settings
variables.
SEARCH_FORM
-----------
Full path to default ``SearchForm`` class.
By default uses ``kikola.contrib.basicsearch.forms.SearchForm`` class.
SEARCH_MODELS
-------------
**Required.** Sets up models for searching. For example, to search over
Django's FlatPages, use the following config::
SEARCH_MODELS = {
# Use same format as ``app_label`` in serialized data
'flatpages.FlatPage': {
# Object description in search results
'description': '{{ obj.content|truncatewords_html:20 }}',
# Object fields to search
'fields': ('title', 'content'),
# Use fulltext search (use this only when
# ``settings.DATABASE_ENGINE == 'mysql'``)
'fulltext': False,
# Object link in search results (by default
# ``{{ obj.get_absolute_url }}`` used)
'link': '{% url flatpage obj.url %}',
        # Priority. Useful when searching over more than one model. Objects
        # with higher priority are rendered first in search results.
'priority': 0,
# Object title in search results (by default ``{{ obj }}`` used)
'title': '{{ obj.title }}',
        # Trigger. Custom filter applied to found search results. For example,
# current trigger enables search only over flatpages with
# ``enable_comments``.
#
# To disable trigger, set ``'trigger': None``
'trigger': lambda obj: obj.enable_comments,
}
}
SEARCH_NOT_FOUND_MESSAGE
------------------------
Default search "not found" message. By default: ``Any objects was found by
your query.``
SEARCH_QUERY_MIN_LENGTH
-----------------------
Minimal length of search query. By default: 3.
SEARCH_QUERY_MAX_LENGTH
-----------------------
Maximal length of search query. By default: 64.
SEARCH_RESULTS_PER_PAGE
-----------------------
Number of search results, rendering at search page. By default: 10.
SEARCH_TEMPLATE_NAME
--------------------
Template used for rendering search results. By default:
``basicsearch/search.html``.
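Example settings
----------------
A minimal, illustrative configuration combining the options above (the values
shown are examples drawn from the descriptions, not additional defaults)::
    SEARCH_FORM = 'kikola.contrib.basicsearch.forms.SearchForm'
    SEARCH_MODELS = {
        'flatpages.FlatPage': {
            'fields': ('title', 'content'),
        },
    }
    SEARCH_QUERY_MIN_LENGTH = 3
    SEARCH_QUERY_MAX_LENGTH = 64
    SEARCH_RESULTS_PER_PAGE = 10
    SEARCH_TEMPLATE_NAME = 'basicsearch/search.html'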
"""
|
{
"content_hash": "3099e17863eafb53d6bc21a4f9972dd1",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 76,
"avg_line_length": 26.825688073394495,
"alnum_prop": 0.5889192886456909,
"repo_name": "playpauseandstop/kikola",
"id": "92bb88f6fbfe7cd73aea4a4f37ffe70f2c5b5bda",
"size": "2924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kikola/contrib/basicsearch/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "137012"
}
],
"symlink_target": ""
}
|
import commonware.log
import lib.iarc
import mkt
from mkt.translations.utils import find_language
log = commonware.log.getLogger('z.webapps')
def get_locale_properties(manifest, property, default_locale=None):
locale_dict = {}
for locale in manifest.get('locales', {}):
if property in manifest['locales'][locale]:
locale_dict[locale] = manifest['locales'][locale][property]
# Add in the default locale name.
default = manifest.get('default_locale') or default_locale
root_property = manifest.get(property)
if default and root_property:
locale_dict[default] = root_property
return locale_dict
def get_supported_locales(manifest):
"""
Returns a list of locales found in the "locales" property of the manifest.
This will convert locales found in the SHORTER_LANGUAGES setting to their
full locale. It will also remove locales not found in AMO_LANGUAGES.
Note: The default_locale is not included.
"""
return sorted(filter(None, map(find_language, set(
manifest.get('locales', {}).keys()))))
def dehydrate_content_rating(rating):
"""
{body.id, rating.id} to translated rating.label.
"""
try:
body = mkt.ratingsbodies.dehydrate_ratings_body(
mkt.ratingsbodies.RATINGS_BODIES[int(rating['body'])])
except TypeError:
# Legacy ES format (bug 943371).
return {}
rating = mkt.ratingsbodies.dehydrate_rating(
body.ratings[int(rating['rating'])])
return rating.label
def dehydrate_content_ratings(content_ratings):
"""Dehydrate an object of content ratings from rating IDs to dict."""
for body in content_ratings or {}:
# Dehydrate all content ratings.
content_ratings[body] = dehydrate_content_rating(content_ratings[body])
return content_ratings
def iarc_get_app_info(app):
client = lib.iarc.client.get_iarc_client('services')
iarc = app.iarc_info
iarc_id = iarc.submission_id
iarc_code = iarc.security_code
# Generate XML.
xml = lib.iarc.utils.render_xml(
'get_app_info.xml',
{'submission_id': iarc_id, 'security_code': iarc_code})
# Process that shizzle.
resp = client.Get_App_Info(XMLString=xml)
# Handle response.
return lib.iarc.utils.IARC_XML_Parser().parse_string(resp)
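# Hypothetical usage sketch (not part of the original module): how
# get_locale_properties() collapses a manifest into a {locale: value} mapping.
# The manifest below is made up for illustration.
#
# manifest = {
#     'name': 'My App',
#     'default_locale': 'en-US',
#     'locales': {'es': {'name': 'Mi App'}, 'fr': {'name': 'Mon App'}},
# }
# get_locale_properties(manifest, 'name')
# # -> {'es': 'Mi App', 'fr': 'Mon App', 'en-US': 'My App'}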
|
{
"content_hash": "60e8ae72cdb3220f38cbc1c7fe72aefe",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 79,
"avg_line_length": 29.2125,
"alnum_prop": 0.6739409499358151,
"repo_name": "ngokevin/zamboni",
"id": "3f436a1e50f55b15ec0b1601205452d135a04fd9",
"size": "2361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/webapps/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "356777"
},
{
"name": "JavaScript",
"bytes": "536388"
},
{
"name": "Python",
"bytes": "3883015"
},
{
"name": "Shell",
"bytes": "13597"
}
],
"symlink_target": ""
}
|
"""Tests for the jewish_calendar component."""
from collections import namedtuple
from contextlib import contextmanager
from datetime import datetime
from unittest.mock import patch
from homeassistant.components import jewish_calendar
import homeassistant.util.dt as dt_util
_LatLng = namedtuple("_LatLng", ["lat", "lng"])
HDATE_DEFAULT_ALTITUDE = 754
NYC_LATLNG = _LatLng(40.7128, -74.0060)
JERUSALEM_LATLNG = _LatLng(31.778, 35.235)
ORIG_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE
def teardown_module():
"""Reset time zone."""
dt_util.set_default_time_zone(ORIG_TIME_ZONE)
def make_nyc_test_params(dtime, results, havdalah_offset=0):
"""Make test params for NYC."""
if isinstance(results, dict):
time_zone = dt_util.get_time_zone("America/New_York")
results = {
key: time_zone.localize(value) if isinstance(value, datetime) else value
for key, value in results.items()
}
return (
dtime,
jewish_calendar.CANDLE_LIGHT_DEFAULT,
havdalah_offset,
True,
"America/New_York",
NYC_LATLNG.lat,
NYC_LATLNG.lng,
results,
)
def make_jerusalem_test_params(dtime, results, havdalah_offset=0):
"""Make test params for Jerusalem."""
if isinstance(results, dict):
time_zone = dt_util.get_time_zone("Asia/Jerusalem")
results = {
key: time_zone.localize(value) if isinstance(value, datetime) else value
for key, value in results.items()
}
return (
dtime,
jewish_calendar.CANDLE_LIGHT_DEFAULT,
havdalah_offset,
False,
"Asia/Jerusalem",
JERUSALEM_LATLNG.lat,
JERUSALEM_LATLNG.lng,
results,
)
@contextmanager
def alter_time(local_time):
"""Manage multiple time mocks."""
utc_time = dt_util.as_utc(local_time)
patch1 = patch("homeassistant.util.dt.utcnow", return_value=utc_time)
patch2 = patch("homeassistant.util.dt.now", return_value=local_time)
with patch1, patch2:
yield
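# Hypothetical usage sketch (not part of the original helpers): freezing "now"
# around a block of test code. The chosen time is illustrative only.
#
# test_time = dt_util.now().replace(hour=12, minute=0, second=0, microsecond=0)
# with alter_time(test_time):
#     ...  # code under test sees the patched dt_util.now() / dt_util.utcnow()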
|
{
"content_hash": "dc668534a060bafe435a6dc493aa3c62",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 84,
"avg_line_length": 28.52777777777778,
"alnum_prop": 0.6455696202531646,
"repo_name": "adrienbrault/home-assistant",
"id": "2d42458cf1b437ea674757ec884d394c038e2225",
"size": "2054",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/jewish_calendar/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(
name = "Daily Notices for Moodle",
packages = ['cli', 'notices', 'gns'],
version = "1.0",
description = "",
author = "Adam Morris",
author_email = "classroomtechtools.ctt@gmail.com",
keywords = ["moodle"],
install_requires = ['click', 'sqlalchemy', 'psycopg2'],
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Operating System :: OS Independent",
],
entry_points='''
[console_scripts]
notices=cli.main:notices
''',
long_description = """\
TODO: DESCRIBE THIS!
This version requires Python 3 or later.
"""
)
|
{
"content_hash": "a7c991e7702bd83ae162f7771bbfe7c0",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 89,
"avg_line_length": 30.75,
"alnum_prop": 0.5981416957026713,
"repo_name": "classroomtechtools/moodle_daily_notices",
"id": "b2177a4e5643ee93b8fc178b68ae8ce5dde8f815",
"size": "861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "353500"
}
],
"symlink_target": ""
}
|
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
# from nose import with_setup
# -*- coding: utf-8 -*-
from ..data_structures.sarray import SArray
import pandas as pd
import numpy as np
import unittest
import random
import copy
import os
import math
import array
import time
import itertools
#######################################################
# Metrics tracking tests are in test_usage_metrics.py #
#######################################################
class SArraySketchTest(unittest.TestCase):
def __validate_sketch_result(self, sketch, sa, delta = 1E-7):
df = pd.DataFrame(list(sa.dropna()))
pds = pd.Series(list(sa.dropna()))
if (sa.dtype() == int or sa.dtype() == float):
if (len(sa) == 0):
self.assertTrue(math.isnan(sketch.min()))
                self.assertTrue(math.isnan(sketch.max()))
self.assertEquals(sketch.sum(), 0.0)
self.assertEquals(sketch.mean(), 0.0)
self.assertEquals(sketch.var(), 0.0)
self.assertEquals(sketch.std(), 0.0)
else:
self.assertEquals(sketch.min(), sa.min())
self.assertEquals(sketch.max(), sa.max())
self.assertEquals(sketch.sum(), sa.sum())
self.assertAlmostEqual(sketch.mean(), sa.dropna().mean(), delta=delta)
self.assertAlmostEqual(sketch.var(), sa.dropna().var(), delta=delta)
self.assertAlmostEqual(sketch.std(), sa.dropna().std(), delta=delta)
self.assertAlmostEqual(sketch.quantile(0.5), df.quantile(0.5)[0], delta=1)
self.assertEqual(sketch.quantile(0), df.quantile(0)[0])
self.assertEqual(sketch.quantile(1), df.quantile(1)[0])
self.assertEqual(sketch.frequent_items(), SArray(pds).sketch_summary().frequent_items())
for item in pds.value_counts().index:
self.assertEqual(sketch.frequency_count(item), pds.value_counts()[item])
self.assertAlmostEqual(sketch.num_unique(), len(sa.unique()), delta=3)
else:
with self.assertRaises(RuntimeError):
sketch.quantile((0.5))
self.assertEqual(sketch.num_undefined(), sa.num_missing())
self.assertEqual(sketch.size(), len(sa))
self.assertEqual(sketch.sketch_ready(), True)
self.assertEqual(sketch.num_elements_processed(), sketch.size())
def __validate_nested_sketch_result(self, sa):
sketch = sa.sketch_summary()
self.__validate_sketch_result(sketch, sa)
# element length summary
t = sketch.element_length_summary()
len_sa = sa.dropna().item_length()
self.__validate_sketch_result(t, len_sa)
def test_sketch_int(self):
int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, None]
sa = SArray(data=int_data)
self.__validate_sketch_result(sa.sketch_summary(), sa)
def test_sketch_float(self):
        int_data = [1.2, 3, .4, 6.789, None]
sa = SArray(data=int_data)
self.__validate_sketch_result(sa.sketch_summary(), sa)
def test_vector_sketch(self):
vector_data = [[], [1,2], [3], [4,5,6,7], [8,9,10], None]
sa = SArray(data=vector_data)
sketch = sa.sketch_summary()
self.__validate_sketch_result(sketch, sa)
self.__validate_sketch_result(sketch.element_length_summary(), sa.dropna().item_length())
flattened = list(itertools.chain.from_iterable(list(sa.dropna())))
self.__validate_sketch_result(sketch.element_summary(), SArray(flattened))
fi = sketch.frequent_items()
self.assertEqual(len(fi), 5)
self.assertEqual((fi['[1 2]']), 1)
self.assertEqual((fi['[4 5 6 7]']), 1)
# sub sketch with one key
s = sa.sketch_summary(sub_sketch_keys = 1).element_sub_sketch(1)
expected = sa.vector_slice(1)
self.__validate_sketch_result(s, expected)
# sub sketch with multiple keys
keys = [1,3]
s = sa.sketch_summary(sub_sketch_keys = keys).element_sub_sketch(keys)
self.assertEqual(len(s), len(keys))
for key in keys:
self.assertTrue(key in s)
expected = sa.vector_slice(key)
self.__validate_sketch_result(s[key], expected)
indexes = range(0,10)
s = sa.sketch_summary(sub_sketch_keys = indexes).element_sub_sketch()
self.assertEqual(len(s), len(indexes))
def test_list_sketch(self):
list_data = [[], [1,2],[1,2], ['a', 'a', 'a', 'b'], [ 1 ,1 , 2], None]
sa = SArray(list_data)
self.__validate_nested_sketch_result(sa)
sketch = sa.sketch_summary()
self.assertEqual(sketch.num_unique(), 4)
element_summary = sketch.element_summary()
another_rep = list(itertools.chain.from_iterable(list(sa.dropna())))
self.__validate_sketch_result(element_summary, SArray(another_rep, str))
fi = sketch.frequent_items()
self.assertEqual(len(fi), 4)
self.assertEqual((fi['[1,2]']), 2)
self.assertEqual((fi['["a","a","a","b"]']), 1)
def test_dict_sketch_int_value(self):
dict_data = [{}, {'a':1, 'b':2}, {'a':1, 'b':2}, {'a':3, 'c':1}, {'a': 1, 'b': 2, 'c': 3}, None]
sa = SArray(data=dict_data)
self.__validate_nested_sketch_result(sa)
sketch = sa.sketch_summary()
self.assertEqual(sketch.num_unique(), 4)
fi = sketch.frequent_items()
self.assertEqual(len(fi), 4)
# The order in which keys are reported is different in python2 vs python3.
# So when the dictionary is converted to a string, it results in different
# strings. Try both possible combinations for dictionary.
v = fi['{"a":1, "b":2}'] if '{"a":1, "b":2}' in fi else fi['{"b":2, "a":1}']
self.assertEqual(v, 2)
v = fi['{"a":3, "c":1}'] if '{"a":3, "c":1}' in fi else fi['{"c":1, "a":3}']
self.assertEqual(v, 1)
# Get dict key sketch
key_summary = sketch.dict_key_summary()
another_rep = list(itertools.chain.from_iterable(list(sa.dict_keys().dropna())))
self.__validate_sketch_result(key_summary, SArray(another_rep))
# Get dict value sketch
value_summary = sketch.dict_value_summary()
another_rep = list(itertools.chain.from_iterable(list(sa.dict_values().dropna())))
self.__validate_sketch_result(value_summary, SArray(another_rep))
# sub sketch with one key
s = sa.sketch_summary(sub_sketch_keys ='a').element_sub_sketch('a')
expected = sa.unpack(column_name_prefix="")['a']
self.__validate_sketch_result(s, expected)
s = sa.sketch_summary(sub_sketch_keys ='Nonexist').element_sub_sketch('Nonexist')
self.assertEqual(s.num_undefined(), len(sa))
# sub sketch with multiple keys
keys = ['a', 'b']
s = sa.sketch_summary(sub_sketch_keys =keys).element_sub_sketch(keys)
self.assertEqual(len(s), len(keys))
for key in keys:
self.assertTrue(key in s)
expected = sa.unpack(column_name_prefix="")[key]
self.__validate_sketch_result(s[key], expected)
def test_dict_sketch_str_value(self):
# Dict value sketch type should be auto inferred
dict_data = [{'a':'b', 'b':'c'}, {'a':'b', 'b':'c'}, {'a':'d', 'b':'4'}, None]
sa = SArray(data=dict_data)
self.__validate_nested_sketch_result(sa)
sketch = sa.sketch_summary()
fi = sketch.frequent_items()
self.assertEqual(len(fi), 2)
# The order in which keys are reported is different in python2 vs python3.
# So when the dictionary is converted to a string, it results in different
# strings. Try both possible combinations for dictionary.
v = fi['{"b":"c", "a":"b"}'] if '{"b":"c", "a":"b"}' in fi else fi['{"a":"b", "b":"c"}']
self.assertEqual(v, 2)
v = fi['{"a":"d", "b":"4"}'] if '{"a":"d", "b":"4"}' in fi else fi['{"b":"4", "a":"d"}']
self.assertEqual(v, 1)
# Get dict key sketch
key_summary = sketch.dict_key_summary()
another_rep = list(itertools.chain.from_iterable(list(sa.dict_keys().dropna())))
self.__validate_sketch_result(key_summary, SArray(another_rep))
# Get dict value sketch
value_summary = sketch.dict_value_summary()
another_rep = list(itertools.chain.from_iterable(list(sa.dict_values().dropna())))
self.__validate_sketch_result(value_summary, SArray(another_rep))
# sub sketch with one key
s = sa.sketch_summary(sub_sketch_keys ='a').element_sub_sketch('a')
expected = sa.unpack(column_name_prefix="")['a']
self.__validate_sketch_result(s, expected)
s = sa.sketch_summary(sub_sketch_keys ='Nonexist').element_sub_sketch('Nonexist')
self.assertEqual(s.num_undefined(), len(sa))
# sub sketch with multiple keys
keys = ['a', 'b']
s = sa.sketch_summary(sub_sketch_keys =keys).element_sub_sketch(keys)
self.assertEqual(len(s), len(keys))
for key in keys:
self.assertTrue(key in s)
expected = sa.unpack(column_name_prefix="")[key]
self.__validate_sketch_result(s[key], expected)
# allow pass in empty keys, which will retrieve all keys
s = sa.sketch_summary(sub_sketch_keys =keys).element_sub_sketch()
self.assertEqual(len(s), len(keys))
for key in keys:
self.assertTrue(key in s)
expected = sa.unpack(column_name_prefix="")[key]
self.__validate_sketch_result(s[key], expected)
def test_dict_many_nones(self):
sa = SArray([None] * 200 + [{'a':'b'}])
self.assertEqual(sa.sketch_summary().num_elements_processed(), 201)
def test_str_sketch(self):
str_data = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", None]
sa = SArray(data=str_data)
sketch = sa.sketch_summary()
with self.assertRaises(RuntimeError):
sketch.min()
with self.assertRaises(RuntimeError):
sketch.max()
with self.assertRaises(RuntimeError):
sketch.sum()
with self.assertRaises(RuntimeError):
sketch.mean()
with self.assertRaises(RuntimeError):
sketch.var()
with self.assertRaises(RuntimeError):
sketch.std()
self.assertAlmostEqual(sketch.num_unique(), 10, delta=3)
self.assertEqual(sketch.num_undefined(), 1)
self.assertEqual(sketch.size(), len(str_data))
with self.assertRaises(RuntimeError):
sketch.quantile(0.5)
self.assertEqual(sketch.frequency_count("1"), 1)
self.assertEqual(sketch.frequency_count("2"), 1)
t = sketch.frequent_items()
self.assertEqual(len(t), 10)
def test_empty_sketch(self):
int_data = []
sa = SArray(data=int_data)
sketch = sa.sketch_summary()
self.assertTrue(math.isnan(sketch.min()))
self.assertTrue(math.isnan(sketch.max()))
self.assertEquals(sketch.sum(), 0)
self.assertEqual(sketch.mean(), 0)
self.assertEqual(sketch.var(), 0)
self.assertEqual(sketch.std(), 0)
self.assertEqual(sketch.num_unique(), 0)
self.assertEqual(sketch.num_undefined(),0)
self.assertEqual(sketch.size(), 0)
with self.assertRaises(RuntimeError):
sketch.quantile(0.5)
t = sketch.frequent_items()
self.assertEqual(len(t), 0)
def test_large_value_sketch(self):
sa = SArray([1234567890 for i in range(100)])
sk = sa.sketch_summary()
self.__validate_sketch_result(sa.sketch_summary(), sa, 1E-5)
def test_cancelation(self):
sa = SArray(range(1,10000))
s = sa.sketch_summary(background=True)
s.cancel()
# this can be rather non-deterministic, so there is very little
# real output validation that can be done...
|
{
"content_hash": "df53a6eae2f944a3073e20e28606167e",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 104,
"avg_line_length": 41.491467576791806,
"alnum_prop": 0.5866578925721806,
"repo_name": "haijieg/SFrame",
"id": "750453f342edde055a3056df4ff1e2135ba73ee7",
"size": "12157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oss_src/unity/python/sframe/test/test_sarray_sketch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "169684"
},
{
"name": "C++",
"bytes": "12042105"
},
{
"name": "CMake",
"bytes": "104454"
},
{
"name": "CSS",
"bytes": "127000"
},
{
"name": "HTML",
"bytes": "24575"
},
{
"name": "Hack",
"bytes": "277"
},
{
"name": "JavaScript",
"bytes": "20909"
},
{
"name": "Makefile",
"bytes": "9614"
},
{
"name": "Perl",
"bytes": "9663"
},
{
"name": "Python",
"bytes": "2160225"
},
{
"name": "R",
"bytes": "537"
},
{
"name": "Scala",
"bytes": "5232"
},
{
"name": "Shell",
"bytes": "51745"
},
{
"name": "Smarty",
"bytes": "966"
},
{
"name": "XSLT",
"bytes": "74068"
}
],
"symlink_target": ""
}
|
import sys
import subprocess
import time
import start_craq_server
import start_craq_router
def main(args):
zookeeper_node = args[1]
zookeeper_addr = args[2]
craq_nodes = args[3:]
print "Starting zookeeper"
subprocess.Popen(['./cluster/zookeeper/start_zookeeper.sh', zookeeper_node])
print "Finished starting zookeeper"
time.sleep(45)
print "Starting Craqs"
start_craq_server.main(zookeeper_addr, craq_nodes)
print "Finished starting Craqs"
time.sleep(45)
print "Starting Router"
start_craq_router.main(zookeeper_addr)
print "Finished starting Router"
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
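# Hypothetical invocation (not part of the original script); the argument
# layout is inferred from main() above and the host names are placeholders:
#
#   python restart_craq_local.py <zookeeper_node> <zookeeper_addr> <craq_node> [<craq_node> ...]
#
# e.g.
#   python restart_craq_local.py zk-host zk-host:2181 craq1 craq2 craq3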
|
{
"content_hash": "f10f7823431237490211638da4910aa6",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 84,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.6573426573426573,
"repo_name": "sirikata/sirikata",
"id": "cbf3f1c724e56999a6d283dd0528dee883ae80a1",
"size": "734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/cluster/craq/restart_craq_local.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "402297"
},
{
"name": "C++",
"bytes": "13009496"
},
{
"name": "CMake",
"bytes": "287559"
},
{
"name": "CSS",
"bytes": "4961"
},
{
"name": "EmberScript",
"bytes": "951536"
},
{
"name": "GAP",
"bytes": "83739"
},
{
"name": "HTML",
"bytes": "7887"
},
{
"name": "JavaScript",
"bytes": "328077"
},
{
"name": "Makefile",
"bytes": "3874"
},
{
"name": "PHP",
"bytes": "5259"
},
{
"name": "Perl",
"bytes": "503"
},
{
"name": "Protocol Buffer",
"bytes": "2030"
},
{
"name": "Python",
"bytes": "251439"
},
{
"name": "Shell",
"bytes": "12168"
}
],
"symlink_target": ""
}
|
"""Generated message classes for dataproc version v1.
Manages Hadoop-based clusters and jobs on Google Cloud Platform.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'dataproc'
class CancelJobRequest(_messages.Message):
"""A request to cancel a job."""
class Cluster(_messages.Message):
"""Describes the identifying information, config, and status of a cluster of
Google Compute Engine instances.
Messages:
LabelsValue: Optional The labels to associate with this cluster. Label
keys must contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
if present, must contain 1 to 63 characters, and must conform to RFC
1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
be associated with a cluster.
Fields:
clusterName: Required The cluster name. Cluster names within a project
must be unique. Names of deleted clusters can be reused.
clusterUuid: Output-only A cluster UUID (Unique Universal Identifier).
Cloud Dataproc generates this value when it creates the cluster.
config: Required The cluster config. Note that Cloud Dataproc may set
default values, and values may change when clusters are updated.
labels: Optional The labels to associate with this cluster. Label keys
must contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
if present, must contain 1 to 63 characters, and must conform to RFC
1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
be associated with a cluster.
metrics: Contains cluster daemon metrics such as HDFS and YARN stats.Beta
Feature: This report is available for testing purposes only. It may be
changed before final release.
projectId: Required The Google Cloud Platform project ID that the cluster
belongs to.
status: Output-only Cluster status.
statusHistory: Output-only The previous cluster status.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""Optional The labels to associate with this cluster. Label keys must
contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if
present, must contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
associated with a cluster.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterName = _messages.StringField(1)
clusterUuid = _messages.StringField(2)
config = _messages.MessageField('ClusterConfig', 3)
labels = _messages.MessageField('LabelsValue', 4)
metrics = _messages.MessageField('ClusterMetrics', 5)
projectId = _messages.StringField(6)
status = _messages.MessageField('ClusterStatus', 7)
statusHistory = _messages.MessageField('ClusterStatus', 8, repeated=True)
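# Illustrative note (not part of the generated module): LabelsValue follows the
# apitools additionalProperties map encoding, so a plain dict of labels becomes
# repeated key/value pairs. The label values below are placeholders.
#
# labels = Cluster.LabelsValue(additionalProperties=[
#     Cluster.LabelsValue.AdditionalProperty(key='env', value='staging'),
#     Cluster.LabelsValue.AdditionalProperty(key='team', value='data'),
# ])
# cluster = Cluster(clusterName='example-cluster', labels=labels)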
class ClusterConfig(_messages.Message):
"""The cluster config.
Fields:
configBucket: Optional A Google Cloud Storage staging bucket used for
sharing generated SSH keys and config. If you do not specify a staging
bucket, Cloud Dataproc will determine an appropriate Cloud Storage
location (US, ASIA, or EU) for your cluster's staging bucket according
to the Google Compute Engine zone where your cluster is deployed, and
then it will create and manage this project-level, per-location bucket
for you.
gceClusterConfig: Required The shared Google Compute Engine config
settings for all instances in a cluster.
initializationActions: Optional Commands to execute on each node after
config is completed. By default, executables are run on main and all
worker nodes. You can test a node's <code>role</code> metadata to run an
executable on a main or worker node, as shown below using curl (you
can also use wget): ROLE=$(curl -H Metadata-Flavor:Google
http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if
[[ "${ROLE}" == 'Main' ]]; then ... main specific actions ... else
... worker specific actions ... fi
mainConfig: Optional The Google Compute Engine config settings for the
main instance in a cluster.
secondaryWorkerConfig: Optional The Google Compute Engine config settings
for additional worker instances in a cluster.
softwareConfig: Optional The config settings for software inside the
cluster.
workerConfig: Optional The Google Compute Engine config settings for
worker instances in a cluster.
"""
configBucket = _messages.StringField(1)
gceClusterConfig = _messages.MessageField('GceClusterConfig', 2)
initializationActions = _messages.MessageField('NodeInitializationAction', 3, repeated=True)
mainConfig = _messages.MessageField('InstanceGroupConfig', 4)
secondaryWorkerConfig = _messages.MessageField('InstanceGroupConfig', 5)
softwareConfig = _messages.MessageField('SoftwareConfig', 6)
workerConfig = _messages.MessageField('InstanceGroupConfig', 7)
class ClusterMetrics(_messages.Message):
"""Contains cluster daemon metrics, such as HDFS and YARN stats.Beta
Feature: This report is available for testing purposes only. It may be
changed before final release.
Messages:
HdfsMetricsValue: The HDFS metrics.
YarnMetricsValue: The YARN metrics.
Fields:
hdfsMetrics: The HDFS metrics.
yarnMetrics: The YARN metrics.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class HdfsMetricsValue(_messages.Message):
"""The HDFS metrics.
Messages:
AdditionalProperty: An additional property for a HdfsMetricsValue
object.
Fields:
additionalProperties: Additional properties of type HdfsMetricsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a HdfsMetricsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.IntegerField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class YarnMetricsValue(_messages.Message):
"""The YARN metrics.
Messages:
AdditionalProperty: An additional property for a YarnMetricsValue
object.
Fields:
additionalProperties: Additional properties of type YarnMetricsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a YarnMetricsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.IntegerField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
hdfsMetrics = _messages.MessageField('HdfsMetricsValue', 1)
yarnMetrics = _messages.MessageField('YarnMetricsValue', 2)
class ClusterOperationMetadata(_messages.Message):
"""Metadata describing the operation.
Messages:
LabelsValue: Output-only labels associated with the operation
Fields:
clusterName: Output-only Name of the cluster for the operation.
clusterUuid: Output-only Cluster UUID for the operation.
description: Output-only Short description of operation.
labels: Output-only labels associated with the operation
operationType: Output-only The operation type.
status: Output-only Current operation status.
statusHistory: Output-only The previous operation status.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""Output-only labels associated with the operation
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterName = _messages.StringField(1)
clusterUuid = _messages.StringField(2)
description = _messages.StringField(3)
labels = _messages.MessageField('LabelsValue', 4)
operationType = _messages.StringField(5)
status = _messages.MessageField('ClusterOperationStatus', 6)
statusHistory = _messages.MessageField('ClusterOperationStatus', 7, repeated=True)
class ClusterOperationStatus(_messages.Message):
"""The status of the operation.
Enums:
StateValueValuesEnum: Output-only A message containing the operation
state.
Fields:
    details: Output-only A message containing any operation metadata details.
innerState: Output-only A message containing the detailed operation state.
state: Output-only A message containing the operation state.
stateStartTime: Output-only The time this state was entered.
"""
class StateValueValuesEnum(_messages.Enum):
"""Output-only A message containing the operation state.
Values:
UNKNOWN: Unused.
PENDING: The operation has been created.
RUNNING: The operation is running.
DONE: The operation is done; either cancelled or completed.
"""
UNKNOWN = 0
PENDING = 1
RUNNING = 2
DONE = 3
details = _messages.StringField(1)
innerState = _messages.StringField(2)
state = _messages.EnumField('StateValueValuesEnum', 3)
stateStartTime = _messages.StringField(4)
class ClusterStatus(_messages.Message):
"""The status of a cluster and its instances.
Enums:
StateValueValuesEnum: Output-only The cluster's state.
Fields:
detail: Output-only Optional details of cluster's state.
state: Output-only The cluster's state.
stateStartTime: Output-only Time when this state was entered.
"""
class StateValueValuesEnum(_messages.Enum):
"""Output-only The cluster's state.
Values:
UNKNOWN: The cluster state is unknown.
CREATING: The cluster is being created and set up. It is not ready for
use.
RUNNING: The cluster is currently running and healthy. It is ready for
use.
ERROR: The cluster encountered an error. It is not ready for use.
DELETING: The cluster is being deleted. It cannot be used.
UPDATING: The cluster is being updated. It continues to accept and
process jobs.
"""
UNKNOWN = 0
CREATING = 1
RUNNING = 2
ERROR = 3
DELETING = 4
UPDATING = 5
detail = _messages.StringField(1)
state = _messages.EnumField('StateValueValuesEnum', 2)
stateStartTime = _messages.StringField(3)
class DataprocProjectsRegionsClustersCreateRequest(_messages.Message):
"""A DataprocProjectsRegionsClustersCreateRequest object.
Fields:
cluster: A Cluster resource to be passed as the request body.
projectId: Required The ID of the Google Cloud Platform project that the
cluster belongs to.
region: Required The Cloud Dataproc region in which to handle the request.
"""
cluster = _messages.MessageField('Cluster', 1)
projectId = _messages.StringField(2, required=True)
region = _messages.StringField(3, required=True)
class DataprocProjectsRegionsClustersDeleteRequest(_messages.Message):
"""A DataprocProjectsRegionsClustersDeleteRequest object.
Fields:
clusterName: Required The cluster name.
projectId: Required The ID of the Google Cloud Platform project that the
cluster belongs to.
region: Required The Cloud Dataproc region in which to handle the request.
"""
clusterName = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
region = _messages.StringField(3, required=True)
class DataprocProjectsRegionsClustersDiagnoseRequest(_messages.Message):
"""A DataprocProjectsRegionsClustersDiagnoseRequest object.
Fields:
clusterName: Required The cluster name.
diagnoseClusterRequest: A DiagnoseClusterRequest resource to be passed as
the request body.
projectId: Required The ID of the Google Cloud Platform project that the
cluster belongs to.
region: Required The Cloud Dataproc region in which to handle the request.
"""
clusterName = _messages.StringField(1, required=True)
diagnoseClusterRequest = _messages.MessageField('DiagnoseClusterRequest', 2)
projectId = _messages.StringField(3, required=True)
region = _messages.StringField(4, required=True)
class DataprocProjectsRegionsClustersGetRequest(_messages.Message):
"""A DataprocProjectsRegionsClustersGetRequest object.
Fields:
clusterName: Required The cluster name.
projectId: Required The ID of the Google Cloud Platform project that the
cluster belongs to.
region: Required The Cloud Dataproc region in which to handle the request.
"""
clusterName = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
region = _messages.StringField(3, required=True)
class DataprocProjectsRegionsClustersListRequest(_messages.Message):
"""A DataprocProjectsRegionsClustersListRequest object.
Fields:
filter: Optional A filter constraining the clusters to list. Filters are
case-sensitive and have the following syntax: field = value AND field =
value ... where field is one of status.state, clusterName, or
labels.[KEY], and [KEY] is a label key. value can be * to match all
values. status.state can be one of the following: ACTIVE, INACTIVE,
CREATING, RUNNING, ERROR, DELETING, or UPDATING. ACTIVE contains the
CREATING, UPDATING, and RUNNING states. INACTIVE contains the DELETING
and ERROR states. clusterName is the name of the cluster provided at
creation time. Only the logical AND operator is supported; space-
separated items are treated as having an implicit AND operator. Example
filter: status.state = ACTIVE AND clusterName = mycluster AND labels.env
= staging AND labels.starred = *
pageSize: Optional The standard List page size.
pageToken: Optional The standard List page token.
projectId: Required The ID of the Google Cloud Platform project that the
cluster belongs to.
region: Required The Cloud Dataproc region in which to handle the request.
"""
filter = _messages.StringField(1)
pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
region = _messages.StringField(5, required=True)
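# Illustrative sketch (not part of the generated API): building a cluster list
# request that uses the filter syntax documented above. The project ID,
# region, and label values are hypothetical.
#
# _example_clusters_list = DataprocProjectsRegionsClustersListRequest(
#     projectId='my-project',
#     region='global',
#     filter='status.state = ACTIVE AND labels.env = staging',
#     pageSize=50)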
class DataprocProjectsRegionsClustersPatchRequest(_messages.Message):
"""A DataprocProjectsRegionsClustersPatchRequest object.
Fields:
cluster: A Cluster resource to be passed as the request body.
clusterName: Required The cluster name.
projectId: Required The ID of the Google Cloud Platform project the
cluster belongs to.
region: Required The Cloud Dataproc region in which to handle the request.
updateMask: Required Specifies the path, relative to <code>Cluster</code>,
of the field to update. For example, to change the number of workers in
a cluster to 5, the <code>update_mask</code> parameter would be
specified as <code>config.worker_config.num_instances</code>, and the
PATCH request body would specify the new value, as follows: {
"config":{ "workerConfig":{ "numInstances":"5" } } }
Similarly, to change the number of preemptible workers in a cluster to
5, the <code>update_mask</code> parameter would be
<code>config.secondary_worker_config.num_instances</code>, and the PATCH
request body would be set as follows: { "config":{
"secondaryWorkerConfig":{ "numInstances":"5" } } }
<strong>Note:</strong> Currently,
<code>config.worker_config.num_instances</code> and
<code>config.secondary_worker_config.num_instances</code> are the only
fields that can be updated.
"""
cluster = _messages.MessageField('Cluster', 1)
clusterName = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
region = _messages.StringField(4, required=True)
updateMask = _messages.StringField(5)
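# Illustrative sketch (not part of the generated API): a patch request that
# resizes the primary worker group to 5 instances, mirroring the update_mask
# example in the docstring above. Identifiers are hypothetical; the sketch
# assumes the Cluster and ClusterConfig messages defined elsewhere in this
# module.
#
# _example_cluster_patch = DataprocProjectsRegionsClustersPatchRequest(
#     projectId='my-project',
#     region='global',
#     clusterName='my-cluster',
#     updateMask='config.worker_config.num_instances',
#     cluster=Cluster(
#         config=ClusterConfig(
#             workerConfig=InstanceGroupConfig(numInstances=5))))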
class DataprocProjectsRegionsJobsCancelRequest(_messages.Message):
"""A DataprocProjectsRegionsJobsCancelRequest object.
Fields:
cancelJobRequest: A CancelJobRequest resource to be passed as the request
body.
jobId: Required The job ID.
projectId: Required The ID of the Google Cloud Platform project that the
job belongs to.
region: Required The Cloud Dataproc region in which to handle the request.
"""
cancelJobRequest = _messages.MessageField('CancelJobRequest', 1)
jobId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
region = _messages.StringField(4, required=True)
class DataprocProjectsRegionsJobsDeleteRequest(_messages.Message):
"""A DataprocProjectsRegionsJobsDeleteRequest object.
Fields:
jobId: Required The job ID.
projectId: Required The ID of the Google Cloud Platform project that the
job belongs to.
region: Required The Cloud Dataproc region in which to handle the request.
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
region = _messages.StringField(3, required=True)
class DataprocProjectsRegionsJobsGetRequest(_messages.Message):
"""A DataprocProjectsRegionsJobsGetRequest object.
Fields:
jobId: Required The job ID.
projectId: Required The ID of the Google Cloud Platform project that the
job belongs to.
region: Required The Cloud Dataproc region in which to handle the request.
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
region = _messages.StringField(3, required=True)
class DataprocProjectsRegionsJobsListRequest(_messages.Message):
"""A DataprocProjectsRegionsJobsListRequest object.
Enums:
JobStateMatcherValueValuesEnum: Optional Specifies enumerated categories
of jobs to list (default = match ALL jobs).
Fields:
clusterName: Optional If set, the returned jobs list includes only jobs
that were submitted to the named cluster.
filter: Optional A filter constraining the jobs to list. Filters are case-
sensitive and have the following syntax: field = value AND field = value
... where field is status.state or labels.[KEY], and [KEY] is a label
key. value can be * to match all values. status.state can be either
ACTIVE or INACTIVE. Only the logical AND operator is supported; space-
separated items are treated as having an implicit AND operator. Example
filter: status.state = ACTIVE AND labels.env = staging AND labels.starred
= *
jobStateMatcher: Optional Specifies enumerated categories of jobs to list
(default = match ALL jobs).
pageSize: Optional The number of results to return in each response.
pageToken: Optional The page token, returned by a previous call, to
request the next page of results.
projectId: Required The ID of the Google Cloud Platform project that the
job belongs to.
region: Required The Cloud Dataproc region in which to handle the request.
"""
class JobStateMatcherValueValuesEnum(_messages.Enum):
"""Optional Specifies enumerated categories of jobs to list (default =
match ALL jobs).
Values:
ALL: <no description>
ACTIVE: <no description>
NON_ACTIVE: <no description>
"""
ALL = 0
ACTIVE = 1
NON_ACTIVE = 2
clusterName = _messages.StringField(1)
filter = _messages.StringField(2)
jobStateMatcher = _messages.EnumField('JobStateMatcherValueValuesEnum', 3)
pageSize = _messages.IntegerField(4, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(5)
projectId = _messages.StringField(6, required=True)
region = _messages.StringField(7, required=True)
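# Illustrative sketch (not part of the generated API): listing only active
# jobs submitted to a specific cluster, using the jobStateMatcher enum defined
# above. The project, region, and cluster names are hypothetical.
#
# _example_jobs_list = DataprocProjectsRegionsJobsListRequest(
#     projectId='my-project',
#     region='global',
#     clusterName='my-cluster',
#     jobStateMatcher=(DataprocProjectsRegionsJobsListRequest
#                      .JobStateMatcherValueValuesEnum.ACTIVE))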
class DataprocProjectsRegionsJobsPatchRequest(_messages.Message):
"""A DataprocProjectsRegionsJobsPatchRequest object.
Fields:
job: A Job resource to be passed as the request body.
jobId: Required The job ID.
projectId: Required The ID of the Google Cloud Platform project that the
job belongs to.
region: Required The Cloud Dataproc region in which to handle the request.
updateMask: Required Specifies the path, relative to <code>Job</code>, of
the field to update. For example, to update the labels of a Job the
<code>update_mask</code> parameter would be specified as
<code>labels</code>, and the PATCH request body would specify the new
value. <strong>Note:</strong> Currently, <code>labels</code> is the only
field that can be updated.
"""
job = _messages.MessageField('Job', 1)
jobId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
region = _messages.StringField(4, required=True)
updateMask = _messages.StringField(5)
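# Illustrative sketch (not part of the generated API): patching only the
# labels of an existing job, which the update_mask documentation above notes
# is the only field currently supported. Identifiers are hypothetical; the
# sketch assumes the Job message defined later in this module.
#
# _example_job_patch = DataprocProjectsRegionsJobsPatchRequest(
#     projectId='my-project',
#     region='global',
#     jobId='job-1234',
#     updateMask='labels',
#     job=Job(labels=Job.LabelsValue(additionalProperties=[
#         Job.LabelsValue.AdditionalProperty(key='env', value='staging')])))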
class DataprocProjectsRegionsJobsSubmitRequest(_messages.Message):
"""A DataprocProjectsRegionsJobsSubmitRequest object.
Fields:
projectId: Required The ID of the Google Cloud Platform project that the
job belongs to.
region: Required The Cloud Dataproc region in which to handle the request.
submitJobRequest: A SubmitJobRequest resource to be passed as the request
body.
"""
projectId = _messages.StringField(1, required=True)
region = _messages.StringField(2, required=True)
submitJobRequest = _messages.MessageField('SubmitJobRequest', 3)
class DataprocProjectsRegionsOperationsCancelRequest(_messages.Message):
"""A DataprocProjectsRegionsOperationsCancelRequest object.
Fields:
name: The name of the operation resource to be cancelled.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsOperationsDeleteRequest(_messages.Message):
"""A DataprocProjectsRegionsOperationsDeleteRequest object.
Fields:
name: The name of the operation resource to be deleted.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsOperationsGetRequest(_messages.Message):
"""A DataprocProjectsRegionsOperationsGetRequest object.
Fields:
name: The name of the operation resource.
"""
name = _messages.StringField(1, required=True)
class DataprocProjectsRegionsOperationsListRequest(_messages.Message):
"""A DataprocProjectsRegionsOperationsListRequest object.
Fields:
filter: The standard list filter.
name: The name of the operation collection.
pageSize: The standard list page size.
pageToken: The standard list page token.
"""
filter = _messages.StringField(1)
name = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
class DiagnoseClusterOutputLocation(_messages.Message):
"""The location where output from diagnostic command can be found.
Fields:
outputUri: Output-only The Google Cloud Storage URI of the diagnostic
output. This will be a plain text file with summary of collected
diagnostics.
"""
outputUri = _messages.StringField(1)
class DiagnoseClusterRequest(_messages.Message):
"""A request to collect cluster diagnostic information."""
class DiagnoseClusterResults(_messages.Message):
"""The location of diagnostic output.
Fields:
outputUri: Output-only The Google Cloud Storage URI of the diagnostic
output. The output report is a plain text file with a summary of
collected diagnostics.
"""
outputUri = _messages.StringField(1)
class DiskConfig(_messages.Message):
"""Specifies the config of disk options for a group of VM instances.
Fields:
bootDiskSizeGb: Optional Size in GB of the boot disk (default is 500GB).
numLocalSsds: Optional Number of attached SSDs, from 0 to 4 (default is
0). If SSDs are not attached, the boot disk is used to store runtime
logs and HDFS
(https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If
one or more SSDs are attached, this runtime bulk data is spread across
them, and the boot disk contains only basic config and installed
binaries.
"""
bootDiskSizeGb = _messages.IntegerField(1, variant=_messages.Variant.INT32)
numLocalSsds = _messages.IntegerField(2, variant=_messages.Variant.INT32)
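# Illustrative sketch (not part of the generated API): a disk configuration
# with a 100 GB boot disk and one local SSD, which the docstring above notes
# moves runtime bulk data off the boot disk. Values are hypothetical.
#
# _example_disk_config = DiskConfig(bootDiskSizeGb=100, numLocalSsds=1)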
class Empty(_messages.Message):
"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo { rpc
Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON
representation for Empty is empty JSON object {}.
"""
class GceClusterConfig(_messages.Message):
"""Common config settings for resources of Google Compute Engine cluster
instances, applicable to all instances in the cluster.
Messages:
MetadataValue: The Google Compute Engine metadata entries to add to all
instances (see Project and instance metadata
(https://cloud.google.com/compute/docs/storing-retrieving-
metadata#project_and_instance_metadata)).
Fields:
internalIpOnly: Optional If true, all instances in the cluster will only
have internal IP addresses. By default, clusters are not restricted to
internal IP addresses, and will have ephemeral external IP addresses
assigned to each instance. This internal_ip_only restriction can only be
enabled for subnetwork enabled networks, and all off-cluster
dependencies must be configured to be accessible without external IP
addresses.
metadata: The Google Compute Engine metadata entries to add to all
instances (see Project and instance metadata
(https://cloud.google.com/compute/docs/storing-retrieving-
metadata#project_and_instance_metadata)).
networkUri: Optional The Google Compute Engine network to be used for
machine communications. Cannot be specified with subnetwork_uri. If
neither network_uri nor subnetwork_uri is specified, the "default"
network of the project is used, if it exists. Cannot be a "Custom Subnet
Network" (see Using Subnetworks for more information). Example: https://
www.googleapis.com/compute/v1/projects/[project_id]/regions/global/defau
lt.
serviceAccount: Optional The service account of the instances. Defaults to
the default Google Compute Engine service account. Custom service
accounts need permissions equivalent to the following IAM roles:
roles/logging.logWriter and roles/storage.objectAdmin (see
https://cloud.google.com/compute/docs/access/service-
accounts#custom_service_accounts for more information). Example:
[account_id]@[project_id].iam.gserviceaccount.com
serviceAccountScopes: Optional The URIs of service account scopes to be
included in Google Compute Engine instances. The following base set of
scopes is always included:
https://www.googleapis.com/auth/cloud.useraccounts.readonly
https://www.googleapis.com/auth/devstorage.read_write
https://www.googleapis.com/auth/logging.write If no scopes are specified,
the following defaults are also provided:
https://www.googleapis.com/auth/bigquery
https://www.googleapis.com/auth/bigtable.admin.table
https://www.googleapis.com/auth/bigtable.data
https://www.googleapis.com/auth/devstorage.full_control
subnetworkUri: Optional The Google Compute Engine subnetwork to be used
for machine communications. Cannot be specified with network_uri.
Example:
https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-
east1/sub0.
tags: The Google Compute Engine tags to add to all instances (see Tagging
instances).
zoneUri: Required The zone where the Google Compute Engine cluster will be
located. Example: https://www.googleapis.com/compute/v1/projects/[projec
t_id]/zones/[zone].
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
"""The Google Compute Engine metadata entries to add to all instances (see
Project and instance metadata (https://cloud.google.com/compute/docs
/storing-retrieving-metadata#project_and_instance_metadata)).
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Additional properties of type MetadataValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
internalIpOnly = _messages.BooleanField(1)
metadata = _messages.MessageField('MetadataValue', 2)
networkUri = _messages.StringField(3)
serviceAccount = _messages.StringField(4)
serviceAccountScopes = _messages.StringField(5, repeated=True)
subnetworkUri = _messages.StringField(6)
tags = _messages.StringField(7, repeated=True)
zoneUri = _messages.StringField(8)
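# Illustrative sketch (not part of the generated API): a minimal Compute
# Engine cluster config pinning the zone and adding a custom metadata entry.
# The project ID, zone, bucket, and metadata values are hypothetical.
#
# _example_gce_config = GceClusterConfig(
#     zoneUri=('https://www.googleapis.com/compute/v1/projects/'
#              'my-project/zones/us-east1-a'),
#     internalIpOnly=False,
#     metadata=GceClusterConfig.MetadataValue(additionalProperties=[
#         GceClusterConfig.MetadataValue.AdditionalProperty(
#             key='startup-script-url', value='gs://my-bucket/startup.sh')]),
#     tags=['dataproc'])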
class HadoopJob(_messages.Message):
"""A Cloud Dataproc job for running Apache Hadoop MapReduce
(https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-
mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN
(https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-
site/YARN.html).
Messages:
PropertiesValue: Optional A mapping of property names to values, used to
configure Hadoop. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site and classes in user code.
Fields:
archiveUris: Optional HCFS URIs of archives to be extracted in the working
directory of Hadoop drivers and tasks. Supported file types: .jar, .tar,
.tar.gz, .tgz, or .zip.
args: Optional The arguments to pass to the driver. Do not include
arguments, such as -libjars or -Dfoo=bar, that can be set as job
properties, since a collision may occur that causes an incorrect job
submission.
fileUris: Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be
copied to the working directory of Hadoop drivers and distributed tasks.
Useful for naively parallel tasks.
jarFileUris: Optional Jar file URIs to add to the CLASSPATHs of the Hadoop
driver and tasks.
loggingConfig: Optional The runtime log config for job execution.
mainClass: The name of the driver's main class. The jar file containing
the class must be in the default CLASSPATH or specified in
jar_file_uris.
mainJarFileUri: The HCFS URI of the jar file containing the main class.
Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-
mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar'
'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
properties: Optional A mapping of property names to values, used to
configure Hadoop. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site and classes in user code.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
"""Optional A mapping of property names to values, used to configure
Hadoop. Properties that conflict with values set by the Cloud Dataproc API
may be overwritten. Can include properties set in /etc/hadoop/conf/*-site
and classes in user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
archiveUris = _messages.StringField(1, repeated=True)
args = _messages.StringField(2, repeated=True)
fileUris = _messages.StringField(3, repeated=True)
jarFileUris = _messages.StringField(4, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 5)
mainClass = _messages.StringField(6)
mainJarFileUri = _messages.StringField(7)
properties = _messages.MessageField('PropertiesValue', 8)
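# Illustrative sketch (not part of the generated API): a Hadoop job driven by
# a main jar, with arguments passed separately rather than as -D properties,
# as the docstring above recommends. The bucket, jar, and property values are
# hypothetical.
#
# _example_hadoop_job = HadoopJob(
#     mainJarFileUri='gs://my-bucket/binaries/wordcount.jar',
#     args=['gs://my-bucket/input/', 'gs://my-bucket/output/'],
#     properties=HadoopJob.PropertiesValue(additionalProperties=[
#         HadoopJob.PropertiesValue.AdditionalProperty(
#             key='mapreduce.job.reduces', value='4')]))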
class HiveJob(_messages.Message):
"""A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
queries on YARN.
Messages:
PropertiesValue: Optional A mapping of property names and values, used to
configure Hive. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes
in user code.
ScriptVariablesValue: Optional Mapping of query variable names to values
(equivalent to the Hive command: SET name="value";).
Fields:
continueOnFailure: Optional Whether to continue executing queries if a
query fails. The default value is false. Setting to true can be useful
when executing independent parallel queries.
jarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATH of
the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
and UDFs.
properties: Optional A mapping of property names and values, used to
configure Hive. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes
in user code.
queryFileUri: The HCFS URI of the script that contains Hive queries.
queryList: A list of queries.
scriptVariables: Optional Mapping of query variable names to values
(equivalent to the Hive command: SET name="value";).
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
"""Optional A mapping of property names and values, used to configure
Hive. Properties that conflict with values set by the Cloud Dataproc API
may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in
user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ScriptVariablesValue(_messages.Message):
"""Optional Mapping of query variable names to values (equivalent to the
Hive command: SET name="value";).
Messages:
AdditionalProperty: An additional property for a ScriptVariablesValue
object.
Fields:
additionalProperties: Additional properties of type ScriptVariablesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ScriptVariablesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
continueOnFailure = _messages.BooleanField(1)
jarFileUris = _messages.StringField(2, repeated=True)
properties = _messages.MessageField('PropertiesValue', 3)
queryFileUri = _messages.StringField(4)
queryList = _messages.MessageField('QueryList', 5)
scriptVariables = _messages.MessageField('ScriptVariablesValue', 6)
class InstanceGroupConfig(_messages.Message):
"""Optional The config settings for Google Compute Engine resources in an
instance group, such as a main or worker group.
Fields:
diskConfig: Optional Disk option config settings.
imageUri: Output-only The Google Compute Engine image resource used for
cluster instances. Inferred from SoftwareConfig.image_version.
instanceNames: Optional The list of instance names. Cloud Dataproc derives
the names from cluster_name, num_instances, and the instance group if
not set by user (recommended practice is to let Cloud Dataproc derive
the name).
isPreemptible: Optional Specifies that this instance group contains
preemptible instances.
machineTypeUri: Required The Google Compute Engine machine type used for
cluster instances. Example:
https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-
east1-a/machineTypes/n1-standard-2.
managedGroupConfig: Output-only The config for Google Compute Engine
Instance Group Manager that manages this group. This is only used for
preemptible instance groups.
numInstances: Required The number of VM instances in the instance group.
For main instance groups, must be set to 1.
"""
diskConfig = _messages.MessageField('DiskConfig', 1)
imageUri = _messages.StringField(2)
instanceNames = _messages.StringField(3, repeated=True)
isPreemptible = _messages.BooleanField(4)
machineTypeUri = _messages.StringField(5)
managedGroupConfig = _messages.MessageField('ManagedGroupConfig', 6)
numInstances = _messages.IntegerField(7, variant=_messages.Variant.INT32)
class Job(_messages.Message):
"""A Cloud Dataproc job resource.
Messages:
LabelsValue: Optional The labels to associate with this job. Label keys
must contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
if present, must contain 1 to 63 characters, and must conform to RFC
1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
be associated with a job.
Fields:
driverControlFilesUri: Output-only If present, the location of
miscellaneous control files which may be used as part of job setup and
handling. If not present, control files may be placed in the same
location as driver_output_uri.
driverOutputResourceUri: Output-only A URI pointing to the location of the
stdout of the job's driver program.
hadoopJob: Job is a Hadoop job.
hiveJob: Job is a Hive job.
labels: Optional The labels to associate with this job. Label keys must
contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
if present, must contain 1 to 63 characters, and must conform to RFC
1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can
be associated with a job.
pigJob: Job is a Pig job.
placement: Required Job information, including how, when, and where to run
the job.
pysparkJob: Job is a Pyspark job.
reference: Optional The fully qualified reference to the job, which can be
used to obtain the equivalent REST path of the job resource. If this
property is not specified when a job is created, the server generates a
<code>job_id</code>.
sparkJob: Job is a Spark job.
sparkSqlJob: Job is a SparkSql job.
status: Output-only The job status. Additional application-specific status
information may be contained in the <code>type_job</code> and
<code>yarn_applications</code> fields.
statusHistory: Output-only The previous job status.
yarnApplications: Output-only The collection of YARN applications spun up
by this job. Beta Feature: This report is available for testing purposes
only. It may be changed before final release.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""Optional The labels to associate with this job. Label keys must contain
1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if
present, must contain 1 to 63 characters, and must conform to RFC 1035
(https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
associated with a job.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
driverControlFilesUri = _messages.StringField(1)
driverOutputResourceUri = _messages.StringField(2)
hadoopJob = _messages.MessageField('HadoopJob', 3)
hiveJob = _messages.MessageField('HiveJob', 4)
labels = _messages.MessageField('LabelsValue', 5)
pigJob = _messages.MessageField('PigJob', 6)
placement = _messages.MessageField('JobPlacement', 7)
pysparkJob = _messages.MessageField('PySparkJob', 8)
reference = _messages.MessageField('JobReference', 9)
sparkJob = _messages.MessageField('SparkJob', 10)
sparkSqlJob = _messages.MessageField('SparkSqlJob', 11)
status = _messages.MessageField('JobStatus', 12)
statusHistory = _messages.MessageField('JobStatus', 13, repeated=True)
yarnApplications = _messages.MessageField('YarnApplication', 14, repeated=True)
class JobPlacement(_messages.Message):
"""Cloud Dataproc job config.
Fields:
clusterName: Required The name of the cluster where the job will be
submitted.
clusterUuid: Output-only A cluster UUID generated by the Cloud Dataproc
service when the job is submitted.
"""
clusterName = _messages.StringField(1)
clusterUuid = _messages.StringField(2)
class JobReference(_messages.Message):
"""Encapsulates the full scoping used to reference a job.
Fields:
jobId: Optional The job ID, which must be unique within the project. The
job ID is generated by the server upon job submission or provided by the
user as a means to perform retries without creating duplicate jobs. The
ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
or hyphens (-). The maximum length is 512 characters.
projectId: Required The ID of the Google Cloud Platform project that the
job belongs to.
"""
jobId = _messages.StringField(1)
projectId = _messages.StringField(2)
class JobStatus(_messages.Message):
"""Cloud Dataproc job status.
Enums:
StateValueValuesEnum: Output-only A state message specifying the overall
job state.
Fields:
details: Output-only Optional job state details, such as an error
description if the state is <code>ERROR</code>.
state: Output-only A state message specifying the overall job state.
stateStartTime: Output-only The time when this state was entered.
"""
class StateValueValuesEnum(_messages.Enum):
"""Output-only A state message specifying the overall job state.
Values:
STATE_UNSPECIFIED: The job state is unknown.
PENDING: The job is pending; it has been submitted, but is not yet
running.
SETUP_DONE: Job has been received by the service and completed initial
setup; it will soon be submitted to the cluster.
RUNNING: The job is running on the cluster.
CANCEL_PENDING: A CancelJob request has been received, but is pending.
CANCEL_STARTED: Transient in-flight resources have been canceled, and
the request to cancel the running job has been issued to the cluster.
CANCELLED: The job cancellation was successful.
DONE: The job has completed successfully.
ERROR: The job has completed, but encountered an error.
"""
STATE_UNSPECIFIED = 0
PENDING = 1
SETUP_DONE = 2
RUNNING = 3
CANCEL_PENDING = 4
CANCEL_STARTED = 5
CANCELLED = 6
DONE = 7
ERROR = 8
details = _messages.StringField(1)
state = _messages.EnumField('StateValueValuesEnum', 2)
stateStartTime = _messages.StringField(3)
class ListClustersResponse(_messages.Message):
"""The list of all clusters in a project.
Fields:
clusters: Output-only The clusters in the project.
nextPageToken: Output-only This token is included in the response if there
are more results to fetch. To fetch additional results, provide this
value as the page_token in a subsequent
<code>ListClustersRequest</code>.
"""
clusters = _messages.MessageField('Cluster', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListJobsResponse(_messages.Message):
"""A list of jobs in a project.
Fields:
jobs: Output-only Jobs list.
nextPageToken: Optional This token is included in the response if there
are more results to fetch. To fetch additional results, provide this
value as the page_token in a subsequent <code>ListJobsRequest</code>.
"""
jobs = _messages.MessageField('Job', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListOperationsResponse(_messages.Message):
"""The response message for Operations.ListOperations.
Fields:
nextPageToken: The standard List next-page token.
operations: A list of operations that matches the specified filter in the
request.
"""
nextPageToken = _messages.StringField(1)
operations = _messages.MessageField('Operation', 2, repeated=True)
class LoggingConfig(_messages.Message):
"""The runtime logging config of the job.
Messages:
DriverLogLevelsValue: The per-package log levels for the driver. This may
include "root" package name to configure rootLogger. Examples:
'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
Fields:
driverLogLevels: The per-package log levels for the driver. This may
include "root" package name to configure rootLogger. Examples:
'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DriverLogLevelsValue(_messages.Message):
"""The per-package log levels for the driver. This may include "root"
package name to configure rootLogger. Examples: 'com.google = FATAL',
'root = INFO', 'org.apache = DEBUG'
Messages:
AdditionalProperty: An additional property for a DriverLogLevelsValue
object.
Fields:
additionalProperties: Additional properties of type DriverLogLevelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a DriverLogLevelsValue object.
Enums:
ValueValueValuesEnum:
Fields:
key: Name of the additional property.
value: A ValueValueValuesEnum attribute.
"""
class ValueValueValuesEnum(_messages.Enum):
"""ValueValueValuesEnum enum type.
Values:
LEVEL_UNSPECIFIED: <no description>
ALL: <no description>
TRACE: <no description>
DEBUG: <no description>
INFO: <no description>
WARN: <no description>
ERROR: <no description>
FATAL: <no description>
OFF: <no description>
"""
LEVEL_UNSPECIFIED = 0
ALL = 1
TRACE = 2
DEBUG = 3
INFO = 4
WARN = 5
ERROR = 6
FATAL = 7
OFF = 8
key = _messages.StringField(1)
value = _messages.EnumField('ValueValueValuesEnum', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
driverLogLevels = _messages.MessageField('DriverLogLevelsValue', 1)
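# Illustrative sketch (not part of the generated API): a logging config that
# sets the root logger to INFO and a package-level logger to DEBUG, using the
# per-package map documented above.
#
# _LogEntry = LoggingConfig.DriverLogLevelsValue.AdditionalProperty
# _LogLevel = _LogEntry.ValueValueValuesEnum
# _example_logging = LoggingConfig(
#     driverLogLevels=LoggingConfig.DriverLogLevelsValue(
#         additionalProperties=[
#             _LogEntry(key='root', value=_LogLevel.INFO),
#             _LogEntry(key='org.apache', value=_LogLevel.DEBUG)]))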
class ManagedGroupConfig(_messages.Message):
"""Specifies the resources used to actively manage an instance group.
Fields:
instanceGroupManagerName: Output-only The name of the Instance Group
Manager for this group.
instanceTemplateName: Output-only The name of the Instance Template used
for the Managed Instance Group.
"""
instanceGroupManagerName = _messages.StringField(1)
instanceTemplateName = _messages.StringField(2)
class NodeInitializationAction(_messages.Message):
"""Specifies an executable to run on a fully configured node and a timeout
period for executable completion.
Fields:
executableFile: Required Google Cloud Storage URI of executable file.
executionTimeout: Optional Amount of time executable has to complete.
Default is 10 minutes. Cluster creation fails with an explanatory error
message (the name of the executable that caused the error and the
exceeded timeout period) if the executable is not completed at end of
the timeout period.
"""
executableFile = _messages.StringField(1)
executionTimeout = _messages.StringField(2)
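# Illustrative sketch (not part of the generated API): an initialization
# action that runs a Cloud Storage hosted script with a five-minute timeout.
# The bucket name is hypothetical, and the duration string format for
# executionTimeout is assumed here.
#
# _example_init_action = NodeInitializationAction(
#     executableFile='gs://my-bucket/scripts/install-deps.sh',
#     executionTimeout='300s')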
class Operation(_messages.Message):
"""This resource represents a long-running operation that is the result of a
network API call.
Messages:
MetadataValue: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
ResponseValue: The normal response of the operation in case of success. If
the original method returns no data on success, such as Delete, the
response is google.protobuf.Empty. If the original method is standard
Get/Create/Update, the response should be the resource. For other
methods, the response should have the type XxxResponse, where Xxx is the
original method name. For example, if the original method name is
TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
Fields:
done: If the value is false, it means the operation is still in progress.
If true, the operation is completed, and either error or response is
available.
error: The error result of the operation in case of failure or
cancellation.
metadata: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
name: The server-assigned name, which is only unique within the same
service that originally returns it. If you use the default HTTP mapping,
the name should have the format of operations/some/unique/name.
response: The normal response of the operation in case of success. If the
original method returns no data on success, such as Delete, the response
is google.protobuf.Empty. If the original method is standard
Get/Create/Update, the response should be the resource. For other
methods, the response should have the type XxxResponse, where Xxx is the
original method name. For example, if the original method name is
TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
"""Service-specific metadata associated with the operation. It typically
contains progress information and common metadata such as create time.
Some services might not provide such metadata. Any method that returns a
long-running operation should document the metadata type, if any.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ResponseValue(_messages.Message):
"""The normal response of the operation in case of success. If the
original method returns no data on success, such as Delete, the response
is google.protobuf.Empty. If the original method is standard
Get/Create/Update, the response should be the resource. For other methods,
the response should have the type XxxResponse, where Xxx is the original
method name. For example, if the original method name is TakeSnapshot(),
the inferred response type is TakeSnapshotResponse.
Messages:
AdditionalProperty: An additional property for a ResponseValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ResponseValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
done = _messages.BooleanField(1)
error = _messages.MessageField('Status', 2)
metadata = _messages.MessageField('MetadataValue', 3)
name = _messages.StringField(4)
response = _messages.MessageField('ResponseValue', 5)
class OperationMetadata(_messages.Message):
"""Metadata describing the operation.
Enums:
StateValueValuesEnum: A message containing the operation state.
Fields:
clusterName: Name of the cluster for the operation.
clusterUuid: Cluster UUId for the operation.
description: Output-only Short description of operation.
details: A message containing any operation metadata details.
endTime: The time that the operation completed.
innerState: A message containing the detailed operation state.
insertTime: The time that the operation was requested.
operationType: Output-only The operation type.
startTime: The time that the operation was started by the server.
state: A message containing the operation state.
status: Output-only Current operation status.
statusHistory: Output-only Previous operation status.
"""
class StateValueValuesEnum(_messages.Enum):
"""A message containing the operation state.
Values:
UNKNOWN: Unused.
PENDING: The operation has been created.
RUNNING: The operation is currently running.
DONE: The operation is done, either cancelled or completed.
"""
UNKNOWN = 0
PENDING = 1
RUNNING = 2
DONE = 3
clusterName = _messages.StringField(1)
clusterUuid = _messages.StringField(2)
description = _messages.StringField(3)
details = _messages.StringField(4)
endTime = _messages.StringField(5)
innerState = _messages.StringField(6)
insertTime = _messages.StringField(7)
operationType = _messages.StringField(8)
startTime = _messages.StringField(9)
state = _messages.EnumField('StateValueValuesEnum', 10)
status = _messages.MessageField('OperationStatus', 11)
statusHistory = _messages.MessageField('OperationStatus', 12, repeated=True)
class OperationStatus(_messages.Message):
"""The status of the operation.
Enums:
StateValueValuesEnum: A message containing the operation state.
Fields:
details: A message containing any operation metadata details.
innerState: A message containing the detailed operation state.
state: A message containing the operation state.
stateStartTime: The time this state was entered.
"""
class StateValueValuesEnum(_messages.Enum):
"""A message containing the operation state.
Values:
UNKNOWN: Unused.
PENDING: The operation has been created.
RUNNING: The operation is running.
DONE: The operation is done; either cancelled or completed.
"""
UNKNOWN = 0
PENDING = 1
RUNNING = 2
DONE = 3
details = _messages.StringField(1)
innerState = _messages.StringField(2)
state = _messages.EnumField('StateValueValuesEnum', 3)
stateStartTime = _messages.StringField(4)
class PigJob(_messages.Message):
"""A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/)
queries on YARN.
Messages:
PropertiesValue: Optional A mapping of property names to values, used to
configure Pig. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes
in user code.
ScriptVariablesValue: Optional Mapping of query variable names to values
(equivalent to the Pig command: name=[value]).
Fields:
continueOnFailure: Optional Whether to continue executing queries if a
query fails. The default value is false. Setting to true can be useful
when executing independent parallel queries.
jarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATH of
the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
loggingConfig: Optional The runtime log config for job execution.
properties: Optional A mapping of property names to values, used to
configure Pig. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes
in user code.
queryFileUri: The HCFS URI of the script that contains the Pig queries.
queryList: A list of queries.
scriptVariables: Optional Mapping of query variable names to values
(equivalent to the Pig command: name=[value]).
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
"""Optional A mapping of property names to values, used to configure Pig.
Properties that conflict with values set by the Cloud Dataproc API may be
overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
/etc/pig/conf/pig.properties, and classes in user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ScriptVariablesValue(_messages.Message):
"""Optional Mapping of query variable names to values (equivalent to the
Pig command: name=[value]).
Messages:
AdditionalProperty: An additional property for a ScriptVariablesValue
object.
Fields:
additionalProperties: Additional properties of type ScriptVariablesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ScriptVariablesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
continueOnFailure = _messages.BooleanField(1)
jarFileUris = _messages.StringField(2, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 3)
properties = _messages.MessageField('PropertiesValue', 4)
queryFileUri = _messages.StringField(5)
queryList = _messages.MessageField('QueryList', 6)
scriptVariables = _messages.MessageField('ScriptVariablesValue', 7)
class PySparkJob(_messages.Message):
"""A Cloud Dataproc job for running Apache PySpark
(https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
applications on YARN.
Messages:
PropertiesValue: Optional A mapping of property names to values, used to
configure PySpark. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
Fields:
archiveUris: Optional HCFS URIs of archives to be extracted in the working
directory of Python drivers and tasks. Supported file types: .jar, .tar,
.tar.gz, .tgz, and .zip.
args: Optional The arguments to pass to the driver. Do not include
arguments, such as --conf, that can be set as job properties, since a
collision may occur that causes an incorrect job submission.
fileUris: Optional HCFS URIs of files to be copied to the working
directory of Python drivers and distributed tasks. Useful for naively
parallel tasks.
jarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATHs of
the Python driver and tasks.
loggingConfig: Optional The runtime log config for job execution.
mainPythonFileUri: Required The HCFS URI of the main Python file to use as
the driver. Must be a .py file.
properties: Optional A mapping of property names to values, used to
configure PySpark. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
pythonFileUris: Optional HCFS file URIs of Python files to pass to the
PySpark framework. Supported file types: .py, .egg, and .zip.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
"""Optional A mapping of property names to values, used to configure
PySpark. Properties that conflict with values set by the Cloud Dataproc
API may be overwritten. Can include properties set in /etc/spark/conf
/spark-defaults.conf and classes in user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
archiveUris = _messages.StringField(1, repeated=True)
args = _messages.StringField(2, repeated=True)
fileUris = _messages.StringField(3, repeated=True)
jarFileUris = _messages.StringField(4, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 5)
mainPythonFileUri = _messages.StringField(6)
properties = _messages.MessageField('PropertiesValue', 7)
pythonFileUris = _messages.StringField(8, repeated=True)
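# Illustrative sketch (not part of the generated API): a PySpark job with a
# main driver file and an extra Python dependency, mirroring the fields above.
# The bucket and file names are hypothetical.
#
# _example_pyspark_job = PySparkJob(
#     mainPythonFileUri='gs://my-bucket/jobs/analyze.py',
#     pythonFileUris=['gs://my-bucket/jobs/helpers.egg'],
#     args=['--date', '2016-12-01'])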
class QueryList(_messages.Message):
"""A list of queries to run on a cluster.
Fields:
queries: Required The queries to execute. You do not need to terminate a
query with a semicolon. Multiple queries can be specified in one string
by separating each with a semicolon. Here is an example of a Cloud
Dataproc API snippet that uses a QueryList to specify a HiveJob:
"hiveJob": { "queryList": { "queries": [ "query1",
"query2", "query3;query4", ] } }
"""
queries = _messages.StringField(1, repeated=True)
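# Illustrative sketch (not part of the generated API): a query list equivalent
# to the JSON snippet in the docstring above; the third element shows two
# semicolon-separated queries carried in one string.
#
# _example_query_list = QueryList(
#     queries=['query1', 'query2', 'query3;query4'])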
class SoftwareConfig(_messages.Message):
"""Specifies the selection and config of software inside the cluster.
Messages:
PropertiesValue: Optional The properties to set on daemon config
files. Property keys are specified in prefix:property format, such as
core:fs.defaultFS. The following are supported prefixes and their
mappings: core: core-site.xml hdfs: hdfs-site.xml mapred: mapred-
site.xml yarn: yarn-site.xml hive: hive-site.xml pig: pig.properties
spark: spark-defaults.conf
Fields:
imageVersion: Optional The version of software inside the cluster. It must
match the regular expression [0-9]+\.[0-9]+. If unspecified, it defaults
to the latest version (see Cloud Dataproc Versioning).
properties: Optional The properties to set on daemon config files. Property
keys are specified in prefix:property format, such as core:fs.defaultFS.
The following are supported prefixes and their mappings: core: core-
site.xml hdfs: hdfs-site.xml mapred: mapred-site.xml yarn: yarn-site.xml
hive: hive-site.xml pig: pig.properties spark: spark-defaults.conf
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
"""Optional The properties to set on daemon config files.Property keys are
specified in prefix:property format, such as core:fs.defaultFS. The
following are supported prefixes and their mappings: core: core-site.xml
hdfs: hdfs-site.xml mapred: mapred-site.xml yarn: yarn-site.xml hive:
hive-site.xml pig: pig.properties spark: spark-defaults.conf
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
imageVersion = _messages.StringField(1)
properties = _messages.MessageField('PropertiesValue', 2)
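# Illustrative sketch (not part of the generated API): a software config that
# pins the image version and overrides one HDFS and one Spark daemon property
# using the prefix:property key format documented above. Values are
# hypothetical.
#
# _example_software_config = SoftwareConfig(
#     imageVersion='1.1',
#     properties=SoftwareConfig.PropertiesValue(additionalProperties=[
#         SoftwareConfig.PropertiesValue.AdditionalProperty(
#             key='hdfs:dfs.replication', value='2'),
#         SoftwareConfig.PropertiesValue.AdditionalProperty(
#             key='spark:spark.executor.memory', value='4g')]))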
class SparkJob(_messages.Message):
"""A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
applications on YARN.
Messages:
PropertiesValue: Optional A mapping of property names to values, used to
configure Spark. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
Fields:
archiveUris: Optional HCFS URIs of archives to be extracted in the working
directory of Spark drivers and tasks. Supported file types: .jar, .tar,
.tar.gz, .tgz, and .zip.
args: Optional The arguments to pass to the driver. Do not include
arguments, such as --conf, that can be set as job properties, since a
collision may occur that causes an incorrect job submission.
fileUris: Optional HCFS URIs of files to be copied to the working
directory of Spark drivers and distributed tasks. Useful for naively
parallel tasks.
jarFileUris: Optional HCFS URIs of jar files to add to the CLASSPATHs of
the Spark driver and tasks.
loggingConfig: Optional The runtime log config for job execution.
mainClass: The name of the driver's main class. The jar file that contains
the class must be in the default CLASSPATH or specified in
jar_file_uris.
mainJarFileUri: The HCFS URI of the jar file that contains the main class.
properties: Optional A mapping of property names to values, used to
configure Spark. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten. Can include properties set in
/etc/spark/conf/spark-defaults.conf and classes in user code.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
"""Optional A mapping of property names to values, used to configure
Spark. Properties that conflict with values set by the Cloud Dataproc API
may be overwritten. Can include properties set in /etc/spark/conf/spark-
defaults.conf and classes in user code.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
archiveUris = _messages.StringField(1, repeated=True)
args = _messages.StringField(2, repeated=True)
fileUris = _messages.StringField(3, repeated=True)
jarFileUris = _messages.StringField(4, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 5)
mainClass = _messages.StringField(6)
mainJarFileUri = _messages.StringField(7)
properties = _messages.MessageField('PropertiesValue', 8)
class SparkSqlJob(_messages.Message):
"""A Cloud Dataproc job for running Apache Spark SQL
(http://spark.apache.org/sql/) queries.
Messages:
PropertiesValue: Optional A mapping of property names to values, used to
configure Spark SQL's SparkConf. Properties that conflict with values
set by the Cloud Dataproc API may be overwritten.
ScriptVariablesValue: Optional Mapping of query variable names to values
(equivalent to the Spark SQL command: SET name="value";).
Fields:
jarFileUris: Optional HCFS URIs of jar files to be added to the Spark
CLASSPATH.
loggingConfig: Optional The runtime log config for job execution.
properties: Optional A mapping of property names to values, used to
configure Spark SQL's SparkConf. Properties that conflict with values
set by the Cloud Dataproc API may be overwritten.
queryFileUri: The HCFS URI of the script that contains SQL queries.
queryList: A list of queries.
scriptVariables: Optional Mapping of query variable names to values
(equivalent to the Spark SQL command: SET name="value";).
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
"""Optional A mapping of property names to values, used to configure Spark
SQL's SparkConf. Properties that conflict with values set by the Cloud
Dataproc API may be overwritten.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Additional properties of type PropertiesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ScriptVariablesValue(_messages.Message):
"""Optional Mapping of query variable names to values (equivalent to the
Spark SQL command: SET name="value";).
Messages:
AdditionalProperty: An additional property for a ScriptVariablesValue
object.
Fields:
additionalProperties: Additional properties of type ScriptVariablesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ScriptVariablesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
jarFileUris = _messages.StringField(1, repeated=True)
loggingConfig = _messages.MessageField('LoggingConfig', 2)
properties = _messages.MessageField('PropertiesValue', 3)
queryFileUri = _messages.StringField(4)
queryList = _messages.MessageField('QueryList', 5)
scriptVariables = _messages.MessageField('ScriptVariablesValue', 6)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
bearer_token: OAuth bearer token.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
pp: Pretty-print response.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
bearer_token = _messages.StringField(4)
callback = _messages.StringField(5)
fields = _messages.StringField(6)
key = _messages.StringField(7)
oauth_token = _messages.StringField(8)
pp = _messages.BooleanField(9, default=True)
prettyPrint = _messages.BooleanField(10, default=True)
quotaUser = _messages.StringField(11)
trace = _messages.StringField(12)
uploadType = _messages.StringField(13)
upload_protocol = _messages.StringField(14)
class Status(_messages.Message):
"""The Status type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by gRPC (https://github.com/grpc). The error model is designed to be:
  simple to use and understand for most users, and flexible enough to meet
  unexpected needs. Overview: The Status message contains three pieces of data:
error code, error message, and error details. The error code should be an
enum value of google.rpc.Code, but it may accept additional error codes if
needed. The error message should be a developer-facing English message that
helps developers understand and resolve the error. If a localized user-
facing error message is needed, put the localized message in the error
details or localize it in the client. The optional error details may contain
arbitrary information about the error. There is a predefined set of error
detail types in the package google.rpc which can be used for common error
  conditions. Language mapping: The Status message is the logical representation
of the error model, but it is not necessarily the actual wire format. When
the Status message is exposed in different client libraries and different
wire protocols, it can be mapped differently. For example, it will likely be
mapped to some exceptions in Java, but more likely mapped to some error
  codes in C. Other uses: The error model and the Status message can be used in a
variety of environments, either with or without APIs, to provide a
  consistent developer experience across different environments. Example uses
of this error model include: Partial errors. If a service needs to return
partial errors to the client, it may embed the Status in the normal response
to indicate the partial errors. Workflow errors. A typical workflow has
multiple steps. Each step may have a Status message for error reporting
purpose. Batch operations. If a client uses batch request and batch
response, the Status message should be used directly inside batch response,
one for each error sub-response. Asynchronous operations. If an API call
embeds asynchronous operation results in its response, the status of those
operations should be represented directly using the Status message. Logging.
If some API errors are stored in logs, the message Status could be used
directly after any stripping needed for security/privacy reasons.
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There will be a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DetailsValueListEntry(_messages.Message):
"""A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
class SubmitJobRequest(_messages.Message):
"""A request to submit a job.
Fields:
job: Required The job resource.
"""
job = _messages.MessageField('Job', 1)
class YarnApplication(_messages.Message):
"""A YARN application created by a job. Application information is a subset
  of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
  Beta Feature: This report is available for testing purposes only. It may be
  changed before final release.
Enums:
StateValueValuesEnum: Required The application state.
Fields:
name: Required The application name.
progress: Required The numerical progress of the application, from 1 to
100.
state: Required The application state.
    trackingUrl: Optional The HTTP URL of the ApplicationMaster,
HistoryServer, or TimelineServer that provides application-specific
information. The URL uses the internal hostname, and requires a proxy
server for resolution and, possibly, access.
"""
class StateValueValuesEnum(_messages.Enum):
"""Required The application state.
Values:
STATE_UNSPECIFIED: Status is unspecified.
NEW: Status is NEW.
NEW_SAVING: Status is NEW_SAVING.
SUBMITTED: Status is SUBMITTED.
ACCEPTED: Status is ACCEPTED.
RUNNING: Status is RUNNING.
FINISHED: Status is FINISHED.
FAILED: Status is FAILED.
KILLED: Status is KILLED.
"""
STATE_UNSPECIFIED = 0
NEW = 1
NEW_SAVING = 2
SUBMITTED = 3
ACCEPTED = 4
RUNNING = 5
FINISHED = 6
FAILED = 7
KILLED = 8
name = _messages.StringField(1)
progress = _messages.FloatField(2, variant=_messages.Variant.FLOAT)
state = _messages.EnumField('StateValueValuesEnum', 3)
trackingUrl = _messages.StringField(4)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv',
package=u'dataproc')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1',
package=u'dataproc')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2',
package=u'dataproc')
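# Illustrative sketch (not part of the generated module): building a
# SparkSqlJob with the map-style PropertiesValue defined above. The property
# key and the gs:// URI below are hypothetical.
#
#   props = SparkSqlJob.PropertiesValue(additionalProperties=[
#       SparkSqlJob.PropertiesValue.AdditionalProperty(
#           key='spark.sql.shuffle.partitions', value='64'),
#   ])
#   job = SparkSqlJob(queryFileUri='gs://my-bucket/query.sql', properties=props)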
|
{
"content_hash": "ffaa432f9f2f0d8ed497cda0808472b2",
"timestamp": "",
"source": "github",
"line_count": 2084,
"max_line_length": 94,
"avg_line_length": 39.76439539347409,
"alnum_prop": 0.7297059213940074,
"repo_name": "KaranToor/MA450",
"id": "b4cfd8d555b30193a9b6d881102d6c411c8a3b46",
"size": "82869",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/googlecloudsdk/third_party/apis/dataproc/v1/dataproc_v1_messages.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
}
|
"""Data Equivalence Tests"""
from __future__ import print_function
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Martin Billinger <martin.billinger@tugraz.at>
#
# License: BSD (3-clause)
import os.path as op
import inspect
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_array_almost_equal, assert_array_equal
from numpy.testing import assert_raises, assert_allclose
from scipy import io
import numpy as np
from mne import pick_types, concatenate_raws
from mne.externals.six import iterbytes
from mne.utils import _TempDir
from mne.io import Raw, read_raw_edf
import mne.io.edf.edf as edfmodule
from mne.event import find_events
FILE = inspect.getfile(inspect.currentframe())
data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
montage_path = op.join(data_dir, 'biosemi.hpts')
bdf_path = op.join(data_dir, 'test.bdf')
edf_path = op.join(data_dir, 'test.edf')
bdf_eeglab_path = op.join(data_dir, 'test_bdf_eeglab.mat')
edf_eeglab_path = op.join(data_dir, 'test_edf_eeglab.mat')
eog = ['REOG', 'LEOG', 'IEOG']
misc = ['EXG1', 'EXG5', 'EXG8', 'M1', 'M2']
def test_bdf_data():
"""Test reading raw bdf files
"""
raw_py = read_raw_edf(bdf_path, montage=montage_path, eog=eog,
misc=misc, preload=True)
picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
data_py, _ = raw_py[picks]
print(raw_py) # to test repr
print(raw_py.info) # to test Info repr
# this .mat was generated using the EEG Lab Biosemi Reader
raw_eeglab = io.loadmat(bdf_eeglab_path)
raw_eeglab = raw_eeglab['data'] * 1e-6 # data are stored in microvolts
data_eeglab = raw_eeglab[picks]
assert_array_almost_equal(data_py, data_eeglab)
# Manually checking that float coordinates are imported
assert_true((raw_py.info['chs'][0]['eeg_loc']).any())
assert_true((raw_py.info['chs'][25]['eeg_loc']).any())
assert_true((raw_py.info['chs'][63]['eeg_loc']).any())
# Make sure concatenation works
raw_concat = concatenate_raws([raw_py.copy(), raw_py])
assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
def test_edf_data():
"""Test reading raw edf files
"""
raw_py = read_raw_edf(edf_path, misc=range(-4, 0), stim_channel=139,
preload=True)
picks = pick_types(raw_py.info, meg=False, eeg=True,
exclude=['EDF Annotations'])
data_py, _ = raw_py[picks]
print(raw_py) # to test repr
print(raw_py.info) # to test Info repr
# this .mat was generated using the EEG Lab Biosemi Reader
raw_eeglab = io.loadmat(edf_eeglab_path)
raw_eeglab = raw_eeglab['data'] * 1e-6 # data are stored in microvolts
data_eeglab = raw_eeglab[picks]
assert_array_almost_equal(data_py, data_eeglab)
# Make sure concatenation works
raw_concat = concatenate_raws([raw_py.copy(), raw_py])
assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
def test_read_segment():
"""Test writing raw edf files when preload is False
"""
tempdir = _TempDir()
raw1 = read_raw_edf(edf_path, stim_channel=None, preload=False)
raw1_file = op.join(tempdir, 'test1-raw.fif')
raw1.save(raw1_file, overwrite=True, buffer_size_sec=1)
raw11 = Raw(raw1_file, preload=True)
data1, times1 = raw1[:139, :]
data11, times11 = raw11[:139, :]
assert_allclose(data1, data11, rtol=1e-6)
assert_array_almost_equal(times1, times11)
assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
raw2 = read_raw_edf(edf_path, stim_channel=None, preload=True)
raw2_file = op.join(tempdir, 'test2-raw.fif')
raw2.save(raw2_file, overwrite=True)
data2, times2 = raw2[:139, :]
assert_allclose(data1, data2, rtol=1e-6)
assert_array_equal(times1, times2)
raw1 = Raw(raw1_file, preload=True)
raw2 = Raw(raw2_file, preload=True)
assert_array_equal(raw1._data, raw2._data)
# test the _read_segment function by only loading some of the data
raw1 = read_raw_edf(edf_path, stim_channel=None, preload=False)
raw2 = read_raw_edf(edf_path, stim_channel=None, preload=True)
# select some random range of data to compare
data1, times1 = raw1[:, 345:417]
data2, times2 = raw2[:, 345:417]
assert_array_equal(data1, data2)
assert_array_equal(times1, times2)
def test_append():
"""Test appending raw edf objects using Raw.append
"""
# Author: Alan Leggitt <alan.leggitt@ucsf.edu>
raw = read_raw_edf(bdf_path, preload=False)
raw0 = raw.copy()
raw1 = raw.copy()
raw0.append(raw1)
assert_true(2 * len(raw) == len(raw0))
def test_parse_annotation():
"""Test parsing the tal channel
"""
# test the parser
annot = (b'+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00'
b'+180\x14Lights off\x14\x00\x00\x00\x00\x00\x00\x00\x00'
b'+180\x14Close door\x14\x00\x00\x00\x00\x00\x00\x00\x00'
b'+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00'
b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00'
b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00')
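    # The TAL (annotation) channel is stored as 16-bit little-endian samples,
    # so the raw bytes are re-packed below: every second byte is the high
    # byte (multiplied by 256) and consecutive byte pairs are summed.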
annot = [a for a in iterbytes(annot)]
annot[1::2] = [a * 256 for a in annot[1::2]]
tal_channel = map(sum, zip(annot[0::2], annot[1::2]))
events = edfmodule._parse_tal_channel(tal_channel)
assert_equal(events, [[180.0, 0, 'Lights off'],
[180.0, 0, 'Close door'],
[180.0, 0, 'Lights off'],
[180.0, 0, 'Close door'],
[3.14, 4.2, 'nothing'],
[1800.2, 25.5, 'Apnea']])
def test_edf_annotations():
"""Test if events are detected correctly in a typical MNE workflow.
"""
# test an actual file
raw = read_raw_edf(edf_path, tal_channel=-1, preload=True)
edf_events = find_events(raw, output='step', shortest_event=0,
stim_channel='STI 014')
# onset, duration, id
events = [[0.1344, 0.2560, 2],
[0.3904, 1.0000, 2],
[2.0000, 0.0000, 3],
[2.5000, 2.5000, 2]]
events = np.array(events)
events[:, :2] *= 512 # convert time to samples
events = np.array(events, dtype=int)
events[:, 1] -= 1
events[events[:, 1] <= 0, 1] = 1
events[:, 1] += events[:, 0]
onsets = events[:, [0, 2]]
offsets = events[:, [1, 2]]
events = np.zeros((2 * events.shape[0], 3), dtype=int)
events[0::2, [0, 2]] = onsets
events[1::2, [0, 1]] = offsets
assert_array_equal(edf_events, events)
def test_write_annotations():
"""Test writing raw files when annotations were parsed.
"""
tempdir = _TempDir()
raw1 = read_raw_edf(edf_path, tal_channel=-1, preload=True)
raw1_file = op.join(tempdir, 'test1-raw.fif')
raw1.save(raw1_file, overwrite=True, buffer_size_sec=1)
raw11 = Raw(raw1_file, preload=True)
data1, times1 = raw1[:, :]
data11, times11 = raw11[:, :]
assert_array_almost_equal(data1, data11)
assert_array_almost_equal(times1, times11)
assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
assert_raises(RuntimeError, read_raw_edf,
edf_path, tal_channel=-1, preload=False)
|
{
"content_hash": "27e292bbc19b0dd345aba54f83a7cf14",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 75,
"avg_line_length": 35.1256038647343,
"alnum_prop": 0.6248108925869894,
"repo_name": "agramfort/mne-python",
"id": "691b91701bfa0a70f63f767f3f769fcb2345db5f",
"size": "7271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/io/edf/tests/test_edf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PowerShell",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "3751581"
},
{
"name": "Shell",
"bytes": "4011"
}
],
"symlink_target": ""
}
|
"""
utils mino
"""
import os
import errno
import shutil
def str2num(string):
"""convert string to numbers"""
try:
return int(string)
except ValueError:
return float(string)
def get_suffix_file(dirname, suffix):
"""get all files in a dir recursively with specific suffix"""
files = []
for dirpath, dirnames, filenames in os.walk(dirname):
for name in filenames:
filename, file_extension = os.path.splitext(name)
if file_extension == suffix:
files.append(os.path.join(dirpath, name))
return files
def copy(src, dest, force_create = False):
"""copy file from src to dest"""
if force_create:
head,tail = os.path.split(dest)
mkdir_p(head)
shutil.copyfile(src, dest)
else:
shutil.copyfile(src, dest)
def mkdir_p(path):
'''
functions like mkdir -p in bash
'''
try:
os.makedirs(path)
except OSError as exc: # Python > 2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
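# Minimal usage sketch (paths are hypothetical), kept as comments so importing
# this module stays side-effect free:
#
#   mkdir_p('/tmp/demo/nested')                  # behaves like `mkdir -p`
#   copy('a.txt', '/tmp/demo/nested/a.txt', force_create=True)
#   wavs = get_suffix_file('/tmp/demo', '.wav')  # recursive search by suffix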
if __name__ == "__main__":
pass
|
{
"content_hash": "23dac1663905301cedd8471187b3fa33",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 65,
"avg_line_length": 21.76923076923077,
"alnum_prop": 0.5848056537102474,
"repo_name": "gongbudaizhe/bilib",
"id": "b64fb45bc9d89712bfeed185c88aa3c0f905953e",
"size": "1222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "21230"
},
{
"name": "Makefile",
"bytes": "77"
},
{
"name": "Matlab",
"bytes": "7242"
},
{
"name": "Python",
"bytes": "92163"
},
{
"name": "Shell",
"bytes": "8043"
},
{
"name": "Tcl",
"bytes": "3254"
}
],
"symlink_target": ""
}
|
from pyModbusTCP import constants
from setuptools import setup
with open('README.rst') as f:
readme = f.read()
setup(
name="pyModbusTCP",
version=constants.VERSION,
description="A simple Modbus/TCP library for Python",
long_description=readme,
author="Loic Lefebvre",
author_email="loic.celine@free.fr",
license="MIT",
url="https://github.com/sourceperl/pyModbusTCP",
packages=["pyModbusTCP"],
platforms="any",
)
|
{
"content_hash": "bc5d01f9205bde97cbf5dccf725e1898",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 57,
"avg_line_length": 25.5,
"alnum_prop": 0.690631808278867,
"repo_name": "sourceperl/pyModbusTCP",
"id": "3b42d32f845f87cbdb9501c9e6d5d12607347b46",
"size": "459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "101002"
}
],
"symlink_target": ""
}
|
"""Default variable filters."""
import random as random_module
import re
import types
from decimal import ROUND_HALF_UP, Context, Decimal, InvalidOperation
from functools import wraps
from operator import itemgetter
from pprint import pformat
from urllib.parse import quote
from django.utils import formats
from django.utils.dateformat import format, time_format
from django.utils.encoding import iri_to_uri
from django.utils.html import (
avoid_wrapping, conditional_escape, escape, escapejs,
json_script as _json_script, linebreaks, strip_tags, urlize as _urlize,
)
from django.utils.safestring import SafeData, mark_safe
from django.utils.text import (
Truncator, normalize_newlines, phone2numeric, slugify as _slugify, wrap,
)
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import gettext, ngettext
from .base import Variable, VariableDoesNotExist
from .library import Library
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive strings. The object
passed as the first positional argument will be converted to a string.
"""
def _dec(*args, **kwargs):
args = list(args)
args[0] = str(args[0])
if (isinstance(args[0], SafeData) and
getattr(_dec._decorated_function, 'is_safe', False)):
return mark_safe(func(*args, **kwargs))
return func(*args, **kwargs)
# Include a reference to the real function (used to check original
# arguments by the template parser, and to bear the 'is_safe' attribute
# when multiple decorators are applied).
_dec._decorated_function = getattr(func, '_decorated_function', func)
return wraps(func)(_dec)
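# Illustrative sketch (not a filter shipped with Django): any filter decorated
# with @stringfilter can rely on its first argument already being a str.
#
#   @register.filter
#   @stringfilter
#   def shout(value):
#       return value.upper() + '!'  # value is guaranteed to be a string here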
###################
# STRINGS #
###################
@register.filter(is_safe=True)
@stringfilter
def addslashes(value):
"""
Add slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
@register.filter(is_safe=True)
@stringfilter
def capfirst(value):
"""Capitalize the first character of the value."""
return value and value[0].upper() + value[1:]
@register.filter("escapejs")
@stringfilter
def escapejs_filter(value):
"""Hex encode characters for use in JavaScript strings."""
return escapejs(value)
@register.filter(is_safe=True)
def json_script(value, element_id):
"""
Output value JSON-encoded, wrapped in a <script type="application/json">
tag.
"""
return _json_script(value, element_id)
@register.filter(is_safe=True)
def floatformat(text, arg=-1):
"""
Display a float to a specified number of decimal places.
If called without an argument, display the floating point number with one
decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, always display exactly arg number of decimal places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, display arg number of decimal places -- but only if
there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If the input float is infinity or NaN, display the string representation
of that value.
"""
try:
input_val = repr(text)
d = Decimal(input_val)
except InvalidOperation:
try:
d = Decimal(str(float(text)))
except (ValueError, InvalidOperation, TypeError):
return ''
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
if not m and p < 0:
return mark_safe(formats.number_format('%d' % (int(d)), 0))
exp = Decimal(1).scaleb(-abs(p))
# Set the precision high enough to avoid an exception (#15789).
tupl = d.as_tuple()
units = len(tupl[1])
units += -tupl[2] if m else tupl[2]
prec = abs(p) + units + 1
# Avoid conversion to scientific notation by accessing `sign`, `digits`,
# and `exponent` from Decimal.as_tuple() directly.
sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP, Context(prec=prec)).as_tuple()
digits = [str(digit) for digit in reversed(digits)]
while len(digits) <= abs(exponent):
digits.append('0')
digits.insert(-exponent, '.')
if sign:
digits.append('-')
number = ''.join(reversed(digits))
return mark_safe(formats.number_format(number, abs(p)))
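# Quick sanity checks mirroring the docstring above, shown as comments:
#
#   floatformat(34.23234)     # -> '34.2'
#   floatformat(34.00000)     # -> '34'
#   floatformat(34.26000, 3)  # -> '34.260'
#   floatformat('invalid')    # -> '' (fails silently)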
@register.filter(is_safe=True)
@stringfilter
def iriencode(value):
"""Escape an IRI value for use in a URL."""
return iri_to_uri(value)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linenumbers(value, autoescape=True):
"""Display text with line numbers."""
lines = value.split('\n')
# Find the maximum width of the line count, for use with zero padding
# string format command
width = str(len(str(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, escape(line))
return mark_safe('\n'.join(lines))
@register.filter(is_safe=True)
@stringfilter
def lower(value):
"""Convert a string into all lowercase."""
return value.lower()
@register.filter(is_safe=False)
@stringfilter
def make_list(value):
"""
Return the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
@register.filter(is_safe=True)
@stringfilter
def slugify(value):
"""
Convert to ASCII. Convert spaces to hyphens. Remove characters that aren't
alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip
leading and trailing whitespace.
"""
return _slugify(value)
@register.filter(is_safe=True)
def stringformat(value, arg):
"""
Format the variable according to the arg, a string formatting specifier.
This specifier uses Python string formatting syntax, with the exception
that the leading "%" is dropped.
See https://docs.python.org/library/stdtypes.html#printf-style-string-formatting
for documentation of Python string formatting.
"""
if isinstance(value, tuple):
value = str(value)
try:
return ("%" + str(arg)) % value
except (ValueError, TypeError):
return ""
@register.filter(is_safe=True)
@stringfilter
def title(value):
"""Convert a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
return re.sub(r"\d([A-Z])", lambda m: m.group(0).lower(), t)
@register.filter(is_safe=True)
@stringfilter
def truncatechars(value, arg):
"""Truncate a string after `arg` number of characters."""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).chars(length)
@register.filter(is_safe=True)
@stringfilter
def truncatechars_html(value, arg):
"""
Truncate HTML after `arg` number of chars.
Preserve newlines in the HTML.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).chars(length, html=True)
@register.filter(is_safe=True)
@stringfilter
def truncatewords(value, arg):
"""
Truncate a string after `arg` number of words.
Remove newlines within the string.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).words(length, truncate=' …')
@register.filter(is_safe=True)
@stringfilter
def truncatewords_html(value, arg):
"""
Truncate HTML after `arg` number of words.
Preserve newlines in the HTML.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).words(length, html=True, truncate=' …')
@register.filter(is_safe=False)
@stringfilter
def upper(value):
"""Convert a string into all uppercase."""
return value.upper()
@register.filter(is_safe=False)
@stringfilter
def urlencode(value, safe=None):
"""
Escape a value for use in a URL.
The ``safe`` parameter determines the characters which should not be
escaped by Python's quote() function. If not provided, use the default safe
characters (but an empty string can be provided when *all* characters
should be escaped).
"""
kwargs = {}
if safe is not None:
kwargs['safe'] = safe
return quote(value, **kwargs)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize(value, autoescape=True):
"""Convert URLs in plain text into clickable links."""
return mark_safe(_urlize(value, nofollow=True, autoescape=autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlizetrunc(value, limit, autoescape=True):
"""
Convert URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
return mark_safe(_urlize(value, trim_url_limit=int(limit), nofollow=True, autoescape=autoescape))
@register.filter(is_safe=False)
@stringfilter
def wordcount(value):
"""Return the number of words."""
return len(value.split())
@register.filter(is_safe=True)
@stringfilter
def wordwrap(value, arg):
"""Wrap words at `arg` line length."""
return wrap(value, int(arg))
@register.filter(is_safe=True)
@stringfilter
def ljust(value, arg):
"""Left-align the value in a field of a given width."""
return value.ljust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def rjust(value, arg):
"""Right-align the value in a field of a given width."""
return value.rjust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def center(value, arg):
"""Center the value in a field of a given width."""
return value.center(int(arg))
@register.filter
@stringfilter
def cut(value, arg):
"""Remove all values of arg from the given string."""
safe = isinstance(value, SafeData)
value = value.replace(arg, '')
if safe and arg != ';':
return mark_safe(value)
return value
###################
# HTML STRINGS #
###################
@register.filter("escape", is_safe=True)
@stringfilter
def escape_filter(value):
"""Mark the value as a string that should be auto-escaped."""
return conditional_escape(value)
@register.filter(is_safe=True)
@stringfilter
def force_escape(value):
"""
Escape a string's HTML. Return a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
return escape(value)
@register.filter("linebreaks", is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaks_filter(value, autoescape=True):
"""
Replace line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br>``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaksbr(value, autoescape=True):
"""
Convert all newlines in a piece of plain text to HTML line breaks
(``<br>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
value = normalize_newlines(value)
if autoescape:
value = escape(value)
return mark_safe(value.replace('\n', '<br>'))
@register.filter(is_safe=True)
@stringfilter
def safe(value):
"""Mark the value as a string that should not be auto-escaped."""
return mark_safe(value)
@register.filter(is_safe=True)
def safeseq(value):
"""
A "safe" filter for sequences. Mark each element in the sequence,
individually, as safe, after converting them to strings. Return a list
with the results.
"""
return [mark_safe(obj) for obj in value]
@register.filter(is_safe=True)
@stringfilter
def striptags(value):
"""Strip all [X]HTML tags."""
return strip_tags(value)
###################
# LISTS #
###################
def _property_resolver(arg):
"""
When arg is convertible to float, behave like operator.itemgetter(arg)
Otherwise, behave like Variable(arg).resolve
>>> _property_resolver(1)('abc')
'b'
>>> _property_resolver('1')('abc')
Traceback (most recent call last):
...
TypeError: string indices must be integers
>>> class Foo:
... a = 42
... b = 3.14
... c = 'Hey!'
>>> _property_resolver('b')(Foo())
3.14
"""
try:
float(arg)
except ValueError:
return Variable(arg).resolve
else:
return itemgetter(arg)
@register.filter(is_safe=False)
def dictsort(value, arg):
"""
Given a list of dicts, return that list sorted by the property given in
the argument.
"""
try:
return sorted(value, key=_property_resolver(arg))
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def dictsortreversed(value, arg):
"""
Given a list of dicts, return that list sorted in reverse order by the
property given in the argument.
"""
try:
return sorted(value, key=_property_resolver(arg), reverse=True)
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def first(value):
"""Return the first item in a list."""
try:
return value[0]
except IndexError:
return ''
@register.filter(is_safe=True, needs_autoescape=True)
def join(value, arg, autoescape=True):
"""Join a list with a string, like Python's ``str.join(list)``."""
try:
if autoescape:
value = [conditional_escape(v) for v in value]
data = conditional_escape(arg).join(value)
except TypeError: # Fail silently if arg isn't iterable.
return value
return mark_safe(data)
@register.filter(is_safe=True)
def last(value):
"""Return the last item in a list."""
try:
return value[-1]
except IndexError:
return ''
@register.filter(is_safe=False)
def length(value):
"""Return the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return 0
@register.filter(is_safe=False)
def length_is(value, arg):
"""Return a boolean of whether the value's length is the argument."""
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=True)
def random(value):
"""Return a random item from the list."""
return random_module.choice(value)
@register.filter("slice", is_safe=True)
def slice_filter(value, arg):
"""
Return a slice of the list using the same syntax as Python's list slicing.
"""
try:
bits = []
for x in str(arg).split(':'):
if not x:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
@register.filter(is_safe=True, needs_autoescape=True)
def unordered_list(value, autoescape=True):
"""
Recursively take a self-nested list and return an HTML unordered list --
WITHOUT opening and closing <ul> tags.
Assume the list is in the proper format. For example, if ``var`` contains:
``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``, then
``{{ var|unordered_list }}`` returns::
<li>States
<ul>
<li>Kansas
<ul>
<li>Lawrence</li>
<li>Topeka</li>
</ul>
</li>
<li>Illinois</li>
</ul>
</li>
"""
if autoescape:
escaper = conditional_escape
else:
def escaper(x):
return x
def walk_items(item_list):
item_iterator = iter(item_list)
try:
item = next(item_iterator)
while True:
try:
next_item = next(item_iterator)
except StopIteration:
yield item, None
break
if isinstance(next_item, (list, tuple, types.GeneratorType)):
try:
iter(next_item)
except TypeError:
pass
else:
yield item, next_item
item = next(item_iterator)
continue
yield item, None
item = next_item
except StopIteration:
pass
def list_formatter(item_list, tabs=1):
indent = '\t' * tabs
output = []
for item, children in walk_items(item_list):
sublist = ''
if children:
sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (
indent, list_formatter(children, tabs + 1), indent, indent)
output.append('%s<li>%s%s</li>' % (
indent, escaper(item), sublist))
return '\n'.join(output)
return mark_safe(list_formatter(value))
###################
# INTEGERS #
###################
@register.filter(is_safe=False)
def add(value, arg):
"""Add the arg to the value."""
try:
return int(value) + int(arg)
except (ValueError, TypeError):
try:
return value + arg
except Exception:
return ''
@register.filter(is_safe=False)
def get_digit(value, arg):
"""
Given a whole number, return the requested digit of it, where 1 is the
right-most digit, 2 is the second-right-most digit, etc. Return the
original value for invalid input (if input or argument is not an integer,
or if argument is less than 1). Otherwise, output is always an integer.
"""
try:
arg = int(arg)
value = int(value)
except ValueError:
return value # Fail silently for an invalid argument
if arg < 1:
return value
try:
return int(str(value)[-arg])
except IndexError:
return 0
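# Examples, shown as comments: digits are counted from the right, starting at 1.
#
#   get_digit(123456789, 2)  # -> 8
#   get_digit(123456789, 1)  # -> 9
#   get_digit(42, 5)         # -> 0  (requested digit is out of range)
#   get_digit('foo', 1)      # -> 'foo'  (invalid input returned unchanged)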
###################
# DATES #
###################
@register.filter(expects_localtime=True, is_safe=False)
def date(value, arg=None):
"""Format a date according to the given format."""
if value in (None, ''):
return ''
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
@register.filter(expects_localtime=True, is_safe=False)
def time(value, arg=None):
"""Format a time according to the given format."""
if value in (None, ''):
return ''
try:
return formats.time_format(value, arg)
except (AttributeError, TypeError):
try:
return time_format(value, arg)
except (AttributeError, TypeError):
return ''
@register.filter("timesince", is_safe=False)
def timesince_filter(value, arg=None):
"""Format a date as the time since that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return ''
@register.filter("timeuntil", is_safe=False)
def timeuntil_filter(value, arg=None):
"""Format a date as the time until that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return ''
###################
# LOGIC #
###################
@register.filter(is_safe=False)
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
@register.filter(is_safe=False)
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
@register.filter(is_safe=False)
def divisibleby(value, arg):
"""Return True if the value is divisible by the argument."""
return int(value) % int(arg) == 0
@register.filter(is_safe=False)
def yesno(value, arg=None):
"""
Given a string mapping values for true, false, and (optionally) None,
return one of those strings according to the value:
    ==========  ======================  ==================================
    Value       Argument                Outputs
    ==========  ======================  ==================================
    ``True``    ``"yeah,no,maybe"``     ``yeah``
    ``False``   ``"yeah,no,maybe"``     ``no``
    ``None``    ``"yeah,no,maybe"``     ``maybe``
    ``None``    ``"yeah,no"``           ``"no"`` (converts None to False
                                        if no mapping for None is given)
    ==========  ======================  ==================================
"""
if arg is None:
arg = gettext('yes,no,maybe')
bits = arg.split(',')
if len(bits) < 2:
return value # Invalid arg.
try:
yes, no, maybe = bits
except ValueError:
# Unpack list of wrong size (no "maybe" value provided).
yes, no, maybe = bits[0], bits[1], bits[1]
if value is None:
return maybe
if value:
return yes
return no
###################
# MISC #
###################
@register.filter(is_safe=True)
def filesizeformat(bytes_):
"""
Format the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
102 bytes, etc.).
"""
try:
bytes_ = float(bytes_)
except (TypeError, ValueError, UnicodeDecodeError):
value = ngettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
return avoid_wrapping(value)
def filesize_number_format(value):
return formats.number_format(round(value, 1), 1)
KB = 1 << 10
MB = 1 << 20
GB = 1 << 30
TB = 1 << 40
PB = 1 << 50
negative = bytes_ < 0
if negative:
bytes_ = -bytes_ # Allow formatting of negative numbers.
if bytes_ < KB:
value = ngettext("%(size)d byte", "%(size)d bytes", bytes_) % {'size': bytes_}
elif bytes_ < MB:
value = gettext("%s KB") % filesize_number_format(bytes_ / KB)
elif bytes_ < GB:
value = gettext("%s MB") % filesize_number_format(bytes_ / MB)
elif bytes_ < TB:
value = gettext("%s GB") % filesize_number_format(bytes_ / GB)
elif bytes_ < PB:
value = gettext("%s TB") % filesize_number_format(bytes_ / TB)
else:
value = gettext("%s PB") % filesize_number_format(bytes_ / PB)
if negative:
value = "-%s" % value
return avoid_wrapping(value)
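# Examples, shown as comments (exact output depends on the active locale, and
# the spaces returned by avoid_wrapping() are non-breaking):
#
#   filesizeformat(1023)       # -> '1023 bytes'
#   filesizeformat(1024 ** 2)  # -> '1.0 MB'
#   filesizeformat(-1536)      # -> '-1.5 KB'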
@register.filter(is_safe=False)
def pluralize(value, arg='s'):
"""
Return a plural suffix if the value is not 1, '1', or an object of
length 1. By default, use 's' as the suffix:
* If value is 0, vote{{ value|pluralize }} display "votes".
* If value is 1, vote{{ value|pluralize }} display "vote".
* If value is 2, vote{{ value|pluralize }} display "votes".
If an argument is provided, use that string instead:
* If value is 0, class{{ value|pluralize:"es" }} display "classes".
* If value is 1, class{{ value|pluralize:"es" }} display "class".
* If value is 2, class{{ value|pluralize:"es" }} display "classes".
If the provided argument contains a comma, use the text before the comma
for the singular case and the text after the comma for the plural case:
* If value is 0, cand{{ value|pluralize:"y,ies" }} display "candies".
* If value is 1, cand{{ value|pluralize:"y,ies" }} display "candy".
* If value is 2, cand{{ value|pluralize:"y,ies" }} display "candies".
"""
if ',' not in arg:
arg = ',' + arg
bits = arg.split(',')
if len(bits) > 2:
return ''
singular_suffix, plural_suffix = bits[:2]
try:
return singular_suffix if float(value) == 1 else plural_suffix
except ValueError: # Invalid string that's not a number.
pass
except TypeError: # Value isn't a string or a number; maybe it's a list?
try:
return singular_suffix if len(value) == 1 else plural_suffix
except TypeError: # len() of unsized object.
pass
return ''
@register.filter("phone2numeric", is_safe=True)
def phone2numeric_filter(value):
"""Take a phone number and converts it in to its numerical equivalent."""
return phone2numeric(value)
@register.filter(is_safe=True)
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
try:
return pformat(value)
except Exception as e:
return "Error in formatting: %s: %s" % (e.__class__.__name__, e)
|
{
"content_hash": "e19db5207685265afc562054cdd4f171",
"timestamp": "",
"source": "github",
"line_count": 905,
"max_line_length": 101,
"avg_line_length": 28.678453038674032,
"alnum_prop": 0.6085767126454497,
"repo_name": "fenginx/django",
"id": "d8fb0a5396589757c1f04208989591cc3d668477",
"size": "25958",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "django/template/defaultfilters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "48399"
},
{
"name": "HTML",
"bytes": "175296"
},
{
"name": "JavaScript",
"bytes": "238848"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11137863"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
import numpy as np
import sounddevice as sd
import sys
from wav_rw import wavwrite, enframe
from pandas import Series
#################################################################
#################################################
# voice activity detector using log energy and a threshold trh
naverage=10
def vad(logE,trh):
s=Series(logE)
logE = (s.rolling(window=naverage)).mean().values
logE[0:naverage-1]=logE[naverage:].min()
logE=np.roll(logE, -naverage/2, axis=0)
wvad=logE>trh
return wvad
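# Rough usage sketch (assumed values), kept as a comment so the script below
# runs unchanged: given per-frame log-energy, vad() returns a boolean mask of
# frames whose smoothed energy exceeds the threshold.
#
#   example_logE = np.array([-120.0] * 20 + [-60.0] * 20)
#   speech_frames = vad(example_logE, trh=-90)  # True where energy > -90 dB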
#################################################################
sr = 16000 # sampling rate in Hz (16 kHz)
chann = 1 # number of channels
duration = 2 # seconds: record at most 2 seconds
# start the process
while True:
    print('Select an option:\n1) Start process\n2) Exit')
    opcion=input()
    if opcion==1:
        myrecording = sd.rec(int(duration * sr), samplerate=sr, channels=chann)
        sd.wait()
        #######
        # the variable myrecording holds the recorded audio; processing can
        # start at this point. What follows is an EXAMPLE:
        # first a short-time energy analysis is done: 512-point windows are
        # computed and their energy is expressed in decibels
        frames=enframe(x = myrecording)
        # before saving the audio, the silent segments must be detected
        logE = 20*np.log10((np.abs(frames)**2).sum(axis=1))
        ### the energy can be analysed: if it exceeds a -90 dB threshold, those
        # frames are joined and the result is saved, dropping the frames with
        # very little energy
        frames_audio=vad(logE,-90)
        x=np.hstack((frames[frames_audio,:]))
        # your code can continue depending on what is required; more processing
        # can be added (filtering...), or you can decide not to save when only
        # a few frames contain sound
        ##
        # the resulting audio is saved
        wavwrite(x,16000,'prueba.wav')
    else:
        break
|
{
"content_hash": "3aebbd2df912d55c4fa160acd0efa218",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 92,
"avg_line_length": 35.59322033898305,
"alnum_prop": 0.5942857142857143,
"repo_name": "miltonsarria/dsp-python",
"id": "bad34af0bd5139a59c5b0984536737244a7abbea",
"size": "2168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "audio/example_recordAudio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "28084"
},
{
"name": "C++",
"bytes": "2359"
},
{
"name": "Jupyter Notebook",
"bytes": "497554"
},
{
"name": "Python",
"bytes": "406827"
}
],
"symlink_target": ""
}
|
from inspect import isclass, isfunction, ismodule
from functools import partial
is_func_or_partial = lambda f: isfunction(f) or isinstance(f, partial)
def write_docs_for_module(module, path, modules_to_skip=None,
generate_index=False):
if modules_to_skip is None:
modules_to_skip = {}
module_name = module.__name__
doc_dir = path / module_name
if not doc_dir.is_dir():
doc_dir.mkdir()
for k, v in module.__dict__.iteritems():
if ismodule(v):
print('Writing module {}'.format(module_name))
file_to_doc = docs_for_module(k, v, module_name,
generate_index=generate_index)
if len(file_to_doc) == 0 or k in modules_to_skip:
continue
mod_dir = doc_dir / k
if not mod_dir.is_dir():
mod_dir.mkdir()
for f_name in file_to_doc:
doc_file = mod_dir / (f_name + '.rst')
with open(str(doc_file), 'wb') as f:
f.write(file_to_doc[f_name])
def docs_for_module(module_name, module, package_name, generate_index=False):
file_to_doc = {}
for k, v in module.__dict__.iteritems():
if isclass(v):
file_to_doc[k] = generate_class_rst(module_name, k,
module.__name__, package_name)
elif is_func_or_partial(v):
file_to_doc[k] = generate_function_rst(module_name, k,
module.__name__,
package_name)
# only make an index if there is something to index
if generate_index and len(file_to_doc) > 0:
file_to_doc['index'] = generate_module_index(module_name, module)
return file_to_doc
def generate_module_index(module_name, module):
breadcrumb = '.. _api-{}-index:\n\n'.format(module_name)
title = ":mod:`{}`".format(module.__name__)
title = "{}\n{}\n".format(title, '=' * len(title))
toctree = "\n.. toctree::\n :maxdepth: 1\n\n "
items = [i for i, v in module.__dict__.items() if isclass(v) or
is_func_or_partial(v)]
return breadcrumb + title + toctree + "\n ".join(items)
def generate_class_rst(module_name, class_name, module, package_name):
breadcrumb = '.. _{}-{}-{}:\n\n'.format(package_name, module_name,
class_name)
current_module = '.. currentmodule:: {}\n\n'.format(module)
title = "{}\n{}\n".format(class_name, '=' * len(class_name))
body = (".. autoclass:: {}\n :members:\n :inherited-members:"
"\n :show-inheritance:\n".format(class_name))
return breadcrumb + current_module + title + body
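# Sketch of the generated output for a hypothetical class (names are made up):
# generate_class_rst('widgets', 'MyWidget', 'menpowidgets.widgets',
# 'menpowidgets') would return roughly:
#
#   .. _menpowidgets-widgets-MyWidget:
#
#   .. currentmodule:: menpowidgets.widgets
#
#   MyWidget
#   ========
#   .. autoclass:: MyWidget
#       :members:
#       :inherited-members:
#       :show-inheritance: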
def generate_function_rst(module_name, function_name, module, package_name):
breadcrumb = '.. _{}-{}-{}:\n\n'.format(package_name, module_name,
function_name)
current_module = '.. currentmodule:: {}\n\n'.format(module)
title = "{}\n{}\n".format(function_name, '=' * len(function_name))
body = ".. autofunction:: {}\n".format(function_name)
return breadcrumb + current_module + title + body
if __name__ == '__main__':
from pathlib import Path
import menpowidgets
path = Path(__file__).parent / 'source' / 'api'
print('Writing to {}'.format(path))
# Flip generate_index to True to make index.rst files too!
write_docs_for_module(menpowidgets, path, generate_index=False,
modules_to_skip={'_version'})
|
{
"content_hash": "e3e9302da7dff7a9eb92cab8bd487e5b",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 78,
"avg_line_length": 41.91860465116279,
"alnum_prop": 0.5478502080443828,
"repo_name": "grigorisg9gr/menpowidgets",
"id": "a872f45a8a7a69fef806eee7d41b8e67b317bcbb",
"size": "3605",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/generate_rst.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "155"
},
{
"name": "JavaScript",
"bytes": "5255"
},
{
"name": "Python",
"bytes": "905831"
},
{
"name": "Shell",
"bytes": "124"
}
],
"symlink_target": ""
}
|
"""Functions for plotting graphs related to IAST calculations."""
import matplotlib as mpl
import matplotlib.pyplot as plt
from pygaps.graphing.mpl_styles import BASE_STYLE
from pygaps.utilities.string_utilities import convert_chemformula_ltx
@mpl.rc_context(BASE_STYLE)
def plot_iast(
p_data: list,
l_data: list,
ads: list,
p_label: str,
l_label: str,
ax=None,
):
"""
Plot uptake-vs-pressure graph from IAST data.
Parameters
----------
p_data : array or list
The pressures at which uptakes are calculated.
l_data : 2D array or list of lists
Uptake for each component a function of pressure.
ads : list[str]
Names of the adsorbates.
    p_label : str
        Label for the pressure axis.
    l_label : str
        Label for the loading (uptake) axis.
ax : matplotlib axes object, default None
The axes object where to plot the graph if a new figure is
not desired.
Returns
-------
ax : matplotlib ax
The ax object.
"""
# Generate the figure if needed
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ads = map(convert_chemformula_ltx, ads)
title_graph = "IAST uptake"
# graph title
ax.set_title(title_graph)
# labels for the axes
ax.set_xlabel(p_label)
ax.set_ylabel(l_label)
ax.tick_params(axis='both', which='major')
# Regular data
    for lo, ad in zip(l_data.T, ads):
        ax.plot(
            p_data,
            lo,
            label=ad,
            marker=".",
        )
ax.legend(loc='best')
return ax
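# Minimal usage sketch (synthetic single-site isotherm numbers; the labels are
# arbitrary strings):
#
#   import numpy as np
#   pressures = np.linspace(0.1, 10, 20)
#   loadings = np.column_stack([2.0 * pressures / (1 + pressures),
#                               0.5 * pressures / (1 + pressures)])
#   plot_iast(pressures, loadings, ['CO2', 'CH4'],
#             'Pressure [bar]', 'Loading [mmol/g]')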
@mpl.rc_context(BASE_STYLE)
def plot_iast_vle(
x_data: list,
y_data: list,
ads1: str,
ads2: str,
pressure: float,
p_unit: str,
ax=None,
):
"""
Plot a vapour-adsorbed equilibrium graph from IAST data.
Parameters
----------
x_data : array or list
The molar fraction in the adsorbed phase.
y_data : array or list
The molar fraction in the gas phase.
ads1 : str
Name of the adsorbate which is regarded as the main component.
ads2 : str
Name of the adsorbate which is regarded as the secondary component.
pressure : float
Pressure at which the vle is plotted.
p_unit : str
Pressure unit, for labelling.
ax : matplotlib axes object, default None
The axes object where to plot the graph if a new figure is
not desired.
Returns
-------
ax : matplotlib ax
The ax object.
"""
# Generate the figure if needed
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ads1 = convert_chemformula_ltx(ads1)
ads2 = convert_chemformula_ltx(ads2)
text_x = f"Bulk fraction {ads1}"
text_y = f"Adsorbed fraction {ads1}"
title_graph = f"{ads1} in {ads2}"
label = f"{pressure:.2g} {p_unit}"
# graph title
ax.set_title(title_graph)
# labels for the axes
ax.set_xlabel(text_x)
ax.set_ylabel(text_y)
ax.tick_params(axis='both', which='major')
# Regular data
ax.plot(
y_data,
x_data,
label=label,
marker=".",
)
# Straight line
line = [0, 1]
ax.plot(line, line, color='black')
ax.legend(loc='best')
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
return ax
@mpl.rc_context(BASE_STYLE)
def plot_iast_svp(
p_data: list,
s_data: list,
ads1: str,
ads2: str,
fraction: float,
p_unit: str,
ax=None,
):
"""
Plot a selectivity-vs-pressure graph from IAST data.
Parameters
----------
p_data : array or list
The pressures at which selectivity is calculated.
s_data : array or list
The selectivity towards the main component as a function of pressure.
ads1 : str
Name of the adsorbate which is regarded as the main component.
ads2 : str
Name of the adsorbate which is regarded as the secondary component.
fraction : float
Molar fraction of the main component in the mixture.
p_unit : str
Unit of the pressure, for axis labelling.
ax : matplotlib axes object, default None
The axes object where to plot the graph if a new figure is
not desired.
Returns
-------
ax : matplotlib ax
The ax object.
"""
# Generate the figure if needed
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ads1 = convert_chemformula_ltx(ads1)
ads2 = convert_chemformula_ltx(ads2)
text_x = f"Pressure [{p_unit}]"
text_y = f"Selectivity, {ads1}"
title_graph = f"{ads1} in {ads2}"
label = f"{fraction:.2%} {ads1}"
# graph title
ax.set_title(title_graph)
# labels for the axes
ax.set_xlabel(text_x)
ax.set_ylabel(text_y)
ax.tick_params(axis='both', which='major')
# Regular data
ax.plot(
p_data,
s_data,
label=label,
marker=".",
)
ax.legend(loc='best')
ax.set_ylim(bottom=0)
return ax
|
{
"content_hash": "e8d7a2da0d23d69bbae7e2ce32ac7f2c",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 77,
"avg_line_length": 22.83783783783784,
"alnum_prop": 0.5936883629191322,
"repo_name": "pauliacomi/pyGAPS",
"id": "3bb323e808a9838c3d639aef2620ccda1f73b558",
"size": "5070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pygaps/graphing/iast_graphs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3600"
},
{
"name": "PowerShell",
"bytes": "2995"
},
{
"name": "Python",
"bytes": "800102"
}
],
"symlink_target": ""
}
|
"""Validation tests for JAX-CFD."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
from jax_cfd.base import advection
from jax_cfd.base import diffusion
from jax_cfd.base import equations
from jax_cfd.base import funcutils
from jax_cfd.base import test_util
from jax_cfd.base import time_stepping
from jax_cfd.base import validation_problems
class ValidationTests(test_util.TestCase):
@parameterized.named_parameters(
dict(
testcase_name='_TaylorGreen_SemiImplicitNavierStokes',
problem=validation_problems.TaylorGreen(
shape=(1024, 1024), density=1., viscosity=1e-3),
solver=functools.partial(
equations.semi_implicit_navier_stokes,
convect=advection.convect_linear),
implicit_diffusion=False,
max_courant_number=.1,
time=10.,
atol=1e-5),
dict(
testcase_name='_TaylorGreen_SemiImplicitNavierStokes_rk1',
problem=validation_problems.TaylorGreen(
shape=(512, 512), density=1., viscosity=1e-2),
solver=functools.partial(
equations.semi_implicit_navier_stokes,
convect=advection.convect_linear,
time_stepper=time_stepping.forward_euler,
),
implicit_diffusion=False,
max_courant_number=.1,
time=40.,
atol=6e-6),
dict(
testcase_name='_TaylorGreen_SemiImplicitNavierStokes_rk4',
problem=validation_problems.TaylorGreen(
shape=(512, 512), density=1., viscosity=1e-2),
solver=functools.partial(
equations.semi_implicit_navier_stokes,
convect=advection.convect_linear,
time_stepper=time_stepping.classic_rk4,
),
implicit_diffusion=False,
max_courant_number=.1,
time=40.,
atol=8e-7),
dict(
testcase_name='_TaylorGreen_ImplicitDiffusionNavierStokes_matmul',
problem=validation_problems.TaylorGreen(
shape=(1024, 1024), density=1., viscosity=1e-3),
solver=functools.partial(
equations.implicit_diffusion_navier_stokes,
convect=advection.convect_linear,
diffusion_solve=functools.partial(
diffusion.solve_fast_diag, implementation='matmul'),
),
implicit_diffusion=True,
max_courant_number=.1,
time=10.,
atol=3e-5),
dict(
testcase_name='_TaylorGreen_ImplicitDiffusionNavierStokes_fft',
problem=validation_problems.TaylorGreen(
shape=(1024, 1024), density=1., viscosity=1e-3),
solver=functools.partial(
equations.implicit_diffusion_navier_stokes,
convect=advection.convect_linear,
diffusion_solve=functools.partial(
diffusion.solve_fast_diag, implementation='fft'),
),
implicit_diffusion=True,
max_courant_number=.1,
time=10.,
atol=4e-5),
dict(
testcase_name='_TaylorGreen_ImplicitDiffusionNavierStokes_rfft',
problem=validation_problems.TaylorGreen(
shape=(1024, 1024), density=1., viscosity=1e-3),
solver=functools.partial(
equations.implicit_diffusion_navier_stokes,
convect=advection.convect_linear,
diffusion_solve=functools.partial(
diffusion.solve_fast_diag, implementation='rfft'),
),
implicit_diffusion=True,
max_courant_number=.1,
time=10.,
atol=4e-5),
dict(
testcase_name='_TaylorGreen_ImplicitDiffusionNavierStokes_cg',
problem=validation_problems.TaylorGreen(
shape=(1024, 1024), density=1., viscosity=1e-3),
solver=functools.partial(
equations.implicit_diffusion_navier_stokes,
convect=advection.convect_linear,
diffusion_solve=functools.partial(
diffusion.solve_cg, atol=1e-6, maxiter=512)),
implicit_diffusion=True,
max_courant_number=.1,
time=10.,
atol=3e-5),
dict(
testcase_name='_TaylorGreen_ImplicitDiffusionNavierStokes_viscous',
problem=validation_problems.TaylorGreen(
shape=(1024, 1024), density=1., viscosity=0.5),
solver=functools.partial(
equations.implicit_diffusion_navier_stokes,
convect=advection.convect_linear),
implicit_diffusion=True,
max_courant_number=.5,
time=1.0,
atol=6e-4,
),
)
def test_accuracy(self, problem, solver, implicit_diffusion,
max_courant_number, time, atol):
"""Test the accuracy of `solver` on the given `problem`.
Args:
problem: an instance of `validation_problems.Problem`.
solver: a callable that takes `density`, `viscosity`, `dt`, `grid`, and
`steps`. It returns a callable that takes `velocity`,
`pressure_correction` and `force` and returns updated versions of these
values at the next time step.
implicit_diffusion: whether or not the solver models diffusion implicitly.
max_courant_number: a float used to choose the size of the time step `dt`
according to the Courant-Friedrichs-Lewy condition. See
https://en.wikipedia.org/wiki/Courant-Friedrichs-Lewy_condition.
time: the amount of time to run the simulation for.
atol: absolute error tolerance per entry.
"""
v = problem.velocity(0.)
dt = equations.dynamic_time_step(
v, max_courant_number, problem.viscosity, problem.grid,
implicit_diffusion)
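    # Rough illustration of how `dt` is chosen: the CFL bound gives roughly
    # dt <= max_courant_number * dx / |u|_max. Assuming a 2*pi-periodic
    # Taylor-Green domain, a 1024x1024 grid has dx ~ 2*pi / 1024 ~ 6e-3, so
    # with |u|_max ~ 1 and max_courant_number = 0.1 this yields dt on the
    # order of 6e-4. With explicit diffusion the step may additionally be
    # limited by a viscous bound of roughly dx**2 / viscosity.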
steps = int(jnp.ceil(time / dt))
navier_stokes = solver(density=problem.density,
viscosity=problem.viscosity,
dt=dt,
grid=problem.grid)
v_computed = funcutils.repeated(navier_stokes, steps)(v)
v_analytic = problem.velocity(time)
for u_c, u_a in zip(v_computed, v_analytic):
self.assertAllClose(u_c, u_a, atol=atol)
if __name__ == '__main__':
absltest.main()
|
{
"content_hash": "882d09b68e258a09b30e95addc2b6e04",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 80,
"avg_line_length": 39.522012578616355,
"alnum_prop": 0.6115531508593253,
"repo_name": "google/jax-cfd",
"id": "3009c13e8225cbe3af8813ce2a6225fb93e319a8",
"size": "6860",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jax_cfd/base/validation_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "7029140"
},
{
"name": "Python",
"bytes": "715552"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import mock
from oslo_config import cfg
from oslo_log import log as logging
from congress.datalog import base as datalog_base
from congress.datalog import compile
from congress.datalog import database
from congress.datalog import materialized
from congress.datalog import nonrecursive
from congress.datalog import utility
from congress.db import db_policy_rules
from congress import exception
from congress.policy_engines import agnostic
from congress.synchronizer import policy_rule_synchronizer as synchronizer
from congress.tests import base
from congress.tests import helper
LOG = logging.getLogger(__name__)
NREC_THEORY = 'non-recursive theory'
class TestRuntime(base.TestCase):
"""Tests for Runtime that are not specific to any theory."""
def check_equal(self, actual_string, correct_string, msg):
self.assertTrue(helper.datalog_equal(
actual_string, correct_string, msg))
def test_theory_inclusion(self):
"""Test evaluation routines when one theory includes another."""
# spread out across inclusions
th1 = nonrecursive.NonrecursiveRuleTheory()
th2 = nonrecursive.NonrecursiveRuleTheory()
th3 = nonrecursive.NonrecursiveRuleTheory()
th1.includes.append(th2)
th2.includes.append(th3)
th1.insert(helper.str2form('p(x) :- q(x), r(x), s(2)'))
th2.insert(helper.str2form('q(1)'))
th1.insert(helper.str2form('r(1)'))
th3.insert(helper.str2form('s(2)'))
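        # p(x) needs q(x), r(x) and s(2): q(1) comes from th2 (included by
        # th1), r(1) from th1 itself, and s(2) from th3 (reachable through
        # th2), so the select below should derive exactly p(1).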
self.check_equal(
helper.pol2str(th1.select(helper.str2form('p(x)'))),
'p(1)', 'Data spread across inclusions')
def test_multi_policy_update(self):
"""Test updates that apply to multiple policies."""
def check_equal(actual, correct):
e = helper.datalog_equal(actual, correct)
self.assertTrue(e)
run = agnostic.Runtime()
run.create_policy('th1')
run.create_policy('th2')
events1 = [compile.Event(formula=x, insert=True, target='th1')
for x in helper.str2pol("p(1) p(2) q(1) q(3)")]
events2 = [compile.Event(formula=x, insert=True, target='th2')
for x in helper.str2pol("r(1) r(2) t(1) t(4)")]
run.update(events1 + events2)
check_equal(run.select('p(x)', 'th1'), 'p(1) p(2)')
check_equal(run.select('q(x)', 'th1'), 'q(1) q(3)')
check_equal(run.select('r(x)', 'th2'), 'r(1) r(2)')
check_equal(run.select('t(x)', 'th2'), 't(1) t(4)')
def test_initialize_tables(self):
"""Test initialize_tables() functionality of agnostic."""
run = agnostic.Runtime()
run.create_policy('test')
run.insert('p(1) p(2)')
facts = [compile.Fact('p', (3,)), compile.Fact('p', (4,))]
run.initialize_tables(['p'], facts)
e = helper.datalog_equal(run.select('p(x)'), 'p(3) p(4)')
self.assertTrue(e)
def test_single_policy(self):
"""Test ability to create/delete single policies."""
# single policy
run = agnostic.Runtime()
original = run.policy_names()
run.create_policy('test1')
run.insert('p(x) :- q(x)', 'test1')
run.insert('q(1)', 'test1')
self.assertEqual(
run.select('p(x)', 'test1'), 'p(1)', 'Policy creation')
self.assertEqual(
run.select('p(x)', 'test1'), 'p(1)', 'Policy creation')
run.delete_policy('test1')
self.assertEqual(
set(run.policy_names()), set(original), 'Policy deletion')
def test_multi_policy(self):
"""Test ability to create/delete multiple policies."""
# multiple policies
run = agnostic.Runtime()
original = run.policy_names()
run.create_policy('test2')
run.create_policy('test3')
self.assertEqual(
set(run.policy_names()),
set(original + ['test2', 'test3']),
'Multi policy creation')
run.delete_policy('test2')
run.create_policy('test4')
self.assertEqual(
set(run.policy_names()),
set(original + ['test3', 'test4']),
'Multiple policy deletion')
run.insert('p(x) :- q(x) q(1)', 'test4')
self.assertEqual(
run.select('p(x)', 'test4'),
'p(1)',
'Multipolicy deletion select')
def test_cross_policy_rule(self):
"""Test rule that refer to table from another policy."""
run = agnostic.Runtime()
run.create_policy('test1')
run.create_policy('test2')
run.create_policy('test3')
run.insert(
'p(x) :- test1:q(x),test2:q(x),test3:q(x),q(x) q(1) q(2) q(3)',
'test3')
run.insert('q(1)', 'test1')
run.insert('q(1) q(2)', 'test2')
self.assertEqual(
run.select('p(x)', 'test3'),
'p(1)',
'Cross-policy rule select')
def test_policy_types(self):
"""Test types for multiple policies."""
# policy types
run = agnostic.Runtime()
run.create_policy('test1', kind=datalog_base.NONRECURSIVE_POLICY_TYPE)
self.assertIsInstance(run.policy_object('test1'),
nonrecursive.NonrecursiveRuleTheory,
'Nonrecursive policy addition')
run.create_policy('test2', kind=datalog_base.ACTION_POLICY_TYPE)
self.assertIsInstance(run.policy_object('test2'),
nonrecursive.ActionTheory,
'Action policy addition')
run.create_policy('test3', kind=datalog_base.DATABASE_POLICY_TYPE)
self.assertIsInstance(run.policy_object('test3'),
database.Database,
'Database policy addition')
run.create_policy('test4', kind=datalog_base.MATERIALIZED_POLICY_TYPE)
self.assertIsInstance(run.policy_object('test4'),
materialized.MaterializedViewTheory,
'Materialized policy addition')
def test_policy_errors(self):
"""Test errors for multiple policies."""
# errors
run = agnostic.Runtime()
run.create_policy('existent')
self.assertRaises(KeyError, run.create_policy, 'existent')
self.assertRaises(KeyError, run.delete_policy, 'nonexistent')
self.assertRaises(KeyError, run.policy_object, 'nonexistent')
def test_wrong_arity_index(self):
run = agnostic.Runtime()
run.create_policy('test1')
run.insert('p(x) :- r(x), q(y, x)')
run.insert('r(1)')
run.insert('q(1,1)')
# run query first to build index
self.assertTrue(helper.datalog_equal(run.select('p(x)'), 'p(1)'))
        # the next insert causes an exception since q(5) has the wrong arity
        # and therefore lacks the column we indexed on
permitted, errs = run.insert('q(5)')
self.assertFalse(permitted)
self.assertEqual(len(errs), 1)
self.assertIsInstance(errs[0], exception.PolicyException)
# double-check that the error didn't result in an inconsistent state
self.assertEqual(run.select('q(5)'), '')
def test_get_tablename(self):
run = agnostic.DseRuntime('dse')
run.synchronizer = mock.MagicMock()
run.create_policy('test')
run.insert('p(x) :- q(x)')
run.insert('q(x) :- r(x)')
run.insert('execute[nova:disconnect(x, y)] :- s(x, y)')
tables = run.get_tablename('test', 'p')
self.assertEqual({'p'}, set(tables))
tables = run.get_tablename('test', 't')
self.assertIsNone(tables)
tables = run.get_tablenames('test')
self.assertEqual({'p', 'q', 'r', 's'}, set(tables))
tables = run.get_tablename('test', 'nova:disconnect')
self.assertIsNone(tables)
def test_tablenames(self):
run = agnostic.Runtime()
run.create_policy('test')
run.insert('p(x) :- q(x,y)')
run.insert('q(x,y) :- r(x,y)')
run.insert('t(x) :- q(x,y), r(x,z), equal(y, z)')
run.insert('execute[nova:disconnect(x, y)] :- s(x, y)')
tables = run.tablenames()
self.assertEqual({'p', 'q', 'r', 's', 't', 'nova:disconnect'},
set(tables))
tables = run.tablenames(include_builtin=True)
self.assertEqual({'p', 'q', 'r', 's', 't', 'nova:disconnect', 'equal'},
set(tables))
tables = run.tablenames(body_only=True)
self.assertEqual({'q', 'r', 's'}, set(tables))
tables = run.tablenames(include_modal=False)
self.assertEqual({'p', 'q', 'r', 's', 't'}, set(tables))
@mock.patch.object(db_policy_rules, 'add_policy')
def test_persistent_create_policy(self, mock_add):
run = agnostic.Runtime()
policy_name = 'invalid-table-name'
self.assertRaises(exception.PolicyException,
run.persistent_create_policy,
policy_name)
self.assertNotIn(policy_name, run.policy_names())
@mock.patch.object(db_policy_rules, 'add_policy', side_effect=Exception())
def test_persistent_create_policy_with_db_exception(self, mock_add):
run = agnostic.Runtime()
with mock.patch.object(run, 'delete_policy') as mock_delete:
run.synchronizer = mock.MagicMock()
policy_name = 'test_policy'
self.assertRaises(exception.PolicyException,
run.persistent_create_policy,
policy_name)
mock_add.assert_called_once_with(mock.ANY,
policy_name,
policy_name[:5],
mock.ANY,
'user',
'nonrecursive',
session=mock.ANY)
# mock_delete.assert_called_once_with(policy_name)
self.assertFalse(mock_delete.called)
self.assertFalse(run.synchronizer.sync_one_policy.called)
self.assertNotIn('test_policy', run.policy_names())
mock_db_policy_obj = lambda: None
setattr(mock_db_policy_obj, 'name', 'test_policy')
@mock.patch.object(db_policy_rules, 'add_policy_rule')
@mock.patch.object(db_policy_rules, 'policy_name',
side_effect=lambda x, session: x)
@mock.patch.object(
db_policy_rules, 'get_policies', return_value=[mock_db_policy_obj])
def test_persistent_insert_rules(
            self, mock_get_policies, mock_policy_name, mock_add):
run = agnostic.Runtime()
run.synchronizer = mock.MagicMock()
run.create_policy('test_policy')
# test empty insert
result, _ = run.persistent_insert_rules('test_policy', [])
self.assertEqual(len(result), 0)
self.assertTrue(helper.datalog_equal(
run.select('p(x)'), ''))
# test empty string passed in rule
self.assertRaises(exception.PolicyException,
run.persistent_insert_rules,
'test_policy', [{'rule': '', 'name': '',
'comment': ''}])
# test duplicated insert, 3 rules, 2 unique
result, _ = run.persistent_insert_rules(
'test_policy',
[{'rule': 'p(1)', 'name': '', 'comment': ''},
{'rule': 'p(2)', 'name': '', 'comment': ''},
{'rule': 'p(1)', 'name': '', 'comment': ''}])
self.assertEqual(len(result), 2)
self.assertTrue(helper.datalog_equal(
run.select('p(x)'), 'p(1) p(2)'))
def test_tablenames_theory_name(self):
run = agnostic.Runtime()
run.create_policy('test')
run.create_policy('test2')
run.insert('p(x) :- q(x)', 'test')
run.insert('r(x) :- s(x)', 'test2')
tables = run.tablenames()
self.assertEqual(set(tables), set(['p', 'q', 'r', 's']))
tables = run.tablenames(theory_name='test')
self.assertEqual(set(tables), set(['p', 'q']))
class TestArity(base.TestCase):
def test_same_table_diff_policies(self):
run = agnostic.Runtime()
run.create_policy('alice')
run.create_policy('bob')
run.insert('p(x) :- q(x, y)', 'alice')
run.insert('p(x, y) :- r(x, y, z)', 'bob')
self.assertEqual(1, run.arity('p', 'alice'))
self.assertEqual(2, run.arity('p', 'bob'))
def test_complex_table(self):
run = agnostic.Runtime()
run.create_policy('alice')
run.create_policy('bob')
run.insert('p(x) :- q(x, y)', 'alice')
run.insert('p(x, y) :- r(x, y, z)', 'bob')
self.assertEqual(1, run.arity('alice:p', 'bob'))
self.assertEqual(1, run.arity('alice:p', 'alice'))
def test_modals(self):
run = agnostic.Runtime()
run.create_policy('alice')
run.insert('execute[nova:p(x)] :- q(x, y)', 'alice')
self.assertEqual(1, run.arity('nova:p', 'alice', 'execute'))
class TestTriggerRegistry(base.TestCase):
def setUp(self):
super(TestTriggerRegistry, self).setUp()
self.f = lambda tbl, old, new: old
def test_trigger(self):
trigger1 = agnostic.Trigger('table', 'policy', self.f)
trigger2 = agnostic.Trigger('table', 'policy', self.f)
trigger3 = agnostic.Trigger('table2', 'policy', self.f)
trigger4 = agnostic.Trigger('table', 'policy', lambda x: x)
s = set()
s.add(trigger1)
s.add(trigger2)
s.add(trigger3)
s.add(trigger4)
self.assertEqual(4, len(s))
s.discard(trigger1)
self.assertEqual(3, len(s))
s.discard(trigger2)
self.assertEqual(2, len(s))
s.discard(trigger3)
self.assertEqual(1, len(s))
s.discard(trigger4)
self.assertEqual(0, len(s))
def test_register(self):
g = compile.RuleDependencyGraph()
reg = agnostic.TriggerRegistry(g)
# register
p_trigger = reg.register_table('p', 'alice', self.f)
triggers = reg.relevant_triggers(['alice:p'])
self.assertEqual(triggers, set([p_trigger]))
# register 2nd table
q_trigger = reg.register_table('q', 'alice', self.f)
p_triggers = reg.relevant_triggers(['alice:p'])
self.assertEqual(p_triggers, set([p_trigger]))
q_triggers = reg.relevant_triggers(['alice:q'])
self.assertEqual(q_triggers, set([q_trigger]))
# register again with table p
p2_trigger = reg.register_table('p', 'alice', self.f)
p_triggers = reg.relevant_triggers(['alice:p'])
self.assertEqual(p_triggers, set([p_trigger, p2_trigger]))
q_triggers = reg.relevant_triggers(['alice:q'])
self.assertEqual(q_triggers, set([q_trigger]))
def test_unregister(self):
g = compile.RuleDependencyGraph()
reg = agnostic.TriggerRegistry(g)
p_trigger = reg.register_table('p', 'alice', self.f)
q_trigger = reg.register_table('q', 'alice', self.f)
self.assertEqual(reg.relevant_triggers(['alice:p']),
set([p_trigger]))
self.assertEqual(reg.relevant_triggers(['alice:q']),
set([q_trigger]))
# unregister p
reg.unregister(p_trigger)
self.assertEqual(reg.relevant_triggers(['alice:p']), set())
self.assertEqual(reg.relevant_triggers(['alice:q']),
set([q_trigger]))
# unregister q
reg.unregister(q_trigger)
self.assertEqual(reg.relevant_triggers(['alice:p']), set())
self.assertEqual(reg.relevant_triggers(['alice:q']), set())
# unregister nonexistent trigger
self.assertRaises(KeyError, reg.unregister, p_trigger)
self.assertEqual(reg.relevant_triggers(['alice:p']), set())
self.assertEqual(reg.relevant_triggers(['alice:q']), set())
def test_basic_dependency(self):
g = compile.RuleDependencyGraph()
reg = agnostic.TriggerRegistry(g)
g.formula_insert(compile.parse1('p(x) :- q(x)'), 'alice')
# register p
p_trigger = reg.register_table('p', 'alice', self.f)
self.assertEqual(reg.relevant_triggers(['alice:q']), set([p_trigger]))
self.assertEqual(reg.relevant_triggers(['alice:p']), set([p_trigger]))
# register q
q_trigger = reg.register_table('q', 'alice', self.f)
self.assertEqual(reg.relevant_triggers(['alice:q']),
set([p_trigger, q_trigger]))
self.assertEqual(reg.relevant_triggers(['alice:p']),
set([p_trigger]))
def test_complex_dependency(self):
g = compile.RuleDependencyGraph()
reg = agnostic.TriggerRegistry(g)
g.formula_insert(compile.parse1('p(x) :- q(x)'), 'alice')
g.formula_insert(compile.parse1('q(x) :- r(x), s(x)'), 'alice')
g.formula_insert(compile.parse1('r(x) :- t(x, y), u(y)'), 'alice')
g.formula_insert(compile.parse1('separate(x) :- separate2(x)'),
'alice')
g.formula_insert(compile.parse1('notrig(x) :- notrig2(x)'), 'alice')
p_trigger = reg.register_table('p', 'alice', self.f)
sep_trigger = reg.register_table('separate', 'alice', self.f)
# individual tables
self.assertEqual(reg.relevant_triggers(['alice:p']), set([p_trigger]))
self.assertEqual(reg.relevant_triggers(['alice:q']), set([p_trigger]))
self.assertEqual(reg.relevant_triggers(['alice:r']), set([p_trigger]))
self.assertEqual(reg.relevant_triggers(['alice:s']), set([p_trigger]))
self.assertEqual(reg.relevant_triggers(['alice:t']), set([p_trigger]))
self.assertEqual(reg.relevant_triggers(['alice:u']), set([p_trigger]))
self.assertEqual(reg.relevant_triggers(['alice:notrig']), set())
self.assertEqual(reg.relevant_triggers(['alice:notrig2']), set([]))
self.assertEqual(reg.relevant_triggers(['alice:separate']),
set([sep_trigger]))
self.assertEqual(reg.relevant_triggers(['alice:separate2']),
set([sep_trigger]))
# groups of tables
self.assertEqual(reg.relevant_triggers(['alice:p', 'alice:q']),
set([p_trigger]))
self.assertEqual(reg.relevant_triggers(['alice:separate', 'alice:p']),
set([p_trigger, sep_trigger]))
self.assertEqual(reg.relevant_triggers(['alice:notrig', 'alice:p']),
set([p_trigger]))
# events: data
event = compile.Event(compile.parse1('q(1)'), target='alice')
self.assertEqual(reg.relevant_triggers([event]), set([p_trigger]))
event = compile.Event(compile.parse1('u(1)'), target='alice')
self.assertEqual(reg.relevant_triggers([event]), set([p_trigger]))
event = compile.Event(compile.parse1('separate2(1)'), target='alice')
self.assertEqual(reg.relevant_triggers([event]), set([sep_trigger]))
event = compile.Event(compile.parse1('notrig2(1)'), target='alice')
self.assertEqual(reg.relevant_triggers([event]), set([]))
# events: rules
event = compile.Event(compile.parse1('separate(x) :- q(x)'),
target='alice')
self.assertEqual(reg.relevant_triggers([event]), set([sep_trigger]))
event = compile.Event(compile.parse1('notrig(x) :- q(x)'),
target='alice')
self.assertEqual(reg.relevant_triggers([event]), set([]))
event = compile.Event(compile.parse1('r(x) :- q(x)'), target='alice')
self.assertEqual(reg.relevant_triggers([event]), set([p_trigger]))
# events: multiple rules and data
event1 = compile.Event(compile.parse1('r(x) :- q(x)'), target='alice')
event2 = compile.Event(compile.parse1('separate2(1)'), target='alice')
self.assertEqual(reg.relevant_triggers([event1, event2]),
set([p_trigger, sep_trigger]))
event1 = compile.Event(compile.parse1('r(x) :- q(x)'), target='alice')
event2 = compile.Event(compile.parse1('notrigger2(1)'), target='alice')
self.assertEqual(reg.relevant_triggers([event1, event2]),
set([p_trigger]))
def test_triggers_by_table(self):
t1 = agnostic.Trigger('p', 'alice', lambda x: x)
t2 = agnostic.Trigger('p', 'alice', lambda x, y: x)
t3 = agnostic.Trigger('q', 'alice', lambda x: x)
triggers = [t1, t2, t3]
table_triggers = agnostic.TriggerRegistry.triggers_by_table(triggers)
self.assertEqual(2, len(table_triggers))
self.assertEqual(set(table_triggers[('p', 'alice', None)]),
set([t1, t2]))
self.assertEqual(set(table_triggers[('q', 'alice', None)]),
set([t3]))
def test_modals(self):
g = compile.RuleDependencyGraph()
reg = agnostic.TriggerRegistry(g)
# register
p_trigger = reg.register_table('p', 'alice', self.f, modal='exec')
triggers = reg.relevant_triggers(['alice:p'])
self.assertEqual(triggers, set([p_trigger]))
# register 2nd table
q_trigger = reg.register_table('q', 'alice', self.f)
p_triggers = reg.relevant_triggers(['alice:p'])
self.assertEqual(p_triggers, set([p_trigger]))
q_triggers = reg.relevant_triggers(['alice:q'])
self.assertEqual(q_triggers, set([q_trigger]))
class TestTriggers(base.TestCase):
class MyObject(object):
"""A class with methods that have side-effects."""
def __init__(self):
self.value = 0
self.equals = False
def increment(self):
"""Used for counting number of times function invoked."""
self.value += 1
def equal(self, realold, realnew, old, new):
"""Used for checking if function is invoked with correct args."""
self.equals = (realold == old and realnew == new)
def test_empty(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.register_trigger('p', lambda tbl, old, new: obj.increment())
run.insert('p(1)')
self.assertEqual(1, obj.value)
def test_empty2(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.insert('p(1)')
run.register_trigger('p', lambda tbl, old, new: obj.increment())
run.delete('p(1)')
self.assertEqual(1, obj.value)
def test_empty3(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.insert('p(1)')
run.delete('p(1)')
run.register_trigger('p', lambda tbl, old, new: obj.increment())
run.delete('p(1)')
self.assertEqual(0, obj.value)
def test_nochange(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.insert('p(1)')
run.register_trigger('p', lambda tbl, old, new: obj.increment())
run.insert('p(1)')
self.assertEqual(0, obj.value)
def test_batch_change_succeed(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.register_trigger('p', lambda tbl, old, new: obj.increment())
p1 = compile.parse1('p(1)')
p2 = compile.parse1('p(2)')
p3 = compile.parse1('p(3)')
result = run.update([compile.Event(p1, target='test'),
compile.Event(p2, target='test'),
compile.Event(p3, target='test')])
self.assertTrue(result[0], ("Update failed with errors: " +
";".join(str(x) for x in result[1])))
# IMPORTANT: 3 tuples inserted into p in a single batch triggers once
self.assertEqual(1, obj.value)
def test_batch_change_fail(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.register_trigger('p', lambda tbl, old, new: obj.increment())
p1 = compile.parse1('p(1)')
p2 = compile.parse1('p(x) :- q(x)')
p3 = compile.parse1('q(x) :- p(x)')
result = run.update([compile.Event(p1, target='test'),
compile.Event(p2, target='test'),
compile.Event(p3, target='test')])
self.assertFalse(result[0],
("Update should have failed with recursion: " +
";".join(str(x) for x in result[1])))
# IMPORTANT: trigger not activated even though initial events succeed
self.assertEqual(0, obj.value)
def test_dependency(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.insert('p(x) :- q(x)')
run.register_trigger('p', lambda tbl, old, new: obj.increment())
run.insert('q(1)')
self.assertEqual(1, obj.value)
def test_dependency_batch_insert(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.register_trigger('p', lambda tbl, old, new: obj.increment())
run.insert('q(1) p(x) :- q(x)')
self.assertEqual(1, obj.value)
def test_dependency_batch(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.insert('p(x) :- q(x)')
run.register_trigger('p', lambda tbl, old, new: obj.increment())
rule = compile.parse1('q(x) :- r(x)')
data = compile.parse1('r(1)')
run.update([compile.Event(rule, target='test'),
compile.Event(data, target='test')])
self.assertEqual(1, obj.value)
def test_dependency_batch_delete(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.insert('p(x) :- q(x)')
run.insert('q(x) :- r(x)')
run.insert('r(1)')
run.register_trigger('p', lambda tbl, old, new: obj.increment())
run.delete('q(x) :- r(x)')
self.assertEqual(1, obj.value)
def test_multi_dependency(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.insert('p(x) :- q(x)')
run.insert('q(x) :- r(x), s(x)')
run.insert('s(1)')
run.register_trigger('p', lambda tbl, old, new: obj.increment())
run.insert('r(1)')
self.assertEqual(1, obj.value)
def test_negation(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.insert('p(x) :- q(x), not r(x)')
run.insert('q(1)')
run.insert('q(2)')
run.insert('r(2)')
run.register_trigger('p', lambda tbl, old, new: obj.increment())
run.insert('r(1)')
self.assertEqual(1, obj.value)
run.register_trigger('p', lambda tbl, old, new: obj.increment())
run.delete('r(1)')
self.assertEqual(3, obj.value)
def test_anti_dependency(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.insert('p(x) :- q(x)')
run.insert('r(1)')
run.register_trigger('r', lambda tbl, old, new: obj.increment())
run.insert('q(1)')
self.assertEqual(0, obj.value)
def test_old_new_correctness(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.insert('p(x) :- q(x)')
run.insert('q(x) :- r(x), not s(x)')
run.insert('r(1) r(2) r(3)')
run.insert('s(2)')
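        # before the update: r = {1, 2, 3} and s = {2}, so q = p = {1, 3};
        # the update below inserts s(3) and deletes s(2), giving q = p =
        # {1, 2}, so the trigger should see old = {p(1), p(3)} and
        # new = {p(1), p(2)}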
oldp = set(compile.parse('p(1) p(3)'))
newp = set(compile.parse('p(1) p(2)'))
run.register_trigger('p',
lambda tbl, old, new:
obj.equal(oldp, newp, old, new))
run.update([compile.Event(compile.parse1('s(3)')),
compile.Event(compile.parse1('s(2)'), insert=False)])
self.assertTrue(obj.equals)
def test_unregister(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
trigger = run.register_trigger('p',
lambda tbl, old, new: obj.increment())
run.insert('p(1)')
self.assertEqual(1, obj.value)
run.unregister_trigger(trigger)
self.assertEqual(1, obj.value)
run.insert('p(2)')
self.assertEqual(1, obj.value)
self.assertRaises(KeyError, run.unregister_trigger, trigger)
self.assertEqual(1, obj.value)
def test_sequence(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.register_trigger('p', lambda tbl, old, new: obj.increment())
run.insert('p(x) :- q(x)')
run.insert('q(1)')
self.assertEqual(1, obj.value)
def test_delete_data(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.create_policy('test')
run.register_trigger('p', lambda tbl, old, new: obj.increment())
run.insert('p(x) :- q(x, y), equal(y, 1)')
run.insert('q(1, 1)')
self.assertEqual(1, obj.value)
run.delete('q(1, 1)')
self.assertEqual(2, obj.value)
def test_multi_policies(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.debug_mode()
run.create_policy('alice')
run.create_policy('bob')
run.register_trigger('p',
lambda tbl, old, new: obj.increment(), 'alice')
run.insert('p(x) :- bob:q(x)', target='alice')
run.insert('q(1)', target='bob')
self.assertEqual(1, obj.value)
run.delete('q(1)', target='bob')
self.assertEqual(2, obj.value)
def test_modal(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.debug_mode()
run.create_policy('alice')
run.register_trigger('p', lambda tbl, old, new:
obj.increment(), 'alice', 'execute')
run.insert('execute[p(x)] :- q(x)')
self.assertEqual(0, obj.value)
run.insert('q(1)')
self.assertEqual(1, obj.value)
run.insert('q(2)')
self.assertEqual(2, obj.value)
def test_initialize(self):
obj = self.MyObject()
run = agnostic.Runtime()
run.debug_mode()
run.create_policy('alice')
run.register_trigger('p', lambda tbl, old, new:
obj.increment(), 'alice', 'execute')
run.insert('execute[p(x)] :- q(x)')
self.assertEqual(obj.value, 0)
run.initialize_tables(['q'], [compile.Fact('q', [1])], 'alice')
self.assertEqual(obj.value, 1)
class TestMultipolicyRules(base.TestCase):
def test_external(self):
"""Test ability to write rules that span multiple policies."""
# External theory
run = agnostic.Runtime()
run.create_policy('test1')
run.insert('q(1)', target='test1')
run.insert('q(2)', target='test1')
run.create_policy('test2')
run.insert('p(x) :- test1:q(x)', target='test2')
actual = run.select('p(x)', target='test2')
e = helper.db_equal('p(1) p(2)', actual)
self.assertTrue(e, "Basic")
def test_multi_external(self):
"""Test multiple rules that span multiple policies."""
run = agnostic.Runtime()
run.debug_mode()
run.create_policy('test1')
run.create_policy('test2')
run.create_policy('test3')
run.insert('p(x) :- test2:p(x)', target='test1')
run.insert('p(x) :- test3:p(x)', target='test1')
run.insert('p(1)', target='test2')
run.insert('p(2)', target='test3')
actual = run.select('p(x)', target='test1')
e = helper.db_equal(actual, 'p(1) p(2)')
self.assertTrue(e, "Multiple external rules with multiple policies")
def test_external_current(self):
"""Test ability to write rules that span multiple policies."""
# External theory plus current theory
run = agnostic.Runtime()
run.create_policy('test1')
run.insert('q(1)', target='test1')
run.insert('q(2)', target='test1')
run.create_policy('test2')
run.insert('p(x) :- test1:q(x), r(x)', target='test2')
run.insert('r(1)', target='test2')
run.insert('r(2)', target='test2')
actual = run.select('p(x)', target='test2')
e = helper.db_equal(actual, 'p(1) p(2)')
self.assertTrue(e, "Mixing external theories with current theory")
def test_ignore_local(self):
"""Test ability to write rules that span multiple policies."""
# Local table ignored
run = agnostic.Runtime()
run.create_policy('test1')
run.insert('q(1)', target='test1')
run.insert('q(2)', target='test1')
run.create_policy('test2')
run.insert('p(x) :- test1:q(x), r(x)', target='test2')
run.insert('q(3)', 'test2')
run.insert('r(1)', target='test2')
run.insert('r(2)', target='test2')
run.insert('r(3)', target='test2')
actual = run.select('p(x)', target='test2')
e = helper.db_equal(actual, 'p(1) p(2)')
self.assertTrue(e, "Local table ignored")
def test_local(self):
"""Test ability to write rules that span multiple policies."""
# Local table used
run = agnostic.Runtime()
run.create_policy('test1')
run.insert('q(1)', target='test1')
run.insert('q(2)', target='test1')
run.create_policy('test2')
run.insert('p(x) :- test1:q(x), q(x)', target='test2')
run.insert('q(2)', 'test2')
actual = run.select('p(x)', target='test2')
e = helper.db_equal(actual, 'p(2)')
self.assertTrue(e, "Local table used")
def test_multiple_external(self):
"""Test ability to write rules that span multiple policies."""
# Multiple external theories
run = agnostic.Runtime()
run.create_policy('test1')
run.insert('q(1)', target='test1')
run.insert('q(2)', target='test1')
run.insert('q(3)', target='test1')
run.create_policy('test2')
run.insert('q(1)', target='test2')
run.insert('q(2)', target='test2')
run.insert('q(4)', target='test2')
run.create_policy('test3')
run.insert('p(x) :- test1:q(x), test2:q(x)', target='test3')
actual = run.select('p(x)', target='test3')
e = helper.db_equal(actual, 'p(1) p(2)')
self.assertTrue(e, "Multiple external theories")
def test_multiple_levels_external(self):
"""Test ability to write rules that span multiple policies."""
# Multiple levels of external theories
run = agnostic.Runtime()
run.debug_mode()
run.create_policy('test1')
run.insert('p(x) :- test2:q(x), test3:q(x)', target='test1')
run.insert('s(3) s(1) s(2) s(4)', target='test1')
run.create_policy('test2')
run.insert('q(x) :- test4:r(x)', target='test2')
run.create_policy('test3')
run.insert('q(x) :- test1:s(x)', target='test3')
run.create_policy('test4')
run.insert('r(1)', target='test4')
run.insert('r(2)', target='test4')
run.insert('r(5)', target='test4')
actual = run.select('p(x)', target='test1')
e = helper.db_equal(actual, 'p(1) p(2)')
self.assertTrue(e, "Multiple levels of external theories")
def test_multipolicy_head(self):
"""Test SELECT with different policy in the head."""
run = agnostic.Runtime()
run.debug_mode()
run.create_policy('test1', kind='action')
run.create_policy('test2', kind='action')
(permitted, errors) = run.insert('test2:p+(x) :- q(x)', 'test1')
self.assertTrue(permitted, "modals with policy names must be allowed")
run.insert('q(1)', 'test1')
run.insert('p(2)', 'test2')
actual = run.select('test2:p+(x)', 'test1')
e = helper.db_equal(actual, 'test2:p+(1)')
self.assertTrue(e, "Policy name in the head")
def test_multipolicy_normal_errors(self):
"""Test errors arising from rules in multiple policies."""
run = agnostic.Runtime()
run.debug_mode()
run.create_policy('test1')
# policy in head of rule
(permitted, errors) = run.insert('test2:p(x) :- q(x)', 'test1')
self.assertFalse(permitted)
self.assertIn("should not reference any policy", str(errors[0]))
# policy in head of rule with update
(permitted, errors) = run.insert('test2:p+(x) :- q(x)', 'test1')
self.assertFalse(permitted)
self.assertIn("should not reference any policy", str(errors[0]))
# policy in head of rule with update
(permitted, errors) = run.insert('test2:p-(x) :- q(x)', 'test1')
self.assertFalse(permitted)
self.assertIn("should not reference any policy", str(errors[0]))
# policy in head of fact
(permitted, errors) = run.insert('test2:p(1)', 'test1')
self.assertFalse(permitted)
self.assertIn("should not reference any policy", str(errors[0]))
# policy in head of fact
(permitted, errors) = run.insert('test2:p+(1)', 'test1')
self.assertFalse(permitted)
self.assertIn("should not reference any policy", str(errors[0]))
# policy in head of fact
(permitted, errors) = run.insert('test2:p-(1)', 'test1')
self.assertFalse(permitted)
self.assertIn("should not reference any policy", str(errors[0]))
# recursion across policies
run.insert('p(x) :- test2:q(x)', target='test1')
run.create_policy('test2')
(permit, errors) = run.insert('q(x) :- test1:p(x)', target='test2')
self.assertFalse(permit, "Recursion across theories should fail")
self.assertEqual(len(errors), 1)
self.assertIn("Rules are recursive", str(errors[0]))
def test_multipolicy_action_errors(self):
"""Test errors arising from rules in action policies."""
run = agnostic.Runtime()
run.debug_mode()
run.create_policy('test1', kind='action')
# policy in head of rule
(permitted, errors) = run.insert('test2:p(x) :- q(x)', 'test1')
self.assertFalse(permitted)
self.assertIn("should not reference any policy", str(errors[0]))
# policy in head of fact
(permitted, errors) = run.insert('test2:p(1)', 'test1')
self.assertFalse(permitted)
self.assertIn("should not reference any policy", str(errors[0]))
# recursion across policies
run.insert('p(x) :- test2:q(x)', target='test1')
run.create_policy('test2')
(permit, errors) = run.insert('q(x) :- test1:p(x)', target='test2')
self.assertFalse(permit, "Recursion across theories should fail")
self.assertEqual(len(errors), 1)
self.assertIn("Rules are recursive", str(errors[0]))
def test_dependency_graph_policy_deletion(self):
run = agnostic.Runtime()
g = run.global_dependency_graph
run.create_policy('test')
rule = 'execute[nova:flavors.delete(id)] :- nova:flavors(id)'
permitted, changes = run.insert(rule, target='test')
self.assertTrue(permitted)
run.create_policy('nova')
run.insert('flavors(1)', target="nova")
run.insert('flavors(2)', target="nova")
run.insert('flavors(3)', target="nova")
run.insert('flavors(4)', target="nova")
self.assertEqual(g.dependencies('test:nova:flavors.delete'),
set(['nova:flavors', 'test:nova:flavors.delete']))
run.delete_policy('nova')
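        # deleting the 'nova' policy removes its facts, but the rule in
        # 'test' still mentions nova:flavors, so that node and its dependency
        # edge should remain in the global graph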
self.assertTrue(g.node_in('nova:flavors'))
self.assertEqual(g.dependencies('test:nova:flavors.delete'),
set(['nova:flavors', 'test:nova:flavors.delete']))
def test_dependency_graph(self):
"""Test that dependency graph gets updated correctly."""
run = agnostic.Runtime()
run.debug_mode()
g = run.global_dependency_graph
run.create_policy('test')
run.insert('p(x) :- q(x), nova:q(x)', target='test')
self.assertTrue(g.edge_in('test:p', 'nova:q', False))
self.assertTrue(g.edge_in('test:p', 'test:q', False))
run.insert('p(x) :- s(x)', target='test')
self.assertTrue(g.edge_in('test:p', 'nova:q', False))
self.assertTrue(g.edge_in('test:p', 'test:q', False))
self.assertTrue(g.edge_in('test:p', 'test:s', False))
run.insert('q(x) :- nova:r(x)', target='test')
self.assertTrue(g.edge_in('test:p', 'nova:q', False))
self.assertTrue(g.edge_in('test:p', 'test:q', False))
self.assertTrue(g.edge_in('test:p', 'test:s', False))
self.assertTrue(g.edge_in('test:q', 'nova:r', False))
run.delete('p(x) :- q(x), nova:q(x)', target='test')
self.assertTrue(g.edge_in('test:p', 'test:s', False))
self.assertTrue(g.edge_in('test:q', 'nova:r', False))
run.update([compile.Event(helper.str2form('p(x) :- q(x), nova:q(x)'),
target='test')])
self.assertTrue(g.edge_in('test:p', 'nova:q', False))
self.assertTrue(g.edge_in('test:p', 'test:q', False))
self.assertTrue(g.edge_in('test:p', 'test:s', False))
self.assertTrue(g.edge_in('test:q', 'nova:r', False))
def test_negation(self):
"""Test that negation when applied to a different policy works."""
run = agnostic.Runtime()
run.debug_mode()
run.create_policy('alpha')
run.create_policy('beta')
run.insert('p(x) :- beta:q(x), not beta:q(x)', 'alpha')
run.insert('q(1)', 'beta')
self.assertEqual(run.select('p(x)', 'alpha'), '')
def test_built_in(self):
"""Test that built_in function works."""
run = agnostic.Runtime()
run.debug_mode()
run.create_policy('alpha')
run.create_policy('beta')
run.create_policy('sigma')
run.insert('p(x1, x2) :- '
'beta:q(x1), sigma:r(x2), not equal(x1, x2)', 'alpha')
run.insert('q(1)', 'beta')
run.insert('r(1)', 'sigma')
run.insert('r(3)', 'sigma')
self.assertEqual(run.select('p(x1,x2)', 'alpha'), 'p(1, 3)')
def test_schema_check(self):
"""Test that schema check in multiple policies works."""
run = agnostic.Runtime()
run.debug_mode()
run.create_policy('alpha')
run.create_policy('beta')
run.insert('p(x,y) :- beta:q(x,y)', 'alpha')
permitted, changes = run.insert('q(x) :- r(x)', 'beta')
self.assertFalse(permitted)
self.assertEqual(len(changes), 1)
def test_same_rules(self):
"""Test that same rule insertion can be correctly dealt with."""
run = agnostic.Runtime()
run.debug_mode()
policy = 'alpha'
run.create_policy(policy)
rulestr = 'p(x,y) :- q(x,y)'
rule = compile.parse1(rulestr)
run.insert(rulestr, policy)
self.assertIn(rule, run.policy_object(policy))
self.assertIn(
rule.head.table.table, run.policy_object(policy).schema)
run.insert(rulestr, policy)
run.delete(rulestr, policy)
self.assertNotIn(rule, run.policy_object(policy))
self.assertNotIn(
rule.head.table.table, run.policy_object(policy).schema)
class TestSelect(base.TestCase):
def test_no_dups(self):
run = agnostic.Runtime()
run.create_policy('test')
run.insert('p(x) :- q(x)')
run.insert('p(x) :- r(x)')
run.insert('q(1)')
run.insert('r(1)')
self.assertEqual(run.select('p(x)'), 'p(1)')
class TestPolicyCreationDeletion(base.TestCase):
def test_policy_creation_after_ref(self):
"""Test ability to write rules that span multiple policies."""
# Local table used
run = agnostic.Runtime()
run.create_policy('test1')
run.insert('p(x) :- test2:q(x)', 'test1')
run.create_policy('test2')
run.insert('q(1)', 'test2')
actual = run.select('p(x)', 'test1')
e = helper.db_equal(actual, 'p(1)')
self.assertTrue(e, "Creation after reference")
def test_policy_deletion_after_ref(self):
"""Test ability to write rules that span multiple policies."""
# Local table used
run = agnostic.Runtime()
run.create_policy('test1')
run.insert('p(x) :- test2:q(x)', 'test1')
        # ensure this code runs without raising an error
run.create_policy('test2')
run.delete_policy('test2')
# add the policy back, this time checking for dangling refs
run.create_policy('test2')
self.assertRaises(exception.DanglingReference, run.delete_policy,
'test2', disallow_dangling_refs=True)
def test_policy_deletion_dependency_graph(self):
"""Ensure dependency graph is properly updated when deleting policy."""
run = agnostic.Runtime()
run.create_policy('alice')
run.insert('p(x) :- q(x)')
LOG.info("graph: \n%s", run.global_dependency_graph)
self.assertTrue(run.global_dependency_graph.edge_in(
'alice:p', 'alice:q', False))
# don't delete rules first--just delete policy
run.delete_policy('alice')
self.assertEqual(len(run.global_dependency_graph), 0)
class TestDependencyGraph(base.TestCase):
def test_fact_insert(self):
run = agnostic.Runtime()
run.create_policy('test')
facts = [compile.Fact('p', [1])]
run.initialize_tables([], facts)
self.assertFalse(run.global_dependency_graph.node_in('test:p'))
def test_atom_insert(self):
run = agnostic.Runtime()
run.create_policy('test')
run.insert('p(1)')
self.assertFalse(run.global_dependency_graph.node_in('test:p'))
def test_rule_noop(self):
run = agnostic.Runtime()
run.create_policy('test')
run.insert('q(1) :- p(1)')
run.delete('q(2) :- p(2)')
self.assertTrue(run.global_dependency_graph.node_in('test:p'))
self.assertTrue(run.global_dependency_graph.node_in('test:q'))
self.assertTrue(run.global_dependency_graph.edge_in(
'test:q', 'test:p', False))
def test_atom_deletion(self):
run = agnostic.Runtime()
run.create_policy('test')
run.insert('q(x) :- p(x)')
run.delete('p(1)')
run.delete('p(1)')
# actually just testing that no error is thrown
self.assertFalse(run.global_dependency_graph.has_cycle())
class TestSimulate(base.TestCase):
DEFAULT_THEORY = 'test_default'
ACTION_THEORY = 'test_action'
def prep_runtime(self, code=None, msg=None, target=None, theories=None):
if code is None:
code = ""
if target is None:
target = self.DEFAULT_THEORY
run = agnostic.Runtime()
run.create_policy(self.DEFAULT_THEORY, abbr='default')
run.create_policy(self.ACTION_THEORY, abbr='action', kind='action')
if theories:
for theory in theories:
run.create_policy(theory)
run.debug_mode()
run.insert(code, target=target)
return run
def create(self, action_code, class_code, theories=None):
run = self.prep_runtime(theories=theories)
actth = self.ACTION_THEORY
permitted, errors = run.insert(action_code, target=actth)
self.assertTrue(permitted, "Error in action policy: {}".format(
utility.iterstr(errors)))
defth = self.DEFAULT_THEORY
permitted, errors = run.insert(class_code, target=defth)
self.assertTrue(permitted, "Error in classifier policy: {}".format(
utility.iterstr(errors)))
return run
def check(self, run, action_sequence, query, correct, msg, delta=False):
original_db = str(run.theory[self.DEFAULT_THEORY])
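        # simulate() must leave the default theory untouched, so we snapshot
        # it here and compare again after the query to check the rollback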
actual = run.simulate(
query, self.DEFAULT_THEORY, action_sequence,
self.ACTION_THEORY, delta=delta)
e = helper.datalog_equal(actual, correct)
self.assertTrue(e, msg + " (Query results not correct)")
e = helper.db_equal(
str(run.theory[self.DEFAULT_THEORY]), original_db)
self.assertTrue(e, msg + " (Rollback failed)")
def test_multipolicy_state_1(self):
"""Test update sequence affecting datasources."""
run = self.prep_runtime(theories=['nova', 'neutron'])
run.insert('p(x) :- nova:p(x)', self.DEFAULT_THEORY)
sequence = 'nova:p+(1) neutron:p+(2)'
self.check(run, sequence, 'p(x)', 'p(1)', 'Separate theories')
def test_multipolicy_state_2(self):
"""Test update sequence affecting datasources."""
run = self.prep_runtime(theories=['nova', 'neutron'])
run.insert('p(x) :- neutron:p(x)', self.DEFAULT_THEORY)
run.insert('p(x) :- nova:p(x)', self.DEFAULT_THEORY)
sequence = 'nova:p+(1) neutron:p+(2)'
self.check(run, sequence, 'p(x)', 'p(1) p(2)', 'Separate theories 2')
def test_multipolicy_state_3(self):
"""Test update sequence affecting datasources."""
run = self.prep_runtime(theories=['nova', 'neutron'])
run.insert('p(x) :- neutron:p(x)', self.DEFAULT_THEORY)
run.insert('p(x) :- nova:p(x)', self.DEFAULT_THEORY)
run.insert('p(1)', 'nova')
sequence = 'nova:p+(1) neutron:p+(2)'
self.check(run, sequence, 'p(x)', 'p(1) p(2)', 'Separate theories 3')
self.check(run, '', 'p(x)', 'p(1)', 'Existing data separate theories')
def test_multipolicy_action_sequence(self):
"""Test sequence updates with actions that impact multiple policies."""
action_code = ('nova:p+(x) :- q(x)'
'neutron:p+(y) :- q(x), plus(x, 1, y)'
'ceilometer:p+(y) :- q(x), plus(x, 5, y)'
'action("q")')
classify_code = 'p(x) :- nova:p(x) p(x) :- neutron:p(x) p(3)'
run = self.create(action_code, classify_code,
theories=['nova', 'neutron', 'ceilometer'])
action_sequence = 'q(1)'
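        # q(1) yields nova:p+(1), neutron:p+(2) (via plus(1, 1, 2)) and
        # ceilometer:p+(6) (via plus(1, 5, 6)); the classifier only reads
        # nova:p and neutron:p and already has p(3), so the simulated query
        # should return p(1) p(2) p(3)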
self.check(run, action_sequence, 'p(x)', 'p(1) p(2) p(3)',
'Multi-policy actions')
def test_action_sequence(self):
"""Test sequence updates with actions."""
# Simple
action_code = ('p+(x) :- q(x) action("q")')
classify_code = 'p(2)' # just some other data present
run = self.create(action_code, classify_code)
action_sequence = 'q(1)'
self.check(run, action_sequence, 'p(x)', 'p(1) p(2)', 'Simple')
# Noop does not break rollback
action_code = ('p-(x) :- q(x)'
'action("q")')
classify_code = ('')
run = self.create(action_code, classify_code)
action_sequence = 'q(1)'
self.check(run, action_sequence, 'p(x)', '',
"Rollback handles Noop")
# Add and delete
action_code = ('action("act") '
'p+(x) :- act(x) '
'p-(y) :- act(x), r(x, y) ')
classify_code = 'p(2) r(1, 2)'
run = self.create(action_code, classify_code)
action_sequence = 'act(1)'
self.check(run, action_sequence, 'p(x)', 'p(1)', 'Add and delete')
# insertion takes precedence over deletion
action_code = ('p+(x) :- q(x)'
'p-(x) :- r(x)'
'action("q")')
classify_code = ('')
run = self.create(action_code, classify_code)
# ordered so that consequences will be p+(1) p-(1)
action_sequence = 'q(1), r(1) :- true'
self.check(run, action_sequence, 'p(x)', 'p(1)',
"Deletion before insertion")
# multiple action sequences 1
action_code = ('p+(x) :- q(x)'
'p-(x) :- r(x)'
'action("q")'
'action("r")')
classify_code = ('')
run = self.create(action_code, classify_code)
action_sequence = 'q(1) r(1)'
self.check(run, action_sequence, 'p(x)', '',
"Multiple actions: inversion from {}")
# multiple action sequences 2
action_code = ('p+(x) :- q(x)'
'p-(x) :- r(x)'
'action("q")'
'action("r")')
classify_code = ('p(1)')
run = self.create(action_code, classify_code)
action_sequence = 'q(1) r(1)'
self.check(run, action_sequence, 'p(x)', '',
"Multiple actions: inversion from p(1), first is noop")
# multiple action sequences 3
action_code = ('p+(x) :- q(x)'
'p-(x) :- r(x)'
'action("q")'
'action("r")')
classify_code = ('p(1)')
run = self.create(action_code, classify_code)
action_sequence = 'r(1) q(1)'
self.check(run, action_sequence, 'p(x)', 'p(1)',
"Multiple actions: inversion from p(1), first is not noop")
# multiple action sequences 4
action_code = ('p+(x) :- q(x)'
'p-(x) :- r(x)'
'action("q")'
'action("r")')
classify_code = ('')
run = self.create(action_code, classify_code)
action_sequence = 'r(1) q(1)'
self.check(run, action_sequence, 'p(x)', 'p(1)',
"Multiple actions: inversion from {}, first is not noop")
# Action with additional info
action_code = ('p+(x,z) :- q(x,y), r(y,z)'
'action("q") action("r")')
classify_code = 'p(1,2)'
run = self.create(action_code, classify_code)
action_sequence = 'q(1,2), r(2,3) :- true'
self.check(run, action_sequence, 'p(x,y)', 'p(1,2) p(1,3)',
'Action with additional info')
def test_state_rule_sequence(self):
"""Test state and rule update sequences."""
# State update
action_code = ''
classify_code = 'p(1)'
run = self.create(action_code, classify_code)
action_sequence = 'p+(2)'
self.check(run, action_sequence, 'p(x)', 'p(1) p(2)',
'State update')
# Rule update
action_code = ''
classify_code = 'q(1)'
run = self.create(action_code, classify_code)
action_sequence = 'p+(x) :- q(x)'
self.check(run, action_sequence, 'p(x)', 'p(1)',
'Rule update')
def test_complex_sequence(self):
"""Test more complex sequences of updates."""
# action with query
action_code = ('p+(x, y) :- q(x, y)'
'action("q")')
classify_code = ('r(1)')
run = self.create(action_code, classify_code)
action_sequence = 'q(x, 0) :- r(x)'
self.check(run, action_sequence, 'p(x,y)', 'p(1,0)',
'Action with query')
# action sequence with results
action_code = ('p+(id, val) :- create(val)'
'p+(id, val) :- update(id, val)'
'p-(id, val) :- update(id, newval), p(id, val)'
'action("create")'
'action("update")'
'result(x) :- create(val), p+(x,val)')
classify_code = 'hasval(val) :- p(x, val)'
run = self.create(action_code, classify_code)
action_sequence = 'create(0) update(x,1) :- result(x)'
self.check(run, action_sequence, 'hasval(x)', 'hasval(1)',
'Action sequence with results')
def test_delta_add(self):
"""Test when asking for changes in query."""
action_code = ('action("q") '
'p+(x) :- q(x) ')
classify_code = 'p(2)' # just some other data present
run = self.create(action_code, classify_code)
action_sequence = 'q(1)'
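        # with delta=True the query returns the change set (p+/p-) rather
        # than the resulting table, so inserting q(1) should report p+(1)
        # and not the pre-existing p(2)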
self.check(run, action_sequence, 'p(x)', 'p+(1)', 'Add',
delta=True)
def test_delta_delete(self):
"""Test when asking for changes in query."""
action_code = ('action("q") '
'p-(x) :- q(x) ')
classify_code = 'p(1) p(2)' # p(2): just some other data present
run = self.create(action_code, classify_code)
action_sequence = 'q(1)'
self.check(run, action_sequence, 'p(x)', 'p-(1)', 'Delete',
delta=True)
def test_delta_add_delete(self):
"""Test when asking for changes in query."""
action_code = ('action("act") '
'p+(x) :- act(x) '
'p-(y) :- act(x), r(x, y) ')
classify_code = 'p(2) r(1, 2) p(3)' # p(3): just other data present
run = self.create(action_code, classify_code)
action_sequence = 'act(1)'
self.check(run, action_sequence, 'p(x)', 'p+(1) p-(2)',
'Add and delete', delta=True)
def test_key_value_schema(self):
"""Test action of key/value updates."""
action_code = (
'action("changeAttribute")'
'server_attributes+(uid, name, newvalue) :- '
'changeAttribute(uid, name, newvalue) '
'server_attributes-(uid, name, oldvalue) :- '
' changeAttribute(uid, name, newvalue), '
' server_attributes(uid, name, oldvalue)')
policy = 'error(uid) :- server_attributes(uid, name, 0)'
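        # changeAttribute(uid, name, newvalue) overwrites any previous value
        # for (uid, name), and error(uid) fires whenever some attribute of
        # uid has value 0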
run = self.create(action_code, policy)
seq = 'changeAttribute(101, "cpu", 0)'
self.check(run, seq, 'error(x)', 'error(101)',
'Basic error')
run = self.create(action_code, policy)
seq = 'changeAttribute(101, "cpu", 1)'
self.check(run, seq, 'error(x)', '',
'Basic non-error')
data = ('server_attributes(101, "cpu", 1)')
run = self.create(action_code, policy + data)
seq = 'changeAttribute(101, "cpu", 0)'
self.check(run, seq, 'error(x)', 'error(101)',
'Overwrite existing to cause error')
data = ('server_attributes(101, "cpu", 0)')
run = self.create(action_code, policy + data)
seq = 'changeAttribute(101, "cpu", 1)'
self.check(run, seq, 'error(x)', '',
'Overwrite existing to eliminate error')
data = ('server_attributes(101, "cpu", 0)'
'server_attributes(101, "disk", 0)')
run = self.create(action_code, policy + data)
seq = 'changeAttribute(101, "cpu", 1)'
self.check(run, seq, 'error(x)', 'error(101)',
'Overwrite existing but still error')
def test_duplicates(self):
run = agnostic.Runtime()
run.create_policy('test')
run.insert('p(x) :- q(x)')
run.insert('p(x) :- r(x)')
run.insert('q(1)')
run.insert('r(1)')
self.assertEqual(run.simulate('p(x)', 'test', '', 'test'), 'p(1)')
class TestActionExecution(base.TestCase):
def setUp(self):
super(TestActionExecution, self).setUp()
self.run = agnostic.DseRuntime('test')
self.run.service_exists = mock.MagicMock()
self.run.service_exists.return_value = True
self.run._rpc = mock.MagicMock()
def test_insert_rule_insert_data(self):
self.run.create_policy('test')
self.run.debug_mode()
self.run.insert('execute[p(x)] :- q(x)')
self.assertEqual(len(self.run.logger.messages), 0,
"Improper action logged")
self.run.insert('q(1)')
self.assertEqual(len(self.run.logger.messages), 1,
"No action logged")
self.assertEqual(self.run.logger.messages[0], 'Executing test:p(1)')
expected_args = ('test', 'p')
expected_kwargs = {'args': {'positional': [1]}}
args, kwargs = self.run._rpc.call_args_list[0]
self.assertEqual(expected_args, args)
self.assertEqual(expected_kwargs, kwargs)
def test_insert_data_insert_rule(self):
run = self.run
run.create_policy('test')
run.debug_mode()
run.insert('q(1)')
self.assertEqual(len(run.logger.messages), 0, "Improper action logged")
run.insert('execute[p(x)] :- q(x)')
self.assertEqual(len(run.logger.messages), 1, "No action logged")
self.assertEqual(run.logger.messages[0], 'Executing test:p(1)')
expected_args = ('test', 'p')
expected_kwargs = {'args': {'positional': [1]}}
args, kwargs = run._rpc.call_args_list[0]
self.assertEqual(expected_args, args)
self.assertEqual(expected_kwargs, kwargs)
def test_insert_data_insert_rule_delete_data(self):
run = self.run
run.create_policy('test')
run.debug_mode()
run.insert('q(1)')
self.assertEqual(len(run.logger.messages), 0, "Improper action logged")
run.insert('execute[p(x)] :- q(x)')
self.assertEqual(len(run.logger.messages), 1, "No action logged")
self.assertEqual(run.logger.messages[0], 'Executing test:p(1)')
run.delete('q(1)')
self.assertEqual(len(run.logger.messages), 1, "Delete failure")
self.assertEqual(run.logger.messages[0], 'Executing test:p(1)')
expected_args = ('test', 'p')
expected_kwargs = {'args': {'positional': [1]}}
args, kwargs = run._rpc.call_args_list[0]
self.assertEqual(expected_args, args)
self.assertEqual(expected_kwargs, kwargs)
def test_insert_data_insert_rule_delete_rule(self):
run = self.run
run.create_policy('test')
run.debug_mode()
run.insert('q(1)')
self.assertEqual(len(run.logger.messages), 0, "Improper action logged")
run.insert('execute[p(x)] :- q(x)')
self.assertEqual(len(run.logger.messages), 1, "No action logged")
self.assertEqual(run.logger.messages[0], 'Executing test:p(1)')
run.delete('execute[p(x)] :- q(x)')
self.assertEqual(len(run.logger.messages), 1, "Delete failure")
self.assertEqual(run.logger.messages[0], 'Executing test:p(1)')
expected_args = ('test', 'p')
expected_kwargs = {'args': {'positional': [1]}}
args, kwargs = run._rpc.call_args_list[0]
self.assertEqual(expected_args, args)
self.assertEqual(expected_kwargs, kwargs)
def test_insert_data_insert_rule_noop_insert(self):
run = self.run
run.create_policy('test')
run.debug_mode()
run.insert('q(1)')
self.assertEqual(len(run.logger.messages), 0, "Improper action logged")
run.insert('execute[p(x)] :- q(x)')
self.assertEqual(len(run.logger.messages), 1, "No action logged")
self.assertEqual(run.logger.messages[0], 'Executing test:p(1)')
run.insert('q(1)')
self.assertEqual(len(run.logger.messages), 1, "Improper action logged")
self.assertEqual(run.logger.messages[0], 'Executing test:p(1)')
expected_args = ('test', 'p')
expected_kwargs = {'args': {'positional': [1]}}
args, kwargs = run._rpc.call_args_list[0]
self.assertEqual(expected_args, args)
self.assertEqual(expected_kwargs, kwargs)
def test_insert_multiple_rules(self):
        # TODO: basic test
# test recursion caused at nth rule
# test transactional trigger activation:
# e.g.
# a(1)
# trigger(x) :- a(x), not b(x)
# b(x) :- a(x)
# trigger should not be activated
pass
def test_disjunction(self):
run = self.run
run.create_policy('test')
run.debug_mode()
run.insert('execute[p(x)] :- q(x)')
run.insert('execute[p(x)] :- r(x)')
self.assertEqual(len(run.logger.messages), 0, "Improper action logged")
run.insert('q(1)')
self.assertEqual(len(run.logger.messages), 1, "No action logged")
self.assertEqual(run.logger.messages[0], 'Executing test:p(1)')
run.insert('r(1)')
self.assertEqual(len(run.logger.messages), 1, "Improper action logged")
self.assertEqual(run.logger.messages[0], 'Executing test:p(1)')
expected_args = ('test', 'p')
expected_kwargs = {'args': {'positional': [1]}}
args, kwargs = run._rpc.call_args_list[0]
self.assertEqual(expected_args, args)
self.assertEqual(expected_kwargs, kwargs)
def test_multiple_instances(self):
run = self.run
run.create_policy('test')
run.debug_mode()
run.insert('q(1)')
run.insert('q(2)')
self.assertEqual(len(run.logger.messages), 0, "Improper action logged")
run.insert('execute[p(x)] :- q(x)')
self.assertEqual(len(run.logger.messages), 2, "No action logged")
actualset = set([u'Executing test:p(1)', u'Executing test:p(2)'])
self.assertEqual(actualset, set(run.logger.messages))
expected_args_list = [
[('test', 'p'), {'args': {'positional': [1]}}],
[('test', 'p'), {'args': {'positional': [2]}}],
]
for args, kwargs in run._rpc.call_args_list:
self.assertIn([args, kwargs], expected_args_list)
expected_args_list.remove([args, kwargs])
def test_disabled_execute_action(self):
cfg.CONF.set_override('enable_execute_action', False)
run = agnostic.DseRuntime('test')
run._rpc = mock.MagicMock()
run.service_exists = mock.MagicMock()
service_name = 'test-service'
action = 'non_executable_action'
action_args = {'positional': ['p_arg1'],
'named': {'key1': 'value1'}}
run.execute_action(service_name, action, action_args)
self.assertFalse(run._rpc.called)
class TestDisabledRules(base.SqlTestCase):
"""Tests for Runtime's ability to enable/disable rules."""
# insertions
def test_insert_enabled(self):
run = agnostic.Runtime()
run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE)
schema = compile.Schema({'q': ('id', 'name', 'status')})
run.set_schema('test', schema)
obj = run.policy_object('test')
run.insert('p(x) :- q(id=x)')
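        # q(id=x) is a named-column reference; the schema above tells the
        # compiler which column of q 'id' refers to, so the rule compiles
        # and is enabled immediately (no disabled or error events)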
self.assertEqual(len(run.error_events), 0)
self.assertEqual(len(run.disabled_events), 0)
self.assertEqual(len(obj.content()), 1)
def test_insert_disabled(self):
run = agnostic.Runtime()
run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE)
obj = run.policy_object('test')
run.insert('p(x) :- q(id=x)')
self.assertEqual(len(run.disabled_events), 1)
self.assertEqual(len(obj.content()), 0)
def test_persistent_insert_disabled(self):
"""Test that persistent_insert_rule errors on IncompleteSchemaException
When a table schema is not available, named column references are
permitted but disabled in non-persistent rule insert to allow for
late-arriving schema when importing rules already in DB.
This behavior is not necessary in persistent_insert.
"""
run = agnostic.DseRuntime('dse')
run.synchronizer = synchronizer.PolicyRuleSynchronizer(
run, run.node)
run.create_policy('data', kind=datalog_base.DATASOURCE_POLICY_TYPE)
run.persistent_create_policy('policy')
obj = run.policy_object('policy')
run.insert('p(x) :- data:q(id=x)')
try:
run.persistent_insert_rule('policy', 'p(x) :- data:q(id=x)',
                                       '', '')
            self.fail('persistent_insert_rule should have raised '
                      'PolicyException')
        except exception.PolicyException as e:
self.assertTrue(
'Literal data:q(id=x) uses unknown table q'
in str(e),
'Named column reference on unknown table '
'should be disallowed in persistent insert')
self.assertEqual(len(run.disabled_events), 0)
self.assertEqual(len(obj.content()), 0)
try:
run.persistent_insert_rule('policy', 'p(x) :- unknown:q(id=x)',
                                       '', '')
            self.fail('persistent_insert_rule should have raised '
                      'PolicyException')
        except exception.PolicyException as e:
self.assertTrue(
'Literal unknown:q(id=x) uses named arguments, but the '
'schema is unknown.'
in str(e),
'Named column reference on unknown table '
'should be disallowed in persistent insert')
self.assertEqual(len(run.disabled_events), 0)
self.assertEqual(len(obj.content()), 0)
def test_insert_errors(self):
run = agnostic.Runtime()
run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE)
schema = compile.Schema({'q': ('name', 'status')})
run.set_schema('test', schema)
obj = run.policy_object('test')
permitted, errors = run.insert('p(x) :- q(id=x)')
self.assertFalse(permitted)
errstring = " ".join(str(x) for x in errors)
self.assertIn("column name id does not exist", errstring)
self.assertEqual(len(run.error_events), 0)
self.assertEqual(len(run.disabled_events), 0)
self.assertEqual(len(obj.content()), 0)
def test_insert_set_schema_disabled(self):
run = agnostic.Runtime()
run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE)
obj = run.policy_object('test')
run.insert('p(x) :- q(id=x)') # rule is disabled
self.assertEqual(len(run.disabled_events), 1)
schema = compile.Schema({'q': ('id', 'name', 'status')})
run.set_schema('test', schema)
self.assertEqual(len(run.error_events), 0)
self.assertEqual(len(run.disabled_events), 0)
self.assertEqual(len(obj.content()), 1)
def test_insert_set_schema_disabled_multiple(self):
# insert rule that gets disabled
run = agnostic.Runtime()
run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE)
run.create_policy('nova', kind=datalog_base.DATASOURCE_POLICY_TYPE)
obj = run.policy_object('test')
run.insert('p(x) :- q(id=x), nova:r(id=x)', 'test')
self.assertEqual(len(run.disabled_events), 1)
# set first schema
schema = compile.Schema({'q': ('id', 'name', 'status')})
run.set_schema('test', schema)
self.assertEqual(len(run.error_events), 0)
self.assertEqual(len(run.disabled_events), 1)
self.assertEqual(len(obj.content()), 0)
# set second schema
schema = compile.Schema({'r': ('id', 'name', 'status')})
run.set_schema('nova', schema)
self.assertEqual(len(run.error_events), 0)
self.assertEqual(len(run.disabled_events), 0)
self.assertEqual(len(obj.content()), 1)
def test_insert_set_schema_errors(self):
run = agnostic.Runtime()
run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE)
obj = run.policy_object('test')
run.insert('p(x) :- q(id=x)') # rule is disabled
self.assertEqual(len(run.disabled_events), 1)
schema = compile.Schema({'q': ('name', 'status')},)
run.set_schema('test', schema)
self.assertEqual(len(run.error_events), 1)
self.assertEqual(len(run.disabled_events), 0)
self.assertEqual(len(obj.content()), 0)
def test_insert_inferred_schema_errors(self):
run = agnostic.Runtime()
run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE)
run.insert('p(x) :- q(x)')
permitted, errs = run.insert('q(1,2)')
self.assertFalse(permitted)
# deletions
def test_delete_enabled(self):
run = agnostic.Runtime()
run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE)
schema = compile.Schema({'q': ('id', 'name', 'status')})
run.set_schema('test', schema)
obj = run.policy_object('test')
run.insert('p(x) :- q(id=x)')
self.assertEqual(len(obj.content()), 1)
run.delete('p(x) :- q(id=x)')
self.assertEqual(len(run.error_events), 0)
self.assertEqual(len(run.disabled_events), 0)
self.assertEqual(len(obj.content()), 0)
def test_delete_set_schema_disabled(self):
run = agnostic.Runtime()
run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE)
obj = run.policy_object('test')
run.insert('p(x) :- q(id=x)')
run.delete('p(x) :- q(id=x)')
self.assertEqual(len(run.disabled_events), 2)
self.assertEqual(len(obj.content()), 0)
schema = compile.Schema({'q': ('id', 'name', 'status')})
run.set_schema('test', schema)
self.assertEqual(len(run.disabled_events), 0)
self.assertEqual(len(obj.content()), 0)
def test_delete_errors(self):
run = agnostic.Runtime()
run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE)
schema = compile.Schema({'q': ('name', 'status')})
run.set_schema('test', schema)
obj = run.policy_object('test')
permitted, errors = run.delete('p(x) :- q(id=x)')
self.assertFalse(permitted)
errstring = " ".join(str(x) for x in errors)
self.assertIn("column name id does not exist", errstring)
self.assertEqual(len(run.error_events), 0)
self.assertEqual(len(run.disabled_events), 0)
self.assertEqual(len(obj.content()), 0)
def test_delete_set_schema_errors(self):
run = agnostic.Runtime()
run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE)
obj = run.policy_object('test')
run.delete('p(x) :- q(id=x)') # rule is disabled
self.assertEqual(len(run.disabled_events), 1)
schema = compile.Schema({'q': ('name', 'status')})
run.set_schema('test', schema)
self.assertEqual(len(run.error_events), 1)
self.assertEqual(len(run.disabled_events), 0)
self.assertEqual(len(obj.content()), 0)
# errors in set_schema
def test_set_schema_unknown_policy(self):
run = agnostic.Runtime()
schema = compile.Schema({'q': ('name', 'status')})
try:
run.set_schema('test', schema)
self.fail("Error not thrown on unknown policy")
except exception.CongressException as e:
self.assertIn("not been created", str(e))
def test_disallow_schema_change(self):
# Ensures that cannot change schema once it is set.
# Can be removed once we support schema changes (e.g. for upgrade).
run = agnostic.Runtime()
run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE)
schema = compile.Schema({'q': ('name', 'status')})
run.set_schema('test', schema)
schema = compile.Schema({'q': ('id', 'name', 'status')})
try:
run.set_schema('test', schema)
self.fail("Error not thrown on schema change")
except exception.CongressException as e:
self.assertIn("Schema for test already set", str(e))
def test_insert_without_datasource_policy(self):
run = agnostic.Runtime()
run.create_policy('beta') # not datasource policy
# exception because col refs over non-datasource policy
permitted, errors = run.insert('p(x) :- beta:q(name=x)')
self.assertFalse(permitted)
self.assertTrue(
any("does not reference a datasource policy" in str(e)
for e in errors))
def test_delete_policy_while_disabled_events_outstanding(self):
"""Test deleting policy while there are disabled_events outstanding."""
run = agnostic.Runtime()
# generate disabled event
run.create_policy('test', kind=datalog_base.DATASOURCE_POLICY_TYPE)
obj = run.policy_object('test')
run.insert('p(x) :- q(id=x)')
self.assertEqual(len(run.disabled_events), 1)
self.assertEqual(len(obj.content()), 0)
# create and delete another policy
run.create_policy('to_delete')
run.delete_policy('to_delete')
class TestDelegation(base.TestCase):
"""Tests for Runtime's delegation functionality."""
def test_subpolicy(self):
run = agnostic.Runtime()
run.create_policy('test')
policy = 'error(x) :- q(x), r(x)'
run.insert(policy)
subpolicy = run.find_subpolicy(
set(['q']), set(), set(['error', 'warning']))
e = helper.datalog_equal(subpolicy, policy)
self.assertTrue(e)
def test_subpolicy_multiple(self):
run = agnostic.Runtime()
run.create_policy('test')
policy = ('error(x) :- q(x), r(x) '
'error(x) :- q(x), s(x) '
'warning(x) :- t(x), q(x)')
run.insert(policy)
subpolicy = run.find_subpolicy(
set(['q']), set(), set(['error', 'warning']))
e = helper.datalog_equal(subpolicy, policy)
self.assertTrue(e)
def test_subpolicy_prohibited(self):
run = agnostic.Runtime()
run.create_policy('test')
policy1 = 'error(x) :- q(x), r(x) '
policy2 = 'error(x) :- q(x), s(x) '
policy3 = 'error(x) :- q(x), prohibit(x, y) '
policy4 = 'warning(x) :- t(x), q(x)'
run.insert(policy1 + policy2 + policy3 + policy4)
subpolicy = run.find_subpolicy(
set(['q']), set(['prohibit']), set(['error', 'warning']))
e = helper.datalog_equal(subpolicy, policy1 + policy2 + policy4)
self.assertTrue(e)
def test_subpolicy_layers(self):
run = agnostic.Runtime()
run.create_policy('test')
policy1 = 'error(x) :- t(x), u(x) '
policy2 = ' t(x) :- q(x), s(x) '
policy3 = 'error(x) :- p(x) '
policy4 = ' p(x) :- prohibit(x, y)'
policy5 = 'warning(x) :- t(x), q(x)'
run.insert(policy1 + policy2 + policy3 + policy4 + policy5)
subpolicy = run.find_subpolicy(
set(['q']), set(['prohibit']), set(['error', 'warning']))
e = helper.datalog_equal(subpolicy, policy1 + policy2 + policy5)
self.assertTrue(e)
|
{
"content_hash": "90ea04d0859ec4265b1c06a9bc0f532f",
"timestamp": "",
"source": "github",
"line_count": 1910,
"max_line_length": 79,
"avg_line_length": 41.31413612565445,
"alnum_prop": 0.5638702319097706,
"repo_name": "openstack/congress",
"id": "4b2cb7b53307ae3f1665100da8062ba7f923c436",
"size": "79542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "congress/tests/policy_engines/test_agnostic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "7778"
},
{
"name": "Makefile",
"bytes": "228"
},
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "2614028"
},
{
"name": "Shell",
"bytes": "45786"
}
],
"symlink_target": ""
}
|
__author__ = "Jerome Leclanche"
__email__ = "jerome@leclan.ch"
__version__ = "1.4.0"
class NotificationError(Exception):
pass
|
{
"content_hash": "e722e700eafcc2bfd3590973a3ce750f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 35,
"avg_line_length": 18.428571428571427,
"alnum_prop": 0.6589147286821705,
"repo_name": "nnseva/django-push-notifications",
"id": "d7303fc882a1e1457402117538085d05baaa80f4",
"size": "130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "push_notifications/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55744"
}
],
"symlink_target": ""
}
|
import numpy
import math
cache = {}
def run(world):
global cache
# New iteration, free the cache
cache = {}
def query(world, matrix_name, initial_position, search_radius_in_zones=2, zone_size=8, search_for_max=True, wrap=False, return_value_if_same_zone=None):
global cache
matrix = world.get_matrix(matrix_name)
if (matrix_name, zone_size, wrap) not in cache:
# Reinterpret cast from bool (stored as uint8) to uint8
mint = numpy.matrix(matrix, dtype=numpy.uint8, copy=False)
# Temporary reduction along y axis
mtmp = numpy.ndarray((matrix.shape[0]/zone_size,matrix.shape[1]),dtype=int)
mtmp.fill(0)
# Final reduction
mout = numpy.ndarray((matrix.shape[0]/zone_size,matrix.shape[1]/zone_size),dtype=int)
mout.fill(0)
if wrap == False:
for offset in xrange(zone_size):
numpy.add(mtmp,numpy.take(mint,xrange(offset,matrix.shape[0],zone_size),axis=0),mtmp)
for offset in xrange(zone_size):
numpy.add(mout,numpy.take(mtmp,xrange(offset,matrix.shape[0],zone_size),axis=1),mout)
else:
for offset in xrange(zone_size):
numpy.add(mtmp,numpy.take(numpy.roll(mint,offset,axis=0),xrange(0,matrix.shape[0],zone_size),axis=0),mtmp)
for offset in xrange(zone_size):
numpy.add(mout,numpy.take(numpy.roll(mtmp,offset,axis=1),xrange(0,matrix.shape[0],zone_size),axis=1),mout)
m = mout
cache[(matrix_name, zone_size, wrap)] = m
else:
m = cache[(matrix_name, zone_size, wrap)]
# Transform the initial position into the new coordinate system, in zones
if wrap == False:
position_in_zone = (int(math.floor(initial_position[0] / float(zone_size))), int(math.floor(initial_position[1] / float(zone_size))))
else:
position_in_zone = (int(math.floor(initial_position[0] / float(zone_size))) % (matrix.shape[0]/zone_size), int(math.floor(initial_position[1] / float(zone_size))) % (matrix.shape[1]/zone_size))
# Search for the optimal value
# Initialize with the initial position
bestPos = list(position_in_zone)
bestValue = m[tuple(bestPos)]
pos = [0,0]
    # For each possible dy, from -R to +R (dx starts at 0 and is handled below)
for dy in xrange(-search_radius_in_zones, search_radius_in_zones+1):
# Set first coordinate, adjust according to the wrapping
pos[0] = position_in_zone[0]+dy
if wrap:
pos[0] %= (matrix.shape[0]/zone_size)
elif pos[0] < 0 or pos[0] >= matrix.shape[0]/zone_size:
# If we cannot wrap and we're outside, skip
continue
# Test first with a dx equal to 0
pos[1] = position_in_zone[1]
val = m[tuple(pos)]
if (search_for_max and val > bestValue) or (not search_for_max and val < bestValue):
bestValue = val
bestPos = list(pos)
# Now go as far as possible in 2 directions
for xstep in -1,1:
dx = xstep # dx=0 already done
# Stay in the search radius
while math.hypot(dy,dx) < search_radius_in_zones:
# Set second coordinate, adjust according to the wrapping
pos[1] = position_in_zone[1]+dx
if wrap:
pos[1] %= (matrix.shape[1]/zone_size)
elif pos[1] < 0 or pos[1] >= matrix.shape[1]/zone_size:
# If no wrapping and outside, that's over for this direction
break
# Test the new value
val = m[tuple(pos)]
if (search_for_max and val > bestValue) or (not search_for_max and val < bestValue):
bestValue = val
bestPos = list(pos)
# Go further in that direction
dx += xstep
if bestPos == list(position_in_zone):
return return_value_if_same_zone
# Return the position in the original coordinate system, take the center
rtn = [int(round((bestPos[0]+0.5)*zone_size)), int(round((bestPos[1]+0.5)*zone_size))]
if wrap:
rtn[0] %= matrix.shape[0]
rtn[1] %= matrix.shape[1]
else:
rtn[0] = min(rtn[0], matrix.shape[0])
rtn[1] = min(rtn[1], matrix.shape[1])
return rtn, bestValue
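# --- Illustrative sketch (not part of the original module) ---
# The cached reduction above collapses the boolean matrix into per-zone counts
# before searching.  A minimal stand-alone equivalent for the non-wrapping case,
# assuming both matrix dimensions are exact multiples of zone_size
# (_zone_counts_example is a hypothetical helper, not used by query()):
def _zone_counts_example(bool_matrix, zone_size):
    """Return an array of per-zone True counts."""
    m = numpy.asarray(bool_matrix, dtype=numpy.uint8)
    rows, cols = m.shape
    # Split into (zone_rows, zone_size, zone_cols, zone_size) blocks and sum
    # each block; this matches what the take/add loops compute for wrap=False.
    blocks = m.reshape(rows // zone_size, zone_size,
                       cols // zone_size, zone_size)
    return blocks.sum(axis=3).sum(axis=1)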
|
{
"content_hash": "fbc598c8923c170115c80b43c0bc32d8",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 201,
"avg_line_length": 47.67032967032967,
"alnum_prop": 0.5919778699861687,
"repo_name": "ofavre/cellulart",
"id": "3eb91aed3edfab3ecfa59c4061a69c152935aa8e",
"size": "4392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/queries/densezoneof_boolean.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "134000"
},
{
"name": "Tcl",
"bytes": "4273724"
}
],
"symlink_target": ""
}
|
from google.cloud.logging_v2.gapic import enums
import logging
import mock
import rdr_service.services.gcp_logging as gcp_logging
from tests.helpers.unittest_base import BaseTestCase
class GCPLoggingTest(BaseTestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.uses_database = False
def test_published_severity_level(self):
"""Ensure that the severity level used is the highest of the individual logs being published"""
with mock.patch('rdr_service.services.gcp_logging.os') as mock_os,\
mock.patch('rdr_service.services.gcp_logging.gcp_logging_v2') as mock_gcp_logging:
# Trick the logger into thinking it's on the server and should initialize
mock_os.environ = {
'GAE_ENV': 'TEST'
}
# Initialize to have the log handler start buffering logs
gcp_logging.initialize_logging()
# Make some logs
logging.info('test info message')
logging.error('error')
logging.warning('warning')
logging.info('one last info')
# Force the logs to 'publish' to the mock object
gcp_logging.flush_request_logs()
# Check that the highest severity of the logs was used for the published entry
mock_final_log_entry_call = mock_gcp_logging.types.log_entry_pb2.LogEntry
_, kwargs = mock_final_log_entry_call.call_args
logged_severity = kwargs.get('severity')
self.assertEqual(enums.LogSeverity.ERROR, logged_severity)
def test_handle_missing_severity_when_finding_highest(self):
lines = [
{'severity': logging.INFO},
{'msg': 'This one has no severity'},
{'severity': None},
{'severity': logging.CRITICAL},
{'severity': logging.ERROR},
]
highest_severity = gcp_logging.get_highest_severity_level_from_lines(lines)
self.assertEqual(logging.CRITICAL, highest_severity)
def test_handle_no_severities_when_finding_highest(self):
lines = [
{'msg': 'This one has no severity'},
{'severity': None},
]
highest_severity = gcp_logging.get_highest_severity_level_from_lines(lines)
self.assertEqual(enums.LogSeverity.INFO, highest_severity)
|
{
"content_hash": "16c41d393c5548b4d45a3c6c9890ca73",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 103,
"avg_line_length": 39,
"alnum_prop": 0.6233711643547709,
"repo_name": "all-of-us/raw-data-repository",
"id": "afb723f8698b17b1ee7269e2d54c051b8aae6caf",
"size": "2379",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "tests/service_tests/test_gcp_logging.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
}
|
"""Infrastructure for registering and firing callbacks on application events.
Unlike :mod:`IPython.core.hooks`, which lets end users set single functions to
be called at specific times, or a collection of alternative methods to try,
callbacks are designed to be used by extension authors. A number of callbacks
can be registered for the same event without needing to be aware of one another.
The functions defined in this module are no-ops indicating the names of available
events and the arguments which will be passed to them.
.. note::
This API is experimental in IPython 2.0, and may be revised in future versions.
"""
from backcall import callback_prototype
class EventManager(object):
"""Manage a collection of events and a sequence of callbacks for each.
This is attached to :class:`~IPython.core.interactiveshell.InteractiveShell`
instances as an ``events`` attribute.
.. note::
This API is experimental in IPython 2.0, and may be revised in future versions.
"""
def __init__(self, shell, available_events):
"""Initialise the :class:`CallbackManager`.
Parameters
----------
shell
The :class:`~IPython.core.interactiveshell.InteractiveShell` instance
available_events
An iterable of names for callback events.
"""
self.shell = shell
self.callbacks = {n:[] for n in available_events}
def register(self, event, function):
"""Register a new event callback.
Parameters
----------
event : str
The event for which to register this callback.
function : callable
A function to be called on the given event. It should take the same
parameters as the appropriate callback prototype.
Raises
------
TypeError
If ``function`` is not callable.
KeyError
If ``event`` is not one of the known events.
"""
if not callable(function):
raise TypeError('Need a callable, got %r' % function)
callback_proto = available_events.get(event)
if function not in self.callbacks[event]:
self.callbacks[event].append(callback_proto.adapt(function))
def unregister(self, event, function):
"""Remove a callback from the given event."""
if function in self.callbacks[event]:
return self.callbacks[event].remove(function)
# Remove callback in case ``function`` was adapted by `backcall`.
for callback in self.callbacks[event]:
try:
if callback.__wrapped__ is function:
return self.callbacks[event].remove(callback)
except AttributeError:
pass
raise ValueError('Function {!r} is not registered as a {} callback'.format(function, event))
def trigger(self, event, *args, **kwargs):
"""Call callbacks for ``event``.
Any additional arguments are passed to all callbacks registered for this
event. Exceptions raised by callbacks are caught, and a message printed.
"""
for func in self.callbacks[event][:]:
try:
func(*args, **kwargs)
except (Exception, KeyboardInterrupt):
print("Error in callback {} (for {}):".format(func, event))
self.shell.showtraceback()
# event_name -> prototype mapping
available_events = {}
def _define_event(callback_function):
callback_proto = callback_prototype(callback_function)
available_events[callback_function.__name__] = callback_proto
return callback_proto
# ------------------------------------------------------------------------------
# Callback prototypes
#
# No-op functions which describe the names of available events and the
# signatures of callbacks for those events.
# ------------------------------------------------------------------------------
@_define_event
def pre_execute():
"""Fires before code is executed in response to user/frontend action.
This includes comm and widget messages and silent execution, as well as user
code cells.
"""
pass
@_define_event
def pre_run_cell(info):
"""Fires before user-entered code runs.
Parameters
----------
info : :class:`~IPython.core.interactiveshell.ExecutionInfo`
An object containing information used for the code execution.
"""
pass
@_define_event
def post_execute():
"""Fires after code is executed in response to user/frontend action.
This includes comm and widget messages and silent execution, as well as user
code cells.
"""
pass
@_define_event
def post_run_cell(result):
"""Fires after user-entered code runs.
Parameters
----------
result : :class:`~IPython.core.interactiveshell.ExecutionResult`
The object which will be returned as the execution result.
"""
pass
@_define_event
def shell_initialized(ip):
"""Fires after initialisation of :class:`~IPython.core.interactiveshell.InteractiveShell`.
This is before extensions and startup scripts are loaded, so it can only be
set by subclassing.
Parameters
----------
ip : :class:`~IPython.core.interactiveshell.InteractiveShell`
The newly initialised shell.
"""
pass
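# --- Illustrative usage sketch (not part of the original module) ---
# An extension would normally register callbacks through the ``events``
# attribute of the running InteractiveShell.  ``get_ipython()`` only exists
# inside an IPython session, so the example is wrapped in a function instead
# of being executed at import time.
def _example_register_callback():
    def remind(info):
        # ``info`` follows the pre_run_cell prototype defined above.
        print('About to run a cell of %d characters' % len(info.raw_cell))
    ip = get_ipython()  # noqa: F821 -- provided by the IPython session
    ip.events.register('pre_run_cell', remind)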
|
{
"content_hash": "302864e2fac46f03ac11a893ed5bec96",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 100,
"avg_line_length": 33.22360248447205,
"alnum_prop": 0.6318938119274631,
"repo_name": "ipython/ipython",
"id": "73fc181ae5ab0f300b9860b6538beddd0018e6f2",
"size": "5349",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "IPython/core/events.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "425"
},
{
"name": "Jupyter Notebook",
"bytes": "958133"
},
{
"name": "Makefile",
"bytes": "4675"
},
{
"name": "Python",
"bytes": "2318171"
},
{
"name": "Shell",
"bytes": "12155"
}
],
"symlink_target": ""
}
|
""" Code for simulating proof and tally servers for split-vote election.
"""
# MIT open-source license.
# (See https://github.com/ron-rivest/split-value-voting.git)
import time
import sv
class SBB:
""" Implement secure bulletin board.
Messages are always lists: [ "msg_type", ... ]
Convention is that a msg_type starting with "(" is private,
    and not intended to be part of the "public" SBB. But in this
    code we also use the SBB as a form of event log, so values
    might be posted here that would not be posted in a real election.
"""
def __init__(self, election_id):
""" Initialize (simulated) secure bulletin board.
"""
self.board = [] # list of posted messages
self.closed = False
self.start_time = time.time()
self.post("sbb:open", {"election_id": election_id})
def close(self):
""" Close the SBB. No more posting is allowed. """
self.post("sbb:close")
self.closed = True
def post(self, msg_header, msg_dict=None, time_stamp=True):
""" Append a message to the sbb.
        Here msg_header is a string, used as a header, and
msg_dict is a dict with fields for that message.
Add digital signature here as an option.
(sign all previous contents of sbb.)
"""
assert not self.closed
assert isinstance(msg_header, str)
if not msg_dict:
msg_dict = dict()
assert isinstance(msg_dict, dict)
assert "time" not in msg_dict
assert "time_str" not in msg_dict
if time_stamp:
# msg_dict['time_seconds'] = time.time()
msg_dict['time'] = time.strftime("%Y-%m-%dT%H:%M:%S%z")
if msg_dict:
msg = [msg_header, msg_dict]
else:
msg = [msg_header]
self.board.append(msg)
def print_sbb(self, public=True, sbb_filename=None):
""" Print out contents of sbb to file with name sbb_filename.
if public is True, then only print out public portion of sbb
"""
if sbb_filename is None:
print("Contents of secure bulletin board:")
else:
print("Saving contents of secure bulletin board...")
# if not public and sbb_file is sys.stdout:
# print("(lines w/ header in parens are not part of public SBB).")
board = self.board
# following not needed in current code:
if False:
if public:
board = [item for item in board if item[0][0] != "("]
sv.dump(board, sbb_filename)
if sbb_filename is not None:
print("Secure bulletin board saved on file:", sbb_filename)
def hash_sbb(self, public):
""" Return a (tweaked) hash of the sbb contents. """
board = self.board
        # next is commented out since we have no non-public posting
# in the current code.
if False:
if public:
board = [item for item in board if item[0][0] != "("]
board_str = sv.dumps(board)
hash_tweak = "hash_sbb"
return sv.secure_hash(board_str, hash_tweak, iterate=True)
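# --- Illustrative usage sketch (not part of the original module) ---
# A minimal round trip with the simulated bulletin board; the election id and
# message header below are made up for illustration.
def _example_sbb_round_trip():
    sbb = SBB('election-demo')
    sbb.post('proof:commitment', {'value': 'deadbeef'})
    sbb.close()
    # Each entry is [msg_header] or [msg_header, msg_dict], per post().
    return sbb.board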
|
{
"content_hash": "7ab6d66c7b20cef4c48382cbe6c4ae29",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 78,
"avg_line_length": 30.64423076923077,
"alnum_prop": 0.580483213053028,
"repo_name": "msoltysik/split-value-voting",
"id": "2f001e5293bc5a3837d018c8ba959269f104e5b5",
"size": "3242",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sv_sbb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "107540"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os
import sys
import shutil
import argparse
import subprocess
from distutils.dir_util import copy_tree
def main(argv):
arg_parser = argparse.ArgumentParser(
description='Update the Python Project Template using diff3.')
arg_parser.add_argument(
'generation_path',
help='Directory in which to copy the template and run generation')
arg_parser.add_argument(
'--metadata-path',
help='Metadata file to use in project generation')
arg_parser.add_argument(
'--revision',
help='Revision of PPT to checkout')
args = arg_parser.parse_args(args=argv[1:])
# Get the project root directory.
project_root = os.path.dirname(os.path.dirname(
os.path.realpath(__file__)))
# Copy files to the generation directory.
temp_dir = args.generation_path
print('Copying files to', temp_dir)
# shutil.copytree requires that the destination directory not exist. Since
# we are dealing with mostly temporary directories, this creates race
# conditions _and_ is simply annoying. distutils.dir_util.copy_tree works
# nicely for this.
copy_tree(project_root, temp_dir)
# Get the metadata source file abspath if that was requested.
if args.metadata_path:
source_metadata_path = os.path.abspath(args.metadata_path)
# Switch to the newly-created generation directory.
old_cwd = os.getcwd()
print('Switching to', temp_dir)
os.chdir(temp_dir)
# Checkout the old revision if that was requested.
if args.revision:
print('Checking out revision', args.revision)
subprocess.check_call(['git', 'checkout', args.revision])
# Copy the metadata file if that was requested. Must happen after the git
# checkout.
if args.metadata_path:
dest_metadata_path = os.path.join('my_module', 'metadata.py')
shutil.copyfile(source_metadata_path, dest_metadata_path)
# Run generation. Instead of importing we run directly with python. Too
# many things could go wrong with importing.
subprocess.check_call(['python', os.path.join('internal', 'generate.py')])
# Don't run tox for now, because there are reasons to generate the project
# and not run tox immediately, such as testing sdist, testing Paver tasks,
# etc. Just suggest running `tox' or `detox' manually afterward in the
# shell script.
# Run tox.
# import tox
# tox will raise SystemExit() and try to exit. We don't want that.
# try:
# tox.cmdline(argv[2:])
# except SystemExit:
# pass
# Print out the directory name for the shell script.
print(temp_dir)
os.chdir(old_cwd)
if __name__ == '__main__':
main(sys.argv)
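# --- Illustrative invocation (not part of the original script) ---
# Typical use from the repository root; the paths below are hypothetical:
#   python internal/test.py /tmp/ppt-gen --metadata-path my_metadata.py --revision HEAD~1
# This copies the template into /tmp/ppt-gen, checks out the requested
# revision there, swaps in the metadata file, and runs internal/generate.py.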
|
{
"content_hash": "7e5b70608c77103b4406d841e0d3eb24",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 78,
"avg_line_length": 34.135802469135804,
"alnum_prop": 0.6759493670886076,
"repo_name": "shekkbuilder/python-project-template",
"id": "995671039aa725f0d9f563c8ad47543b16d83b90",
"size": "2961",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "internal/test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14461"
},
{
"name": "Shell",
"bytes": "567"
},
{
"name": "Smarty",
"bytes": "17824"
}
],
"symlink_target": ""
}
|
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
from pocketlint.formatcheck import XMLChecker
from pocketlint.tests import CheckerTestCase
from pocketlint.tests.test_text import TestAnyTextMixin
good_markup = """\
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<root>
<child>hello world</child>
</root>
"""
missing_dtd_and_xml = """\
<root>
<child>hello world</child>
</root>
"""
html5_dtd_and_entity = """\
<!DOCTYPE html>
<html>
<title>hello world</title>
</html>
"""
ill_formed_markup = """\
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<root>
<child>hello world<
</root>
"""
utf8_xml_markup = """\
\xa0<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<root>
<child>hello world</child>
</root>
"""
utf8_html_markup = """\
\xa0<!DOCTYPE html PUBLIC
"-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
</head>
<body>
<p>hello world</p>
</body>
</html>
"""
zpt_without_namespace = """\
<metal:root>
<p tal:condition="has_hello">hello world</p>
</metal:root>
"""
class TestXML(CheckerTestCase):
"""Verify XML integration."""
def test_good_markup(self):
checker = XMLChecker('bogus', good_markup, self.reporter)
checker.check()
self.assertEqual([], self.reporter.messages)
def test_missing_dtd_and_xml(self):
checker = XMLChecker('bogus', missing_dtd_and_xml, self.reporter)
checker.check()
self.assertEqual([], self.reporter.messages)
def test_html5_dtd(self):
checker = XMLChecker('bogus', html5_dtd_and_entity, self.reporter)
checker.check()
self.assertEqual([], self.reporter.messages)
def test_zpt_without_namespace(self):
checker = XMLChecker('bogus.pt', zpt_without_namespace, self.reporter)
checker.check()
self.assertEqual([], self.reporter.messages)
def test_ill_formed_markup(self):
checker = XMLChecker('bogus', ill_formed_markup, self.reporter)
checker.check()
self.assertEqual(
[(3, 'not well-formed (invalid token)')], self.reporter.messages)
def test_utf8_xml_markup(self):
checker = XMLChecker('bogus', utf8_xml_markup, self.reporter)
checker.check()
self.assertEqual([], self.reporter.messages)
def test_utf8_html_markup(self):
checker = XMLChecker('bogus', utf8_html_markup, self.reporter)
checker.check()
self.assertEqual([], self.reporter.messages)
class TestText(CheckerTestCase, TestAnyTextMixin):
"""Verify text integration."""
def create_and_check(self, file_name, text, options=None):
"""Used by the TestAnyTextMixin tests."""
checker = XMLChecker(file_name, text, self.reporter, options)
checker.check_text()
def test_long_length(self):
pass
def test_with_tabs(self):
pass
|
{
"content_hash": "0477047f7ed081237db1d5135ebe6225",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 78,
"avg_line_length": 25.84873949579832,
"alnum_prop": 0.6446684005201561,
"repo_name": "chevah/pocket-lint",
"id": "cf48aa28dc140abcbc3d5f95923e07319655e159",
"size": "3219",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "pocketlint/tests/test_xml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "242144"
},
{
"name": "Python",
"bytes": "175755"
}
],
"symlink_target": ""
}
|
"""ADB protocol implementation.
Implements the ADB protocol as seen in android's adb/adbd binaries, but only the
host side.
"""
import collections
import stat
import struct
import time
from . import adb_protocol
from . import usb_exceptions
# Default mode for pushed files.
DEFAULT_PUSH_MODE = stat.S_IFREG | stat.S_IRWXU | stat.S_IRWXG
# Maximum size of a filesync DATA packet.
MAX_PUSH_DATA = 2*1024
class InvalidChecksumError(Exception):
"""Checksum of data didn't match expected checksum."""
class InterleavedDataError(Exception):
"""We only support command sent serially."""
class PushFailedError(Exception):
"""Pushing a file failed for some reason."""
DeviceFile = collections.namedtuple('DeviceFile', [
'filename', 'mode', 'size', 'mtime'])
class FilesyncProtocol(object):
"""Implements the FileSync protocol as described in sync.txt."""
@staticmethod
def Stat(connection, filename):
cnxn = FileSyncConnection(connection, '<4I')
cnxn.Send('STAT', filename)
command, (mode, size, mtime) = cnxn.Read(('STAT',), read_data=False)
if command != 'STAT':
raise adb_protocol.InvalidResponseError(
'Expected STAT response to STAT, got %s' % command)
return mode, size, mtime
@classmethod
def List(cls, connection, path):
cnxn = FileSyncConnection(connection, '<5I')
cnxn.Send('LIST', path)
files = []
for cmd_id, header, filename in cnxn.ReadUntil(('DENT',), 'DONE'):
if cmd_id == 'DONE':
break
mode, size, mtime = header
files.append(DeviceFile(filename, mode, size, mtime))
return files
@classmethod
def Pull(cls, connection, filename, dest_file):
"""Pull a file from the device into the file-like dest_file."""
cnxn = FileSyncConnection(connection, '<2I')
cnxn.Send('RECV', filename)
for cmd_id, _, data in cnxn.ReadUntil(('DATA',), 'DONE'):
if cmd_id == 'DONE':
break
dest_file.write(data)
@classmethod
def Push(cls, connection, datafile, filename,
st_mode=DEFAULT_PUSH_MODE, mtime=0):
"""Push a file-like object to the device.
Args:
connection: ADB connection
datafile: File-like object for reading from
filename: Filename to push to
st_mode: stat mode for filename
mtime: modification time
Raises:
PushFailedError: Raised on push failure.
"""
fileinfo = '%s,%s' % (filename, st_mode)
cnxn = FileSyncConnection(connection, '<2I')
cnxn.Send('SEND', fileinfo)
while True:
data = datafile.read(MAX_PUSH_DATA)
if not data:
break
cnxn.Send('DATA', data)
if mtime == 0:
mtime = int(time.time())
# DONE doesn't send data, but it hides the last bit of data in the size
# field.
cnxn.Send('DONE', size=mtime)
for cmd_id, _, data in cnxn.ReadUntil((), 'OKAY', 'FAIL'):
if cmd_id == 'OKAY':
return
raise PushFailedError(data)
class FileSyncConnection(object):
"""Encapsulate a FileSync service connection."""
ids = [
'STAT', 'LIST', 'SEND', 'RECV', 'DENT', 'DONE', 'DATA', 'OKAY',
'FAIL', 'QUIT',
]
id_to_wire, wire_to_id = adb_protocol.MakeWireIDs(ids)
def __init__(self, adb_connection, recv_header_format):
self.adb = adb_connection
# Sending
self.send_buffer = ''
self.send_header_len = struct.calcsize('<2I')
# Receiving
self.recv_buffer = ''
self.recv_header_format = recv_header_format
self.recv_header_len = struct.calcsize(recv_header_format)
def Send(self, command_id, data='', size=0):
"""Send/buffer FileSync packets.
Packets are buffered and only flushed when this connection is read from. All
messages have a response from the device, so this will always get flushed.
Args:
command_id: Command to send.
data: Optional data to send, must set data or size.
size: Optionally override size from len(data).
"""
if data:
size = len(data)
if not self._CanAddToSendBuffer(len(data)):
self._Flush()
header = struct.pack('<2I', self.id_to_wire[command_id], size)
self.send_buffer += header + data
def Read(self, expected_ids, read_data=True):
"""Read ADB messages and return FileSync packets."""
if self.send_buffer:
self._Flush()
# Read one filesync packet off the recv buffer.
header_data = self._ReadBuffered(self.recv_header_len)
header = struct.unpack(self.recv_header_format, header_data)
# Header is (ID, ...).
command_id = self.wire_to_id[header[0]]
if command_id not in expected_ids:
if command_id == 'FAIL':
raise usb_exceptions.AdbCommandFailureException('Command failed.')
raise adb_protocol.InvalidResponseError(
'Expected one of %s, got %s' % (expected_ids, command_id))
if not read_data:
return command_id, header[1:]
# Header is (ID, ..., size).
size = header[-1]
data = self._ReadBuffered(size)
return command_id, header[1:-1], data
def ReadUntil(self, expected_ids, *finish_ids):
"""Useful wrapper around Read."""
while True:
cmd_id, header, data = self.Read(expected_ids + finish_ids)
yield cmd_id, header, data
if cmd_id in finish_ids:
break
def _CanAddToSendBuffer(self, data_len):
added_len = self.send_header_len + data_len
return len(self.send_buffer) + added_len < adb_protocol.MAX_ADB_DATA
def _Flush(self):
self.adb.Write(self.send_buffer)
self.send_buffer = ''
def _ReadBuffered(self, size):
# Ensure recv buffer has enough data.
while len(self.recv_buffer) < size:
_, data = self.adb.ReadUntil('WRTE')
self.recv_buffer += data
result = self.recv_buffer[:size]
self.recv_buffer = self.recv_buffer[size:]
return result
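# --- Illustrative usage sketch (not part of the original module) ---
# FilesyncProtocol operates on an already-open ADB "sync:" connection; the
# ``adb_connection`` argument below is hypothetical and only needs the
# Write/ReadUntil methods that FileSyncConnection calls.
def _example_push_and_list(adb_connection):
    import io
    # Push a small in-memory file to the device...
    FilesyncProtocol.Push(adb_connection, io.BytesIO(b'hello world\n'),
                          '/data/local/tmp/hello.txt')
    # ...then stat it and list its directory.
    mode, size, mtime = FilesyncProtocol.Stat(adb_connection,
                                              '/data/local/tmp/hello.txt')
    return [(f.filename, f.size) for f in
            FilesyncProtocol.List(adb_connection, '/data/local/tmp')]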
|
{
"content_hash": "5dcb4ffc581bdb6ae161a5639904c6d0",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 80,
"avg_line_length": 28.82587064676617,
"alnum_prop": 0.6487745944080083,
"repo_name": "chenm001/connectal",
"id": "8f2f658fb9dea8fb2fb4ed70eb3d7e8ed4ace8ed",
"size": "6390",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/adb/filesync_protocol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "91"
},
{
"name": "Bluespec",
"bytes": "2804224"
},
{
"name": "C",
"bytes": "573090"
},
{
"name": "C++",
"bytes": "540166"
},
{
"name": "Cuda",
"bytes": "5931"
},
{
"name": "Dockerfile",
"bytes": "174"
},
{
"name": "MATLAB",
"bytes": "478"
},
{
"name": "Makefile",
"bytes": "77079"
},
{
"name": "Python",
"bytes": "409739"
},
{
"name": "Shell",
"bytes": "42490"
},
{
"name": "SystemVerilog",
"bytes": "52674"
},
{
"name": "Tcl",
"bytes": "160477"
},
{
"name": "Verilog",
"bytes": "483043"
},
{
"name": "sed",
"bytes": "55"
}
],
"symlink_target": ""
}
|
"""Convenience functions for dealing with instance templates."""
from googlecloudsdk.api_lib.compute import alias_ip_range_utils
from googlecloudsdk.api_lib.compute import constants
from googlecloudsdk.api_lib.compute import image_utils
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.networks.subnets import flags as subnet_flags
EPHEMERAL_ADDRESS = object()
# TODO(user): Add unit tests for utilities
def CreateNetworkInterfaceMessage(
resources, scope_lister, messages, network, region, subnet, address,
alias_ip_ranges_string=None):
"""Creates and returns a new NetworkInterface message.
Args:
resources: generates resource references,
scope_lister: function, provides scopes for prompting subnet region,
messages: GCE API messages,
network: network,
region: region for subnetwork,
subnet: regional subnetwork,
address: specify static address for instance template
* None - no address,
* EPHEMERAL_ADDRESS - ephemeral address,
* string - address name to be fetched from GCE API.
alias_ip_ranges_string: command line string specifying a list of alias
IP ranges.
Returns:
network_interface: a NetworkInterface message object
"""
# By default interface is attached to default network. If network or subnet
# are specified they're used instead.
network_interface = messages.NetworkInterface()
if subnet is not None:
subnet_ref = subnet_flags.SubnetworkResolver().ResolveResources(
[subnet], compute_scope.ScopeEnum.REGION, region, resources,
scope_lister=scope_lister)[0]
network_interface.subnetwork = subnet_ref.SelfLink()
if network is not None:
network_ref = resources.Parse(network, collection='compute.networks')
network_interface.network = network_ref.SelfLink()
elif subnet is None:
network_ref = resources.Parse(
constants.DEFAULT_NETWORK, collection='compute.networks')
network_interface.network = network_ref.SelfLink()
if address:
access_config = messages.AccessConfig(
name=constants.DEFAULT_ACCESS_CONFIG_NAME,
type=messages.AccessConfig.TypeValueValuesEnum.ONE_TO_ONE_NAT)
# If the user provided an external IP, populate the access
# config with it.
if address != EPHEMERAL_ADDRESS:
access_config.natIP = address
network_interface.accessConfigs = [access_config]
if alias_ip_ranges_string:
network_interface.aliasIpRanges = (
alias_ip_range_utils.CreateAliasIpRangeMessagesFromString(
messages, False, alias_ip_ranges_string))
return network_interface
def CreateNetworkInterfaceMessages(
resources, scope_lister, messages, network_interface_arg, region):
"""Create network interface messages.
Args:
resources: generates resource references,
scope_lister: function, provides scopes for prompting subnet region,
    messages: GCE API messages,
network_interface_arg: CLI argument specifying network interfaces.
region: region of the subnetwork.
Returns:
list, items are NetworkInterfaceMessages.
"""
result = []
if network_interface_arg:
for interface in network_interface_arg:
address = interface.get('address', None)
# pylint: disable=g-explicit-bool-comparison
if address == '':
address = EPHEMERAL_ADDRESS
result.append(CreateNetworkInterfaceMessage(
resources, scope_lister, messages, interface.get('network', None),
region,
interface.get('subnet', None),
address,
interface.get('aliases', None)))
return result
def CreatePersistentAttachedDiskMessages(messages, disks):
"""Returns a list of AttachedDisk messages and the boot disk's reference.
Args:
messages: GCE API messages,
disks: disk objects - contains following properties
* name - the name of disk,
* mode - 'rw' (R/W), 'ro' (R/O) access mode,
* boot - whether it is a boot disk,
      * auto-delete - whether the disk is deleted when the VM is deleted,
* device-name - device name on VM.
Returns:
list of API messages for attached disks
"""
disks_messages = []
for disk in disks:
name = disk['name']
# Resolves the mode.
mode_value = disk.get('mode', 'rw')
if mode_value == 'rw':
mode = messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE
else:
mode = messages.AttachedDisk.ModeValueValuesEnum.READ_ONLY
boot = disk.get('boot') == 'yes'
auto_delete = disk.get('auto-delete') == 'yes'
attached_disk = messages.AttachedDisk(
autoDelete=auto_delete,
boot=boot,
deviceName=disk.get('device-name'),
mode=mode,
source=name,
type=messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT)
# The boot disk must end up at index 0.
if boot:
disks_messages = [attached_disk] + disks_messages
else:
disks_messages.append(attached_disk)
return disks_messages
def CreatePersistentCreateDiskMessages(scope_prompter, messages, create_disks):
"""Returns a list of AttachedDisk messages.
Args:
scope_prompter: Scope prompter object,
messages: GCE API messages,
create_disks: disk objects - contains following properties
* name - the name of disk,
* mode - 'rw' (R/W), 'ro' (R/O) access mode,
* disk-size - the size of the disk,
* disk-type - the type of the disk (HDD or SSD),
* image - the name of the image to initialize from,
* image-family - the image family name,
* image-project - the project name that has the image,
      * auto-delete - whether the disk is deleted when the VM is deleted,
* device-name - device name on VM.
Returns:
list of API messages for attached disks
"""
disks_messages = []
for disk in create_disks or []:
name = disk.get('name')
# Resolves the mode.
mode_value = disk.get('mode', 'rw')
if mode_value == 'rw':
mode = messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE
else:
mode = messages.AttachedDisk.ModeValueValuesEnum.READ_ONLY
auto_delete = disk.get('auto-delete') == 'yes'
disk_size_gb = utils.BytesToGb(disk.get('size'))
image_expander = image_utils.ImageExpander(scope_prompter.compute_client,
scope_prompter.resources)
image_uri, _ = image_expander.ExpandImageFlag(
user_project=scope_prompter.project,
image=disk.get('image'),
image_family=disk.get('image-family'),
image_project=disk.get('image-project'),
return_image_resource=False)
create_disk = messages.AttachedDisk(
autoDelete=auto_delete,
boot=False,
deviceName=disk.get('device-name'),
initializeParams=messages.AttachedDiskInitializeParams(
diskName=name,
sourceImage=image_uri,
diskSizeGb=disk_size_gb,
diskType=disk.get('type')),
mode=mode,
type=messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT)
disks_messages.append(create_disk)
return disks_messages
def CreateDefaultBootAttachedDiskMessage(
messages, disk_type, disk_device_name, disk_auto_delete, disk_size_gb,
image_uri):
"""Returns an AttachedDisk message for creating a new boot disk."""
return messages.AttachedDisk(
autoDelete=disk_auto_delete,
boot=True,
deviceName=disk_device_name,
initializeParams=messages.AttachedDiskInitializeParams(
sourceImage=image_uri,
diskSizeGb=disk_size_gb,
diskType=disk_type),
mode=messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE,
type=messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT)
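# --- Illustrative usage sketch (not part of the original module) ---
# The disk dictionaries consumed above mirror parsed --disk flags; the values
# here are made up, and ``messages`` would be the GCE API messages module
# obtained from the compute client.
def _example_attached_disks(messages):
    disks = [
        {'name': 'boot-disk', 'mode': 'rw', 'boot': 'yes',
         'auto-delete': 'yes', 'device-name': 'persistent-disk-0'},
        {'name': 'data-disk', 'mode': 'ro', 'boot': 'no',
         'auto-delete': 'no', 'device-name': 'persistent-disk-1'},
    ]
    # CreatePersistentAttachedDiskMessages places the boot disk at index 0.
    return CreatePersistentAttachedDiskMessages(messages, disks)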
|
{
"content_hash": "0eeace963eae55aa61d855699b2bda63",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 85,
"avg_line_length": 36.26146788990825,
"alnum_prop": 0.6805819101834282,
"repo_name": "KaranToor/MA450",
"id": "1c67955e89507d6bb2be11e0c21e3b6b90a40bce",
"size": "8500",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/instance_template_utils.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
}
|
from rest_framework.permissions import IsAuthenticated
class IsAuthenticatedOwner(IsAuthenticated):
"""
    Allows access only to authenticated users who are also owners of the object.
"""
def has_object_permission(self, request, view, obj):
"""Return True if permission is granted to the user."""
return obj.user == request.user
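# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical DRF view restricting object-level access to the owner
# (``Note``/``NoteSerializer`` are made-up names for illustration):
#
# class NoteViewSet(viewsets.ModelViewSet):
#     queryset = Note.objects.all()
#     serializer_class = NoteSerializer
#     permission_classes = [IsAuthenticatedOwner]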
|
{
"content_hash": "1fb53087a046074408c16c551632baf7",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 76,
"avg_line_length": 32.45454545454545,
"alnum_prop": 0.7086834733893558,
"repo_name": "maru/fiubar",
"id": "139b30ca74d0e23898a8bda9fb66564812877b7c",
"size": "357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fiubar/api/permissions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15115"
},
{
"name": "Dockerfile",
"bytes": "85"
},
{
"name": "HTML",
"bytes": "68752"
},
{
"name": "JavaScript",
"bytes": "20080"
},
{
"name": "Python",
"bytes": "233798"
},
{
"name": "Shell",
"bytes": "286"
}
],
"symlink_target": ""
}
|
import pytest
from concepts import matrices
@pytest.fixture(scope='module')
def relation():
xname = 'Condition'
yname = 'Symbol'
xmembers = 'TT', 'TF', 'FT', 'FF'
ymembers = '->', '<-'
xbools = [(True, False, True, True), (True, True, False, True)]
return matrices.Relation(xname, yname, xmembers, ymembers, xbools)
def test_pair_with(relation):
vx, vy = relation
with pytest.raises(RuntimeError, match=r'attempt _pair_with'):
vx._pair_with(relation, 1, vy)
def test_prime_infimum(relation):
vx, vy = relation
assert vx.prime(0) == vy.BitSet.supremum
assert vy.prime(0) == vx.BitSet.supremum
|
{
"content_hash": "e7b170cd6ebfca329622ee857e95193c",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 70,
"avg_line_length": 25.076923076923077,
"alnum_prop": 0.6441717791411042,
"repo_name": "xflr6/concepts",
"id": "73f3361f00039a8248e479b1dbd1cb3af4ba2c9c",
"size": "652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_matrices.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "218127"
}
],
"symlink_target": ""
}
|
"""Add agent management extension model support
Revision ID: 511471cc46b
Revises: 363468ac592c
Create Date: 2013-02-18 05:09:32.523460
"""
# revision identifiers, used by Alembic.
revision = '511471cc46b'
down_revision = '363468ac592c'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2',
'quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2',
'quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2',
'quantum.plugins.nec.nec_plugin.NECPluginV2',
'quantum.plugins.brocade.QuantumPlugin.BrocadePluginV2',
]
from alembic import op
import sqlalchemy as sa
from quantum.db import migration
def upgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
### commands auto generated by Alembic - please adjust! ###
op.create_table(
'agents',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('agent_type', sa.String(length=255), nullable=False),
sa.Column('binary', sa.String(length=255), nullable=False),
sa.Column('topic', sa.String(length=255), nullable=False),
sa.Column('host', sa.String(length=255), nullable=False),
sa.Column('admin_state_up', sa.Boolean(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('started_at', sa.DateTime(), nullable=False),
sa.Column('heartbeat_timestamp', sa.DateTime(), nullable=False),
sa.Column('description', sa.String(length=255), nullable=True),
sa.Column('configurations', sa.String(length=4095), nullable=False),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
### commands auto generated by Alembic - please adjust! ###
op.drop_table('agents')
### end Alembic commands ###
|
{
"content_hash": "815e0fbcb3aed78235bf06502eb35d93",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 76,
"avg_line_length": 35.152542372881356,
"alnum_prop": 0.6933461909353905,
"repo_name": "wallnerryan/quantum_migrate",
"id": "fc6898e47659d659682afa3e861ad75bbffd350d",
"size": "2735",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "quantum/db/migration/alembic_migrations/versions/511471cc46b_agent_ext_model_supp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from tastypie.resources import Resource, DeclarativeMetaclass, ResourceOptions
from tastypie import fields
from tastypie.paginator import Paginator
from tastypie.exceptions import InvalidFilterError, InvalidSortError
from haystack.query import SearchQuerySet, AutoQuery, SQ
from .utils import stemmer, uclean
from django.db import connection
from django.core.urlresolvers import reverse
class UCharField(fields.ApiField):
dehydrated_type = 'string'
help_text = 'Unicode string data. Ex: "Hello World"'
def convert(self, value):
if value is None:
return None
return uclean(value)
class IDPaginator(Paginator):
def get_slice(self, limit, offset):
if limit == 0:
return list(self.objects.order_by('id').filter(id__gte=offset))
return list(self.objects.order_by('id').filter(id__gte=offset)[:limit])
def get_count(self):
query = self.objects.query
query, params = query.sql_with_params()
query = 'EXPLAIN %s' % query
cursor = connection.cursor()
cursor.execute(query, params)
count = cursor.fetchone()[-2]
return count
def get_next(self, limit, offset):
return self._generate_uri(limit, offset)
def page(self):
limit = self.get_limit()
offset = self.get_offset()
count = self.get_count()
objects = self.get_slice(limit, offset)
next_offset = objects[-1].id
meta = {
'offset': offset,
'limit': limit,
'total_count': count,
}
if limit:
meta['next'] = self.get_next(limit, next_offset)
return {
self.collection_name: objects,
'meta': meta,
}
LOOKUP_SEP = '__'
class SearchOptions(ResourceOptions):
resource_name = 'search'
object_class = SearchQuerySet
object_query = SQ
detail_uri_name = 'django_id'
index = None
model = None
autoquery_fields = []
autocomplete_fields = []
stem_fields = []
max_limit = 100
class SearchDeclarativeMetaclass(DeclarativeMetaclass):
def __new__(cls, name, bases, attrs):
new_class = super(SearchDeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
opts = getattr(new_class, 'Meta', None)
new_class._meta = SearchOptions(opts)
include_fields = getattr(new_class._meta, 'fields', [])
excludes = getattr(new_class._meta, 'excludes', [])
excludes.append('text')
field_names = new_class.base_fields.keys()
if getattr(new_class._meta, 'index', None):
new_class._meta.model = new_class.Meta.index.get_model()
for field_name in field_names:
if field_name == 'resource_uri':
continue
if field_name in new_class.declared_fields:
continue
if len(include_fields) and not field_name in include_fields:
del(new_class.base_fields[field_name])
if len(excludes) and field_name in excludes:
del(new_class.base_fields[field_name])
new_class.base_fields.update(new_class.get_fields(include_fields, excludes))
if getattr(new_class._meta, 'include_absolute_url', True):
if not 'absolute_url' in new_class.base_fields:
new_class.base_fields['absolute_url'] = fields.CharField(
attribute='get_absolute_url', readonly=True)
elif 'absolute_url' in new_class.base_fields and not 'absolute_url' in attrs:
del(new_class.base_fields['absolute_url'])
return new_class
class BaseSearchResource(Resource):
__metaclass__ = SearchDeclarativeMetaclass
django_id = fields.IntegerField(attribute='django_id')
@classmethod
def api_field_from_haystack_field(cls, f, default=fields.CharField):
result = default
internal_type = f.__class__.__name__
if internal_type in ('DateTimeField', 'DateField'):
result = fields.DateTimeField
elif internal_type in ('BooleanField',):
result = fields.BooleanField
elif internal_type in ('FloatField',):
result = fields.FloatField
elif internal_type in ('DecimalField',):
result = fields.DecimalField
elif internal_type in ('IntegerField',):
result = fields.IntegerField
return result
@classmethod
def get_fields(cls, fields=None, excludes=None):
final_fields = {}
fields = fields or []
excludes = excludes or []
if not cls._meta.index:
return final_fields
for f, f_class in cls._meta.index.fields.items():
if f in cls.base_fields:
continue
if fields and f not in fields:
continue
if excludes and f in excludes:
continue
api_field_class = cls.api_field_from_haystack_field(f_class)
final_fields[f] = api_field_class(**{'attribute': f})
final_fields[f].instance_name = f
return final_fields
def detail_uri_kwargs(self, bundle_or_obj):
kwargs = {}
if isinstance(bundle_or_obj, SearchQuerySet):
kwargs[self._meta.detail_uri_name] = getattr(bundle_or_obj, self._meta.detail_uri_name)
else:
kwargs[self._meta.detail_uri_name] = getattr(bundle_or_obj.obj, self._meta.detail_uri_name)
return kwargs
def check_filtering(self, field_name, filter_type='contains'):
if not field_name in self._meta.filtering:
raise InvalidFilterError("The '%s' field does not allow filtering." % field_name)
if not filter_type in self._meta.filtering[field_name]:
raise InvalidFilterError("'%s' is not an allowed filter on the '%s' field." % (filter_type, field_name))
return True
def filter_value_to_python(self, filter_type, value):
if value in ['true', 'True', True]:
value = True
elif value in ['false', 'False', False]:
value = False
elif value in ('none', 'None', None):
value = None
if filter_type in ('in', 'range') and len(value):
value = value.replace('[', '').replace(']', '')
value = value.split(',')
return value
def build_filters(self, filters=None, stem_lang=''):
if filters is None:
filters = {}
applicable_filters = {}
for filter_expr, value in filters.items():
lookup_bits = filter_expr.split(LOOKUP_SEP)
field_name = lookup_bits.pop(0)
filter_type = lookup_bits.pop() if lookup_bits else 'contains'
filter_expr = LOOKUP_SEP.join([field_name, filter_type])
filter_value = self.filter_value_to_python(filter_type, value)
if field_name in self._meta.stem_fields and stem_lang:
filter_value = stemmer.stem(filter_value, stem_lang)
if field_name in self._meta.autoquery_fields:
filter_value = AutoQuery(filter_value)
if self.check_filtering(field_name, filter_type):
applicable_filters[filter_expr] = filter_value
return applicable_filters
def apply_filters(self, request, filters=None, join_op='and'):
SQ = self._meta.object_query
query = SQ()
if join_op == 'and':
for fltr, val in filters.items():
query = query & SQ(**{fltr: val})
if join_op == 'or':
for fltr, val in filters.items():
query = query | SQ(**{fltr: val})
if join_op == 'not':
for fltr, val in filters.items():
query = query & ~SQ(**{fltr: val})
result = self.get_object_list(request).filter(query)
return result
def apply_sort(self, obj_list, sort_expr):
field_name = sort_expr[1:] if sort_expr.startswith('-') else sort_expr
if not field_name in self.fields:
raise InvalidSortError("No matching '%s' field for ordering on." % field_name)
if not field_name in self._meta.ordering:
InvalidSortError("The '%s' field does not allow ordering." % field_name)
return obj_list.order_by(sort_expr)
def get_object_list(self, request):
return self._meta.object_class().models(self._meta.model)
def obj_get_list(self, request=None, **kwargs):
filters = {}
request = kwargs['bundle'].request
if hasattr(request, 'GET'):
filters = request.GET.copy()
and_filters = {}
or_filters = {}
not_filters = {}
sort_expr = filters.get('order_by')
if sort_expr: del filters['order_by']
del filters['format']
if 'offset' in filters.keys(): del filters['offset']
if 'limit' in filters.keys(): del filters['limit']
autocomp_filters = {}
for fltr, val in filters.items():
if fltr in self._meta.autocomplete_fields:
autocomp_filters[fltr] = val
del filters[fltr]
for fltr, val in filters.items():
if fltr[0] == '|':
or_filters[fltr[1:]] = val
elif fltr[0] == '~':
not_filters[fltr[1:]] = val
else:
and_filters[fltr] = val
del filters[fltr]
result = self.get_object_list(request)
if autocomp_filters:
for fltr, val in autocomp_filters.items():
result = result.autocomplete(**{fltr: val})
if and_filters:
stem_lang = and_filters.get('lang') or ''
applicable_filters = self.build_filters(and_filters, stem_lang)
result = self.apply_filters(request, applicable_filters)
if or_filters:
applicable_filters = self.build_filters(or_filters)
result = result | self.apply_filters(request, applicable_filters, 'or')
if not_filters:
applicable_filters = self.build_filters(not_filters)
result = result & self.apply_filters(request, applicable_filters, 'not')
if sort_expr:
result = self.apply_sort(result, sort_expr)
return result
def obj_get(self, request=None, **kwargs):
pk_fld = self._meta.detail_uri_name
pk = kwargs.get(pk_fld)
sqs = self.get_object_list(request)
if pk:
sqs = sqs.filter(**{pk_fld: pk})
if sqs:
return sqs[0]
else:
return sqs
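# Illustrative request flow (a sketch, not part of the original module; the
# field names below are placeholders for a concrete resource built on this
# base). A query string such as
#   ?text__contains=hello&|lang=eng&~author=guest&order_by=-created
# is split by obj_get_list() into three groups:
#   and_filters == {'text__contains': 'hello'}
#   or_filters  == {'lang': 'eng'}        # keys prefixed with '|'
#   not_filters == {'author': 'guest'}    # keys prefixed with '~'
# 'order_by' is popped and handled by apply_sort(); each group is turned into
# a haystack SQ tree by build_filters()/apply_filters() and combined with
# &, | or & ~ respectively.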
|
{
"content_hash": "2c0a0d681699f8bb935f3ec80a862324",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 116,
"avg_line_length": 32.63076923076923,
"alnum_prop": 0.5823668081093824,
"repo_name": "Tatoeba/tatoeba-api",
"id": "fac6d2519162e716fa15426f0c674019f05d3279",
"size": "10605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tatoeba2-django/tatoeba2/api_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48679"
},
{
"name": "Shell",
"bytes": "312"
}
],
"symlink_target": ""
}
|
"""
OpenStack Cinder driver - interface to Open vStorage
- uses Open vStorage library calls (VDiskController)
- uses Cinder logging
"""
import socket
import time
# External libs: Open vStorage
try:
from ovs.dal.hybrids import vdisk as vdiskhybrid
from ovs.dal.lists import pmachinelist
from ovs.dal.lists import vdisklist
from ovs.dal.lists import vpoollist
from ovs.lib import vdisk as vdisklib
except ImportError:
# CI Testing, all external libs are mocked
# or using the driver without all required libs
vdiskhybrid = None
pmachinelist = None
vdisklist = None
vpoollist = None
vdisklib = None
from oslo_config import cfg
import six
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.volume import driver
LOG = logging.getLogger(__name__)
HELP = 'Vpool to use for volumes - backend is defined by vpool not by us.'
OPTS = [cfg.StrOpt('vpool_name',
default = '',
help = HELP)]
CONF = cfg.CONF
CONF.register_opts(OPTS)
class OVSVolumeDriver(driver.VolumeDriver):
"""Open vStorage Volume Driver plugin for Cinder"""
VERSION = '1.0.5'
def __init__(self, *args, **kwargs):
"""Init: args, kwargs pass through;
Options come from CONF
"""
super(OVSVolumeDriver, self).__init__(*args, **kwargs)
LOG.debug('INIT %s %s %s ', CONF.vpool_name, str(args),
str(kwargs))
self.configuration.append_config_values(OPTS)
self._vpool_name = self.configuration.vpool_name
if vpoollist is not None:
self._vp = vpoollist.VPoolList.get_vpool_by_name(self._vpool_name)
else:
self._vp = None
# Volume operations
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info.
Volume is a .raw file on a virtual filesystem.
No specific action required, connection is allowed based
on POSIX permissions
"""
return {'driver_volume_type': 'local',
'data': {'vpoolname': self._vpool_name,
'device_path': volume.provider_location}}
def create_volume(self, volume):
"""Creates a volume.
Called on "cinder create ..." or "nova volume-create ..."
:param volume: volume reference (sqlalchemy Model)
"""
hostname = str(volume.host)
name = volume.display_name
if not name:
name = volume.name
mountpoint = self._get_hostname_mountpoint(hostname)
location = '{}/{}.raw'.format(mountpoint, name)
size = volume.size
LOG.debug('DO_CREATE_VOLUME %s %s', location, size)
vdisklib.VDiskController.create_volume(location = location,
size = size)
volume['provider_location'] = location
try:
ovs_disk = self._find_ovs_model_disk_by_location(location,
hostname)
except exception.VolumeBackendAPIException:
vdisklib.VDiskController.delete_volume(location = location)
raise
ovs_disk.cinder_id = volume.id
ovs_disk.name = name
ovs_disk.save()
return {'provider_location': volume['provider_location']}
def delete_volume(self, volume):
"""Deletes a logical volume.
Called on "cinder delete ... "
:param volume: volume reference (sqlalchemy Model)
"""
location = volume.provider_location
if location is not None:
LOG.debug('DO_DELETE_VOLUME %s', location)
vdisklib.VDiskController.delete_volume(location = location)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Copy image to volume
Called on "nova volume-create --image-id ..."
or "cinder create --image-id"
Downloads image from glance server into local .raw
:param volume: volume reference (sqlalchemy Model)
"""
LOG.debug("CP_IMG_TO_VOL %s %s", image_service, image_id)
name = volume.display_name
if not name:
name = volume.name
volume.display_name = volume.name
# Downloading from an existing image
destination_path = volume.provider_location
if destination_path:
LOG.debug('CP_IMG_TO_VOL Deleting existing empty raw file %s ',
destination_path)
vdisklib.VDiskController.delete_volume(location = destination_path)
LOG.debug('CP_IMG_TO_VOL Downloading image to %s',
destination_path)
image_utils.fetch_to_raw(context,
image_service,
image_id,
destination_path,
'1M',
size = volume['size'],
run_as_root = False)
LOG.debug('CP_IMG_TO_VOL Resizing volume to size %s',
volume['size'])
self.extend_volume(volume = volume, size_gb = volume['size'])
ovs_disk = self._find_ovs_model_disk_by_location(
volume.provider_location, str(volume.host))
ovs_disk.name = name
ovs_disk.save()
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image.
Called on "cinder upload-to-image ...volume... ...image-name..."
:param volume: volume reference (sqlalchemy Model)
"""
LOG.debug("CP_VOL_TO_IMG %s %s", image_service, image_meta)
super(OVSVolumeDriver, self).copy_volume_to_image(
context, volume, image_service, image_meta)
def create_cloned_volume(self, volume, src_vref):
"""Create a cloned volume from another volume.
Called on "cinder create --source-volid ... "
:param volume: volume reference - target volume (sqlalchemy Model)
:param src_vref: volume reference - source volume (sqlalchemy Model)
OVS: Create clone from template if the source is a template
Create volume from snapshot if the source is a volume
- create snapshot of source volume if it doesn't have snapshots
"""
mountpoint = self._get_hostname_mountpoint(str(volume.host))
name = volume.display_name
if not name:
name = volume.name
volume.display_name = volume.name
pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(
six.text_type(volume.host))
# Source
source_ovs_disk = self._find_ovs_model_disk_by_location(
str(src_vref.provider_location), src_vref.host)
if source_ovs_disk.info['object_type'] == 'TEMPLATE':
LOG.debug('[CREATE_FROM_TEMPLATE] VDisk %s is a template',
source_ovs_disk.devicename)
# Cloning from a template
LOG.debug('[CREATE FROM TEMPLATE] ovs_disk %s ',
source_ovs_disk.devicename)
disk_meta = vdisklib.VDiskController.create_from_template(
diskguid = source_ovs_disk.guid,
machinename = "",
devicename = str(name),
pmachineguid = pmachineguid,
machineguid = None,
storagedriver_guid = None)
volume['provider_location'] = '{}{}'.format(
mountpoint, disk_meta['backingdevice'])
LOG.debug('[CREATE FROM TEMPLATE] New volume %s',
volume['provider_location'])
vdisk = vdiskhybrid.VDisk(disk_meta['diskguid'])
vdisk.cinder_id = volume.id
vdisk.name = name
LOG.debug('[CREATE FROM TEMPLATE] Updating meta %s %s',
volume.id, name)
vdisk.save()
else:
LOG.debug('[THIN CLONE] VDisk %s is not a template',
source_ovs_disk.devicename)
# We do not support yet full volume clone
# - requires "emancipate" functionality
# So for now we'll take a snapshot
# (or the latest snapshot existing) and clone from that snapshot
if not len(source_ovs_disk.snapshots):
metadata = {'label': "Cinder clone snapshot {0}".format(name),
'is_consistent': False,
'timestamp': time.time(),
'machineguid': source_ovs_disk.vmachine_guid,
'is_automatic': False}
LOG.debug('CREATE_SNAP %s %s', name, str(metadata))
snapshotid = vdisklib.VDiskController.create_snapshot(
diskguid = source_ovs_disk.guid,
metadata = metadata,
snapshotid = None)
LOG.debug('CREATE_SNAP OK')
else:
snapshotid = source_ovs_disk.snapshots[-1]['guid']
LOG.debug('[CREATE CLONE FROM SNAP] %s ', snapshotid)
disk_meta = vdisklib.VDiskController.clone(
diskguid = source_ovs_disk.guid,
snapshotid = snapshotid,
devicename = str(name),
pmachineguid = pmachineguid,
machinename = "",
machineguid=None)
volume['provider_location'] = '{}{}'.format(
mountpoint, disk_meta['backingdevice'])
LOG.debug('[CLONE FROM SNAP] Meta: %s', str(disk_meta))
LOG.debug('[CLONE FROM SNAP] New volume %s',
volume['provider_location'])
vdisk = vdiskhybrid.VDisk(disk_meta['diskguid'])
vdisk.cinder_id = volume.id
vdisk.name = name
vdisk.save()
return {'provider_location': volume['provider_location'],
'display_name': volume['display_name']}
# Volumedriver stats
def get_volume_stats(self, refresh=False):
"""Get volumedriver stats
Refresh not implemented
"""
data = {}
data['volume_backend_name'] = self._vpool_name
data['vendor_name'] = 'Open vStorage'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'OVS'
data['total_capacity_gb'] = 'unknown'
data['free_capacity_gb'] = 'unknown'
data['reserved_percentage'] = 0
data['QoS_support'] = False
return data
# Snapshots operations
def create_snapshot(self, snapshot):
"""Creates a snapshot.
Called on "nova image-create " or "cinder snapshot-create "
:param snapshot: snapshot reference (sqlalchemy Model)
"""
volume = snapshot.volume
hostname = volume.host
location = volume.provider_location
ovs_disk = self._find_ovs_model_disk_by_location(location, hostname)
metadata = {'label': "{0} (OpenStack)".format(snapshot.display_name),
'is_consistent': False,
'timestamp': time.time(),
'machineguid': ovs_disk.vmachine_guid,
'is_automatic': False}
LOG.debug('CREATE_SNAP %s %s', snapshot.display_name,
str(metadata))
vdisklib.VDiskController.create_snapshot(diskguid = ovs_disk.guid,
metadata = metadata,
snapshotid =
str(snapshot.id))
LOG.debug('CREATE_SNAP OK')
def delete_snapshot(self, snapshot):
"""Deletes a snapshot.
:param snapshot: snapshot reference (sqlalchemy Model)
"""
volume = snapshot.volume
hostname = volume.host
location = volume.provider_location
ovs_disk = self._find_ovs_model_disk_by_location(location, hostname)
LOG.debug('DELETE_SNAP %s', snapshot.id)
vdisklib.VDiskController.delete_snapshot(diskguid = ovs_disk.guid,
snapshotid =
str(snapshot.id))
LOG.debug('DELETE_SNAP OK')
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
Called on "cinder create --snapshot-id ..."
:param snapshot: snapshot reference (sqlalchemy Model)
:param volume: volume reference (sqlalchemy Model)
Volume here is just a ModelObject, it doesn't exist physically,
it will be created by OVS.
Diskguid to be passed to the clone method is the ovs diskguid of the
parent of the snapshot with snapshot.id
OVS: Clone from arbitrary volume,
requires volumedriver 3.6 release > 15.08.2014
"""
mountpoint = self._get_hostname_mountpoint(str(volume.host))
ovs_snap_disk = self._find_ovs_model_disk_by_snapshot_id(snapshot.id)
devicename = volume.display_name
if not devicename:
devicename = volume.name
pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(
six.text_type(volume.host))
LOG.debug('[CLONE FROM SNAP] %s %s %s %s',
ovs_snap_disk.guid, snapshot.id, devicename,
pmachineguid)
disk_meta = vdisklib.VDiskController.clone(
diskguid = ovs_snap_disk.guid,
snapshotid = snapshot.id,
devicename = devicename,
pmachineguid = pmachineguid,
machinename = "",
machineguid=None)
volume['provider_location'] = '{}{}'.format(
mountpoint, disk_meta['backingdevice'])
LOG.debug('[CLONE FROM SNAP] Meta: %s', six.text_type(disk_meta))
LOG.debug('[CLONE FROM SNAP] New volume %s',
volume['provider_location'])
vdisk = vdiskhybrid.VDisk(disk_meta['diskguid'])
vdisk.cinder_id = volume.id
vdisk.name = devicename
vdisk.save()
return {'provider_location': volume['provider_location'],
'display_name': volume['display_name']}
# Attach/detach volume to instance/host
def attach_volume(self, context, volume, instance_uuid, host_name,
mountpoint):
"""Callback for volume attached to instance or host."""
pass
def detach_volume(self, context, volume):
"""Callback for volume detached."""
pass
# Extend
def extend_volume(self, volume, size_gb):
"""Extend volume to new size size_gb"""
LOG.debug('EXTEND_VOL Size %s', size_gb)
location = volume.provider_location
if location is not None:
LOG.debug('DO_EXTEND_VOLUME %s', location)
vdisklib.VDiskController.extend_volume(location = location,
size = size_gb)
# Prevent NotImplementedError being raised
# Not actually implemented, these actions do not make sense for this driver
def create_export(self, context, volume):
"""Exports the volume.
The volume is a .raw file on a virtual filesystem.
Nothing to export.
"""
pass
def remove_export(self, context, volume):
"""Removes an export for a volume.
The volume is a .raw file on a virtual filesystem.
Removed when delete is called.
"""
pass
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a volume.
The volume is a .raw file on a virtual filesystem.
Nothing to export.
"""
pass
def terminate_connection(self, volume, connector, force):
"""Disallow connection from connector
The volume is a .raw file on a virtual filesystem.
Connection is always allowed based on POSIX permissions.
"""
LOG.debug('TERM_CONN %s %s ', six.text_type(connector), force)
def check_for_setup_error(self):
"""Validate driver setup"""
if (vdiskhybrid is None or pmachinelist is None or vdisklist is None or
vpoollist is None or vdisklib is None):
msg = (_('Open vStorage libraries not found'))
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
def do_setup(self, context):
"""Any initialization the volume driver does while starting"""
pass
# Internal
def _get_real_hostname(self, hostname):
LOG.debug('[_GET REAL HOSTNAME] Hostname %s', hostname)
if not hostname:
return socket.gethostname()
if "#" in hostname:
hostname, backend_name = hostname.split('#')
if "@" in hostname:
hostname, driver = hostname.split('@')
return hostname
return hostname
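    # Illustrative parsing sketch (host strings below are made up): Cinder
    # reports hosts as 'host@backend#pool', so for example
    #   'node1'                        -> 'node1'
    #   'node1@openvstorage#vpool_ssd' -> 'node1'
    # and an empty hostname falls back to socket.gethostname().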
def _get_hostname_mountpoint(self, hostname):
"""Find OVS vsr mountpoint for self._vp and hostname
:return mountpoint: string, mountpoint
"""
hostname = self._get_real_hostname(hostname)
LOG.debug('[_GET HOSTNAME MOUNTPOINT] Hostname %s', hostname)
if self._vp is None:
msg = (_('Open vStorage libraries not found'))
raise exception.VolumeBackendAPIException(data=msg)
storagedrivers = [vsr for vsr in self._vp.storagedrivers
if str(vsr.storagerouter.name) ==
str(hostname)]
if len(storagedrivers) == 1:
LOG.debug('[_GET HOSTNAME MOUNTPOINT] Mountpoint %s',
storagedrivers[0].mountpoint)
return six.text_type(storagedrivers[0].mountpoint)
elif not storagedrivers:
            msg = (_('No vsr mountpoint found for Vpool %(vpool_name)s '
                     'and hostname %(hostname)s')
% {'vpool_name': self._vpool_name, 'hostname': hostname})
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _find_ovs_model_disk_by_location(self, location, hostname, retry=3,
timeout=3):
"""Find OVS disk object based on location and hostname
:return VDisk: OVS DAL model object
"""
hostname = self._get_real_hostname(hostname)
LOG.debug('[_FIND OVS DISK] Location %s, hostname %s',
location, hostname)
attempt = 0
while attempt <= retry:
for vd in vdisklist.VDiskList.get_vdisks():
if vd.vpool:
for vsr in vd.vpool.storagedrivers:
if vsr.storagerouter.name == hostname:
_location = "{0}/{1}".format(vsr.mountpoint,
vd.devicename)
if _location == location:
                                LOG.debug('Location %s Disk found %s',
                                          location, vd.guid)
disk = vdiskhybrid.VDisk(vd.guid)
return disk
msg = 'NO RESULT Attempt %s timeout %s max attempts %s'
LOG.debug(msg, attempt, timeout, retry)
if timeout:
time.sleep(timeout)
attempt += 1
msg = (_('No disk found for location %s') % location)
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _find_ovs_model_pmachine_guid_by_hostname(self, hostname):
"""Find OVS pmachine guid based on storagerouter name
:return guid: GUID
"""
hostname = self._get_real_hostname(hostname)
        LOG.debug('[_FIND OVS PMACHINE] Hostname %s', hostname)
mapping = [(pm.guid, six.text_type(sr.name))
for pm in pmachinelist.PMachineList.get_pmachines()
for sr in pm.storagerouters]
for item in mapping:
if item[1] == str(hostname):
msg = 'Found pmachineguid %s for Hostname %s'
LOG.debug(msg, item[0], hostname)
return item[0]
        msg = (_('No PMachine guid found for Hostname %s') % hostname)
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _find_ovs_model_disk_by_snapshot_id(self, snapshotid):
"""Find OVS disk object based on snapshot id
:return VDisk: OVS DAL model object
"""
        LOG.debug('[_FIND OVS DISK] Snapshotid %s', snapshotid)
for disk in vdisklist.VDiskList.get_vdisks():
snaps_guid = [s['guid'] for s in disk.snapshots]
if str(snapshotid) in snaps_guid:
                LOG.debug('[_FIND OVS DISK] Snapshot id %s Disk found %s',
                          snapshotid, disk)
return disk
        msg = (_('No disk found for snapshotid %s') % snapshotid)
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
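# Illustrative configuration sketch (not part of the original file; the
# backend section name and vpool value are placeholders for a real
# deployment): enabling this driver in cinder.conf would look roughly like
#
#   [DEFAULT]
#   enabled_backends = ovs
#
#   [ovs]
#   volume_driver = cinder.volume.drivers.openvstorage.OVSVolumeDriver
#   vpool_name = <your-vpool>
#   volume_backend_name = <your-vpool>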
|
{
"content_hash": "e2fa750638bf21e512859fce3839fbd5",
"timestamp": "",
"source": "github",
"line_count": 526,
"max_line_length": 79,
"avg_line_length": 40.42205323193917,
"alnum_prop": 0.5663625246919387,
"repo_name": "Akrog/cinder",
"id": "ee361c152ffba1a32b2380d1c7706565d5bf8998",
"size": "21843",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/openvstorage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "3555"
},
{
"name": "Gettext Catalog",
"bytes": "483634"
},
{
"name": "Python",
"bytes": "11055908"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
}
|
__author__ = 'baranbartu'
import logging
import threading
from memory_profiler import LineProfiler
from memgraph.profile import determine_memory_info
logger = logging.getLogger(__name__)
def observe(func=None, precision=1):
if func is not None:
def wrapper(*args, **kwargs):
prof = LineProfiler()
val = prof(func)(*args, **kwargs)
logger.info(
                'Please wait... Line graph will be ready in a few seconds.')
job = threading.Thread(target=determine_memory_info, args=(prof,),
kwargs={'precision': precision})
job.start()
return val
return wrapper
else:
def inner_wrapper(f):
return observe(f, precision=precision)
return inner_wrapper
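# Minimal usage sketch (for illustration only, not part of the original
# module; assumes memory_profiler and memgraph are installed, and allocate()
# is a hypothetical function defined just for this example):
if __name__ == '__main__':
    @observe(precision=2)
    def allocate():
        # build a list of small buffers so the line profiler has something to show
        data = [bytearray(1024) for _ in range(10000)]
        return len(data)

    allocate()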
|
{
"content_hash": "d78a3328ec3101e9972e9dcb1e1c022e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 28.928571428571427,
"alnum_prop": 0.5888888888888889,
"repo_name": "baranbartu/memgraph",
"id": "f624c31f709126e9a6d3c4d0da519e551ffd0589",
"size": "810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "memgraph/decorator.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "5514"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(
name = 'django_lookup_dict',
packages = ['django_lookup_dict'],
version = '0.1',
description = 'Django Lookup Dict is a django app that enables you use a django model the Python dict way',
author = 'Mohamed Hendawy',
author_email = 'hendawy@outlook.com',
url = 'https://github.com/hendawy/django_lookup_dict',
download_url = 'https://github.com/hendawy/django_lookup_dict/releases',
keywords = [
'Python',
'Django',
'Model',
'Dictionary',
'Django Model',
'Lookup',
'Model Lookup'
],
classifiers = [],
)
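# Packaging sketch (standard distutils commands, not part of the original
# file): a source distribution for upload can be built with
#   python setup.py sdist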
|
{
"content_hash": "4f37856129dd580d69f09d2663bc7b2e",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 108,
"avg_line_length": 27.142857142857142,
"alnum_prop": 0.6807017543859649,
"repo_name": "hendawy/django_lookup_dict",
"id": "3178e99d6cd1fbffd46b5d4853de33f71d66cb65",
"size": "570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4733"
}
],
"symlink_target": ""
}
|
import subprocess as sp
import os
class VideoWriter(object):
# Vid_start is passed in if the times the videos are taken at and the times generated
# from Tweet analytics have a constant offset from each other
def range_to_format(self, ranges, vidStart):
times = []
for i in xrange(len(ranges)):
vMin, vMax = ranges[i]
vMin = max(vMin - vidStart, 0)
vMax = (vMax - vidStart)
            h_min = vMin // 3600
            m_min = vMin % 3600 // 60
            s_min = vMin % 60
            vTime = vMax - vMin
            h_time = vTime // 3600
            m_time = vTime % 3600 // 60
            s_time = vTime % 60
min_str = "%02d:%02d:%02d" %(h_min, m_min, s_min)
time_str = "%02d:%02d:%02d" %(h_time, m_time, s_time)
times.append((min_str, time_str))
return times
def get_video_clips(self, video_file, output_dirname, s_ranges, vidStart):
formats = self.range_to_format(s_ranges, vidStart)
clip_names = [output_dirname + "/" + "clip_" + str(i) + ".mp4" for i in xrange(len(formats))]
for i in xrange(len(formats)):
print "Clipping Video %d" %i
self.clip_video(clip_names[i], video_file, formats[i][0], formats[i][1])
return clip_names
def clip_video(self, output_file_name, video_file, cut_start, cut_time):
print cut_start, cut_time
ffmpeg_bin = "ffmpeg"
pipe = sp.Popen([ffmpeg_bin, "-v", "quiet", "-y", "-i", video_file, "-vcodec", "copy", "-acodec", "copy", "-ss", cut_start, "-t", cut_time, "-sn", output_file_name])
pipe.wait()
return True
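# Minimal usage sketch (Python 2, matching the module above; the file name,
# output directory and time ranges are made-up placeholders, ffmpeg must be on
# PATH and the output directory is assumed to exist):
if __name__ == '__main__':
    writer = VideoWriter()
    # cut two clips: seconds 60-70 and 200-205 of the recording (vidStart=0,
    # i.e. no offset between tweet timestamps and the video timeline)
    clips = writer.get_video_clips('game.mp4', 'clips', [(60, 70), (200, 205)], 0)
    print clips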
|
{
"content_hash": "8707bda65b3cf024414c0f2bf8e8a159",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 173,
"avg_line_length": 31,
"alnum_prop": 0.5483870967741935,
"repo_name": "vrkrishn/FBHacks",
"id": "100473f8a3a8830f510a90dcade3e87f9afb0a49",
"size": "1674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Backend/VideoWriter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "19894"
},
{
"name": "Python",
"bytes": "40866"
},
{
"name": "Shell",
"bytes": "115"
}
],
"symlink_target": ""
}
|
import json
import pytest
from unittest import mock
from unittest.mock import ANY
from mlflow.entities.model_registry import ModelVersion
from mlflow.exceptions import MlflowException
from mlflow.store.artifact.databricks_models_artifact_repo import DatabricksModelsArtifactRepository
from mlflow import MlflowClient
DATABRICKS_MODEL_ARTIFACT_REPOSITORY_PACKAGE = (
"mlflow.store.artifact.databricks_models_artifact_repo"
)
DATABRICKS_MODEL_ARTIFACT_REPOSITORY = (
DATABRICKS_MODEL_ARTIFACT_REPOSITORY_PACKAGE + ".DatabricksModelsArtifactRepository"
)
MOCK_MODEL_ROOT_URI_WITH_PROFILE = "models://profile@databricks/MyModel/12"
MOCK_MODEL_ROOT_URI_WITHOUT_PROFILE = "models:/MyModel/12"
MOCK_PROFILE = "databricks://profile"
MOCK_MODEL_NAME = "MyModel"
MOCK_MODEL_VERSION = "12"
REGISTRY_LIST_ARTIFACTS_ENDPOINT = "/api/2.0/mlflow/model-versions/list-artifacts"
REGISTRY_ARTIFACT_PRESIGNED_URI_ENDPOINT = "/api/2.0/mlflow/model-versions/get-signed-download-uri"
@pytest.fixture()
def databricks_model_artifact_repo():
return DatabricksModelsArtifactRepository(MOCK_MODEL_ROOT_URI_WITH_PROFILE)
class TestDatabricksModelArtifactRepository:
def test_init_with_version_uri_containing_profile(self):
repo = DatabricksModelsArtifactRepository(MOCK_MODEL_ROOT_URI_WITH_PROFILE)
assert repo.artifact_uri == MOCK_MODEL_ROOT_URI_WITH_PROFILE
assert repo.model_name == MOCK_MODEL_NAME
assert repo.model_version == MOCK_MODEL_VERSION
@pytest.mark.parametrize(
"stage_uri_with_profile",
[
"models://profile@databricks/MyModel/Staging",
"models://profile@databricks/MyModel/Production",
],
)
def test_init_with_stage_uri_containing_profile(self, stage_uri_with_profile):
model_version_detailed = ModelVersion(
MOCK_MODEL_NAME,
MOCK_MODEL_VERSION,
"2345671890",
"234567890",
"some description",
"UserID",
"Production",
"source",
"run12345",
)
get_latest_versions_patch = mock.patch.object(
MlflowClient, "get_latest_versions", return_value=[model_version_detailed]
)
with get_latest_versions_patch:
repo = DatabricksModelsArtifactRepository(stage_uri_with_profile)
assert repo.artifact_uri == stage_uri_with_profile
assert repo.model_name == MOCK_MODEL_NAME
assert repo.model_version == MOCK_MODEL_VERSION
assert repo.databricks_profile_uri == MOCK_PROFILE
@pytest.mark.parametrize(
"invalid_artifact_uri",
[
"s3://test",
"dbfs:/databricks/mlflow/MV-id/models",
"dbfs://scope:key@notdatabricks/databricks/mlflow-regisry/123/models",
"models:/MyModel/12",
"models://scope:key@notdatabricks/MyModel/12",
],
)
def test_init_with_invalid_artifact_uris(self, invalid_artifact_uri):
with pytest.raises(
MlflowException,
match="A valid databricks profile is required to instantiate this repository",
):
DatabricksModelsArtifactRepository(invalid_artifact_uri)
def test_init_with_version_uri_and_profile_is_inferred(self):
# First mock for `is_using_databricks_registry` to pass
# Second mock to set `databricks_profile_uri` during instantiation
with mock.patch(
"mlflow.store.artifact.utils.models.mlflow.get_registry_uri",
return_value=MOCK_PROFILE,
), mock.patch("mlflow.tracking.get_registry_uri", return_value=MOCK_PROFILE):
repo = DatabricksModelsArtifactRepository(MOCK_MODEL_ROOT_URI_WITHOUT_PROFILE)
assert repo.artifact_uri == MOCK_MODEL_ROOT_URI_WITHOUT_PROFILE
assert repo.model_name == MOCK_MODEL_NAME
assert repo.model_version == MOCK_MODEL_VERSION
assert repo.databricks_profile_uri == MOCK_PROFILE
@pytest.mark.parametrize(
"stage_uri_without_profile",
["models:/MyModel/Staging", "models:/MyModel/Production"],
)
def test_init_with_stage_uri_and_profile_is_inferred(self, stage_uri_without_profile):
model_version_detailed = ModelVersion(
MOCK_MODEL_NAME,
MOCK_MODEL_VERSION,
"2345671890",
"234567890",
"some description",
"UserID",
"Production",
"source",
"run12345",
)
get_latest_versions_patch = mock.patch.object(
MlflowClient, "get_latest_versions", return_value=[model_version_detailed]
)
with get_latest_versions_patch, mock.patch(
"mlflow.store.artifact.utils.models.mlflow.get_registry_uri",
return_value=MOCK_PROFILE,
), mock.patch("mlflow.tracking.get_registry_uri", return_value=MOCK_PROFILE):
repo = DatabricksModelsArtifactRepository(stage_uri_without_profile)
assert repo.artifact_uri == stage_uri_without_profile
assert repo.model_name == MOCK_MODEL_NAME
assert repo.model_version == MOCK_MODEL_VERSION
assert repo.databricks_profile_uri == MOCK_PROFILE
@pytest.mark.parametrize(
"valid_profileless_artifact_uri",
["models:/MyModel/12", "models:/MyModel/Staging"],
)
def test_init_with_valid_uri_but_no_profile(self, valid_profileless_artifact_uri):
# Mock for `is_using_databricks_registry` fail when calling `get_registry_uri`
with mock.patch(
"mlflow.store.artifact.utils.models.mlflow.get_registry_uri",
return_value=None,
):
with pytest.raises(
MlflowException,
match="A valid databricks profile is required to instantiate this repository",
):
DatabricksModelsArtifactRepository(valid_profileless_artifact_uri)
def test_list_artifacts(self, databricks_model_artifact_repo):
status_code = 200
def _raise_for_status():
if status_code == 404:
raise Exception(
"404 Client Error: Not Found for url: https://shard-uri/api/2.0/mlflow/model-versions/list-artifacts?name=model&version=1"
)
list_artifact_dir_response_mock = mock.MagicMock()
list_artifact_dir_response_mock.status_code = status_code
list_artifact_dir_json_mock = {
"files": [
{"path": "MLmodel", "is_dir": False, "file_size": 294},
{"path": "data", "is_dir": True, "file_size": None},
]
}
list_artifact_dir_response_mock.text = json.dumps(list_artifact_dir_json_mock)
list_artifact_dir_response_mock.raise_for_status.side_effect = _raise_for_status
with mock.patch(
DATABRICKS_MODEL_ARTIFACT_REPOSITORY + "._call_endpoint"
) as call_endpoint_mock:
call_endpoint_mock.return_value = list_artifact_dir_response_mock
artifacts = databricks_model_artifact_repo.list_artifacts("")
assert isinstance(artifacts, list)
assert len(artifacts) == 2
assert artifacts[0].path == "MLmodel"
assert artifacts[0].is_dir is False
assert artifacts[0].file_size == 294
assert artifacts[1].path == "data"
assert artifacts[1].is_dir is True
assert artifacts[1].file_size is None
call_endpoint_mock.assert_called_once_with(ANY, REGISTRY_LIST_ARTIFACTS_ENDPOINT)
# errors from API are propagated through to cli response
list_artifact_dir_bad_response_mock = mock.MagicMock()
status_code = 404
list_artifact_dir_bad_response_mock.status_code = status_code
list_artifact_dir_bad_response_mock.text = "An error occurred"
list_artifact_dir_bad_response_mock.raise_for_status.side_effect = _raise_for_status
with mock.patch(
DATABRICKS_MODEL_ARTIFACT_REPOSITORY + "._call_endpoint"
) as call_endpoint_mock:
call_endpoint_mock.return_value = list_artifact_dir_bad_response_mock
with pytest.raises(
MlflowException,
match=r"API request to list files under path `` failed with status code 404. "
"Response body: An error occurred",
):
databricks_model_artifact_repo.list_artifacts("")
call_endpoint_mock.assert_called_once_with(ANY, REGISTRY_LIST_ARTIFACTS_ENDPOINT)
def test_list_artifacts_for_single_file(self, databricks_model_artifact_repo):
list_artifact_file_response_mock = mock.MagicMock()
list_artifact_file_response_mock.status_code = 200
list_artifact_file_json_mock = {
"files": [{"path": "MLmodel", "is_dir": False, "file_size": 294}]
}
list_artifact_file_response_mock.text = json.dumps(list_artifact_file_json_mock)
with mock.patch(
DATABRICKS_MODEL_ARTIFACT_REPOSITORY + "._call_endpoint"
) as call_endpoint_mock:
# Calling list_artifacts() on a path that's a file should return an empty list
call_endpoint_mock.return_value = list_artifact_file_response_mock
artifacts = databricks_model_artifact_repo.list_artifacts("MLmodel")
assert len(artifacts) == 0
@pytest.mark.parametrize(
("remote_file_path", "local_path"),
[
("test_file.txt", ""),
("test_file.txt", None),
("output/test_file", None),
("test_file.txt", ""),
],
)
def test_download_file(self, databricks_model_artifact_repo, remote_file_path, local_path):
signed_uri_response_mock = mock.MagicMock()
signed_uri_response_mock.status_code = 200
signed_uri_mock = {
"signed_uri": "https://my-amazing-signed-uri-to-rule-them-all.com/1234-numbers-yay-567",
"headers": [{"name": "header_name", "value": "header_value"}],
}
expected_headers = {"header_name": "header_value"}
signed_uri_response_mock.text = json.dumps(signed_uri_mock)
with mock.patch(
DATABRICKS_MODEL_ARTIFACT_REPOSITORY + "._call_endpoint"
) as call_endpoint_mock, mock.patch(
DATABRICKS_MODEL_ARTIFACT_REPOSITORY_PACKAGE + ".download_file_using_http_uri"
) as download_mock:
call_endpoint_mock.return_value = signed_uri_response_mock
download_mock.return_value = None
databricks_model_artifact_repo.download_artifacts(remote_file_path, local_path)
call_endpoint_mock.assert_called_with(ANY, REGISTRY_ARTIFACT_PRESIGNED_URI_ENDPOINT)
download_mock.assert_called_with(
signed_uri_mock["signed_uri"],
ANY,
ANY,
expected_headers,
)
def test_download_file_get_request_fail(self, databricks_model_artifact_repo):
with mock.patch(
DATABRICKS_MODEL_ARTIFACT_REPOSITORY + "._call_endpoint"
) as call_endpoint_mock:
call_endpoint_mock.side_effect = MlflowException("MOCK ERROR")
with pytest.raises(MlflowException, match=r".+"):
databricks_model_artifact_repo.download_artifacts("Something")
def test_log_artifact_fail(self, databricks_model_artifact_repo):
with pytest.raises(
MlflowException, match="This repository does not support logging artifacts"
):
databricks_model_artifact_repo.log_artifact("Some file")
def test_log_artifacts_fail(self, databricks_model_artifact_repo):
with pytest.raises(
MlflowException, match="This repository does not support logging artifacts"
):
databricks_model_artifact_repo.log_artifacts("Some dir")
def test_delete_artifacts_fail(self, databricks_model_artifact_repo):
with pytest.raises(
NotImplementedError,
match="This artifact repository does not support deleting artifacts",
):
databricks_model_artifact_repo.delete_artifacts()
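# Illustrative (non-test) sketch of the repository under test, assuming a
# configured Databricks profile named 'profile' and that the model version
# below exists in its registry:
#
#   from mlflow.store.artifact.databricks_models_artifact_repo import (
#       DatabricksModelsArtifactRepository,
#   )
#   repo = DatabricksModelsArtifactRepository("models://profile@databricks/MyModel/12")
#   repo.list_artifacts("")             # FileInfo entries such as 'MLmodel', 'data'
#   repo.download_artifacts("MLmodel")  # fetched via a signed download URI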
|
{
"content_hash": "3c6c5b603bdfcb801db6dc22ecc63de2",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 142,
"avg_line_length": 45.233333333333334,
"alnum_prop": 0.6355522803569966,
"repo_name": "mlflow/mlflow",
"id": "bd8c8f6d331f6d0eda68ad9e04fd0188100da800",
"size": "12213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/store/artifact/test_databricks_models_artifact_repo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24965"
},
{
"name": "Dockerfile",
"bytes": "1206"
},
{
"name": "HTML",
"bytes": "16439"
},
{
"name": "Java",
"bytes": "276538"
},
{
"name": "JavaScript",
"bytes": "3606345"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "6057051"
},
{
"name": "R",
"bytes": "202454"
},
{
"name": "Scala",
"bytes": "39353"
},
{
"name": "Shell",
"bytes": "27246"
},
{
"name": "TSQL",
"bytes": "211"
},
{
"name": "TypeScript",
"bytes": "313772"
}
],
"symlink_target": ""
}
|