text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import httplib
from authorize import gen_xml as xml
class BaseApi(object):
"""
Base Api object.
NOTE:
It's important that you make sure that your Authorize dashboard
uses the same delimiter and encapsulator that you are using in
your API objects. If you don't check this it could happen that the
direct_response cannot be parsed even in those cases where it's
absolutely necessary, like in the AIM API.
"""
responses = None
def __init__(self, login, key, delimiter=u",", encapsulator=u"",
is_test=False, do_raise=False, async=False, uniform=False):
"""
@param login: login key given by authorize.net
@type login: L{unicode}
@param key: transaction key given by authorize.net
@type key: L{unicode}
@param delimiter: The delimiter character you have set
in your authorize.net account for
direct response parsing
@type delimiter: C{str} of len() 1, defaults to ','
@param encapsulator: The encapsulator character for each
field that you have set in your
authorize.net account for direct
response parsing
@type encapsulator: C{str} of len() <= 1, defaults to ''
@param uniform: Tell the library to use a uniform return
type for direct_response.messages that will
always be a list even with a single message.
@type uniform: C{boolean},C{False} by default
@param is_test: Use the test sandbox from authroize.net
@type is_test: L{bool}
"""
if is_test:
self.server = 'apitest.authorize.net'
else:
self.server = 'api.authorize.net'
self.path = "/xml/v1/request.api"
self.is_test = is_test
self.login = login
self.key = key
self.do_raise = do_raise
self.async = async
self.headers = {'Content-Type': 'text/xml'}
self.delimiter = delimiter
self.encapsulator = encapsulator
self.uniform = uniform
def request(self, body):
"""
@param body: An XML formatted message for Authorize.net services.
@type body: L{str}
"""
if self.async:
return self.asyncrequest(body)
conn = httplib.HTTPSConnection(self.server)
conn.request("POST", self.path, body, headers=self.headers)
return self.parse_response(conn.getresponse().read())
def asyncrequest(self, body):
"""
Runs the request inside twisted matrix in an asynchronous way.
@param body: An XML formatted message for Authorize.net services.
@type body: L{str}
"""
from twisted.web import client
return client.getPage("https://"+self.server+self.path,
method="POST",
postdata=body,
headers=self.headers
).addCallback(self.parse_response)
def parse_response(self, response):
"""
Parse the response from the web service, check also if we want
to raise the error as opposed to return an error object.
"""
return xml.to_dict(response, self.responses, self.do_raise,
self.delimiter, self.encapsulator, self.uniform)
|
{
"content_hash": "b222deeaa8b61bd426b9683eb7afa036",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 76,
"avg_line_length": 37.89010989010989,
"alnum_prop": 0.58207656612529,
"repo_name": "imtapps/authorize",
"id": "344f9cfd630bc10175c7e7df8051ea9ef616e85c",
"size": "3448",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "authorize/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "136891"
}
],
"symlink_target": ""
}
|
"""Tests for the Policy."""
from absl.testing import absltest
from group_testing import policy
from group_testing import state
from group_testing.group_selectors import random
from group_testing.group_selectors import split
import jax
import jax.numpy as np
import jax.test_util
class PolicyTest(jax.test_util.JaxTestCase):
  """Unit tests for policy.Policy selector rotation and group selection."""

  def setUp(self):
    super().setUp()
    self.rng = jax.random.PRNGKey(0)
    selectors = [random.RandomSelector(), split.SplitSelector()]
    self.policy = policy.Policy(selectors)

  def test_next_selector(self):
    """A fresh policy starts on the first selector; next_selector peeks ahead."""
    self.assertEqual(self.policy.index, 0)
    self.assertIsInstance(self.policy.get_selector(), random.RandomSelector)
    self.assertIsInstance(self.policy.next_selector, split.SplitSelector)

  def test_act(self):
    """Acting on a fresh state queues test groups and advances the policy."""
    patient_count = 40
    tests_per_cycle = 4
    test_state = state.State(patient_count, tests_per_cycle,
                             max_group_size=5, prior_infection_rate=0.05,
                             prior_specificity=0.95, prior_sensitivity=0.80)
    self.assertEqual(np.size(test_state.groups_to_test), 0)
    self.assertEqual(self.policy.index, 0)
    self.policy.act(self.rng, test_state)
    self.assertGreater(np.size(test_state.groups_to_test), 0)
    self.assertEqual(test_state.groups_to_test.shape[1], patient_count)
    self.assertGreater(test_state.groups_to_test.shape[0], 0)
    self.assertEqual(self.policy.index, 1)
# Run through absl's test runner so command-line flags are parsed.
if __name__ == '__main__':
  absltest.main()
|
{
"content_hash": "d96a63a0f0c3ac1fe8d512e6088ea89c",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 76,
"avg_line_length": 32.57142857142857,
"alnum_prop": 0.7010233918128655,
"repo_name": "google-research/group_testing",
"id": "8ba2d2b46e24c79317f5af00f6f3ae1b023ef6fb",
"size": "1978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "group_testing/policy_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "186940"
},
{
"name": "Shell",
"bytes": "742"
}
],
"symlink_target": ""
}
|
"""Class for bitcoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
import shlex
import sys
from .authproxy import JSONRPCException
from .util import (
MAX_NODES,
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
    """A class for representing a bitcoind node under test.
    This class contains:
    - state about the node (whether it's running, etc)
    - a Python subprocess.Popen object representing the running process
    - an RPC connection to the node
    - one or more P2P connections to the node
    To make things easier for the test writer, any unrecognised messages will
    be dispatched to the RPC connection."""
    def __init__(self, i, datadir, *, chain, rpchost, timewait, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False):
        """
        Kwargs:
            start_perf (bool): If True, begin profiling the node with `perf` as soon as
                the node starts.
        """
        self.index = i
        self.datadir = datadir
        self.bitcoinconf = os.path.join(self.datadir, "bitcoin.conf")
        self.stdout_dir = os.path.join(self.datadir, "stdout")
        self.stderr_dir = os.path.join(self.datadir, "stderr")
        self.chain = chain
        self.rpchost = rpchost
        # Seconds to wait for the RPC server to come up (see wait_for_rpc_connection).
        self.rpc_timeout = timewait
        self.binary = bitcoind
        self.coverage_dir = coverage_dir
        self.cwd = cwd
        if extra_conf is not None:
            append_config(datadir, extra_conf)
        # Most callers will just need to add extra args to the standard list below.
        # For those callers that need more flexibility, they can just set the args property directly.
        # Note that common args are set in the config file (see initialize_datadir)
        self.extra_args = extra_args
        # Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
        # This means that starting a bitcoind using the temp dir to debug a failed test won't
        # spam debug.log.
        self.args = [
            self.binary,
            "-datadir=" + self.datadir,
            "-logtimemicros",
            "-logthreadnames",
            "-debug",
            "-debugexclude=libevent",
            "-debugexclude=leveldb",
            "-uacomment=testnode%d" % i,
        ]
        self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
        self.use_cli = use_cli
        self.start_perf = start_perf
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.url = None
        self.log = logging.getLogger('TestFramework.node%d' % i)
        self.cleanup_on_exit = True # Whether to kill the node when this object goes away
        # Cache perf subprocesses here by their data output filename.
        self.perf_subprocesses = {}
        self.p2ps = []
    # One deterministic keypair per node index, so tests can mine/spend
    # without needing a wallet.
    AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
    PRIV_KEYS = [
        # address , privkey
        AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
        AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
        AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
        AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
        AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
        AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
        AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
        AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
        AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
        AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'),
        AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'),
        AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'),
    ]
    def get_deterministic_priv_key(self):
        """Return a deterministic priv key in base58, that only depends on the node's index"""
        assert len(self.PRIV_KEYS) == MAX_NODES
        return self.PRIV_KEYS[self.index]
    def get_mem_rss_kilobytes(self):
        """Get the memory usage (RSS) per `ps`.
        Returns None if `ps` is unavailable.
        """
        assert self.running
        try:
            # Last whitespace-separated token of `ps` output is the RSS value.
            return int(subprocess.check_output(
                ["ps", "h", "-o", "rss", "{}".format(self.process.pid)],
                stderr=subprocess.DEVNULL).split()[-1])
        # Avoid failing on platforms where ps isn't installed.
        #
        # We could later use something like `psutils` to work across platforms.
        except (FileNotFoundError, subprocess.SubprocessError):
            self.log.exception("Unable to get memory usage")
            return None
    def _node_msg(self, msg: str) -> str:
        """Return a modified msg that identifies this node by its index as a debugging aid."""
        return "[node %d] %s" % (self.index, msg)
    def _raise_assertion_error(self, msg: str):
        """Raise an AssertionError with msg modified to identify this node."""
        raise AssertionError(self._node_msg(msg))
    def __del__(self):
        # Ensure that we don't leave any bitcoind processes lying around after
        # the test ends
        if self.process and self.cleanup_on_exit:
            # Should only happen on test failure
            # Avoid using logger, as that may have already been shutdown when
            # this destructor is called.
            print(self._node_msg("Cleaning up leftover process"))
            self.process.kill()
    def __getattr__(self, name):
        """Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
        if self.use_cli:
            return getattr(self.cli, name)
        else:
            assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
            return getattr(self.rpc, name)
    def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
        """Start the node."""
        if extra_args is None:
            extra_args = self.extra_args
        # Add a new stdout and stderr file each time bitcoind is started
        if stderr is None:
            stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
        if stdout is None:
            stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
        self.stderr = stderr
        self.stdout = stdout
        if cwd is None:
            cwd = self.cwd
        # Delete any existing cookie file -- if such a file exists (eg due to
        # unclean shutdown), it will get overwritten anyway by bitcoind, and
        # potentially interfere with our attempt to authenticate
        delete_cookie_file(self.datadir, self.chain)
        # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
        subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
        self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
        self.running = True
        self.log.debug("bitcoind started, waiting for RPC to come up")
        if self.start_perf:
            self._start_perf()
    def wait_for_rpc_connection(self):
        """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
        # Poll at a rate of four times per second
        poll_per_s = 4
        for _ in range(poll_per_s * self.rpc_timeout):
            if self.process.poll() is not None:
                raise FailedToStartError(self._node_msg(
                    'bitcoind exited with status {} during initialization'.format(self.process.returncode)))
            try:
                rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.chain, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
                rpc.getblockcount()
                # If the call to getblockcount() succeeds then the RPC connection is up
                self.log.debug("RPC successfully started")
                # In CLI mode the proxy was only needed for this liveness
                # check; don't keep it around.
                if self.use_cli:
                    return
                self.rpc = rpc
                self.rpc_connected = True
                self.url = self.rpc.url
                return
            except IOError as e:
                if e.errno != errno.ECONNREFUSED:  # Port not yet open?
                    raise  # unknown IO error
            except JSONRPCException as e:  # Initialization phase
                # -28 RPC in warmup
                # -342 Service unavailable, RPC server started but is shutting down due to error
                if e.error['code'] != -28 and e.error['code'] != -342:
                    raise  # unknown JSON RPC exception
            except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword. bitcoind still starting
                if "No RPC credentials" not in str(e):
                    raise
            time.sleep(1.0 / poll_per_s)
        self._raise_assertion_error("Unable to connect to bitcoind")
    def generate(self, nblocks, maxtries=1000000):
        # Mines to this node's deterministic address, so no wallet is needed.
        self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
        return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
    def get_wallet_rpc(self, wallet_name):
        """Return an RPC/CLI handle scoped to the named wallet."""
        if self.use_cli:
            return self.cli("-rpcwallet={}".format(wallet_name))
        else:
            assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
            wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
            # The RPC proxy overloads "/" to derive a proxy for a sub-path.
            return self.rpc / wallet_path
    def stop_node(self, expected_stderr='', wait=0):
        """Stop the node."""
        if not self.running:
            return
        self.log.debug("Stopping node")
        try:
            self.stop(wait=wait)
        except http.client.CannotSendRequest:
            self.log.exception("Unable to stop node.")
        # If there are any running perf processes, stop them.
        for profile_name in tuple(self.perf_subprocesses.keys()):
            self._stop_perf(profile_name)
        # Check that stderr is as expected
        self.stderr.seek(0)
        stderr = self.stderr.read().decode('utf-8').strip()
        if stderr != expected_stderr:
            raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
        self.stdout.close()
        self.stderr.close()
        del self.p2ps[:]
    def is_node_stopped(self):
        """Checks whether the node has stopped.
        Returns True if the node has stopped. False otherwise.
        This method is responsible for freeing resources (self.process)."""
        if not self.running:
            return True
        return_code = self.process.poll()
        if return_code is None:
            return False
        # process has stopped. Assert that it didn't return an error code.
        assert return_code == 0, self._node_msg(
            "Node returned non-zero exit code (%d) when stopping" % return_code)
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.log.debug("Node stopped")
        return True
    def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
        """Block until the node process has stopped (raises on timeout)."""
        wait_until(self.is_node_stopped, timeout=timeout)
    @contextlib.contextmanager
    def assert_debug_log(self, expected_msgs, timeout=2):
        """Context manager asserting that all expected_msgs appear in the
        debug.log output written while the with-block runs, polling for up
        to `timeout` seconds after the block exits."""
        time_end = time.time() + timeout
        debug_log = os.path.join(self.datadir, self.chain, 'debug.log')
        with open(debug_log, encoding='utf-8') as dl:
            # Remember the current end of the log; only text written after
            # this point is checked.
            dl.seek(0, 2)
            prev_size = dl.tell()
        yield
        while True:
            found = True
            with open(debug_log, encoding='utf-8') as dl:
                dl.seek(prev_size)
                log = dl.read()
            print_log = " - " + "\n - ".join(log.splitlines())
            for expected_msg in expected_msgs:
                if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
                    found = False
            if found:
                return
            if time.time() >= time_end:
                break
            time.sleep(0.05)
        self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
    @contextlib.contextmanager
    def assert_memory_usage_stable(self, *, increase_allowed=0.03):
        """Context manager that allows the user to assert that a node's memory usage (RSS)
        hasn't increased beyond some threshold percentage.
        Args:
            increase_allowed (float): the fractional increase in memory allowed until failure;
                e.g. `0.12` for up to 12% increase allowed.
        """
        before_memory_usage = self.get_mem_rss_kilobytes()
        yield
        after_memory_usage = self.get_mem_rss_kilobytes()
        if not (before_memory_usage and after_memory_usage):
            self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.")
            return
        perc_increase_memory_usage = (after_memory_usage / before_memory_usage) - 1
        if perc_increase_memory_usage > increase_allowed:
            self._raise_assertion_error(
                "Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format(
                    increase_allowed * 100, before_memory_usage, after_memory_usage,
                    perc_increase_memory_usage * 100))
    @contextlib.contextmanager
    def profile_with_perf(self, profile_name):
        """
        Context manager that allows easy profiling of node activity using `perf`.
        See `test/functional/README.md` for details on perf usage.
        Args:
            profile_name (str): This string will be appended to the
                profile data filename generated by perf.
        """
        subp = self._start_perf(profile_name)
        yield
        if subp:
            self._stop_perf(profile_name)
    def _start_perf(self, profile_name=None):
        """Start a perf process to profile this node.
        Returns the subprocess running perf."""
        subp = None
        def test_success(cmd):
            return subprocess.call(
                # shell=True required for pipe use below
                cmd, shell=True,
                stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0
        if not sys.platform.startswith('linux'):
            self.log.warning("Can't profile with perf; only available on Linux platforms")
            return None
        if not test_success('which perf'):
            self.log.warning("Can't profile with perf; must install perf-tools")
            return None
        if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
            self.log.warning(
                "perf output won't be very useful without debug symbols compiled into bitcoind")
        output_path = tempfile.NamedTemporaryFile(
            dir=self.datadir,
            prefix="{}.perf.data.".format(profile_name or 'test'),
            delete=False,
        ).name
        cmd = [
            'perf', 'record',
            '-g',                     # Record the callgraph.
            '--call-graph', 'dwarf',  # Compatibility for gcc's --fomit-frame-pointer.
            '-F', '101',              # Sampling frequency in Hz.
            '-p', str(self.process.pid),
            '-o', output_path,
        ]
        subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.perf_subprocesses[profile_name] = subp
        return subp
    def _stop_perf(self, profile_name):
        """Stop (and pop) a perf subprocess."""
        subp = self.perf_subprocesses.pop(profile_name)
        # Recover the '-o' output path from the args perf was started with.
        output_path = subp.args[subp.args.index('-o') + 1]
        subp.terminate()
        subp.wait(timeout=10)
        stderr = subp.stderr.read().decode()
        if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
            self.log.warning(
                "perf couldn't collect data! Try "
                "'sudo sysctl -w kernel.perf_event_paranoid=-1'")
        else:
            report_cmd = "perf report -i {}".format(output_path)
            self.log.info("See perf output by running '{}'".format(report_cmd))
    def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
        """Attempt to start the node and expect it to raise an error.
        extra_args: extra arguments to pass through to bitcoind
        expected_msg: regex that stderr should match when bitcoind fails
        Will throw if bitcoind starts without an error.
        Will throw if an expected_msg is provided and it does not match bitcoind's stdout."""
        with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
             tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
            try:
                self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
                self.wait_for_rpc_connection()
                self.stop_node()
                self.wait_until_stopped()
            except FailedToStartError as e:
                self.log.debug('bitcoind failed to start: %s', e)
                self.running = False
                self.process = None
                # Check stderr for expected message
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8').strip()
                    if match == ErrorMatch.PARTIAL_REGEX:
                        if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_REGEX:
                        if re.fullmatch(expected_msg, stderr) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_TEXT:
                        if expected_msg != stderr:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
            else:
                # No exception: bitcoind started cleanly, which is a failure here.
                if expected_msg is None:
                    assert_msg = "bitcoind should have exited with an error"
                else:
                    assert_msg = "bitcoind should have exited with expected error " + expected_msg
                self._raise_assertion_error(assert_msg)
    def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
        """Add a p2p connection to the node.
        This method adds the p2p connection to the self.p2ps list and also
        returns the connection to the caller."""
        if 'dstport' not in kwargs:
            kwargs['dstport'] = p2p_port(self.index)
        if 'dstaddr' not in kwargs:
            kwargs['dstaddr'] = '127.0.0.1'
        p2p_conn.peer_connect(**kwargs)()
        self.p2ps.append(p2p_conn)
        if wait_for_verack:
            p2p_conn.wait_for_verack()
        return p2p_conn
    @property
    def p2p(self):
        """Return the first p2p connection
        Convenience property - most tests only use a single p2p connection to each
        node, so this saves having to write node.p2ps[0] many times."""
        assert self.p2ps, self._node_msg("No p2p connection")
        return self.p2ps[0]
    def disconnect_p2ps(self):
        """Close all p2p connections to the node."""
        for p in self.p2ps:
            p.peer_disconnect()
        del self.p2ps[:]
class TestNodeCLIAttr:
    """Callable proxy standing in for a single bitcoin-cli command.
    Produced by TestNodeCLI.__getattr__ so attribute access like
    cli.getblockcount() dispatches to send_cli("getblockcount")."""
    def __init__(self, cli, command):
        # The CLI wrapper that will execute the command, and the command name.
        self.cli = cli
        self.command = command
    def __call__(self, *args, **kwargs):
        """Execute the command immediately and return its result."""
        return self.cli.send_cli(self.command, *args, **kwargs)
    def get_request(self, *args, **kwargs):
        """Return a zero-argument callable that runs the command when invoked
        (useful for batching)."""
        def deferred():
            return self(*args, **kwargs)
        return deferred
def arg_to_cli(arg):
    """Convert a Python value to its bitcoin-cli argument string.
    Booleans become the lowercase strings "true"/"false", dicts and lists
    are JSON-encoded, and everything else is passed through str().
    The bool check must come first: bool is a subclass of int, so a bool
    would otherwise be stringified as "True"/"False" by the fallback.
    """
    if isinstance(arg, bool):
        return str(arg).lower()
    elif isinstance(arg, (dict, list)):
        # JSON-encode containers so the CLI receives valid JSON arguments.
        return json.dumps(arg)
    else:
        return str(arg)
class TestNodeCLI():
    """Interface to bitcoin-cli for an individual node"""
    def __init__(self, binary, datadir):
        # Per-invocation bitcoin-cli options are attached via __call__.
        self.options = []
        self.binary = binary
        self.datadir = datadir
        self.input = None
        self.log = logging.getLogger('TestFramework.bitcoincli')
    def __call__(self, *options, input=None):
        """Return a fresh CLI handle carrying the given bitcoin-cli options."""
        configured = TestNodeCLI(self.binary, self.datadir)
        configured.options = [str(option) for option in options]
        configured.input = input
        return configured
    def __getattr__(self, command):
        # Any unknown attribute is treated as an RPC command name.
        return TestNodeCLIAttr(self, command)
    def batch(self, requests):
        """Run the given zero-argument requests, collecting results/errors."""
        outcomes = []
        for request in requests:
            try:
                outcomes.append(dict(result=request()))
            except JSONRPCException as exc:
                outcomes.append(dict(error=exc))
        return outcomes
    def send_cli(self, command=None, *args, **kwargs):
        """Run bitcoin-cli command. Deserializes returned string as python object."""
        pos_args = [arg_to_cli(arg) for arg in args]
        named_args = ["{}={}".format(key, arg_to_cli(value))
                      for key, value in kwargs.items()]
        assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
        invocation = [self.binary, "-datadir=" + self.datadir] + self.options
        if named_args:
            invocation += ["-named"]
        if command is not None:
            invocation += [command]
        invocation += pos_args + named_args
        self.log.debug("Running bitcoin-cli command: %s" % command)
        process = subprocess.Popen(invocation, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        cli_stdout, cli_stderr = process.communicate(input=self.input)
        returncode = process.poll()
        if returncode:
            # bitcoin-cli prints RPC errors on stderr; convert them into
            # JSONRPCException so CLI and RPC failures look the same to tests.
            error_match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
            if error_match:
                code, message = error_match.groups()
                raise JSONRPCException(dict(code=int(code), message=message))
            # Ignore cli_stdout, raise with cli_stderr
            raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
        try:
            return json.loads(cli_stdout, parse_float=decimal.Decimal)
        except json.JSONDecodeError:
            return cli_stdout.rstrip("\n")
|
{
"content_hash": "be0ee3a1469ec176fd0565ae5c1458af",
"timestamp": "",
"source": "github",
"line_count": 583,
"max_line_length": 173,
"avg_line_length": 41.37907375643225,
"alnum_prop": 0.6094760404576356,
"repo_name": "CryptArc/bitcoin",
"id": "55e6d4caa6aa64769d52bb2cf125160229b40a42",
"size": "24338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/test_framework/test_node.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "769662"
},
{
"name": "C++",
"bytes": "6596589"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "M4",
"bytes": "205090"
},
{
"name": "Makefile",
"bytes": "121613"
},
{
"name": "Objective-C",
"bytes": "123749"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "1649745"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Scheme",
"bytes": "6045"
},
{
"name": "Shell",
"bytes": "130233"
}
],
"symlink_target": ""
}
|
from celery.task import task
from make_mozilla.bsd import BSDRegisterConstituent
import commonware.log
import funfactory.log_settings # Magic voodoo required to make logging work.
log = commonware.log.getLogger('mk.tasks')
@task
def register_email_address_as_constituent(email_address, group):
    """Celery task: add *email_address* to the given BSD constituent group.
    Delegates to BSDRegisterConstituent.add_email_to_group.
    """
    log.info('Running register_email_address_as_constituent')
    BSDRegisterConstituent.add_email_to_group(email_address, group)
|
{
"content_hash": "6dcd919caa235eefd182279d7b784aa1",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 76,
"avg_line_length": 38.72727272727273,
"alnum_prop": 0.8028169014084507,
"repo_name": "mozilla/make.mozilla.org",
"id": "18a5302175cdbe3061a9545a4b882e343d85c9ea",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "make_mozilla/events/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "334625"
},
{
"name": "Puppet",
"bytes": "14621"
},
{
"name": "Python",
"bytes": "3683223"
},
{
"name": "Ruby",
"bytes": "1462"
},
{
"name": "Shell",
"bytes": "4446"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from openstack_dashboard.dashboards.project.vpn import views
# URLconf for the project VPN panel: the index page plus add / update /
# detail routes for IKE policies, IPSec policies, IPSec site connections
# and VPN services. NOTE(review): patterns() is deprecated in newer Django
# releases; kept here to match the rest of this codebase.
urlpatterns = patterns('openstack_dashboard.dashboards.project.vpn.views',
    url(r'^$', views.IndexView.as_view(), name='index'),
    # IKE policies
    url(r'^addikepolicy$',
        views.AddIKEPolicyView.as_view(), name='addikepolicy'),
    url(r'^update_ikepolicy/(?P<ikepolicy_id>[^/]+)/$',
        views.UpdateIKEPolicyView.as_view(), name='update_ikepolicy'),
    # IPSec policies
    url(r'^addipsecpolicy$',
        views.AddIPSecPolicyView.as_view(), name='addipsecpolicy'),
    url(r'^update_ipsecpolicy/(?P<ipsecpolicy_id>[^/]+)/$',
        views.UpdateIPSecPolicyView.as_view(), name='update_ipsecpolicy'),
    # IPSec site connections
    url(r'^addipsecsiteconnection$',
        views.AddIPSecSiteConnectionView.as_view(),
        name='addipsecsiteconnection'),
    url(r'^update_ipsecsiteconnection/(?P<ipsecsiteconnection_id>[^/]+)/$',
        views.UpdateIPSecSiteConnectionView.as_view(),
        name='update_ipsecsiteconnection'),
    # VPN services
    url(r'^addvpnservice$',
        views.AddVPNServiceView.as_view(), name='addvpnservice'),
    url(r'^update_vpnservice/(?P<vpnservice_id>[^/]+)/$',
        views.UpdateVPNServiceView.as_view(), name='update_vpnservice'),
    # Detail pages
    url(r'^ikepolicy/(?P<ikepolicy_id>[^/]+)/$',
        views.IKEPolicyDetailsView.as_view(), name='ikepolicydetails'),
    url(r'^ipsecpolicy/(?P<ipsecpolicy_id>[^/]+)/$',
        views.IPSecPolicyDetailsView.as_view(), name='ipsecpolicydetails'),
    url(r'^vpnservice/(?P<vpnservice_id>[^/]+)/$',
        views.VPNServiceDetailsView.as_view(), name='vpnservicedetails'),
    url(r'^ipsecsiteconnection/(?P<ipsecsiteconnection_id>[^/]+)/$',
        views.IPSecSiteConnectionDetailsView.as_view(),
        name='ipsecsiteconnectiondetails'))
|
{
"content_hash": "5e0537cba04300fd27bca65ba51a0b9d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 75,
"avg_line_length": 53.088235294117645,
"alnum_prop": 0.6747922437673131,
"repo_name": "aaronorosen/horizon-congress",
"id": "0de3a3836097d6a30b67f564d513fb45d8ac048d",
"size": "2438",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/vpn/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "282571"
},
{
"name": "JavaScript",
"bytes": "697632"
},
{
"name": "Python",
"bytes": "3559404"
},
{
"name": "Shell",
"bytes": "15387"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: TodoItem and TodoList tables, plus the foreign key
    linking each item to its list (added last so both tables exist first)."""
    dependencies = [
    ]
    operations = [
        # Items: free text with done flag and manual ordering.
        migrations.CreateModel(
            name='TodoItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('text', models.TextField(default='')),
                ('done', models.BooleanField(default=False)),
                ('order', models.IntegerField(default=0)),
            ],
        ),
        # Lists: titled containers with manual ordering.
        migrations.CreateModel(
            name='TodoList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('title', models.CharField(max_length=255, default='')),
                ('order', models.IntegerField(default=0)),
            ],
        ),
        # Link items to their list; reverse accessor is list.items.
        migrations.AddField(
            model_name='todoitem',
            name='todo_list',
            field=models.ForeignKey(related_name='items', to='todo_api.TodoList'),
        ),
    ]
|
{
"content_hash": "a7a2c1af0008d707be8183cb33a11af5",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 114,
"avg_line_length": 35,
"alnum_prop": 0.5444444444444444,
"repo_name": "pauloromeira/todo_lab",
"id": "7ad2eebe0b9ce64710385acc17f4a3e5aeabd7e1",
"size": "1284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "todo_api/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1109"
},
{
"name": "HTML",
"bytes": "4726"
},
{
"name": "JavaScript",
"bytes": "2215"
},
{
"name": "Python",
"bytes": "8101"
}
],
"symlink_target": ""
}
|
from malcolm.yamlutil import check_yaml_names, make_block_creator
# Block creators: each wraps the named YAML block definition in a callable
# that instantiates it.
attribute_block = make_block_creator(__file__, "attribute_block.yaml")
directory_monitor_block = make_block_creator(__file__, "directory_monitor_block.yaml")
double_trigger_block = make_block_creator(__file__, "double_trigger_block.yaml")
scan_runner_block = make_block_creator(__file__, "scan_runner_block.yaml")
shutter_block = make_block_creator(__file__, "shutter_block.yaml")
unrolling_block = make_block_creator(__file__, "unrolling_block.yaml")
# Validate creator names against their YAML files and export them.
__all__ = check_yaml_names(globals())
|
{
"content_hash": "4a06e7a720168c3b2f8f94d126545d50",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 86,
"avg_line_length": 55.8,
"alnum_prop": 0.7365591397849462,
"repo_name": "dls-controls/pymalcolm",
"id": "c0d308ba7395ca5fa75fc5dc0ba0f03375680150",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "malcolm/modules/scanning/blocks/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "549"
},
{
"name": "Python",
"bytes": "1583458"
},
{
"name": "Shell",
"bytes": "580"
}
],
"symlink_target": ""
}
|
import json
import logging
import os
import re
import sys
import urllib
from collections import defaultdict
from HTMLParser import HTMLParser
from tests.common.impala_connection import ImpalaConnection, create_connection
from tests.common.impala_connection import create_ldap_connection
from time import sleep, time
logging.basicConfig(level=logging.ERROR, format='%(threadName)s: %(message)s')
LOG = logging.getLogger('impala_service')
LOG.setLevel(level=logging.DEBUG)
# Base class for all Impala services
# TODO: Refactor the retry/timeout logic into a common place.
class BaseImpalaService(object):
  """Base class for all Impala services.

  Wraps access to a service's debug webserver and provides retry/timeout
  helpers for polling metrics exposed there.
  """
  def __init__(self, hostname, webserver_port):
    self.hostname = hostname
    self.webserver_port = webserver_port

  def open_debug_webpage(self, page_name, timeout=10, interval=1):
    """Opens the given debug webpage, retrying every `interval` seconds until
    `timeout` seconds have elapsed. Asserts if it never becomes available."""
    start_time = time()
    while (time() - start_time < timeout):
      try:
        return urllib.urlopen("http://%s:%d/%s" %
            (self.hostname, int(self.webserver_port), page_name))
      except Exception:
        # Webserver may still be starting up; retry after a short sleep.
        LOG.info("Debug webpage not yet available.")
        sleep(interval)
    assert 0, 'Debug webpage did not become available in expected time.'

  def read_debug_webpage(self, page_name, timeout=10, interval=1):
    """Returns the full contents of the given debug webpage."""
    return self.open_debug_webpage(page_name, timeout=timeout, interval=interval).read()

  def get_metric_value(self, metric_name, default_value=None):
    """Returns the value of the given metric name from the Impala debug webpage"""
    metrics = json.loads(self.read_debug_webpage('jsonmetrics?json'))
    return metrics.get(metric_name, default_value)

  def wait_for_metric_value(self, metric_name, expected_value, timeout=10, interval=1):
    """Polls the given metric every `interval` seconds until it equals
    `expected_value`, asserting if `timeout` seconds elapse first."""
    start_time = time()
    while (time() - start_time < timeout):
      LOG.info("Getting metric: %s from %s:%s" %
          (metric_name, self.hostname, self.webserver_port))
      value = None
      try:
        value = self.get_metric_value(metric_name)
      except Exception as e:
        # Transient fetch/parse failure; log and retry.
        LOG.error(e)

      if value == expected_value:
        LOG.info("Metric '%s' has reached desired value: %s" % (metric_name, value))
        return value
      else:
        LOG.info("Waiting for metric value '%s'=%s. Current value: %s" %
            (metric_name, expected_value, value))
      LOG.info("Sleeping %ds before next retry." % interval)
      sleep(interval)
    assert 0, 'Metric value %s did not reach value %s in %ss' %\
        (metric_name, expected_value, timeout)
# Allows for interacting with an Impalad instance to perform operations such as creating
# new connections or accessing the debug webpage.
class ImpaladService(BaseImpalaService):
  """Allows for interacting with an Impalad instance to perform operations
  such as creating new connections or accessing the debug webpage."""
  def __init__(self, hostname, webserver_port=25000, beeswax_port=21000, be_port=22000,
               hs2_port=21050):
    super(ImpaladService, self).__init__(hostname, webserver_port)
    self.beeswax_port = beeswax_port
    self.be_port = be_port
    self.hs2_port = hs2_port

  def get_num_known_live_backends(self, timeout=30, interval=1):
    """Returns the number of backends this impalad currently considers live."""
    LOG.info("Getting num_known_live_backends from %s:%s" %
        (self.hostname, self.webserver_port))
    result = json.loads(self.read_debug_webpage('backends?json', timeout, interval))
    return len(result['backends'])

  def get_num_in_flight_queries(self, timeout=30, interval=1):
    """Returns the number of in-flight queries, or None if the page is empty."""
    LOG.info("Getting num_in_flight_queries from %s:%s" %
        (self.hostname, self.webserver_port))
    result = self.read_debug_webpage('inflight_query_ids?raw', timeout, interval)
    # One query id per non-empty line.
    return None if result is None else len([l for l in result.split('\n') if l])

  def wait_for_num_in_flight_queries(self, expected_val, timeout=10):
    """Waits for the number of in-flight queries to reach a certain value.
    Returns True on success, False if the timeout elapsed first."""
    start_time = time()
    while (time() - start_time < timeout):
      num_in_flight_queries = self.get_num_in_flight_queries()
      if num_in_flight_queries == expected_val: return True
      sleep(1)
      LOG.info("The number of in flight queries: %s, expected: %s" %
          (num_in_flight_queries, expected_val))
    return False

  def wait_for_num_known_live_backends(self, expected_value, timeout=30, interval=1):
    """Polls until the backend count equals `expected_value`; asserts on timeout."""
    start_time = time()
    while (time() - start_time < timeout):
      value = None
      try:
        value = self.get_num_known_live_backends(timeout=timeout, interval=interval)
      except Exception as e:
        LOG.error(e)
      if value == expected_value:
        LOG.info("num_known_live_backends has reached value: %s" % value)
        return value
      else:
        LOG.info("Waiting for num_known_live_backends=%s. Current value: %s" %\
            (expected_value, value))
      sleep(1)
    assert 0, 'num_known_live_backends did not reach expected value in time'

  def read_query_profile_page(self, query_id, timeout=10, interval=1):
    """Fetches the raw contents of the query's runtime profile webpage.
    Fails an assertion if Impala's webserver is unavailable or the query's
    profile page doesn't exist."""
    return self.read_debug_webpage("query_profile?query_id=%s&raw" % (query_id))

  def get_query_status(self, query_id):
    """Gets the 'Query Status' section of the query's runtime profile, or
    None if the profile contains no such section."""
    page = self.read_query_profile_page(query_id)
    status_line =\
        next((x for x in page.split('\n') if re.search('Query Status:', x)), None)
    if status_line is None:
      # Previously this crashed with AttributeError on a missing section;
      # callers (wait_for_query_status) explicitly test for None.
      return None
    return status_line.split('Query Status:')[1].strip()

  def wait_for_query_state(self, client, query_handle, target_state,
                           timeout=10, interval=1):
    """Keeps polling for the query's state using client in the given interval until
    the query's state reaches the target state or the given timeout has been reached."""
    start_time = time()
    # Initialize so a failure on the very first poll cannot cause a NameError.
    query_state = None
    while (time() - start_time < timeout):
      try:
        query_state = client.get_state(query_handle)
      except Exception:
        # Query may not be registered with the server yet; retry.
        pass
      if query_state == target_state:
        return
      sleep(interval)
    assert target_state == query_state, 'Did not reach query state in time'
    return

  def wait_for_query_status(self, client, query_id, expected_content,
                            timeout=30, interval=1):
    """Polls for the query's status in the query profile web page to contain the
    specified content. Returns False if the timeout was reached before a successful
    match, True otherwise."""
    start_time = time()
    query_status = ""
    while (time() - start_time < timeout):
      try:
        query_status = self.get_query_status(query_id)
      except Exception:
        # Profile page may not be available yet; keep polling.
        pass
      # A missing 'Query Status' section is a real error, not a transient
      # condition, so check it outside the try block where the original
      # format string (two %s, one argument) silently raised and was
      # swallowed by the except clause.
      assert query_status is not None, \
          "Could not find 'Query Status' section in profile of query with id %s" % query_id
      if expected_content in query_status:
        return True
      sleep(interval)
    return False

  def create_beeswax_client(self, use_kerberos=False):
    """Creates a new beeswax client connection to the impalad"""
    client = create_connection('%s:%d' % (self.hostname, self.beeswax_port), use_kerberos)
    client.connect()
    return client

  def create_ldap_beeswax_client(self, user, password, use_ssl=False):
    """Creates a new LDAP-authenticated beeswax client connection."""
    client = create_ldap_connection('%s:%d' % (self.hostname, self.beeswax_port),
                                   user=user, password=password, use_ssl=use_ssl)
    client.connect()
    return client

  def get_catalog_object_dump(self, object_type, object_name):
    """Returns the debug webpage dump for the given catalog object."""
    return self.read_debug_webpage('catalog_objects?object_type=%s&object_name=%s' %\
        (object_type, object_name))
# Allows for interacting with the StateStore service to perform operations such as
# accessing the debug webpage.
class StateStoredService(BaseImpalaService):
  """Allows for interacting with the StateStore service to perform operations
  such as accessing the debug webpage."""
  def __init__(self, hostname, webserver_port):
    super(StateStoredService, self).__init__(hostname, webserver_port)

  def wait_for_live_subscribers(self, num_subscribers, timeout=15, interval=1):
    """Blocks until the statestore reports the expected number of live backends."""
    metric = 'statestore.live-backends'
    self.wait_for_metric_value(metric, num_subscribers, timeout=timeout,
                               interval=interval)
# Allows for interacting with the Catalog service to perform operations such as
# accessing the debug webpage.
class CatalogdService(BaseImpalaService):
  """Allows for interacting with the Catalog service to perform operations
  such as accessing the debug webpage."""
  def __init__(self, hostname, webserver_port, service_port):
    super(CatalogdService, self).__init__(hostname, webserver_port)
    self.service_port = service_port

  def get_catalog_object_dump(self, object_type, object_name):
    """Returns the debug webpage dump for the given catalog object."""
    page = 'catalog_objects?object_type=%s&object_name=%s' % (object_type,
                                                              object_name)
    return self.read_debug_webpage(page)
|
{
"content_hash": "c7d3cecb14500f6aeeb513436db68a88",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 90,
"avg_line_length": 41.65853658536585,
"alnum_prop": 0.6734192037470726,
"repo_name": "ibmsoe/ImpalaPPC",
"id": "17dc37cffa7f87696a72afe89aafd6d24ea6b9dc",
"size": "9387",
"binary": false,
"copies": "2",
"ref": "refs/heads/Impala2.6-main",
"path": "tests/common/impala_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "420783"
},
{
"name": "C++",
"bytes": "8255778"
},
{
"name": "CMake",
"bytes": "114921"
},
{
"name": "CSS",
"bytes": "89516"
},
{
"name": "Groff",
"bytes": "1633"
},
{
"name": "HTML",
"bytes": "56"
},
{
"name": "Java",
"bytes": "3979123"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Lex",
"bytes": "22598"
},
{
"name": "Objective-C",
"bytes": "990"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Protocol Buffer",
"bytes": "630"
},
{
"name": "Python",
"bytes": "2093634"
},
{
"name": "Shell",
"bytes": "181651"
},
{
"name": "Thrift",
"bytes": "259756"
}
],
"symlink_target": ""
}
|
from keystoneclient import base
class DomainQuota(base.Resource):
    """Represents a domain quota resource.

    Instances are plain data records; retrieval logic lives in the
    accompanying CrudManager subclass.
    """
    pass
class DomainQuotaManager(base.CrudManager):
    """Manager for retrieving domain quotas via the keystone CRUD API."""

    resource_class = DomainQuota
    collection_key = 'domains'
    key = 'domain'

    def get(self, domain, region=None, services=None):
        """Fetch the quotas for `domain`, optionally filtered by region
        and/or services."""
        request_kwargs = {
            'domain_id': base.getid(domain),
            'region': region,
            'services': services,
            'complement': 'quotas',
        }
        return super(DomainQuotaManager, self).get_with_body(**request_kwargs)
|
{
"content_hash": "d6d538d45faa8bf149f2d338d7aa0016",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 61,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.6464285714285715,
"repo_name": "raildo/python-keystoneclient",
"id": "48af532d675ae9a698b3b1e163db9b1da59fcf1d",
"size": "1226",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystoneclient/v3/quota.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "32004"
},
{
"name": "JavaScript",
"bytes": "14806"
},
{
"name": "Python",
"bytes": "1344730"
},
{
"name": "Shell",
"bytes": "22768"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class AccountLogout(Choreography):
    """Choreo wrapper for the FilesAnywhere AccountLogout operation."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the AccountLogout Choreo. A TembooSession
        object, containing a valid set of Temboo credentials, must be supplied.
        """
        super(AccountLogout, self).__init__(temboo_session, '/Library/FilesAnywhere/AccountLogout')

    def new_input_set(self):
        """Return an empty input set for this Choreo."""
        return AccountLogoutInputSet()

    def _make_result_set(self, result, path):
        """Wrap raw execution output in the Choreo-specific result set."""
        return AccountLogoutResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Wrap an in-flight run in the Choreo-specific execution handle."""
        return AccountLogoutChoreographyExecution(session, exec_id, path)
class AccountLogoutInputSet(InputSet):
    """Input parameters for the AccountLogout Choreo.

    Use the setter below to supply values before executing the Choreo.
    """

    def set_Token(self, value):
        """
        Set the value of the Token input for this Choreo. ((required, string) The token retrieved from authentication. Can be passed from the AccountLogin Choreo.)
        """
        super(AccountLogoutInputSet, self)._set_input('Token', value)
class AccountLogoutResultSet(ResultSet):
    """Results produced by an AccountLogout execution.

    Provides typed accessors over the raw Choreo output.
    """

    def getJSONFromString(self, str):
        """Deserialize a JSON string into Python objects."""
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from FilesAnywhere.)
        """
        return self._output.get('Response', None)
class AccountLogoutChoreographyExecution(ChoreographyExecution):
    """Execution handle for an in-flight AccountLogout run."""

    def _make_result_set(self, response, path):
        """Build the Choreo-specific result set from a raw response."""
        result_set = AccountLogoutResultSet(response, path)
        return result_set
|
{
"content_hash": "354643064f9571c3b9a111fb244e0b93",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 164,
"avg_line_length": 37.375,
"alnum_prop": 0.7142857142857143,
"repo_name": "jordanemedlock/psychtruths",
"id": "c267b622229a223947d3c7fa558589f5f2e56856",
"size": "2926",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/core/Library/FilesAnywhere/AccountLogout.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from twisted.internet.defer import inlineCallbacks
from twisted.internet.endpoints import clientFromString
from twisted.words.protocols import irc
from autobahn import wamp
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
from wampirc.client import IRCClientFactory
class Bot:
    """Record of one running IRC bot: its id, protocol factory and client.

    Used by IRCComponent to track bot instances it has started.
    """
    def __init__(self, id, factory, client):
        """Remember the assigned id, the IRC client factory and the
        endpoint client used to connect it."""
        self.client = client
        self.factory = factory
        self.id = id
class IRCComponent(ApplicationSession):
    """
    IRC bot services component.

    Exposes WAMP procedures to start and stop IRC bots and tracks the
    running instances in self._bots.
    """

    def __init__(self, config):
        ApplicationSession.__init__(self)
        self.config = config
        self._bots = {}    # bot id -> Bot
        self._bot_no = 0   # monotonically increasing id source

    @wamp.register('com.myapp.start_bot')
    def start_bot(self, nick, channels):
        """Connect a new IRC bot; returns a deferred firing with its id."""
        self._bot_no += 1
        id = self._bot_no
        factory = IRCClientFactory(self, nick, channels)
        from twisted.internet import reactor
        client = clientFromString(reactor, self.config.extra['server'])
        d = client.connect(factory)

        def onconnect(res):
            # Only track the bot once the connection succeeded.
            self._bots[id] = Bot(id, factory, client)
            return id
        d.addCallback(onconnect)
        return d

    @wamp.register('com.myapp.stop_bot')
    def stop_bot(self, id):
        """Disconnect and forget the bot with the given id; raises an
        ApplicationError if no such bot is running."""
        if id in self._bots:
            f = self._bots[id].factory
            if f.proto:
                f.proto.transport.loseConnection()
            f.stopFactory()
            del self._bots[id]
        else:
            raise ApplicationError('com.myapp.error.no_such_bot')

    @inlineCallbacks
    def onJoin(self, details):
        """Register all decorated procedures when the WAMP session joins."""
        try:
            regs = yield self.register(self)
            print("Ok, registered {} procedures.".format(len(regs)))
        except Exception as e:
            print("Failed to register procedures: {}".format(e))
        print("IRC Bot Backend ready!")

    def onDisconnect(self):
        """Stop the reactor when the WAMP session drops."""
        # BUG FIX: `reactor` was referenced as an undefined global here
        # (it is only imported locally in start_bot), causing a NameError
        # on disconnect. Import it locally, matching start_bot's style.
        from twisted.internet import reactor
        reactor.stop()
def make(config):
    """Factory entry point for this WAMPlet.

    With a config, instantiate the live IRCComponent; without one, return
    a static description of the component instead.
    """
    if not config:
        # if no config given, return a description of this WAMPlet ..
        return {'label': 'An IRC bot service component',
                'description': 'This component provides IRC bot services via WAMP.'}
    return IRCComponent(config)
if __name__ == '__main__':
    from autobahn.twisted.wamp import ApplicationRunner

    # Connection parameters handed to IRCComponent via config.extra['server'].
    extra = {
        "server": "tcp:irc.freenode.net:6667"
    }

    # test drive the component during development ..
    runner = ApplicationRunner(
        url="ws://localhost:8080/ws",
        realm="realm1",
        extra=extra,
        debug=False,       # low-level WebSocket debugging
        debug_wamp=False,  # WAMP protocol-level debugging
        debug_app=True)    # app-level debugging
    # Blocks until the reactor stops (see IRCComponent.onDisconnect).
    runner.run(make)
|
{
"content_hash": "d6025914044f298d02dfb9f9fb5fee1b",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 84,
"avg_line_length": 26.952380952380953,
"alnum_prop": 0.6159010600706714,
"repo_name": "dash-dash/AutobahnPython",
"id": "a78c836491d1706a575da21151b3932b4dd5403a",
"size": "4107",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/twisted/wamp/wamplet/wampirc/wampirc/service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "2711"
},
{
"name": "HTML",
"bytes": "86275"
},
{
"name": "JavaScript",
"bytes": "104724"
},
{
"name": "Makefile",
"bytes": "4770"
},
{
"name": "Python",
"bytes": "1304866"
},
{
"name": "Shell",
"bytes": "158"
}
],
"symlink_target": ""
}
|
"""
This module provides classes and methods used to describe deformations and
strains, including applying those deformations to structure objects and
generating deformed structure sets for further calculations.
"""
import numpy as np
import scipy
import itertools
import collections
from monty.dev import deprecated
from pymatgen.core.lattice import Lattice
from pymatgen.core.tensors import SquareTensor, symmetry_reduce
__author__ = "Joseph Montoya"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Maarten de Jong, Mark Asta, Anubhav Jain"
__version__ = "1.0"
__maintainer__ = "Joseph Montoya"
__email__ = "montoyjh@lbl.gov"
__status__ = "Production"
__date__ = "July 24, 2018"
class Deformation(SquareTensor):
    """
    Subclass of SquareTensor that describes the deformation gradient tensor
    """
    # Symbol used when this tensor is rendered/labelled.
    symbol = "d"

    def __new__(cls, deformation_gradient):
        """
        Create a Deformation object. Note that the constructor uses __new__
        rather than __init__ according to the standard method of subclassing
        numpy ndarrays.

        Args:
            deformation_gradient (3x3 array-like): the 3x3 array-like
                representing the deformation gradient
        """
        obj = super(Deformation, cls).__new__(cls, deformation_gradient)
        # view(cls) ensures the returned array carries the Deformation type.
        return obj.view(cls)

    def is_independent(self, tol=1e-8):
        """
        Checks whether the deformation is independent, i.e. perturbs exactly
        one element of the identity matrix.
        """
        return len(self.get_perturbed_indices(tol)) == 1

    def get_perturbed_indices(self, tol=1e-8):
        """
        Gets indices of perturbed elements of the deformation gradient,
        i. e. those that differ from the identity by more than tol.
        """
        indices = list(zip(*np.where(abs(self - np.eye(3)) > tol)))
        return indices

    @property
    def green_lagrange_strain(self):
        """
        Calculates the Green-Lagrange strain from the deformation gradient.
        """
        return Strain.from_deformation(self)

    def apply_to_structure(self, structure):
        """
        Apply the deformation gradient to a structure.

        Args:
            structure (Structure object): the structure object to
                be modified by the deformation
        """
        def_struct = structure.copy()
        old_latt = def_struct.lattice.matrix
        # New lattice rows are the deformed lattice vectors: (F . L^T)^T.
        new_latt = np.transpose(np.dot(self, np.transpose(old_latt)))
        def_struct.lattice = Lattice(new_latt)
        return def_struct

    @classmethod
    def from_index_amount(cls, matrixpos, amt):
        """
        Factory method for constructing a Deformation object
        from a matrix position and amount

        Args:
            matrixpos (tuple): tuple corresponding the matrix position to
                have a perturbation added
            amt (float): amount to add to the identity matrix at position
                matrixpos
        """
        f = np.identity(3)
        f[matrixpos] += amt
        return cls(f)
class DeformedStructureSet(collections.abc.Sequence):
    """
    Sequence of independently deformed structures generated from a parent
    structure, suitable for calculating linear stress-strain response.
    """

    def __init__(self, structure, norm_strains=None, shear_strains=None,
                 symmetry=False):
        """
        Constructs the deformed geometries of a structure, one per
        (index, strain magnitude) combination.

        Args:
            structure (Structure): structure to undergo deformation
            norm_strains (list of floats): strain values to apply
                to each normal mode.
            shear_strains (list of floats): strain values to apply
                to each shear mode.
            symmetry (bool): whether or not to use symmetry reduction.
        """
        norm_strains = norm_strains or [-0.01, -0.005, 0.005, 0.01]
        shear_strains = shear_strains or [-0.06, -0.03, 0.03, 0.06]

        self.undeformed_structure = structure
        self.deformations = []
        self.def_structs = []

        # Normal (diagonal) and shear (off-diagonal) modes, each paired with
        # its list of strain magnitudes; iteration order matches the
        # index-major, magnitude-minor order of the original generation.
        mode_sets = (
            ([(0, 0), (1, 1), (2, 2)], norm_strains),
            ([(0, 1), (0, 2), (1, 2)], shear_strains),
        )
        for index_list, magnitudes in mode_sets:
            for matrix_index in index_list:
                for magnitude in magnitudes:
                    strain = Strain.from_index_amount(matrix_index, magnitude)
                    self.deformations.append(strain.get_deformation_matrix())

        # Optionally collapse symmetry-equivalent deformations.
        if symmetry:
            self.sym_dict = symmetry_reduce(self.deformations, structure)
            self.deformations = list(self.sym_dict.keys())

        self.deformed_structures = [defo.apply_to_structure(structure)
                                    for defo in self.deformations]

    def __iter__(self):
        return iter(self.deformed_structures)

    def __len__(self):
        return len(self.deformed_structures)

    def __getitem__(self, ind):
        return self.deformed_structures[ind]
class Strain(SquareTensor):
    """
    Subclass of SquareTensor that describes the Green-Lagrange strain tensor.
    """
    # Symbol used when this tensor is rendered/labelled.
    symbol = "e"

    def __new__(cls, strain_matrix):
        """
        Create a Strain object. Note that the constructor uses __new__
        rather than __init__ according to the standard method of
        subclassing numpy ndarrays. Note also that the default constructor
        does not include the deformation gradient

        Args:
            strain_matrix (3x3 array-like): the 3x3 array-like
                representing the Green-Lagrange strain
        """
        # Voigt scaling: the last three (shear) components carry a factor
        # of 2 in voigt notation, matching from_index_amount's convention.
        vscale = np.ones((6,))
        vscale[3:] *= 2
        obj = super(Strain, cls).__new__(cls, strain_matrix, vscale=vscale)
        if not obj.is_symmetric():
            raise ValueError("Strain objects must be initialized "
                             "with a symmetric array or a voigt-notation "
                             "vector with six entries.")
        return obj.view(cls)

    def __array_finalize__(self, obj):
        # Propagate tensor metadata through numpy view/slice operations.
        if obj is None:
            return
        self.rank = getattr(obj, "rank", None)
        self._vscale = getattr(obj, "_vscale", None)

    @classmethod
    def from_deformation(cls, deformation):
        """
        Factory method that returns a Strain object from a deformation
        gradient, using E = (F^T.F - I) / 2.

        Args:
            deformation (3x3 array-like):
        """
        dfm = Deformation(deformation)
        return cls(0.5 * (np.dot(dfm.trans, dfm) - np.eye(3)))

    @classmethod
    def from_index_amount(cls, idx, amount):
        """
        Like Deformation.from_index_amount, except generates
        a strain from the zero 3x3 tensor or voigt vector with
        the amount specified in the index location. Ensures
        symmetric strain.

        Args:
            idx (tuple or integer): index to be perturbed, can be voigt or
                full-tensor notation
            amount (float): amount to perturb selected index
        """
        if np.array(idx).ndim == 0:
            # Scalar index: treat as a voigt-notation position.
            v = np.zeros(6)
            v[idx] = amount
            return cls.from_voigt(v)
        elif np.array(idx).ndim == 1:
            # Tuple index: set both (i, j) and (j, i) to keep symmetry.
            v = np.zeros((3, 3))
            for i in itertools.permutations(idx):
                v[i] = amount
            return cls(v)
        else:
            raise ValueError("Index must either be 2-tuple or integer "
                             "corresponding to full-tensor or voigt index")

    @property
    @deprecated(message="the deformation_matrix property is deprecated, and "
                        "will be removed in pymatgen v2019.1.1, please use the "
                        "get_deformation_matrix method instead.")
    def deformation_matrix(self):
        # Deprecated alias for get_deformation_matrix().
        return self.get_deformation_matrix()

    def get_deformation_matrix(self, shape="upper"):
        """
        Returns the deformation matrix that would produce this strain;
        see convert_strain_to_deformation for supported shapes.
        """
        return convert_strain_to_deformation(self, shape=shape)

    @property
    def von_mises_strain(self):
        """
        Equivalent strain to Von Mises Stress
        """
        # Deviatoric part of the strain tensor.
        eps = self - 1/3 * np.trace(self) * np.identity(3)
        return np.sqrt(np.sum(eps * eps) * 2/3)
def convert_strain_to_deformation(strain, shape="upper"):
    """
    This function converts a strain to a deformation gradient that will
    produce that strain. Supports two methods:

    Args:
        strain (3x3 array-like): strain matrix
        shape: (string): method for determining deformation, supports
            "upper" produces an upper triangular defo
            "symmetric" produces a symmetric defo

    Raises:
        ValueError: for any other shape, including "lower", which is
            not implemented.
    """
    strain = SquareTensor(strain)
    # From E = (F^T.F - I) / 2, the matrix to factor is F^T.F = 2E + I.
    ftdotf = 2*strain + np.eye(3)
    if shape == "upper":
        # Cholesky factor U satisfies U^T.U = F^T.F with U upper triangular.
        result = scipy.linalg.cholesky(ftdotf)
    elif shape == "symmetric":
        # Matrix square root gives a symmetric F with F.F = F^T.F.
        result = scipy.linalg.sqrtm(ftdotf)
    else:
        raise ValueError("shape must be \"upper\" or \"symmetric\"")
    return Deformation(result)
|
{
"content_hash": "17126d4ac4b5c9bc08d940d0628ad3e4",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 80,
"avg_line_length": 34.142322097378276,
"alnum_prop": 0.6036638876700308,
"repo_name": "montoyjh/pymatgen",
"id": "03ecd8a8a830a9def897f99cf6c6b5523693bb53",
"size": "9227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/analysis/elasticity/strain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Dockerfile",
"bytes": "275"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "15152267"
},
{
"name": "Python",
"bytes": "7718850"
},
{
"name": "Roff",
"bytes": "1898220"
}
],
"symlink_target": ""
}
|
import codecs
import sys
import os
import base64
import re
import datetime
import subprocess
import traceback
from smtpd import SMTPServer
from django.core.management.base import BaseCommand
from django.utils.encoding import force_unicode, force_str
from dju_common.settings import DJU_EMAIL_DEBUG_PATH, DJU_EMAIL_DEBUG_IN_CONSOLE, DJU_EMAIL_DEBUG_IN_FILES
class DebuggingServer(SMTPServer):
def __init__(self, *args, **kwargs):
SMTPServer.__init__(self, *args, **kwargs)
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
print('Debug email server is running. Now you can send emails to SMTP localhost:10250.')
@staticmethod
def _get_subject(data):
subject_re = re.compile(ur'^Subject: (.+)$', re.IGNORECASE | re.U)
base64_re = re.compile(ur'^=\?(.+)\?b\?(.+)\?=$', re.IGNORECASE | re.U)
for line in data.split('\n'):
m = subject_re.match(line)
if m:
subject = m.group(1).strip()
m = base64_re.match(subject)
if m:
charset, content = m.groups()
subject = force_unicode(base64.b64decode(content))
return subject
return ''
@staticmethod
def _get_fn(fn_base, n=None):
if n is None:
return os.path.join(DJU_EMAIL_DEBUG_PATH, '{}.eml'.format(fn_base)).replace('\\', '/')
else:
return os.path.join(DJU_EMAIL_DEBUG_PATH, '{}_{}.eml'.format(fn_base, n)).replace('\\', '/')
def process_message(self, peer, mailfrom, rcpttos, data):
try:
if DJU_EMAIL_DEBUG_IN_FILES:
if not os.path.exists(DJU_EMAIL_DEBUG_PATH):
os.makedirs(DJU_EMAIL_DEBUG_PATH)
fn_base = u'{}_{}_{}_{}'.format(
u'_'.join(rcpttos),
self._get_subject(data),
mailfrom,
datetime.datetime.now().strftime(u'%Y-%m-%d_%H-%M-%S')
)
fn_base = re.sub(ur'[:\*\?"<>\| ]+', '_', fn_base, re.U)
fn_base = force_str(fn_base)
fn = self._get_fn(fn_base)
n = 1
while os.path.exists(fn):
fn = self._get_fn(fn_base, n)
n += 1
f = codecs.open(fn, 'w', encoding='utf-8')
inheaders = 1
for line in data.split('\n'):
if inheaders and not line:
if DJU_EMAIL_DEBUG_IN_FILES:
f.write(u'X-Peer: {}\n'.format(force_unicode(peer[0])))
if DJU_EMAIL_DEBUG_IN_CONSOLE:
print(u'X-Peer: {}'.format(force_unicode(peer[0])))
inheaders = 0
line = force_unicode(line)
if DJU_EMAIL_DEBUG_IN_FILES:
f.write(u'{}\n'.format(line))
if DJU_EMAIL_DEBUG_IN_CONSOLE:
print(line)
except Exception, e:
traceback.print_exc()
print('DebuggingServer error: {}'.format(force_unicode(e)))
class Command(BaseCommand):
    """Management command launching the debugging SMTP server."""

    help = 'Run debug smtp server'

    def handle(self, *args, **options):
        """Spawn smtpd in a subprocess, propagating this project's sys.path."""
        env = os.environ.copy()
        env['PYTHONPATH'] = os.pathsep.join(sys.path)
        if os.name == 'nt':
            # Windows subprocess environments require byte-string values.
            for k in env:
                env[k] = env[k].encode('utf-8')
        cmd = [
            sys.executable,
            '-m', 'smtpd', '-n', '-c', 'dju_common.management.commands.dju_debug_email_server.DebuggingServer',
            'localhost:10250'
        ]
        subprocess.call(cmd, env=env)
|
{
"content_hash": "26bbba6505b6077e9e25c710f7ee1af0",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 111,
"avg_line_length": 39.191489361702125,
"alnum_prop": 0.5171009771986971,
"repo_name": "liminspace/dju-common",
"id": "543317724ea8c951e0f2929170854e7fc4d4fc83",
"size": "3684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dju_common/management/commands/dju_debug_email_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "86777"
},
{
"name": "CoffeeScript",
"bytes": "3455"
},
{
"name": "HTML",
"bytes": "6934"
},
{
"name": "JavaScript",
"bytes": "46773"
},
{
"name": "Python",
"bytes": "117388"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
import oslo_messaging
import time
class ServerControlEndpoint(object):
    """Endpoint exposing server control operations.

    oslo.messaging dispatches an incoming call here only when its namespace
    and version match this `target` (namespace 'control', version '2.0').
    """

    target = oslo_messaging.Target(namespace='control', version='2.0')

    def __init__(self, server):
        """Remember the RPC server so stop() can shut it down."""
        self.server = server

    def stop(self, ctx):
        """Stop the owning RPC server, if one was supplied."""
        if self.server:
            self.server.stop()
class TestEndpoint(object):
# If no the attribution, `target`, the default is
# Target(exchange=None, topic=None, namespace=None, version=None,
# server=None, fanout=None, legacy_namespaces=None)
def test(self, ctx, arg):
return arg
# `Transport` is the capsulation of the Message Queue, such as RabbitMQ.
# If no @url, it will use `CONF.transport_url`.
# The specification of `url` is "transport://user:pass@host1:port[,userN:passN@hostN:portN]/virtual_host"
#transport = oslo_messaging.get_transport(cfg.CONF, url="rabbit://me:passwd@host:5672/virtual_host")
transport = oslo_messaging.get_transport(cfg.CONF)

# This is used to create the exchange, the message queue, for example, the message queue based on topic, fanout, or direct.
# It'll create two the topic message queues, whose routing_keys are "test" and "test.server1", and one fanout queue,
# whose routing_key is "test".
target = oslo_messaging.Target(topic='test', server='server1')

# Endpoints are tried in order when dispatching an incoming call.
endpoints = [
    ServerControlEndpoint(None),
    TestEndpoint(),
]

# 'blocking' executor: requests are processed in the thread calling start().
server = oslo_messaging.get_rpc_server(transport, target, endpoints,
                                       executor='blocking')
try:
    server.start()
    # Keep the main thread alive while the server handles requests.
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    # Ctrl-C: stop accepting requests, then wait for in-flight ones.
    print("Stopping server")
    server.stop()
    server.wait()
|
{
"content_hash": "55a488aea8cbf0d0a3584324f8b5736f",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 123,
"avg_line_length": 34.32142857142857,
"alnum_prop": 0.6784599375650364,
"repo_name": "xgfone/snippet",
"id": "b4dc422f5a6fa6d725e7b2f24496b5cb16c4fbaf",
"size": "1940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snippet/example/python/oslo_messaging/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "148347"
},
{
"name": "C++",
"bytes": "35742"
},
{
"name": "CSS",
"bytes": "2950"
},
{
"name": "Dockerfile",
"bytes": "1091"
},
{
"name": "Go",
"bytes": "31975"
},
{
"name": "HTML",
"bytes": "1667"
},
{
"name": "JavaScript",
"bytes": "39413"
},
{
"name": "Makefile",
"bytes": "6499"
},
{
"name": "Python",
"bytes": "291037"
},
{
"name": "Shell",
"bytes": "33942"
}
],
"symlink_target": ""
}
|
import logging
import krakenex
import sys
from datetime import datetime, timedelta
from bot import Bot
logger = logging.getLogger(__name__)
class Trader(Bot):
RISE = 'rise'
FALL = 'fall'
kapi = None
trade_amount = 10.0 # Euro
profit_percent = 10
change = {'type': None, 'counter': 0}
rates = {'ask': 0.0, 'bid': 0.0, 'low': 0.0, 'high': 0.0, 'timestamp': None}
bitcoin = {'amount': 0, 'purchased': 0.0, 'price': 1.0, 'timestamp': None}
euro = {'amount': 0, 'timestamp': None}
def __init__(self, args=[]):
if 'balance_only' in args and args.balance_only:
self.stop()
if 'bitcoin' in args and args.bitcoin:
self.bitcoin['purchased'] = args.bitcoin
if 'price' in args and args.price:
self.bitcoin['price'] = args.price
if 'trade_amount' in args and args.trade_amount:
self.trade_amount = args.trade_amount
if 'profit_percent' in args and args.profit_percent:
self.profit_percent = args.profit_percent
logger.debug("bitcoin: %s, price: %s, trade_amount: %s, profit_percent:"
"%s" % (self.bitcoin['purchased'], self.bitcoin['price'],
self.trade_amount, self.profit_percent))
self.sleep = 30
def _log_balance(self):
"""Balance logger
Log current balance and calculate actual value of the balace.
"""
btc = self.bitcoin['amount']
eur = self.euro['amount']
low = self.rates['low']
high = self.rates['high']
ask = self.rates['ask']
bid = self.rates['bid']
logger.info('Balance %s BTC in EUR low : %s' % (btc, btc * low))
logger.info('Balance %s BTC in EUR high: %s' % (btc, btc * high))
logger.info('Balance %s BTC in EUR ask : %s' % (btc, btc * ask))
logger.info('Balance %s BTC in EUR bid: %s' % (btc, btc * bid))
logger.info('Balance %s EUR in BTC low : %s' % (eur, eur / low))
logger.info('Balance %s EUR in BTC high: %s' % (eur, eur / high))
logger.info('Balance %s EUR in BTC ask : %s' % (eur, eur / ask))
logger.info('Balance %s EUR in BTC bid : %s' % (eur, eur / bid))
def get_exchange_balance(self, force=False):
now = datetime.now()
delta = timedelta(seconds=1800)
self.get_exchange_rates(force=force)
if self.bitcoin['timestamp'] == None or force \
or self.bitcoin['timestamp'] + delta < now:
self.bitcoin['timestamp'] = now
self.euro['timestamp'] = now
balance = self.kapi.query_private('Balance')
if balance['error']:
logger.error(balance['error'])
sys.exit(99)
self.bitcoin['amount'] = float(balance['result']['XXBT'])
self.euro['amount'] = float(balance['result']['ZEUR'])
self._log_balance()
else:
logger.debug('CACHE age %s! Exchange balance from cache.',
now - self.bitcoin['timestamp'])
return self.bitcoin['amount'], self.euro['amount']
def _log_rates(self):
"""Exchange rate logger
Log current exchange rates.
"""
logger.info('Exchange rate BTC to EUR ask : %s', self.rates['ask'])
logger.info('Exchange rate BTC to EUR bid : %s', self.rates['bid'])
logger.info('Exchange rate BTC to EUR low : %s', self.rates['low'])
logger.info('Exchange rate BTC to EUR high: %s', self.rates['high'])
def get_exchange_rates(self, force=False):
now = datetime.now()
delta = timedelta(seconds=self.sleep)
if self.rates['timestamp'] == None or force \
or self.rates['timestamp'] + delta < now:
ticker = self.kapi.query_public('Ticker', {'pair': 'XXBTZEUR'})
self.rates['timestamp'] = now
self.rates['ask'] = float(ticker['result']['XXBTZEUR']['a'][0])
self.rates['bid'] = float(ticker['result']['XXBTZEUR']['b'][0])
self.rates['low'] = float(ticker['result']['XXBTZEUR']['l'][0])
self.rates['high'] = float(ticker['result']['XXBTZEUR']['h'][0])
else:
logger.debug('CACHE age %s! Exchange rates from cache.',
now - self.rates['timestamp'])
return self.rates['low'], self.rates['high']
    def connect(self):
        """Create the Kraken API client and prime the cached balance/rates.

        Reads the API credentials from ``trader/kraken.key`` relative to the
        current working directory, then delegates to the parent's connect().
        """
        self.kapi = krakenex.API()
        self.kapi.load_key('trader/kraken.key')
        # Prime the caches so the first execute() cycle has data to compare.
        self.get_exchange_balance()
        self._log_rates()
        super().connect()
def _buy_bitcoin(self):
"""Create an order for Bitcoin purchase"""
amt = self.euro['amount']
if amt > 0:
ask = self.rates['ask'] # An ask is an order to sell in the order book.
# Calculate the volume we are buying
if amt > self.trade_amount:
vol = self.trade_amount / ask
else:
vol = amt / ask
logger.info('Placing Order for %s BTC at exchange rate %s' % (vol,
ask))
# Place an order!
order = self.kapi.query_private('AddOrder',
{'pair': 'XXBTZEUR',
'type': 'buy',
'ordertype': 'limit',
'price': str(ask),
'volume': str(vol)})
if order['error']:
logger.error(order['error'])
sys.exit(50)
else:
self.bitcoin['purchased'] = vol
self.bitcoin['price'] = ask
logger.info('Order %s: %s' % (order['result']['txid'],
order['result']['descr']))
# We might want to sleep, before we check the balance ;)
self.get_exchange_balance(force=True)
def _sell_bitcoin(self):
"""Create an order for Bitcoin sell"""
amt = self.bitcoin['amount']
prc = self.bitcoin['price']
bid = self.rates['bid'] # A bid is an order to buy in the order book.
if amt > 0 and prc < bid:
# We bought cheaper than actual rate (we might be interested to sell)
profit = (bid / prc * 100) - 100
logger.info("Exchange rate profit: %s since purchase at price "
"%s" % (profit, prc))
if profit > self.profit_percent:
# Calculate the volume we are selling
if self.bitcoin['purchased'] > amt:
vol = amt
else:
vol = self.bitcoin['purchased']
logger.info('Placing Order for %s BTC at exchange rate %s'
% (vol, bid))
# Place an order!
order = self.kapi.query_private('AddOrder',
{'pair': 'XXBTZEUR',
'type': 'sell',
'ordertype': 'limit',
'price': str(bid),
'volume': str(vol)})
if order['error']:
logger.error(order['error'])
sys.exit(50)
else:
logger.info('Order %s: %s' % (order['result']['txid'],
order['result']['descr']))
# We might want to sleep, before we check the balance ;)
self.get_exchange_balance(force=True)
else:
logger.debug('Profit would be lower than expected, lets wait more!')
def execute(self):
ask = self.rates['ask']
bid = self.rates['bid']
self.get_exchange_rates()
if bid != self.rates['bid']:
logger.debug('Bid changed: %s to %s' % (bid, self.rates['bid']))
self._sell_bitcoin()
super().execute()
    def cleanup(self):
        """Log the final balance before the bot shuts down."""
        self.get_exchange_balance()
        logger.debug('BYE!')
        super().cleanup()
|
{
"content_hash": "01927a6c7e7a1a145bff85639bc8a32b",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 84,
"avg_line_length": 37.468468468468465,
"alnum_prop": 0.4907429670593893,
"repo_name": "ricco386/trader-bot",
"id": "13b3ea0409c94692d3fe12d7f3a49cc48d1aa70f",
"size": "8318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trader/trader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29467"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``Frame`` model to ``Skill`` (pure rename, no data change)."""

    dependencies = [
        ('user', '0023_merge_20170818_0831'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Frame',
            new_name='Skill',
        ),
    ]
|
{
"content_hash": "1a979b97175ee47924f9873b09c1bf06",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 45,
"avg_line_length": 18.705882352941178,
"alnum_prop": 0.5786163522012578,
"repo_name": "internship2016/sovolo",
"id": "0ed21c2c3ec0c614d68584bf627ca308b0d84fe9",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/user/migrations/0024_auto_20170819_0821.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56092"
},
{
"name": "HTML",
"bytes": "132262"
},
{
"name": "JavaScript",
"bytes": "107993"
},
{
"name": "Python",
"bytes": "255017"
}
],
"symlink_target": ""
}
|
import heapq
from .KBucket import KBucket
class Route(object):
    """Kademlia-style routing table built from successively split KBuckets."""

    def __init__(self, service, loop, kSize, selfNode):
        self.service = service
        self.loop = loop
        self.selfNode = selfNode
        self.ksize = kSize
        # Start with one bucket covering the whole 160-bit ID space.
        self.buckets = [KBucket(0, 2 ** 160, self.ksize)]

    def getBucket(self, distance):
        """Return the index of the bucket whose range contains ``distance``."""
        for index, bucket in enumerate(self.buckets):
            if bucket.range[0] <= distance < bucket.range[1]:
                return index

    def splitBucket(self, index):
        """Split the bucket at ``index`` into two adjacent buckets."""
        leftBucket, rightBucket = self.buckets[index].split()
        self.buckets[index] = leftBucket
        self.buckets.insert(index + 1, rightBucket)

    def removeNode(self, node):
        """Remove ``node`` from the bucket responsible for it."""
        __index = self.getBucket(node.distance(self.selfNode))
        self.buckets[__index].removeNode(node)

    def isNewNode(self, node):
        """Return whether ``node`` is not yet known to its bucket."""
        __index = self.getBucket(node.distance(self.selfNode))
        return self.buckets[__index].isNewNode(node)

    def addNode(self, node):
        """Insert ``node``, splitting its bucket when allowed."""
        index = self.getBucket(node.distance(self.selfNode))
        bucket = self.buckets[index]
        if bucket.addNode(node):
            return
        elif bucket.isInRange(node) or bucket.depth() % 5 != 0:
            self.splitBucket(index)
            self.addNode(node)
        else:
            #TODO: Check if the first node is online
            pass

    def findNeighbors(self, node, kSize=None, exclude=None):
        """Return up to ``kSize`` (distance, node) pairs closest to ``node``.

        Buckets are scanned outward from the one containing ``node``.

        @param kSize: maximum number of neighbors; defaults to ``self.ksize``
        @param exclude: iterable of node ids to skip (default: none)
        """
        def iter_nodes(bucketIndex):
            def iter_index(startIndex, endIndex, currentIndex):
                __index = currentIndex
                __delta = 0
                yield __index
                while True:
                    __delta += 1
                    if __index + __delta <= endIndex:
                        yield __index + __delta
                    if __index - __delta >= startIndex:
                        yield __index - __delta
                    if __index - __delta <= startIndex and __index + __delta >= endIndex:
                        break
            for index in iter_index(0, len(self.buckets) - 1, bucketIndex):
                for key, value in self.buckets[index].nodes.items():
                    yield value
        kSize = kSize or self.ksize
        # Avoid the mutable-default-argument pitfall; [] stays the behavior.
        if exclude is None:
            exclude = []
        nodes = []
        __count = 0
        # NOTE(review): ``neighbor.id != self.selfNode`` compares an id with a
        # node object; it looks like it should be ``self.selfNode.id`` -- verify.
        for neighbor in iter_nodes(self.getBucket(node.distance(self.selfNode))):
            if neighbor.id != self.selfNode and (neighbor.id not in exclude):
                heapq.heappush(nodes, (neighbor.distance(self.selfNode), neighbor))
                __count += 1
                # Was ``len(nodes) is kSize`` -- an identity check that only
                # happened to work for CPython's cached small ints; use ``==``.
                if len(nodes) == kSize:
                    break
        return heapq.nsmallest(__count, nodes)
|
{
"content_hash": "a394317da2ab1980575cc89cee943878",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 89,
"avg_line_length": 36.583333333333336,
"alnum_prop": 0.54707668944571,
"repo_name": "SkyZH/ddcm-protocol",
"id": "897bb8f4b60d6e15d0afe490445baf9f8784b712",
"size": "2634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ddcm/Route.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "80828"
},
{
"name": "Shell",
"bytes": "68"
}
],
"symlink_target": ""
}
|
import os.path as op
from collections import namedtuple
import re
import numpy as np
from datetime import datetime, timezone
from .._digitization import _make_dig_points
from ..base import BaseRaw
from ..meas_info import create_info
from ..tag import _coil_trans_to_loc
from ..utils import _read_segments_file, _mult_cal_one
from ..constants import FIFF
from ..ctf.trans import _quaternion_align
from ...surface import _normal_orth
from ...transforms import (apply_trans, Transform, get_ras_to_neuromag_trans,
combine_transforms, invert_transform,
_angle_between_quats, rot_to_quat)
from ...utils import check_fname, logger, verbose, _check_fname
from ...annotations import Annotations
# Extensions of the Curry sidecar files, per Curry version.  All files of one
# recording share a basename and differ only in extension.
FILE_EXTENSIONS = {
    "Curry 7": {
        "info": ".dap",
        "data": ".dat",
        "labels": ".rs3",
        "events_cef": ".cef",
        "events_ceo": ".ceo",
        "hpi": ".hpi",
    },
    "Curry 8": {
        "info": ".cdt.dpa",
        "data": ".cdt",
        "labels": ".cdt.dpa",
        "events_cef": ".cdt.cef",
        "events_ceo": ".cdt.ceo",
        "hpi": ".cdt.hpi",
    }
}
# Section-name suffixes used in Curry parameter files per channel type.
CHANTYPES = {"meg": "_MAG1", "eeg": "", "misc": "_OTHERS"}
# Channel type -> FIFF channel kind / coil type constants.
FIFFV_CHANTYPES = {"meg": FIFF.FIFFV_MEG_CH, "eeg": FIFF.FIFFV_EEG_CH,
                   "misc": FIFF.FIFFV_MISC_CH}
FIFFV_COILTYPES = {"meg": FIFF.FIFFV_COIL_CTF_GRAD, "eeg": FIFF.FIFFV_COIL_EEG,
                   "misc": FIFF.FIFFV_COIL_NONE}
# Base SI units and metric prefixes used to convert channel data to SI.
SI_UNITS = dict(V=FIFF.FIFF_UNIT_V, T=FIFF.FIFF_UNIT_T)
SI_UNIT_SCALE = dict(c=1e-2, m=1e-3, u=1e-6, µ=1e-6, n=1e-9, p=1e-12, f=1e-15)
# Parsed contents of a Curry info file (see _read_curry_parameters).
CurryParameters = namedtuple('CurryParameters',
                             'n_samples, sfreq, is_ascii, unit_dict, '
                             'n_chans, dt_start, chanidx_in_file')
def _get_curry_version(file_extension):
"""Check out the curry file version."""
return "Curry 8" if "cdt" in file_extension else "Curry 7"
def _get_curry_file_structure(fname, required=()):
    """Store paths to a dict and check for required files."""
    _msg = "The following required files cannot be found: {0}.\nPlease make " \
           "sure all required files are located in the same directory as {1}."
    fname = _check_fname(fname, 'read', True, 'fname')

    # we don't use os.path.splitext to also handle extensions like .cdt.dpa
    fname_base, ext = fname.split(".", maxsplit=1)
    version = _get_curry_version(ext)
    my_curry = dict()
    # Probe every known sidecar file; both events extensions map to 'events'.
    for key, extension in FILE_EXTENSIONS[version].items():
        fname = fname_base + extension
        if op.isfile(fname):
            my_curry['events' if key.startswith('events') else key] = fname

    missing = [field for field in required if field not in my_curry]
    if missing:
        # NOTE: ``fname`` here is the last probed candidate, as before.
        raise FileNotFoundError(_msg.format(np.unique(missing), fname))

    return my_curry
def _read_curry_lines(fname, regex_list):
"""Read through the lines of a curry parameter files and save data.
Parameters
----------
fname : str
Path to a curry file.
regex_list : list of str
A list of strings or regular expressions to search within the file.
Each element `regex` in `regex_list` must be formulated so that
`regex + " START_LIST"` initiates the start and `regex + " END_LIST"`
initiates the end of the elements that should be saved.
Returns
-------
data_dict : dict
A dictionary containing the extracted data. For each element `regex`
in `regex_list` a dictionary key `data_dict[regex]` is created, which
contains a list of the according data.
"""
save_lines = {}
data_dict = {}
for regex in regex_list:
save_lines[regex] = False
data_dict[regex] = []
with open(fname) as fid:
for line in fid:
for regex in regex_list:
if re.match(regex + " END_LIST", line):
save_lines[regex] = False
if save_lines[regex] and line != "\n":
result = line.replace("\n", "")
if "\t" in result:
result = result.split("\t")
data_dict[regex].append(result)
if re.match(regex + " START_LIST", line):
save_lines[regex] = True
return data_dict
def _read_curry_parameters(fname):
    """Extract Curry params from a Curry info file.

    Parameters
    ----------
    fname : str
        Path to the Curry info file (.dap for Curry 7, .cdt.dpa for Curry 8).

    Returns
    -------
    params : CurryParameters
        Named tuple of (n_samples, sfreq, is_ascii, unit_dict, n_chans,
        dt_start, chanidx_in_file).
    """
    _msg_match = "The sampling frequency and the time steps extracted from " \
                 "the parameter file do not match."
    _msg_invalid = "sfreq must be greater than 0. Got sfreq = {0}"
    # Curry 7 and Curry 8 spell the same parameters differently, so both
    # spellings are searched; keys are normalized to lowercase without "_".
    var_names = ['NumSamples', 'SampleFreqHz',
                 'DataFormat', 'SampleTimeUsec',
                 'NumChannels',
                 'StartYear', 'StartMonth', 'StartDay', 'StartHour',
                 'StartMin', 'StartSec', 'StartMillisec',
                 'NUM_SAMPLES', 'SAMPLE_FREQ_HZ',
                 'DATA_FORMAT', 'SAMPLE_TIME_USEC',
                 'NUM_CHANNELS',
                 'START_YEAR', 'START_MONTH', 'START_DAY', 'START_HOUR',
                 'START_MIN', 'START_SEC', 'START_MILLISEC']
    param_dict = dict()
    unit_dict = dict()
    with open(fname) as fid:
        for line in iter(fid):
            if any(var_name in line for var_name in var_names):
                key, val = line.replace(" ", "").replace("\n", "").split("=")
                param_dict[key.lower().replace("_", "")] = val
            # The data unit is on the line following the DEVICE_PARAMETERS
            # section header of each channel type.
            for type in CHANTYPES:
                if "DEVICE_PARAMETERS" + CHANTYPES[type] + " START" in line:
                    data_unit = next(fid)
                    unit_dict[type] = data_unit.replace(" ", "") \
                        .replace("\n", "").split("=")[-1]
    # look for CHAN_IN_FILE sections, which may or may not exist; issue #8391
    types = ["meg", "eeg", "misc"]
    chanidx_in_file = _read_curry_lines(fname,
                                        ["CHAN_IN_FILE" +
                                         CHANTYPES[key] for key in types])
    n_samples = int(param_dict["numsamples"])
    sfreq = float(param_dict["samplefreqhz"])
    time_step = float(param_dict["sampletimeusec"]) * 1e-6
    is_ascii = param_dict["dataformat"] == "ASCII"
    n_channels = int(param_dict["numchannels"])
    try:
        dt_start = datetime(int(param_dict["startyear"]),
                            int(param_dict["startmonth"]),
                            int(param_dict["startday"]),
                            int(param_dict["starthour"]),
                            int(param_dict["startmin"]),
                            int(param_dict["startsec"]),
                            int(param_dict["startmillisec"]) * 1000,
                            timezone.utc)
        # Note that the time zone information is not stored in the Curry info
        # file, and it seems the start time info is in the local timezone
        # of the acquisition system (which is unknown); therefore, just set
        # the timezone to be UTC.  If the user knows otherwise, they can
        # change it later.  (Some Curry files might include StartOffsetUTCMin,
        # but its presence is unpredictable, so we won't rely on it.)
    except (ValueError, KeyError):
        dt_start = None  # if missing keywords or illegal values, don't set
    # Reconcile the two independent encodings of the sampling rate; either
    # may be zero (absent), but non-zero values must agree.
    if time_step == 0:
        true_sfreq = sfreq
    elif sfreq == 0:
        true_sfreq = 1 / time_step
    elif not np.isclose(sfreq, 1 / time_step):
        raise ValueError(_msg_match)
    else:  # they're equal and != 0
        true_sfreq = sfreq
    if true_sfreq <= 0:
        raise ValueError(_msg_invalid.format(true_sfreq))
    return CurryParameters(n_samples, true_sfreq, is_ascii, unit_dict,
                           n_channels, dt_start, chanidx_in_file)
def _read_curry_info(curry_paths):
    """Extract info from curry parameter files.

    Returns
    -------
    info : Info
        The measurement info assembled from the Curry label/sensor files.
    n_samples : int
        Number of samples declared in the info file.
    is_ascii : bool
        Whether the data file is ASCII-formatted.
    """
    curry_params = _read_curry_parameters(curry_paths['info'])
    R = np.eye(4)
    R[[0, 1], [0, 1]] = -1  # rotate 180 deg
    # shift down and back
    # (chosen by eyeballing to make the CTF helmet look roughly correct)
    R[:3, 3] = [0., -0.015, -0.12]
    curry_dev_dev_t = Transform('ctf_meg', 'meg', R)

    # read labels from label files
    label_fname = curry_paths['labels']
    types = ["meg", "eeg", "misc"]
    labels = _read_curry_lines(label_fname,
                               ["LABELS" + CHANTYPES[key] for key in types])
    sensors = _read_curry_lines(label_fname,
                                ["SENSORS" + CHANTYPES[key] for key in types])
    normals = _read_curry_lines(label_fname,
                                ['NORMALS' + CHANTYPES[key] for key in types])
    assert len(labels) == len(sensors) == len(normals)

    all_chans = list()
    dig_ch_pos = dict()
    for key in ["meg", "eeg", "misc"]:
        chanidx_is_explicit = (len(curry_params.chanidx_in_file["CHAN_IN_FILE"
                               + CHANTYPES[key]]) > 0)  # channel index
        # position in the datafile may or may not be explicitly declared,
        # based on the CHAN_IN_FILE section in info file
        for ind, chan in enumerate(labels["LABELS" + CHANTYPES[key]]):
            chanidx = len(all_chans) + 1  # by default, just assume the
            # channel index in the datafile is in order of the channel
            # names as we found them in the labels file
            if chanidx_is_explicit:  # but, if explicitly declared, use
                # that index number
                chanidx = int(curry_params.chanidx_in_file["CHAN_IN_FILE"
                              + CHANTYPES[key]][ind])
            if chanidx <= 0:  # if chanidx was explicitly declared to be ' 0',
                # it means the channel is not actually saved in the data file
                # (e.g. the "Ref" channel), so don't add it to our list.
                # Git issue #8391
                continue
            ch = {"ch_name": chan,
                  "unit": curry_params.unit_dict[key],
                  "kind": FIFFV_CHANTYPES[key],
                  "coil_type": FIFFV_COILTYPES[key],
                  "ch_idx": chanidx
                  }
            if key == "eeg":
                loc = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float)
                # XXX just the sensor, where is ref (next 3)?
                assert loc.shape == (3,)
                loc /= 1000.  # to meters
                loc = np.concatenate([loc, np.zeros(9)])
                ch['loc'] = loc
                # XXX need to check/ensure this
                ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD
                dig_ch_pos[chan] = loc[:3]
            elif key == 'meg':
                pos = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float)
                pos /= 1000.  # to meters
                pos = pos[:3]  # just the inner coil
                pos = apply_trans(curry_dev_dev_t, pos)
                nn = np.array(normals["NORMALS" + CHANTYPES[key]][ind], float)
                assert np.isclose(np.linalg.norm(nn), 1., atol=1e-4)
                nn /= np.linalg.norm(nn)
                nn = apply_trans(curry_dev_dev_t, nn, move=False)
                trans = np.eye(4)
                trans[:3, 3] = pos
                trans[:3, :3] = _normal_orth(nn).T
                ch['loc'] = _coil_trans_to_loc(trans)
                ch['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
            all_chans.append(ch)
    dig = _make_dig_points(
        dig_ch_pos=dig_ch_pos, coord_frame='head', add_missing_fiducials=True)
    del dig_ch_pos

    ch_count = len(all_chans)
    assert (ch_count == curry_params.n_chans)  # ensure that we have assembled
    # the same number of channels as declared in the info (.DAP) file in the
    # DATA_PARAMETERS section. Git issue #8391

    # sort the channels to assure they are in the order that matches how
    # recorded in the datafile. In general they most likely are already in
    # the correct order, but if the channel index in the data file was
    # explicitly declared we might as well use it.
    all_chans = sorted(all_chans, key=lambda ch: ch['ch_idx'])

    ch_names = [chan["ch_name"] for chan in all_chans]
    info = create_info(ch_names, curry_params.sfreq)
    with info._unlock():
        info['meas_date'] = curry_params.dt_start  # for Git issue #8398
        info['dig'] = dig
    _make_trans_dig(curry_paths, info, curry_dev_dev_t)

    for ind, ch_dict in enumerate(info["chs"]):
        all_chans[ind].pop('ch_idx')
        ch_dict.update(all_chans[ind])
        assert ch_dict['loc'].shape == (12,)
        # Unit strings look like e.g. "uV": last char is the base SI unit,
        # first char the metric prefix used as the calibration factor.
        ch_dict['unit'] = SI_UNITS[all_chans[ind]['unit'][1]]
        ch_dict['cal'] = SI_UNIT_SCALE[all_chans[ind]['unit'][0]]

    return info, curry_params.n_samples, curry_params.is_ascii
# Curry landmark remark -> FIFF cardinal point identifier.
_card_dict = {'Left ear': FIFF.FIFFV_POINT_LPA,
              'Nasion': FIFF.FIFFV_POINT_NASION,
              'Right ear': FIFF.FIFFV_POINT_RPA}
def _make_trans_dig(curry_paths, info, curry_dev_dev_t):
    """Populate info['dig'] and compose the device<->head transform.

    Requires all three cardinal landmarks plus an .hpi file; otherwise
    ``info['dev_head_t']`` stays None and the reason is logged.
    """
    # Coordinate frame transformations and definitions
    no_msg = 'Leaving device<->head transform as None'
    info['dev_head_t'] = None
    label_fname = curry_paths['labels']
    key = 'LANDMARKS' + CHANTYPES['meg']
    lm = _read_curry_lines(label_fname, [key])[key]
    lm = np.array(lm, float)
    lm.shape = (-1, 3)
    if len(lm) == 0:
        # no dig
        logger.info(no_msg + ' (no landmarks found)')
        return

    lm /= 1000.  # landmark coordinates are stored in mm; convert to meters
    key = 'LM_REMARKS' + CHANTYPES['meg']
    remarks = _read_curry_lines(label_fname, [key])[key]
    assert len(remarks) == len(lm)
    with info._unlock():
        info['dig'] = list()
    cards = dict()
    for remark, r in zip(remarks, lm):
        kind = ident = None
        if remark in _card_dict:
            kind = FIFF.FIFFV_POINT_CARDINAL
            ident = _card_dict[remark]
            cards[ident] = r
        elif remark.startswith('HPI'):
            kind = FIFF.FIFFV_POINT_HPI
            # remarks are named "HPI1", "HPI2", ...; idents are zero-based
            ident = int(remark[3:]) - 1
        if kind is not None:
            info['dig'].append(dict(
                kind=kind, ident=ident, r=r,
                coord_frame=FIFF.FIFFV_COORD_UNKNOWN))
    with info._unlock():
        info['dig'].sort(key=lambda x: (x['kind'], x['ident']))
    has_cards = len(cards) == 3
    has_hpi = 'hpi' in curry_paths
    if has_cards and has_hpi:  # have all three
        logger.info('Composing device<->head transformation from dig points')
        hpi_u = np.array([d['r'] for d in info['dig']
                          if d['kind'] == FIFF.FIFFV_POINT_HPI], float)
        hpi_c = np.ascontiguousarray(
            _first_hpi(curry_paths['hpi'])[:len(hpi_u), 1:4])
        # Align the digitized HPI coils with the measured coil positions to
        # recover the unknown->ctf_meg transform.
        unknown_curry_t = _quaternion_align(
            'unknown', 'ctf_meg', hpi_u, hpi_c, 1e-2)
        angle = np.rad2deg(_angle_between_quats(
            np.zeros(3), rot_to_quat(unknown_curry_t['trans'][:3, :3])))
        dist = 1000 * np.linalg.norm(unknown_curry_t['trans'][:3, 3])
        logger.info(' Fit a %0.1f° rotation, %0.1f mm translation'
                    % (angle, dist))
        unknown_dev_t = combine_transforms(
            unknown_curry_t, curry_dev_dev_t, 'unknown', 'meg')
        unknown_head_t = Transform(
            'unknown', 'head',
            get_ras_to_neuromag_trans(
                *(cards[key] for key in (FIFF.FIFFV_POINT_NASION,
                                         FIFF.FIFFV_POINT_LPA,
                                         FIFF.FIFFV_POINT_RPA))))
        with info._unlock():
            info['dev_head_t'] = combine_transforms(
                invert_transform(unknown_dev_t), unknown_head_t, 'meg', 'head')
        # Move all dig points into the head coordinate frame.
        for d in info['dig']:
            d.update(coord_frame=FIFF.FIFFV_COORD_HEAD,
                     r=apply_trans(unknown_head_t, d['r']))
    else:
        if has_cards:
            no_msg += ' (no .hpi file found)'
        elif has_hpi:
            no_msg += ' (not all cardinal points found)'
        else:
            no_msg += ' (neither cardinal points nor .hpi file found)'
        logger.info(no_msg)
def _first_hpi(fname):
# Get the first HPI result
with open(fname, 'r') as fid:
for line in fid:
line = line.strip()
if any(x in line for x in ('FileVersion', 'NumCoils')) or not line:
continue
hpi = np.array(line.split(), float)
break
else:
raise RuntimeError('Could not find valid HPI in %s' % (fname,))
# t is the first entry
assert hpi.ndim == 1
hpi = hpi[1:]
hpi.shape = (-1, 5)
hpi /= 1000.
return hpi
def _read_events_curry(fname):
    """Read events from Curry event files.

    Parameters
    ----------
    fname : str
        Path to a curry event file with extensions .cef, .ceo,
        .cdt.cef, or .cdt.ceo

    Returns
    -------
    events : ndarray, shape (n_events, 3)
        The array of events.
    """
    valid_endings = ('.cef', '.ceo', '.cdt.cef', '.cdt.ceo')
    check_fname(fname, 'curry event', valid_endings,
                endings_err=valid_endings)

    events_dict = _read_curry_lines(fname, ["NUMBER_LIST"])
    # The first 3 column seem to contain the event information
    return np.array(events_dict["NUMBER_LIST"], dtype=int)[:, 0:3]
def _read_annotations_curry(fname, sfreq='auto'):
    r"""Read events from Curry event files.

    Parameters
    ----------
    fname : str
        The filename.
    sfreq : float | 'auto'
        The sampling frequency in the file. If set to 'auto' then the
        ``sfreq`` is taken from the respective info file of the same name with
        according file extension (\*.dap for Curry 7; \*.cdt.dpa for Curry8).
        So data.cef looks in data.dap and data.cdt.cef looks in data.cdt.dpa.

    Returns
    -------
    annot : instance of Annotations | None
        The annotations.
    """
    needed = ["events", "info"] if sfreq == 'auto' else ["events"]
    curry_paths = _get_curry_file_structure(fname, needed)
    events = _read_events_curry(curry_paths['events'])

    if sfreq == 'auto':
        sfreq = _read_curry_parameters(curry_paths['info']).sfreq

    return Annotations(onset=events[:, 0] / sfreq,
                       duration=np.zeros(events.shape[0]),
                       description=events[:, 2])
@verbose
def read_raw_curry(fname, preload=False, verbose=None):
    """Read raw data from Curry files.

    Parameters
    ----------
    fname : str
        Path to a curry file with extensions .dat, .dap, .rs3, .cdt, cdt.dpa,
        .cdt.cef or .cef.
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawCurry
        A Raw object containing Curry data.
    """
    return RawCurry(fname, preload=preload, verbose=verbose)
class RawCurry(BaseRaw):
    """Raw object from Curry file.

    Parameters
    ----------
    fname : str
        Path to a curry file with extensions .dat, .dap, .rs3, .cdt, cdt.dpa,
        .cdt.cef or .cef.
    %(preload)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attribute and methods.
    """

    @verbose
    def __init__(self, fname, preload=False, verbose=None):
        # Locate the sidecar files; info/data/labels are mandatory.
        curry_paths = _get_curry_file_structure(
            fname, required=["info", "data", "labels"])

        data_fname = op.abspath(curry_paths['data'])

        info, n_samples, is_ascii = _read_curry_info(curry_paths)

        last_samps = [n_samples - 1]
        raw_extras = dict(is_ascii=is_ascii)

        super(RawCurry, self).__init__(
            info, preload, filenames=[data_fname], last_samps=last_samps,
            orig_format='int', raw_extras=[raw_extras], verbose=verbose)

        # Annotations are optional: only attached when an event file exists.
        if 'events' in curry_paths:
            logger.info('Event file found. Extracting Annotations from'
                        ' %s...' % curry_paths['events'])
            annots = _read_annotations_curry(curry_paths['events'],
                                             sfreq=self.info["sfreq"])
            self.set_annotations(annots)
        else:
            logger.info('Event file not found. No Annotations set.')

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a chunk of raw data."""
        if self._raw_extras[fi]['is_ascii']:
            if isinstance(idx, slice):
                idx = np.arange(idx.start, idx.stop)
            # NOTE(review): uses ``self._filenames[0]`` rather than
            # ``self._filenames[fi]`` -- fine while there is a single data
            # file; verify if split files are ever supported.
            block = np.loadtxt(
                self._filenames[0], skiprows=start, max_rows=stop - start,
                ndmin=2).T
            _mult_cal_one(data, block, idx, cals, mult)
        else:
            _read_segments_file(
                self, data, idx, fi, start, stop, cals, mult, dtype="<f4")
|
{
"content_hash": "d90e448ebc1418d8ada64a77409d7fa1",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 79,
"avg_line_length": 38.69288389513109,
"alnum_prop": 0.5527538476430162,
"repo_name": "wmvanvliet/mne-python",
"id": "3a1584f62a335842bf550484a6742536a58bd7b7",
"size": "20773",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "mne/io/curry/curry.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "JavaScript",
"bytes": "8008"
},
{
"name": "Jinja",
"bytes": "14962"
},
{
"name": "Makefile",
"bytes": "4612"
},
{
"name": "Python",
"bytes": "10372316"
},
{
"name": "Sass",
"bytes": "257"
},
{
"name": "Shell",
"bytes": "19970"
}
],
"symlink_target": ""
}
|
import zipfile
from django import forms
from django.forms import ModelForm
from django.forms.models import inlineformset_factory
from .models import Challenge, Rule, ChallengeComment, Entry, EntryComment, EntryScreenshot
class ChallengeForm(ModelForm):
    """Form for creating or editing a Challenge (name and duration)."""

    class Meta:
        model = Challenge
        fields = ('name', 'duration',)

    # Challenge title, rendered as a Bootstrap-styled text input.
    name = forms.CharField(
        required=True,
        widget=forms.TextInput(
            attrs={
                'placeholder': 'What is the name of this challenge?',
                'maxlength': 256,
                'size': 50,
                'class': 'form-control'
            }))

    # Duration in days; limited to 3 digits (up to 999 days).
    duration = forms.CharField(
        required=True,
        widget=forms.TextInput(
            attrs={
                'placeholder': 'How long, in days, does this Challenge last? (ex. 30)',
                'maxlength': 3,
                'size': 5,
                'class': 'form-control'
            }))
class RuleForm(ModelForm):
    """Form for a single rule attached to a Challenge."""

    class Meta:
        model = Rule
        fields = ('description', )

    # Free-text description of the rule.
    description = forms.CharField(
        required=True,
        widget=forms.Textarea(
            attrs={
                'class': 'form-control',
                'rows': 2,
                'cols': 20,
                'placeholder': 'Describe this rule'
            }))
class ChallengeCommentForm(ModelForm):
    """Form for posting a comment on a Challenge."""

    class Meta:
        model = ChallengeComment
        fields = ('text',)

    # The comment body.
    text = forms.CharField(
        required=True,
        widget=forms.Textarea(
            attrs={
                'placeholder': 'What is your comment?',
                'rows': 2,
                'cols': 20,
                'class': 'form-control'
            }))
class SubmitEntryForm(ModelForm):
    """Form for submitting a challenge entry; the upload must be a zip file."""

    class Meta:
        model = Entry
        fields = ('name', 'description', 'thefile', )

    # Display name of the entry.
    name = forms.CharField(
        required=True,
        widget=forms.TextInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Name of your entry'
            }))

    # Free-text description of the entry.
    description = forms.CharField(
        required=True,
        widget=forms.Textarea(
            attrs={
                'class': 'form-control',
                'rows': 2,
                'cols': 20,
                'placeholder': 'Describe your entry'
            }))

    def clean(self):
        """Validate that the uploaded file is a valid zip archive."""
        cleaned_data = super(SubmitEntryForm, self).clean()
        thefile = cleaned_data.get('thefile')
        # ``thefile`` is absent from cleaned_data when field-level validation
        # already failed; guard against calling ``.file`` on None.
        if thefile is not None and not zipfile.is_zipfile(thefile.file):
            self._errors['thefile'] = self.error_class([u'Submission is not a valid zip file.'])
            del cleaned_data['thefile']
        return cleaned_data
class SubmitEntryCommentForm(ModelForm):
    """Form for posting a comment on an Entry."""

    class Meta:
        model = EntryComment
        fields = ('text',)

    # The comment body.
    text = forms.CharField(
        required=True,
        widget=forms.Textarea(
            attrs={
                'placeholder': 'What is your comment?',
                'rows': 2,
                'cols': 20,
                'class': 'form-control'
            }))
class SubmitEntryScreenshotForm(ModelForm):
    """Form for uploading a screenshot of an Entry."""

    class Meta:
        model = EntryScreenshot
        fields = ('pic',)

    # The screenshot image file.
    pic = forms.FileField(label='Your screenshot:')
# Inline formsets for attaching rules to a challenge: the first renders only
# existing rules (extra=0); the second adds one blank extra form (extra=1).
AddRuleFormset = inlineformset_factory(Challenge, Rule, form=RuleForm, can_delete=False, extra=0)
AddRuleTemplateFormset = inlineformset_factory(Challenge, Rule, form=RuleForm, can_delete=False, extra=1)
|
{
"content_hash": "0b59072fdc5dc40b12765d2482b46e58",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 105,
"avg_line_length": 26.26984126984127,
"alnum_prop": 0.5549848942598188,
"repo_name": "mavroskardia/codechallenge",
"id": "1ed8ef392a5d2b2a1c869473a5bb08b71faa56d8",
"size": "3310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cc/apps/challenge/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3705"
},
{
"name": "JavaScript",
"bytes": "2910"
},
{
"name": "Python",
"bytes": "185406"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
}
|
"""
flaskbb.app
~~~~~~~~~~~~~~~~~~~~
manages the app creation and configuration process
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
import os
import logging
import datetime
from flask import Flask, request
from flask.ext.login import current_user
# Import the user blueprint
from flaskbb.user.views import user
from flaskbb.user.models import User, Guest, PrivateMessage
# Import the auth blueprint
from flaskbb.auth.views import auth
# Import the admin blueprint
from flaskbb.management.views import management
# Import the forum blueprint
from flaskbb.forum.views import forum
from flaskbb.forum.models import Post, Topic, Category, Forum
# extensions
from flaskbb.extensions import db, login_manager, mail, cache, redis_store, \
debugtoolbar, migrate, themes, plugin_manager
from flask.ext.whooshalchemy import whoosh_index
# various helpers
from flaskbb.utils.helpers import format_date, time_since, crop_title, \
is_online, render_markup, mark_online, forum_is_unread, topic_is_unread, \
render_template
# permission checks (here they are used for the jinja filters)
from flaskbb.utils.permissions import can_post_reply, can_post_topic, \
can_delete_topic, can_delete_post, can_edit_post, can_edit_user, \
can_ban_user, can_moderate, is_admin, is_moderator, is_admin_or_moderator
# app specific configurations
from flaskbb.utils.settings import flaskbb_config
def create_app(config=None):
    """
    Creates the app.

    :param config: Optional configuration object or import string applied on
                   top of the default configuration; the ``FLASKBB_SETTINGS``
                   environment variable may override values afterwards.
    :return: The fully configured Flask application.
    """
    # Initialize the app
    app = Flask("flaskbb")

    # Use the default config and override it afterwards
    app.config.from_object('flaskbb.configs.default.DefaultConfig')
    # Update the config
    app.config.from_object(config)
    # try to update the config via the environment variable
    app.config.from_envvar("FLASKBB_SETTINGS", silent=True)

    configure_blueprints(app)
    configure_extensions(app)
    configure_template_filters(app)
    configure_context_processors(app)
    configure_before_handlers(app)
    configure_errorhandlers(app)
    configure_logging(app)

    return app
def configure_blueprints(app):
    """Register all application blueprints under their configured prefixes."""
    blueprints = ((forum, "FORUM_URL_PREFIX"),
                  (user, "USER_URL_PREFIX"),
                  (auth, "AUTH_URL_PREFIX"),
                  (management, "ADMIN_URL_PREFIX"))
    for blueprint, prefix_key in blueprints:
        app.register_blueprint(blueprint, url_prefix=app.config[prefix_key])
def configure_extensions(app):
    """
    Configures the extensions

    Initializes every Flask extension against ``app`` and installs the
    user loader required by Flask-Login.
    """
    # Flask-Plugins
    plugin_manager.init_app(app)

    # Flask-SQLAlchemy
    db.init_app(app)

    # Flask-Migrate
    migrate.init_app(app, db)

    # Flask-Mail
    mail.init_app(app)

    # Flask-Cache
    cache.init_app(app)

    # Flask-Debugtoolbar
    debugtoolbar.init_app(app)

    # Flask-Themes
    themes.init_themes(app, app_identifier="flaskbb")

    # Flask-And-Redis
    redis_store.init_app(app)

    # Flask-WhooshAlchemy
    with app.app_context():
        whoosh_index(app, Post)
        whoosh_index(app, Topic)
        whoosh_index(app, Forum)
        whoosh_index(app, Category)
        whoosh_index(app, User)

    # Flask-Login
    login_manager.login_view = app.config["LOGIN_VIEW"]
    login_manager.refresh_view = app.config["REAUTH_VIEW"]
    login_manager.anonymous_user = Guest

    @login_manager.user_loader
    def load_user(id):
        """
        Loads the user. Required by the `login` extension

        Also attaches the number of unread private messages to the returned
        user as ``pm_unread``.
        """
        # ``== True`` is required here: this builds a SQLAlchemy expression,
        # not a Python boolean comparison.
        unread_count = db.session.query(db.func.count(PrivateMessage.id)).\
            filter(PrivateMessage.unread == True,
                   PrivateMessage.user_id == id).subquery()
        u = db.session.query(User, unread_count).filter(User.id == id).first()

        if u:
            user, user.pm_unread = u
            return user
        else:
            return None

    login_manager.init_app(app)
def configure_template_filters(app):
    """Install all Jinja template filters used by the templates."""
    filters = {
        'markup': render_markup,
        'format_date': format_date,
        'time_since': time_since,
        'is_online': is_online,
        'crop_title': crop_title,
        'forum_is_unread': forum_is_unread,
        'topic_is_unread': topic_is_unread,
        # Permission filters
        'edit_post': can_edit_post,
        'delete_post': can_delete_post,
        'delete_topic': can_delete_topic,
        'post_reply': can_post_reply,
        'post_topic': can_post_topic,
        # Moderator permission filters
        'is_admin': is_admin,
        'is_moderator': is_moderator,
        'is_admin_or_moderator': is_admin_or_moderator,
        'can_moderate': can_moderate,
        'can_edit_user': can_edit_user,
        'can_ban_user': can_ban_user,
    }
    app.jinja_env.filters.update(filters)
def configure_context_processors(app):
    """
    Registers the template context processors.
    """
    @app.context_processor
    def inject_flaskbb_config():
        """Makes the ``flaskbb_config`` settings available in every template."""
        return {"flaskbb_config": flaskbb_config}
def configure_before_handlers(app):
    """
    Registers the before-request handlers.
    """
    @app.before_request
    def update_lastseen():
        """Stores the current UTC time as ``lastseen`` for logged-in users."""
        if not current_user.is_authenticated():
            return
        current_user.lastseen = datetime.datetime.utcnow()
        db.session.add(current_user)
        db.session.commit()

    # Online tracking lives in redis, so only register it when redis is on.
    if app.config["REDIS_ENABLED"]:
        @app.before_request
        def mark_current_user_online():
            """Tracks the online status of members and guests in redis."""
            if current_user.is_authenticated():
                mark_online(current_user.username)
            else:
                mark_online(request.remote_addr, guest=True)
def configure_errorhandlers(app):
    """
    Registers the HTTP error handlers (403, 404 and 500).
    """
    pages = (
        (403, "forbidden_page", "errors/forbidden_page.html"),
        (404, "page_not_found", "errors/page_not_found.html"),
        (500, "server_error_page", "errors/server_error.html"),
    )
    for code, handler_name, template in pages:
        # Bind code/template as defaults so each closure keeps its own values.
        def _handler(error, _template=template, _code=code):
            return render_template(_template), _code
        _handler.__name__ = handler_name
        app.errorhandler(code)(_handler)
def configure_logging(app):
    """
    Configures logging.

    Attaches two rotating file handlers (one at INFO level, one at ERROR
    level) to the application's logger, writing into the ``logs`` folder
    next to the application package.  If ``SEND_LOGS`` is enabled, also
    attaches an SMTP handler that mails ERROR records to the admins.
    """
    # NOTE(review): the logs folder is assumed to already exist -- confirm
    # it is created elsewhere (e.g. at install time), otherwise opening the
    # handlers below will fail.
    logs_folder = os.path.join(app.root_path, os.pardir, "logs")
    # This from-import also imports the logging.handlers submodule, which
    # is why logging.handlers.RotatingFileHandler resolves further down.
    from logging.handlers import SMTPHandler
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]')
    # INFO log: rotated at ~100 kB, keeping up to 10 backups.
    info_log = os.path.join(logs_folder, app.config['INFO_LOG'])
    info_file_handler = logging.handlers.RotatingFileHandler(
        info_log,
        maxBytes=100000,
        backupCount=10
    )
    info_file_handler.setLevel(logging.INFO)
    info_file_handler.setFormatter(formatter)
    app.logger.addHandler(info_file_handler)
    # ERROR log: same rotation policy as the INFO log.
    error_log = os.path.join(logs_folder, app.config['ERROR_LOG'])
    error_file_handler = logging.handlers.RotatingFileHandler(
        error_log,
        maxBytes=100000,
        backupCount=10
    )
    error_file_handler.setLevel(logging.ERROR)
    error_file_handler.setFormatter(formatter)
    app.logger.addHandler(error_file_handler)
    if app.config["SEND_LOGS"]:
        # Mail ERROR records to the configured admins.
        mail_handler = \
            SMTPHandler(app.config['MAIL_SERVER'],
                        app.config['MAIL_DEFAULT_SENDER'],
                        app.config['ADMINS'],
                        'application error, no admins specified',
                        (
                            app.config['MAIL_USERNAME'],
                            app.config['MAIL_PASSWORD'],
                        ))
        mail_handler.setLevel(logging.ERROR)
        mail_handler.setFormatter(formatter)
        app.logger.addHandler(mail_handler)
|
{
"content_hash": "d05fdf813b8e3b3d1ffb17b26396f286",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 81,
"avg_line_length": 30.743396226415094,
"alnum_prop": 0.6568061863262551,
"repo_name": "mattcaldwell/flaskbb",
"id": "bf4d604b7596663e0db4168fd450271d31bef310",
"size": "8171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flaskbb/app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from modules.setup.app import app
from modules import helpers
from flask import redirect, request, url_for, flash, session as login_session
import json
import requests
import os
import sys
# Directory containing the application entry point; used to locate the
# GitHub OAuth client-secrets file relative to the project root.
ROOT_DIR = os.path.dirname(sys.modules['__main__'].__file__)
client_secret_path = os.path.join(
    ROOT_DIR, 'client_secrets/gh_client_secrets.json')
@app.route('/ghconnect', methods=['POST', 'GET'])
def ghconnect():
    """GitHub OAuth callback endpoint.

    Exchanges the ``code`` query parameter for an access token, loads the
    user's profile and primary email from the GitHub API, stores the login
    information in the session and creates a local user on first login.

    Returns a redirect to the front page on success, an empty 401 response
    if GitHub refuses the token exchange, and an empty 404 response when
    no ``code`` parameter is present.
    """
    if 'code' not in request.args:
        return '', 404
    # Read the OAuth client credentials once.  (The original opened and
    # read the secrets file twice, leaking both file handles.)
    with open(client_secret_path, 'r') as secrets_file:
        web_secrets = json.load(secrets_file)['web']
    url = 'https://github.com/login/oauth/access_token'
    payload = {
        'client_id': web_secrets['client_id'],
        'client_secret': web_secrets['client_secret'],
        'code': request.args['code'],
        'state': login_session['state']
    }
    headers = {'Accept': 'application/json'}
    r = requests.post(url, params=payload, headers=headers)
    response = r.json()
    if 'access_token' in response:
        login_session['access_token'] = response['access_token']
    else:
        # Fixed: previously execution fell through here and crashed on a
        # missing/stale session token.  Bail out cleanly instead.
        app.logger.error('GitHub didn\'t return an access token')
        return '', 401
    url = 'https://api.github.com/user?access_token=%s' % login_session['access_token']  # NOQA
    r = requests.get(url)
    response = r.json()
    login_session['provider'] = 'github'
    login_session['username'] = response['name']
    login_session['picture'] = response['avatar_url']
    # Get user's email
    url = 'https://api.github.com/user/emails?access_token=%s' % login_session['access_token']  # NOQA
    r = requests.get(url)
    response = r.json()
    login_session['email'] = response[0]['email']
    # Create user, if it doesn't already exist
    user_id = helpers.getUserID(login_session['email'])
    if not user_id:
        user_id = helpers.createUser(login_session)
    login_session['user_id'] = user_id
    flash('You are now logged in as %s (%s)' % (
        login_session['username'], login_session['email']))
    return redirect(url_for('front'))
|
{
"content_hash": "544325190c9e82be229f7fc4e79445d2",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 106,
"avg_line_length": 41.42307692307692,
"alnum_prop": 0.5947075208913649,
"repo_name": "stonescar/item-catalog",
"id": "c6177b5fc1c3e6ea5a12da14ccf00b73dfffb34d",
"size": "2154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/views/login/github/ghconnect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "484"
},
{
"name": "HTML",
"bytes": "24756"
},
{
"name": "JavaScript",
"bytes": "1293"
},
{
"name": "Python",
"bytes": "29252"
},
{
"name": "Shell",
"bytes": "358"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from taskmaster.models import *
class DependencyTestCase(TestCase):
    """Tests for dependency management between jobs in a JobGroup."""

    def setUp(self):
        """Creates a group containing three independent jobs."""
        self.group = JobGroup.objects.create(name="pizza maker group")
        self.job1 = self.group.new_job(name="tony job")
        self.job2 = self.group.new_job(name="steve job")
        self.job3 = self.group.new_job(name="charlie job")

    def test_adding_self(self):
        """Jobs can't be self-dependent"""
        j1, j2, j3 = self.job1, self.job2, self.job3
        self.assertRaises(InvalidDependencyException, lambda: j1.add_parent(j1))
        self.assertRaises(InvalidDependencyException, lambda: j1.add_child(j1))
        # Indirect cycles (j1 -> j2 -> j3 -> j1) must be rejected as well.
        j1.add_child(j2)
        j2.add_child(j3)
        self.assertRaises(InvalidDependencyException, lambda: j3.add_child(j1))

    def test_depends_on(self):
        """The depends_on method can follow relationships"""
        j1, j2, j3 = self.job1, self.job2, self.job3
        self.assertFalse(j2.depends_on(j1), "simple dependency not added yet is ignored")
        j1.add_child(j2)
        j2.add_child(j3)
        self.assertTrue(j2.depends_on(j1), "simple dependency detected")
        self.assertFalse(j1.depends_on(j2), "simple reverse-dependency ignored")
        self.assertTrue(j3.depends_on(j1), "extended dependency detected")
        self.assertFalse(j1.depends_on(j3), "extended reverse-dependency ignored")
        self.assertFalse(j1.depends_on(j1), "no self dependency exists")

    def test_status_transitions(self):
        """Job statuses are updated properly when adding parents"""
        j1, j2, j3 = self.job1, self.job2, self.job3
        self.assertEqual(j1.status, Job.READY, "Free-floating job is Ready")
        self.assertEqual(j2.status, Job.READY, "Other free-floating job is Ready")
        j1.add_child(j2)
        self.assertEqual(j1.status, Job.READY, "Parent job is Ready")
        self.assertEqual(j2.status, Job.WAITING, "Child job is waiting")

    def test_status_refuses_adding_dependencies(self):
        """Dependencies can't be added unless both jobs' statuses are Ready or Waiting"""
        j1, j2, j3 = self.job1, self.job2, self.job3
        # Fixed: Job.RUNNING was listed twice in this tuple; each non-ready
        # status is now exercised exactly once.
        for invalid_status in (Job.RUNNING, Job.COMPLETED, Job.ERRORED, Job.KILLED):
            invalid_job = self.group.new_job(name="poor job", status=invalid_status)
            self.assertRaises(InvalidDependencyException, lambda: j1.add_child(invalid_job))
            self.assertRaises(InvalidDependencyException, lambda: invalid_job.add_child(j1))
|
{
"content_hash": "9ef343c6e9d1abaefa1aff675cdeb6c6",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 91,
"avg_line_length": 38.91525423728814,
"alnum_prop": 0.7395470383275261,
"repo_name": "ChrisCooper/pipeline-nanny",
"id": "f7c794f972d44b78afc2314fa98bed6bcc422765",
"size": "2296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taskmaster/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29"
},
{
"name": "HTML",
"bytes": "3656"
},
{
"name": "Python",
"bytes": "10655"
}
],
"symlink_target": ""
}
|
class ActorGraph():
    """A flat container keeping track of every actor that was added."""

    def __init__(self):
        # Actors are kept in insertion order.
        self.actors = list()

    def add(self, obj):
        """Appends *obj* to the collection of tracked actors."""
        self.actors.append(obj)
|
{
"content_hash": "25c619747c25879d8faf00d9fff55f6c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 31,
"avg_line_length": 19.142857142857142,
"alnum_prop": 0.5074626865671642,
"repo_name": "fos/fos-legacy",
"id": "4dbfde6a6eb137c786afe05beff44f2bf1023a2f",
"size": "136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fos/core/actor_graph.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "7294"
},
{
"name": "Erlang",
"bytes": "2662"
},
{
"name": "Haskell",
"bytes": "1973"
},
{
"name": "JavaScript",
"bytes": "432354"
},
{
"name": "Python",
"bytes": "1231025"
}
],
"symlink_target": ""
}
|
"""Provides the most used functions in a nicely wrapped API.
This module defines a global environment, so that most methods can be
called without the need to specify an environment or a FormulaManager.
Functions trying to access the global environment should use the
method get_global_env(). Keep in mind that the global state of the
environment might lead to inconsistency and unexpected bugs. This is
particularly true for tests. For tests it is recommended to perform an
environment reset in the setUp phase, to be guaranteed that a fresh
environment is used.
"""
# Enable default deprecation warnings!
import warnings
warnings.simplefilter('default')
import pysmt.typing as types
import pysmt.configuration as config
import pysmt.environment
def get_env():
    """Returns the global environment."""
    return pysmt.environment.get_env()
def reset_env():
    """Resets the global environment, and returns the new one.

    Recommended in a test's setUp phase so that no state leaks between
    tests (see the module docstring).
    """
    return pysmt.environment.reset_env()
##### Shortcuts for FormulaManager #####
# Each shortcut below delegates to the corresponding walker/oracle of the
# global environment.
def get_type(formula):
    """Returns the type of the formula."""
    return get_env().stc.get_type(formula)
def simplify(formula):
    """Returns the simplified version of the formula."""
    return get_env().simplifier.simplify(formula)
def substitute(formula, subs):
    """Applies the substitutions defined in the dictionary to the formula."""
    return get_env().substituter.substitute(formula, subs)
def serialize(formula, threshold=None):
    """Provides a string representing the formula."""
    return get_env().serializer.serialize(formula,
                                          threshold=threshold)
def get_free_variables(formula):
    """Returns the free variables of the formula."""
    return get_env().fvo.get_free_variables(formula)
def get_atoms(formula):
    """Returns the set of atoms of the formula."""
    return get_env().ao.get_atoms(formula)
def get_formula_size(formula, measure=None):
    """Returns the size of the formula as measured by the given counting type.
    See pysmt.oracles.SizeOracle for details.
    """
    return get_env().sizeo.get_size(formula, measure)
##### Nodes Creation #####
# Every constructor below builds the node through the FormulaManager of
# the global environment.
def ForAll(variables, formula):
    r""".. math:: \forall v_1, \cdots, v_n . \varphi(v_1, \cdots, v_n)"""
    return get_env().formula_manager.ForAll(variables, formula)
def Exists(variables, formula):
    r""".. math:: \exists v_1, \cdots, v_n . \varphi(v_1, \cdots, v_n)"""
    return get_env().formula_manager.Exists(variables, formula)
def Function(vname, params):
    r""".. math:: vname(p_1, \cdots, p_n)"""
    return get_env().formula_manager.Function(vname, params)
def Not(formula):
    r""".. math:: \lnot \varphi"""
    return get_env().formula_manager.Not(formula)
def Implies(left, right):
    r""".. math:: l \rightarrow r"""
    return get_env().formula_manager.Implies(left, right)
def Iff(left, right):
    r""".. math:: l \leftrightarrow r """
    return get_env().formula_manager.Iff(left, right)
def GE(left, right):
    r""".. math:: l \ge r"""
    return get_env().formula_manager.GE(left, right)
def Minus(left, right):
    r""".. math:: l - r """
    return get_env().formula_manager.Minus(left, right)
def Times(left, right):
    r""".. math:: l * r"""
    return get_env().formula_manager.Times(left, right)
def Div(left, right):
    r""".. math:: \frac{l}{r}"""
    return get_env().formula_manager.Div(left, right)
def Equals(left, right):
    r""".. math:: l = r"""
    return get_env().formula_manager.Equals(left, right)
def GT(left, right):
    r""".. math:: l > r"""
    return get_env().formula_manager.GT(left, right)
def LE(left, right):
    r""".. math:: l \le r"""
    return get_env().formula_manager.LE(left, right)
def LT(left, right):
    r""".. math:: l < r"""
    return get_env().formula_manager.LT(left, right)
def Ite(iff, left, right):
    r""".. math:: \text{ If } i \text{ Then } l \text{ Else } r"""
    return get_env().formula_manager.Ite(iff, left, right)
def Symbol(name, typename=types.BOOL):
    """Returns a symbol with the given name and type."""
    return get_env().formula_manager.Symbol(name, typename)
def FreshSymbol(typename=types.BOOL, template=None):
    """Returns a symbol with a fresh name and given type."""
    return get_env().formula_manager.FreshSymbol(typename, template)
def Int(value):
    """Returns an Integer constant with the given value."""
    return get_env().formula_manager.Int(value)
def Bool(value):
    """Returns a Boolean constant with the given value."""
    return get_env().formula_manager.Bool(value)
def Real(value):
    """Returns a Real constant with the given value."""
    return get_env().formula_manager.Real(value)
def TRUE():
    """Returns the Boolean constant TRUE."""
    return get_env().formula_manager.TRUE()
def FALSE():
    """Returns the Boolean constant FALSE."""
    return get_env().formula_manager.FALSE()
def And(*args):
    r""".. math:: \varphi_0 \land \cdots \land \varphi_n """
    return get_env().formula_manager.And(*args)
def Or(*args):
    r""".. math:: \varphi_0 \lor \cdots \lor \varphi_n """
    return get_env().formula_manager.Or(*args)
def Plus(*args):
    r""".. math:: \varphi_0 + \cdots + \varphi_n """
    return get_env().formula_manager.Plus(*args)
def ToReal(formula):
    """Explicit cast of a term into a Real term."""
    return get_env().formula_manager.ToReal(formula)
def AtMostOne(*args):
    """
    Cardinality constraint over a set of boolean expressions.
    At most one can be true at anytime.
    """
    return get_env().formula_manager.AtMostOne(*args)
def ExactlyOne(*args):
    """Given a set of boolean expressions requires that exactly one holds."""
    return get_env().formula_manager.ExactlyOne(*args)
def AllDifferent(*args):
    """Given a set of non-boolean expressions, requires that each of them
    has value different from all the others
    """
    return get_env().formula_manager.AllDifferent(*args)
def Xor(left, right):
    """Returns the XOR of left and right"""
    return get_env().formula_manager.Xor(left, right)
def Min(*args):
    """
    Minimum over a set of real or integer terms
    """
    return get_env().formula_manager.Min(*args)
def Max(*args):
    """
    Maximum over a set of real or integer terms
    """
    return get_env().formula_manager.Max(*args)
# Bit Vectors
# Shortcuts for BitVector construction and operations; all delegate to the
# FormulaManager of the global environment.
def BV(value, width=None):
    """Returns a constant of type BitVector.
    value can be either:
    - a string of 0s and 1s
    - a string starting with "#b" followed by a sequence of 0s and 1s
    - an integer number s.t. 0 <= value < 2**width
    In order to create the BV representation of a signed integer,
    the SBV() method shall be used.
    """
    return get_env().formula_manager.BV(value, width)
def SBV(value, width=None):
    """Returns a constant of type BitVector interpreting the sign.
    If the specified value is an integer, it is converted in the
    2-complement representation of the given number, otherwise the
    behavior is the same as BV().
    """
    return get_env().formula_manager.SBV(value, width)
def BVOne(width=None):
    """Returns the unsigned one constant BitVector."""
    return get_env().formula_manager.BVOne(width)
def BVZero(width=None):
    """Returns the zero constant BitVector."""
    return get_env().formula_manager.BVZero(width)
def BVNot(formula):
    """Returns the bitvector Not(bv)"""
    return get_env().formula_manager.BVNot(formula)
def BVAnd(left, right):
    """Returns the Bit-wise AND of two bitvectors of the same size."""
    return get_env().formula_manager.BVAnd(left, right)
def BVOr(left, right):
    """Returns the Bit-wise OR of two bitvectors of the same size."""
    return get_env().formula_manager.BVOr(left, right)
def BVXor(left, right):
    """Returns the Bit-wise XOR of two bitvectors of the same size."""
    return get_env().formula_manager.BVXor(left, right)
def BVConcat(left, right):
    """Returns the Concatenation of the two BVs"""
    return get_env().formula_manager.BVConcat(left, right)
def BVExtract(formula, start=0, end=None):
    """Returns the slice of formula from start to end (inclusive)."""
    return get_env().formula_manager.BVExtract(formula, start=start, end=end)
def BVULT(left, right):
    """Returns the formula left < right."""
    return get_env().formula_manager.BVULT(left, right)
def BVUGT(left, right):
    """Returns the formula left > right."""
    return get_env().formula_manager.BVUGT(left, right)
def BVULE(left, right):
    """Returns the formula left <= right."""
    return get_env().formula_manager.BVULE(left, right)
def BVUGE(left, right):
    """Returns the formula left >= right."""
    return get_env().formula_manager.BVUGE(left, right)
def BVNeg(formula):
    """Returns the arithmetic negation of the BV."""
    return get_env().formula_manager.BVNeg(formula)
def BVAdd(left, right):
    """Returns the sum of two BV."""
    return get_env().formula_manager.BVAdd(left, right)
def BVSub(left, right):
    """Returns the difference of two BV."""
    return get_env().formula_manager.BVSub(left, right)
def BVMul(left, right):
    """Returns the product of two BV."""
    return get_env().formula_manager.BVMul(left, right)
def BVUDiv(left, right):
    """Returns the division of the two BV."""
    return get_env().formula_manager.BVUDiv(left, right)
def BVURem(left, right):
    """Returns the reminder of the two BV."""
    return get_env().formula_manager.BVURem(left, right)
def BVLShl(left, right):
    """Returns the logical left shift the BV."""
    return get_env().formula_manager.BVLShl(left, right)
def BVLShr(left, right):
    """Returns the logical right shift the BV."""
    return get_env().formula_manager.BVLShr(left, right)
def BVRol(formula, steps):
    """Returns the LEFT rotation of the BV by the number of steps."""
    return get_env().formula_manager.BVRol(formula, steps)
def BVRor(formula, steps):
    """Returns the RIGHT rotation of the BV by the number of steps."""
    return get_env().formula_manager.BVRor(formula, steps)
def BVZExt(formula, increase):
    """Returns the extension of the BV
    New bits are set to zero.
    """
    return get_env().formula_manager.BVZExt(formula, increase)
def BVSExt(formula, increase):
    """Returns the signed extension of the BV
    New bits are set according to the most-significant-bit.
    """
    return get_env().formula_manager.BVSExt(formula, increase)
def BVSLT(left, right):
    """Returns the SIGNED LOWER-THAN comparison for BV."""
    return get_env().formula_manager.BVSLT(left, right)
def BVSLE(left, right):
    """Returns the SIGNED LOWER-THAN-OR-EQUAL-TO comparison for BV."""
    return get_env().formula_manager.BVSLE(left, right)
def BVSGT(left, right):
    """Returns the SIGNED GREATER-THAN comparison for BV."""
    return get_env().formula_manager.BVSGT(left, right)
def BVSGE(left, right):
    """Returns the SIGNED GREATER-THAN-OR-EQUAL-TO comparison for BV."""
    return get_env().formula_manager.BVSGE(left, right)
def BVSDiv(left, right):
    """Returns the SIGNED DIVISION of left by right"""
    return get_env().formula_manager.BVSDiv(left, right)
def BVSRem(left, right):
    """Returns the SIGNED REMAINDER of left divided by right"""
    return get_env().formula_manager.BVSRem(left, right)
def BVComp(left, right):
    """Returns a BV of size 1 equal to 0 if left is equal to right,
    otherwise 1 is returned."""
    return get_env().formula_manager.BVComp(left, right)
def BVAShr(left, right):
    """Returns the RIGHT arithmetic shift of the left BV by the number
    of steps specified by the right BV."""
    return get_env().formula_manager.BVAShr(left, right)
#### Shortcuts for Solvers Factory #####
# Each factory shortcut builds the object through the global environment.
def Solver(quantified=False, name=None, logic=None):
    """Returns a solver."""
    return get_env().factory.Solver(quantified=quantified,
                                    name=name,
                                    logic=logic)
def UnsatCoreSolver(quantified=False, name=None, logic=None,
                    unsat_cores_mode="all"):
    """Returns a solver supporting unsat core extraction."""
    return get_env().factory.UnsatCoreSolver(quantified=quantified,
                                             name=name,
                                             logic=logic,
                                             unsat_cores_mode=unsat_cores_mode)
def QuantifierEliminator(name=None, logic=None):
    """Returns a quantifier eliminator"""
    return get_env().factory.QuantifierEliminator(name=name, logic=logic)
def Interpolator(name=None, logic=None):
    """Returns an interpolator"""
    return get_env().factory.Interpolator(name=name, logic=logic)
def is_sat(formula, solver_name=None, logic=None):
    """ Returns whether a formula is satisfiable.
    :param formula: The formula to check satisfiability
    :type formula: FNode
    :param solver_name: Specify the name of the solver to be used.
    :param logic: Specify the logic that is going to be used.
    :returns: Whether the formula is SAT or UNSAT.
    :rtype: bool
    """
    env = get_env()
    # Formulas built in a different environment must be normalized
    # (rebuilt) inside the global environment before use.
    if formula not in env.formula_manager:
        warnings.warn("Warning: Contextualizing formula during is_sat")
        formula = env.formula_manager.normalize(formula)
    return env.factory.is_sat(formula,
                              solver_name=solver_name,
                              logic=logic)
def get_model(formula, solver_name=None, logic=None):
    """ Similar to :py:func:`is_sat` but returns a model if the formula is
    satisfiable, otherwise None."""
    env = get_env()
    if formula not in env.formula_manager:
        warnings.warn("Warning: Contextualizing formula during get_model")
        formula = env.formula_manager.normalize(formula)
    return env.factory.get_model(formula,
                                 solver_name=solver_name,
                                 logic=logic)
def get_implicant(formula, solver_name=None, logic=None):
    """Returns a formula f_i such that Implies(f_i, formula) is valid or None
    if formula is unsatisfiable.

    :param formula: The formula for which an implicant is computed.
    :param solver_name: Specify the name of the solver to be used.
    :param logic: Specify the logic that is going to be used.
    """
    env = get_env()
    if formula not in env.formula_manager:
        # Fixed: the warning previously said "get_model" (copy-paste error).
        warnings.warn("Warning: Contextualizing formula during get_implicant")
        formula = env.formula_manager.normalize(formula)
    return env.factory.get_implicant(formula,
                                     solver_name=solver_name,
                                     logic=logic)
def get_unsat_core(clauses, solver_name=None, logic=None):
    """Similar to :py:func:`get_model` but returns the unsat core of the
    conjunction of the input clauses"""
    env = get_env()
    if any(c not in env.formula_manager for c in clauses):
        # Fixed: the warning previously said "get_model" (copy-paste error).
        warnings.warn("Warning: Contextualizing formula during get_unsat_core")
        clauses = [env.formula_manager.normalize(c) for c in clauses]
    return env.factory.get_unsat_core(clauses,
                                      solver_name=solver_name,
                                      logic=logic)
def is_valid(formula, solver_name=None, logic=None):
    """Similar to :py:func:`is_sat` but checks validity."""
    env = get_env()
    # Formulas created in a different environment are rebuilt here first.
    if formula not in env.formula_manager:
        warnings.warn("Warning: Contextualizing formula during is_valid")
        formula = env.formula_manager.normalize(formula)
    return env.factory.is_valid(formula,
                                solver_name=solver_name,
                                logic=logic)
def is_unsat(formula, solver_name=None, logic=None):
    """Similar to :py:func:`is_sat` but checks unsatisfiability."""
    env = get_env()
    if formula not in env.formula_manager:
        warnings.warn("Warning: Contextualizing formula during is_unsat")
        formula = env.formula_manager.normalize(formula)
    return env.factory.is_unsat(formula,
                                solver_name=solver_name,
                                logic=logic)
def qelim(formula, solver_name=None, logic=None):
    """Performs quantifier elimination of the given formula."""
    env = get_env()
    if formula not in env.formula_manager:
        # Fixed: the warning previously said "is_unsat" (copy-paste error).
        warnings.warn("Warning: Contextualizing formula during qelim")
        formula = env.formula_manager.normalize(formula)
    return env.factory.qelim(formula,
                             solver_name=solver_name,
                             logic=logic)
def binary_interpolant(formula_a, formula_b, solver_name=None, logic=None):
    """Computes an interpolant of (formula_a, formula_b). Returns None
    if the conjunction is satisfiable"""
    env = get_env()
    formulas = [formula_a, formula_b]
    # Normalize both operands into the global environment if needed.
    for i, f in enumerate(formulas):
        if f not in env.formula_manager:
            warnings.warn("Warning: Contextualizing formula during "
                          "binary_interpolant")
            formulas[i] = env.formula_manager.normalize(f)
    return env.factory.binary_interpolant(formulas[0], formulas[1],
                                          solver_name=solver_name,
                                          logic=logic)
def sequence_interpolant(formulas, solver_name=None, logic=None):
    """Computes a sequence interpolant of the formulas. Returns None
    if the conjunction is satisfiable"""
    env = get_env()
    formulas = list(formulas)
    for i, f in enumerate(formulas):
        if f not in env.formula_manager:
            warnings.warn("Warning: Contextualizing formula during "
                          "sequence_interpolant")
            formulas[i] = env.formula_manager.normalize(f)
    return env.factory.sequence_interpolant(formulas,
                                            solver_name=solver_name,
                                            logic=logic)
def read_configuration(config_filename, environment=None):
    """
    Reads the pysmt configuration of the given file path and applies
    it on the specified environment. If no environment is specified,
    the top-level environment will be used.
    """
    if environment is None:
        environment = get_env()
    config.configure_environment(config_filename, environment)
def write_configuration(config_filename, environment=None):
    """
    Dumps the current pysmt configuration to the specified file path.
    If no environment is specified, the top-level environment is dumped.
    """
    if environment is None:
        environment = get_env()
    config.write_environment_configuration(config_filename, environment)
|
{
"content_hash": "ebf662130cd42222a7d72c8ca83ac9ce",
"timestamp": "",
"source": "github",
"line_count": 524,
"max_line_length": 79,
"avg_line_length": 35.37595419847328,
"alnum_prop": 0.6528564492636348,
"repo_name": "idkwim/pysmt",
"id": "a9a2f5bda7cf394d0fdc8cb2df353163a9661f2f",
"size": "19187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysmt/shortcuts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "PowerShell",
"bytes": "5987"
},
{
"name": "Python",
"bytes": "751201"
},
{
"name": "SMT",
"bytes": "4905619"
},
{
"name": "Shell",
"bytes": "4307"
}
],
"symlink_target": ""
}
|
"""Multi-layer Perceptron
"""
# Authors: Issam H. Laradji <issam.laradji@gmail.com>
# Andreas Mueller
# Jiyuan Qian
# License: BSD 3 clause
import numpy as np
from abc import ABCMeta, abstractmethod
from scipy.optimize import fmin_l_bfgs_b
import warnings
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ._base import ACTIVATIONS, DERIVATIVES, LOSS_FUNCTIONS
from ._stochastic_optimizers import SGDOptimizer, AdamOptimizer
from ..model_selection import train_test_split
from ..externals import six
from ..preprocessing import LabelBinarizer
from ..utils import gen_batches, check_random_state
from ..utils import shuffle
from ..utils import check_array, check_X_y, column_or_1d
from ..exceptions import ConvergenceWarning
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.multiclass import _check_partial_fit_first_call, unique_labels
from ..utils.multiclass import type_of_target
_STOCHASTIC_SOLVERS = ['sgd', 'adam']
def _pack(coefs_, intercepts_):
"""Pack the parameters into a single vector."""
return np.hstack([l.ravel() for l in coefs_ + intercepts_])
class BaseMultilayerPerceptron(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for MLP classification and regression.
Warning: This class should not be used directly.
Use derived classes instead.
.. versionadded:: 0.18
"""
@abstractmethod
def __init__(self, hidden_layer_sizes, activation, solver,
alpha, batch_size, learning_rate, learning_rate_init, power_t,
max_iter, loss, shuffle, random_state, tol, verbose,
warm_start, momentum, nesterovs_momentum, early_stopping,
validation_fraction, beta_1, beta_2, epsilon):
self.activation = activation
self.solver = solver
self.alpha = alpha
self.batch_size = batch_size
self.learning_rate = learning_rate
self.learning_rate_init = learning_rate_init
self.power_t = power_t
self.max_iter = max_iter
self.loss = loss
self.hidden_layer_sizes = hidden_layer_sizes
self.shuffle = shuffle
self.random_state = random_state
self.tol = tol
self.verbose = verbose
self.warm_start = warm_start
self.momentum = momentum
self.nesterovs_momentum = nesterovs_momentum
self.early_stopping = early_stopping
self.validation_fraction = validation_fraction
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
def _unpack(self, packed_parameters):
"""Extract the coefficients and intercepts from packed_parameters."""
for i in range(self.n_layers_ - 1):
start, end, shape = self._coef_indptr[i]
self.coefs_[i] = np.reshape(packed_parameters[start:end], shape)
start, end = self._intercept_indptr[i]
self.intercepts_[i] = packed_parameters[start:end]
def _forward_pass(self, activations):
"""Perform a forward pass on the network by computing the values
of the neurons in the hidden layers and the output layer.
Parameters
----------
activations: list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
with_output_activation : bool, default True
If True, the output passes through the output activation
function, which is either the softmax function or the
logistic function
"""
hidden_activation = ACTIVATIONS[self.activation]
# Iterate over the hidden layers
for i in range(self.n_layers_ - 1):
activations[i + 1] = safe_sparse_dot(activations[i],
self.coefs_[i])
activations[i + 1] += self.intercepts_[i]
# For the hidden layers
if (i + 1) != (self.n_layers_ - 1):
activations[i + 1] = hidden_activation(activations[i + 1])
# For the last layer
output_activation = ACTIVATIONS[self.out_activation_]
activations[i + 1] = output_activation(activations[i + 1])
return activations
def _compute_loss_grad(self, layer, n_samples, activations, deltas,
coef_grads, intercept_grads):
"""Compute the gradient of loss with respect to coefs and intercept for
specified layer.
This function does backpropagation for the specified one layer.
"""
coef_grads[layer] = safe_sparse_dot(activations[layer].T,
deltas[layer])
coef_grads[layer] += (self.alpha * self.coefs_[layer])
coef_grads[layer] /= n_samples
intercept_grads[layer] = np.mean(deltas[layer], 0)
return coef_grads, intercept_grads
def _loss_grad_lbfgs(self, packed_coef_inter, X, y, activations, deltas,
coef_grads, intercept_grads):
"""Compute the MLP loss function and its corresponding derivatives
with respect to the different parameters given in the initialization.
Returned gradients are packed in a single vector so it can be used
in lbfgs
Parameters
----------
packed_parameters : array-like
A vector comprising the flattened coefficients and intercepts.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
activations: list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
coef_grad : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
loss : float
grad : array-like, shape (number of nodes of all layers,)
"""
self._unpack(packed_coef_inter)
loss, coef_grads, intercept_grads = self._backprop(
X, y, activations, deltas, coef_grads, intercept_grads)
self.n_iter_ += 1
grad = _pack(coef_grads, intercept_grads)
return loss, grad
def _backprop(self, X, y, activations, deltas, coef_grads,
              intercept_grads):
    """Compute the MLP loss function and its corresponding derivatives
    with respect to each parameter: weights and bias vectors.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data.
    y : array-like, shape (n_samples,)
        The target values.
    activations : list, length = n_layers - 1
        The ith element of the list holds the values of the ith layer.
    deltas : list, length = n_layers - 1
        The ith element of the list holds the difference between the
        activations of the i + 1 layer and the backpropagated error.
        More specifically, deltas are gradients of loss with respect to z
        in each layer, where z = wx + b is the value of a particular layer
        before passing through the activation function.
    coef_grads : list, length = n_layers - 1
        The ith element contains the amount of change used to update the
        coefficient parameters of the ith layer in an iteration.
    intercept_grads : list, length = n_layers - 1
        The ith element contains the amount of change used to update the
        intercept parameters of the ith layer in an iteration.

    Returns
    -------
    loss : float
    coef_grads : list, length = n_layers - 1
    intercept_grads : list, length = n_layers - 1
    """
    n_samples = X.shape[0]
    # Forward propagate
    activations = self._forward_pass(activations)
    # Get loss
    loss_func_name = self.loss
    # The binary cross-entropy has a cheaper specialized form when the
    # output activation is logistic.
    if loss_func_name == 'log_loss' and self.out_activation_ == 'logistic':
        loss_func_name = 'binary_log_loss'
    loss = LOSS_FUNCTIONS[loss_func_name](y, activations[-1])
    # Add L2 regularization term to loss (0.5 * alpha * ||W||^2 / n,
    # summed over all coefficient matrices; intercepts are not penalized).
    values = np.sum(
        np.array([np.dot(s.ravel(), s.ravel()) for s in self.coefs_]))
    loss += (0.5 * self.alpha) * values / n_samples
    # Backward propagate
    last = self.n_layers_ - 2
    # The calculation of delta[last] here works with following
    # combinations of output activation and loss function:
    # sigmoid and binary cross entropy, softmax and categorical cross
    # entropy, and identity with squared loss
    deltas[last] = activations[-1] - y
    # Compute gradient for the last layer
    coef_grads, intercept_grads = self._compute_loss_grad(
        last, n_samples, activations, deltas, coef_grads, intercept_grads)
    # Iterate over the hidden layers, propagating the error backwards and
    # multiplying (in place, into deltas[i - 1]) by the derivative of the
    # hidden activation evaluated at activations[i].
    for i in range(self.n_layers_ - 2, 0, -1):
        deltas[i - 1] = safe_sparse_dot(deltas[i], self.coefs_[i].T)
        inplace_derivative = DERIVATIVES[self.activation]
        inplace_derivative(activations[i], deltas[i - 1])
        coef_grads, intercept_grads = self._compute_loss_grad(
            i - 1, n_samples, activations, deltas, coef_grads,
            intercept_grads)
    return loss, coef_grads, intercept_grads
def _initialize(self, y, layer_units):
    """Allocate weights and bookkeeping attributes for a fresh fit.

    Parameters
    ----------
    y : array-like, shape (n_samples, n_outputs)
        Already-binarized / 2-D target values.
    layer_units : list of int
        Sizes of all layers, input and output included.
    """
    # Reset the iteration counters.
    self.n_iter_ = 0
    self.t_ = 0
    self.n_outputs_ = y.shape[1]

    # Compute the number of layers
    self.n_layers_ = len(layer_units)

    # Choose the output activation: identity for regression, softmax for
    # multiclass classification, logistic for binary and multilabel.
    if not isinstance(self, ClassifierMixin):
        self.out_activation_ = 'identity'
    elif self._label_binarizer.y_type_ == 'multiclass':
        self.out_activation_ = 'softmax'
    else:
        self.out_activation_ = 'logistic'

    # One (coefficient matrix, intercept vector) pair per pair of
    # consecutive layers.
    self.coefs_ = []
    self.intercepts_ = []
    for fan_in, fan_out in zip(layer_units[:-1], layer_units[1:]):
        coef, intercept = self._init_coef(fan_in, fan_out)
        self.coefs_.append(coef)
        self.intercepts_.append(intercept)

    # Stochastic solvers additionally track the loss history and the
    # state used for early stopping.
    if self.solver in _STOCHASTIC_SOLVERS:
        self.loss_curve_ = []
        self._no_improvement_count = 0
        if self.early_stopping:
            self.validation_scores_ = []
            self.best_validation_score_ = -np.inf
        else:
            self.best_loss_ = np.inf
def _init_coef(self, fan_in, fan_out):
    """Draw initial weights and biases for a single layer.

    Uses the uniform initialization recommended by Glorot et al.,
    with a tighter bound for the logistic activation.

    Parameters
    ----------
    fan_in : int
        Number of units feeding into the layer.
    fan_out : int
        Number of units in the layer.

    Returns
    -------
    coef_init : ndarray, shape (fan_in, fan_out)
    intercept_init : ndarray, shape (fan_out,)
    """
    if self.activation == 'logistic':
        bound = np.sqrt(2. / (fan_in + fan_out))
    elif self.activation in ('identity', 'tanh', 'relu'):
        bound = np.sqrt(6. / (fan_in + fan_out))
    else:
        # Unreachable after hyperparameter validation; kept as a guard.
        raise ValueError("Unknown activation function %s" %
                         self.activation)
    rng = self._random_state
    coef_init = rng.uniform(-bound, bound, (fan_in, fan_out))
    intercept_init = rng.uniform(-bound, bound, fan_out)
    return coef_init, intercept_init
def _fit(self, X, y, incremental=False):
    """Shared fitting routine behind both fit() and partial_fit().

    Validates hyperparameters and inputs, (re)initializes the weights
    unless warm-starting or fitting incrementally, pre-allocates the
    work buffers, and dispatches to the stochastic or the l-bfgs solver.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data.
    y : array-like
        The target values.
    incremental : bool
        When True, continue training the existing model on this batch.

    Returns
    -------
    self
    """
    # Make sure self.hidden_layer_sizes is a list
    hidden_layer_sizes = self.hidden_layer_sizes
    if not hasattr(hidden_layer_sizes, "__iter__"):
        hidden_layer_sizes = [hidden_layer_sizes]
    hidden_layer_sizes = list(hidden_layer_sizes)
    # Validate input parameters.
    self._validate_hyperparameters()
    if np.any(np.array(hidden_layer_sizes) <= 0):
        raise ValueError("hidden_layer_sizes must be > 0, got %s." %
                         hidden_layer_sizes)
    X, y = self._validate_input(X, y, incremental)
    n_samples, n_features = X.shape
    # Ensure y is 2D
    if y.ndim == 1:
        y = y.reshape((-1, 1))
    self.n_outputs_ = y.shape[1]
    # Network topology: input layer, hidden layers, output layer.
    layer_units = ([n_features] + hidden_layer_sizes +
                   [self.n_outputs_])
    # check random state
    self._random_state = check_random_state(self.random_state)
    # Reinitialize weights unless warm-starting or fitting incrementally.
    if not hasattr(self, 'coefs_') or (not self.warm_start and not
                                       incremental):
        # First time training the model
        self._initialize(y, layer_units)
    # lbfgs does not support mini-batches
    if self.solver == 'lbfgs':
        batch_size = n_samples
    elif self.batch_size == 'auto':
        batch_size = min(200, n_samples)
    else:
        if self.batch_size < 1 or self.batch_size > n_samples:
            warnings.warn("Got `batch_size` less than 1 or larger than "
                          "sample size. It is going to be clipped")
        batch_size = np.clip(self.batch_size, 1, n_samples)
    # Pre-allocate activation / delta / gradient buffers that are reused
    # across epochs and minibatches.
    activations = [X]
    activations.extend(np.empty((batch_size, n_fan_out))
                       for n_fan_out in layer_units[1:])
    deltas = [np.empty_like(a_layer) for a_layer in activations]
    coef_grads = [np.empty((n_fan_in_, n_fan_out_)) for n_fan_in_,
                  n_fan_out_ in zip(layer_units[:-1],
                                    layer_units[1:])]
    intercept_grads = [np.empty(n_fan_out_) for n_fan_out_ in
                       layer_units[1:]]
    # Run the Stochastic optimization solver
    if self.solver in _STOCHASTIC_SOLVERS:
        self._fit_stochastic(X, y, activations, deltas, coef_grads,
                             intercept_grads, layer_units, incremental)
    # Run the LBFGS solver
    elif self.solver == 'lbfgs':
        self._fit_lbfgs(X, y, activations, deltas, coef_grads,
                        intercept_grads, layer_units)
    return self
def _validate_hyperparameters(self):
    """Check hyperparameter values, raising ValueError on the first bad one.

    Raises
    ------
    ValueError
        If any hyperparameter is outside its documented range or not
        among the supported choices.
    """
    if not isinstance(self.shuffle, bool):
        raise ValueError("shuffle must be either True or False, got %s." %
                         self.shuffle)
    if self.max_iter <= 0:
        raise ValueError("max_iter must be > 0, got %s." % self.max_iter)
    if self.alpha < 0.0:
        raise ValueError("alpha must be >= 0, got %s." % self.alpha)
    if (self.learning_rate in ["constant", "invscaling", "adaptive"] and
            self.learning_rate_init <= 0.0):
        # Bug fix: report the offending value (learning_rate_init), not
        # the learning_rate schedule name as before.
        raise ValueError("learning_rate_init must be > 0, got %s." %
                         self.learning_rate_init)
    if self.momentum > 1 or self.momentum < 0:
        raise ValueError("momentum must be >= 0 and <= 1, got %s" %
                         self.momentum)
    if not isinstance(self.nesterovs_momentum, bool):
        raise ValueError("nesterovs_momentum must be either True or False,"
                         " got %s." % self.nesterovs_momentum)
    if not isinstance(self.early_stopping, bool):
        raise ValueError("early_stopping must be either True or False,"
                         " got %s." % self.early_stopping)
    if self.validation_fraction < 0 or self.validation_fraction >= 1:
        raise ValueError("validation_fraction must be >= 0 and < 1, "
                         "got %s" % self.validation_fraction)
    if self.beta_1 < 0 or self.beta_1 >= 1:
        raise ValueError("beta_1 must be >= 0 and < 1, got %s" %
                         self.beta_1)
    if self.beta_2 < 0 or self.beta_2 >= 1:
        raise ValueError("beta_2 must be >= 0 and < 1, got %s" %
                         self.beta_2)
    if self.epsilon <= 0.0:
        raise ValueError("epsilon must be > 0, got %s." % self.epsilon)
    # raise ValueError if not registered
    supported_activations = ('identity', 'logistic', 'tanh', 'relu')
    if self.activation not in supported_activations:
        raise ValueError("The activation '%s' is not supported. Supported "
                         "activations are %s." % (self.activation,
                                                  supported_activations))
    if self.learning_rate not in ["constant", "invscaling", "adaptive"]:
        raise ValueError("learning rate %s is not supported. " %
                         self.learning_rate)
    supported_solvers = _STOCHASTIC_SOLVERS + ["lbfgs"]
    if self.solver not in supported_solvers:
        raise ValueError("The solver %s is not supported. "
                         " Expected one of: %s" %
                         (self.solver, ", ".join(supported_solvers)))
def _fit_lbfgs(self, X, y, activations, deltas, coef_grads,
               intercept_grads, layer_units):
    """Optimize the weights with full-batch L-BFGS.

    Records the (start, end) offsets of every coefficient matrix and
    intercept vector inside the packed parameter vector, then runs
    scipy's fmin_l_bfgs_b on the packed parameters.
    """
    # Store meta information for the parameters
    self._coef_indptr = []
    self._intercept_indptr = []
    start = 0
    # Save sizes and indices of coefficients for faster unpacking
    for i in range(self.n_layers_ - 1):
        n_fan_in, n_fan_out = layer_units[i], layer_units[i + 1]
        end = start + (n_fan_in * n_fan_out)
        self._coef_indptr.append((start, end, (n_fan_in, n_fan_out)))
        start = end
    # Save sizes and indices of intercepts for faster unpacking.
    # Intercepts follow all coefficients in the packed vector, so `start`
    # keeps accumulating across both loops; the loop order must match
    # the layout produced by _pack.
    for i in range(self.n_layers_ - 1):
        end = start + layer_units[i + 1]
        self._intercept_indptr.append((start, end))
        start = end
    # Run LBFGS
    packed_coef_inter = _pack(self.coefs_,
                              self.intercepts_)
    if self.verbose is True or self.verbose >= 1:
        iprint = 1
    else:
        iprint = -1
    # NOTE(review): `maxfun` caps function *evaluations*, not iterations —
    # presumably used here as the iteration budget; confirm against the
    # scipy fmin_l_bfgs_b semantics.
    optimal_parameters, self.loss_, d = fmin_l_bfgs_b(
        x0=packed_coef_inter,
        func=self._loss_grad_lbfgs,
        maxfun=self.max_iter,
        iprint=iprint,
        pgtol=self.tol,
        args=(X, y, activations, deltas, coef_grads, intercept_grads))
    # Write the optimized flat parameter vector back into coefs_/intercepts_.
    self._unpack(optimal_parameters)
def _fit_stochastic(self, X, y, activations, deltas, coef_grads,
                    intercept_grads, layer_units, incremental):
    """Train with a stochastic solver (SGD or Adam), minibatch by minibatch.

    Creates (or reuses, for incremental fits) the optimizer, optionally
    splits off a validation set for early stopping, then runs up to
    max_iter epochs of shuffled minibatch updates.
    """
    # (Re)create the optimizer unless continuing an incremental fit.
    if not incremental or not hasattr(self, '_optimizer'):
        params = self.coefs_ + self.intercepts_
        if self.solver == 'sgd':
            self._optimizer = SGDOptimizer(
                params, self.learning_rate_init, self.learning_rate,
                self.momentum, self.nesterovs_momentum, self.power_t)
        elif self.solver == 'adam':
            self._optimizer = AdamOptimizer(
                params, self.learning_rate_init, self.beta_1, self.beta_2,
                self.epsilon)
    # early_stopping in partial_fit doesn't make sense
    early_stopping = self.early_stopping and not incremental
    if early_stopping:
        X, X_val, y, y_val = train_test_split(
            X, y, random_state=self._random_state,
            test_size=self.validation_fraction)
        if isinstance(self, ClassifierMixin):
            # score() expects original labels, not the binarized ones.
            y_val = self._label_binarizer.inverse_transform(y_val)
    else:
        X_val = None
        y_val = None
    n_samples = X.shape[0]
    if self.batch_size == 'auto':
        batch_size = min(200, n_samples)
    else:
        batch_size = np.clip(self.batch_size, 1, n_samples)
    try:
        for it in range(self.max_iter):
            X, y = shuffle(X, y, random_state=self._random_state)
            accumulated_loss = 0.0
            for batch_slice in gen_batches(n_samples, batch_size):
                activations[0] = X[batch_slice]
                batch_loss, coef_grads, intercept_grads = self._backprop(
                    X[batch_slice], y[batch_slice], activations, deltas,
                    coef_grads, intercept_grads)
                # Weight each batch loss by its length so the epoch loss
                # is a true per-sample mean even for a short final batch.
                accumulated_loss += batch_loss * (batch_slice.stop -
                                                  batch_slice.start)
                # update weights
                grads = coef_grads + intercept_grads
                self._optimizer.update_params(grads)
            self.n_iter_ += 1
            self.loss_ = accumulated_loss / X.shape[0]
            self.t_ += n_samples
            self.loss_curve_.append(self.loss_)
            if self.verbose:
                print("Iteration %d, loss = %.8f" % (self.n_iter_,
                                                     self.loss_))
            # update no_improvement_count based on training loss or
            # validation score according to early_stopping
            self._update_no_improvement_count(early_stopping, X_val, y_val)
            # for learning rate that needs to be updated at iteration end
            self._optimizer.iteration_ends(self.t_)
            if self._no_improvement_count > 2:
                # not better than last two iterations by tol.
                # stop or decrease learning rate
                if early_stopping:
                    msg = ("Validation score did not improve more than "
                           "tol=%f for two consecutive epochs." % self.tol)
                else:
                    msg = ("Training loss did not improve more than tol=%f"
                           " for two consecutive epochs." % self.tol)
                is_stopping = self._optimizer.trigger_stopping(
                    msg, self.verbose)
                if is_stopping:
                    break
                else:
                    self._no_improvement_count = 0
            if incremental:
                # partial_fit performs exactly one epoch per call.
                break
        if self.n_iter_ == self.max_iter:
            warnings.warn('Stochastic Optimizer: Maximum iterations'
                          ' reached and the optimization hasn\'t '
                          'converged yet.'
                          % (), ConvergenceWarning)
    except KeyboardInterrupt:
        # Let the user interrupt training and keep the current model.
        pass
    if early_stopping:
        # restore best weights
        self.coefs_ = self._best_coefs
        self.intercepts_ = self._best_intercepts
def _update_no_improvement_count(self, early_stopping, X_val, y_val):
    """Track how many consecutive epochs made insufficient progress.

    With early stopping on, progress is measured by the validation
    score (higher is better) and the best parameters are snapshotted;
    otherwise by the training loss (lower is better).
    """
    if early_stopping:
        # compute validation score, use that for stopping
        # let's hope no-one overloads .score with mse
        score = self.score(X_val, y_val)
        self.validation_scores_.append(score)
        if self.verbose:
            print("Validation score: %f" % self.validation_scores_[-1])
        # Not improved by more than tol -> count the epoch as stagnant.
        if score < self.best_validation_score_ + self.tol:
            self._no_improvement_count += 1
        else:
            self._no_improvement_count = 0
        if score > self.best_validation_score_:
            self.best_validation_score_ = score
            # Snapshot parameters so the best model can be restored later.
            self._best_coefs = [c.copy() for c in self.coefs_]
            self._best_intercepts = [i.copy() for i in self.intercepts_]
    else:
        latest_loss = self.loss_curve_[-1]
        if latest_loss > self.best_loss_ - self.tol:
            self._no_improvement_count += 1
        else:
            self._no_improvement_count = 0
        if latest_loss < self.best_loss_:
            self.best_loss_ = latest_loss
def fit(self, X, y):
    """Fit the model to data matrix X and target(s) y.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data.
    y : array-like, shape (n_samples,)
        The target values.

    Returns
    -------
    self : returns a trained MLP model.
    """
    # Full (non-incremental) fit: existing weights are reinitialized
    # unless warm_start is enabled.
    return self._fit(X, y, incremental=False)
@property
def partial_fit(self):
    """Fit the model to data matrix X and target y.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data.
    y : array-like, shape (n_samples,)
        The target values.

    Returns
    -------
    self : returns a trained MLP model.
    """
    # Implemented as a property so that hasattr(est, 'partial_fit') is
    # False for non-stochastic solvers: accessing the attribute raises.
    if self.solver in _STOCHASTIC_SOLVERS:
        return self._partial_fit
    raise AttributeError("partial_fit is only available for stochastic"
                         " optimizers. %s is not stochastic."
                         % self.solver)
def _partial_fit(self, X, y, classes=None):
    # `classes` is accepted only for signature compatibility with the
    # classifier override; it is unused here.  Delegates to the shared
    # fitting routine in incremental mode (one epoch on this batch).
    return self._fit(X, y, incremental=True)
def _predict(self, X):
    """Run a forward pass and return the raw network output.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data.

    Returns
    -------
    y_pred : array-like, shape (n_samples,) or (n_samples, n_outputs)
        The decision function of the samples for each class in the model.
    """
    X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])

    # Normalize hidden_layer_sizes to a list.
    hidden_layer_sizes = self.hidden_layer_sizes
    if not hasattr(hidden_layer_sizes, "__iter__"):
        hidden_layer_sizes = [hidden_layer_sizes]
    layer_units = ([X.shape[1]] + list(hidden_layer_sizes) +
                   [self.n_outputs_])

    # Pre-allocate one activation buffer per non-input layer.
    n_samples = X.shape[0]
    activations = [X]
    for units in layer_units[1:]:
        activations.append(np.empty((n_samples, units)))

    # forward propagate and return the output layer's activations
    self._forward_pass(activations)
    return activations[-1]
class MLPClassifier(BaseMultilayerPerceptron, ClassifierMixin):
    """Multi-layer Perceptron classifier.

    This model optimizes the log-loss function using LBFGS or stochastic
    gradient descent.

    .. versionadded:: 0.18

    Parameters
    ----------
    hidden_layer_sizes : tuple, length = n_layers - 2, default (100,)
        The ith element represents the number of neurons in the ith
        hidden layer.

    activation : {'identity', 'logistic', 'tanh', 'relu'}, default 'relu'
        Activation function for the hidden layer.
        - 'identity', no-op activation, useful to implement linear bottleneck,
          returns f(x) = x
        - 'logistic', the logistic sigmoid function,
          returns f(x) = 1 / (1 + exp(-x)).
        - 'tanh', the hyperbolic tan function,
          returns f(x) = tanh(x).
        - 'relu', the rectified linear unit function,
          returns f(x) = max(0, x)

    solver : {'lbfgs', 'sgd', 'adam'}, default 'adam'
        The solver for weight optimization.
        - 'lbfgs' is an optimizer in the family of quasi-Newton methods.
        - 'sgd' refers to stochastic gradient descent.
        - 'adam' refers to a stochastic gradient-based optimizer proposed
          by Kingma, Diederik, and Jimmy Ba
        Note: The default solver 'adam' works pretty well on relatively
        large datasets (with thousands of training samples or more) in terms
        of both training time and validation score.
        For small datasets, however, 'lbfgs' can converge faster and perform
        better.

    alpha : float, optional, default 0.0001
        L2 penalty (regularization term) parameter.

    batch_size : int, optional, default 'auto'
        Size of minibatches for stochastic optimizers.
        If the solver is 'lbfgs', the classifier will not use minibatch.
        When set to "auto", `batch_size=min(200, n_samples)`

    learning_rate : {'constant', 'invscaling', 'adaptive'}, default 'constant'
        Learning rate schedule for weight updates.
        - 'constant' is a constant learning rate given by
          'learning_rate_init'.
        - 'invscaling' gradually decreases the learning rate
          ``learning_rate_`` at each time step 't' using an inverse scaling
          exponent of 'power_t'.
          effective_learning_rate = learning_rate_init / pow(t, power_t)
        - 'adaptive' keeps the learning rate constant to
          'learning_rate_init' as long as training loss keeps decreasing.
          Each time two consecutive epochs fail to decrease training loss by
          at least tol, or fail to increase validation score by at least tol
          if 'early_stopping' is on, the current learning rate is divided
          by 5.
        Only used when ``solver='sgd'``.

    max_iter : int, optional, default 200
        Maximum number of iterations. The solver iterates until convergence
        (determined by 'tol') or this number of iterations.

    random_state : int or RandomState, optional, default None
        State or seed for random number generator.

    shuffle : bool, optional, default True
        Whether to shuffle samples in each iteration. Only used when
        solver='sgd' or 'adam'.

    tol : float, optional, default 1e-4
        Tolerance for the optimization. When the loss or score is not
        improving by at least tol for two consecutive iterations, unless
        `learning_rate` is set to 'adaptive', convergence is considered to
        be reached and training stops.

    learning_rate_init : double, optional, default 0.001
        The initial learning rate used. It controls the step-size
        in updating the weights. Only used when solver='sgd' or 'adam'.

    power_t : double, optional, default 0.5
        The exponent for inverse scaling learning rate.
        It is used in updating effective learning rate when the
        learning_rate is set to 'invscaling'. Only used when solver='sgd'.

    verbose : bool, optional, default False
        Whether to print progress messages to stdout.

    warm_start : bool, optional, default False
        When set to True, reuse the solution of the previous
        call to fit as initialization, otherwise, just erase the
        previous solution.

    momentum : float, default 0.9
        Momentum for gradient descent update. Should be between 0 and 1.
        Only used when solver='sgd'.

    nesterovs_momentum : boolean, default True
        Whether to use Nesterov's momentum. Only used when solver='sgd' and
        momentum > 0.

    early_stopping : bool, default False
        Whether to use early stopping to terminate training when validation
        score is not improving. If set to true, it will automatically set
        aside 10% of training data as validation and terminate training when
        validation score is not improving by at least tol for two
        consecutive epochs.
        Only effective when solver='sgd' or 'adam'

    validation_fraction : float, optional, default 0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if early_stopping is True

    beta_1 : float, optional, default 0.9
        Exponential decay rate for estimates of first moment vector in adam,
        should be in [0, 1). Only used when solver='adam'

    beta_2 : float, optional, default 0.999
        Exponential decay rate for estimates of second moment vector in
        adam, should be in [0, 1). Only used when solver='adam'

    epsilon : float, optional, default 1e-8
        Value for numerical stability in adam. Only used when solver='adam'

    Attributes
    ----------
    `classes_` : array or list of array of shape (n_classes,)
        Class labels for each output.

    `loss_` : float
        The current loss computed with the loss function.

    `coefs_` : list, length n_layers - 1
        The ith element in the list represents the weight matrix
        corresponding to layer i.

    `intercepts_` : list, length n_layers - 1
        The ith element in the list represents the bias vector corresponding
        to layer i + 1.

    n_iter_ : int,
        The number of iterations the solver has ran.

    n_layers_ : int
        Number of layers.

    `n_outputs_` : int
        Number of outputs.

    `out_activation_` : string
        Name of the output activation function.

    Notes
    -----
    MLPClassifier trains iteratively since at each time step
    the partial derivatives of the loss function with respect to the model
    parameters are computed to update the parameters.
    It can also have a regularization term added to the loss function
    that shrinks model parameters to prevent overfitting.
    This implementation works with data represented as dense numpy arrays
    or sparse scipy arrays of floating point values.

    References
    ----------
    Hinton, Geoffrey E.
        "Connectionist learning procedures." Artificial intelligence 40.1
        (1989): 185-234.

    Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of
        training deep feedforward neural networks." International Conference
        on Artificial Intelligence and Statistics. 2010.

    He, Kaiming, et al. "Delving deep into rectifiers: Surpassing
        human-level performance on imagenet classification." arXiv preprint
        arXiv:1502.01852 (2015).

    Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
        optimization." arXiv preprint arXiv:1412.6980 (2014).
    """
    def __init__(self, hidden_layer_sizes=(100,), activation="relu",
                 solver='adam', alpha=0.0001,
                 batch_size='auto', learning_rate="constant",
                 learning_rate_init=0.001, power_t=0.5, max_iter=200,
                 shuffle=True, random_state=None, tol=1e-4,
                 verbose=False, warm_start=False, momentum=0.9,
                 nesterovs_momentum=True, early_stopping=False,
                 validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8):
        sup = super(MLPClassifier, self)
        sup.__init__(hidden_layer_sizes=hidden_layer_sizes,
                     activation=activation, solver=solver, alpha=alpha,
                     batch_size=batch_size, learning_rate=learning_rate,
                     learning_rate_init=learning_rate_init, power_t=power_t,
                     max_iter=max_iter, loss='log_loss', shuffle=shuffle,
                     random_state=random_state, tol=tol, verbose=verbose,
                     warm_start=warm_start, momentum=momentum,
                     nesterovs_momentum=nesterovs_momentum,
                     early_stopping=early_stopping,
                     validation_fraction=validation_fraction,
                     beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)

    def _validate_input(self, X, y, incremental):
        """Validate X and y, and binarize the labels for training."""
        X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                         multi_output=True)
        if y.ndim == 2 and y.shape[1] == 1:
            y = column_or_1d(y, warn=True)
        if not incremental:
            self._label_binarizer = LabelBinarizer()
            self._label_binarizer.fit(y)
            self.classes_ = self._label_binarizer.classes_
        else:
            classes = unique_labels(y)
            # Bug fix: test the array's size explicitly.  Evaluating a
            # numpy array in boolean context raises ValueError when it
            # holds more than one element, so the previous
            # `if np.setdiff1d(...)` crashed whenever several unseen
            # classes appeared in `y` instead of raising the intended
            # error message below.
            if len(np.setdiff1d(classes, self.classes_,
                                assume_unique=True)) > 0:
                raise ValueError("`y` has classes not in `self.classes_`."
                                 " `self.classes_` has %s. 'y' has %s." %
                                 (self.classes_, classes))
        y = self._label_binarizer.transform(y)
        return X, y

    def predict(self, X):
        """Predict using the multi-layer perceptron classifier

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The input data.

        Returns
        -------
        y : array-like, shape (n_samples,) or (n_samples, n_classes)
            The predicted classes.
        """
        check_is_fitted(self, "coefs_")
        y_pred = self._predict(X)
        if self.n_outputs_ == 1:
            y_pred = y_pred.ravel()
        return self._label_binarizer.inverse_transform(y_pred)

    @property
    def partial_fit(self):
        """Fit the model to data matrix X and target y.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The input data.
        y : array-like, shape (n_samples,)
            The target values.
        classes : array, shape (n_classes)
            Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.

        Returns
        -------
        self : returns a trained MLP model.
        """
        if self.solver not in _STOCHASTIC_SOLVERS:
            raise AttributeError("partial_fit is only available for stochastic"
                                 " optimizer. %s is not stochastic"
                                 % self.solver)
        return self._partial_fit

    def _partial_fit(self, X, y, classes=None):
        if _check_partial_fit_first_call(self, classes):
            self._label_binarizer = LabelBinarizer()
            if type_of_target(y).startswith('multilabel'):
                self._label_binarizer.fit(y)
            else:
                self._label_binarizer.fit(classes)
        super(MLPClassifier, self)._partial_fit(X, y)
        return self

    def predict_log_proba(self, X):
        """Return the log of probability estimates.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The input data.

        Returns
        -------
        log_y_prob : array-like, shape (n_samples, n_classes)
            The predicted log-probability of the sample for each class
            in the model, where classes are ordered as they are in
            `self.classes_`. Equivalent to log(predict_proba(X))
        """
        y_prob = self.predict_proba(X)
        # Take the log in place: y_prob is a fresh array owned by us.
        return np.log(y_prob, out=y_prob)

    def predict_proba(self, X):
        """Probability estimates.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The input data.

        Returns
        -------
        y_prob : array-like, shape (n_samples, n_classes)
            The predicted probability of the sample for each class in the
            model, where classes are ordered as they are in
            `self.classes_`.
        """
        check_is_fitted(self, "coefs_")
        y_pred = self._predict(X)
        if self.n_outputs_ == 1:
            y_pred = y_pred.ravel()
        if y_pred.ndim == 1:
            # Binary case: the network outputs P(class 1); derive P(class 0).
            return np.vstack([1 - y_pred, y_pred]).T
        else:
            return y_pred
class MLPRegressor(BaseMultilayerPerceptron, RegressorMixin):
    """Multi-layer Perceptron regressor.

    This model optimizes the squared-loss using LBFGS or stochastic
    gradient descent.

    .. versionadded:: 0.18

    Parameters
    ----------
    hidden_layer_sizes : tuple, length = n_layers - 2, default (100,)
        The ith element represents the number of neurons in the ith
        hidden layer.

    activation : {'identity', 'logistic', 'tanh', 'relu'}, default 'relu'
        Activation function for the hidden layer.
        - 'identity', no-op activation, useful to implement linear bottleneck,
          returns f(x) = x
        - 'logistic', the logistic sigmoid function,
          returns f(x) = 1 / (1 + exp(-x)).
        - 'tanh', the hyperbolic tan function,
          returns f(x) = tanh(x).
        - 'relu', the rectified linear unit function,
          returns f(x) = max(0, x)

    solver : {'lbfgs', 'sgd', 'adam'}, default 'adam'
        The solver for weight optimization.
        - 'lbfgs' is an optimizer in the family of quasi-Newton methods.
        - 'sgd' refers to stochastic gradient descent.
        - 'adam' refers to a stochastic gradient-based optimizer proposed by
          Kingma, Diederik, and Jimmy Ba
        Note: The default solver 'adam' works pretty well on relatively
        large datasets (with thousands of training samples or more) in terms
        of both training time and validation score.
        For small datasets, however, 'lbfgs' can converge faster and perform
        better.

    alpha : float, optional, default 0.0001
        L2 penalty (regularization term) parameter.

    batch_size : int, optional, default 'auto'
        Size of minibatches for stochastic optimizers.
        If the solver is 'lbfgs', the classifier will not use minibatch.
        When set to "auto", `batch_size=min(200, n_samples)`

    learning_rate : {'constant', 'invscaling', 'adaptive'}, default 'constant'
        Learning rate schedule for weight updates.
        - 'constant' is a constant learning rate given by
          'learning_rate_init'.
        - 'invscaling' gradually decreases the learning rate
          ``learning_rate_`` at each time step 't' using an inverse scaling
          exponent of 'power_t'.
          effective_learning_rate = learning_rate_init / pow(t, power_t)
        - 'adaptive' keeps the learning rate constant to
          'learning_rate_init' as long as training loss keeps decreasing.
          Each time two consecutive epochs fail to decrease training loss by
          at least tol, or fail to increase validation score by at least tol
          if 'early_stopping' is on, the current learning rate is divided
          by 5.
        Only used when solver='sgd'.

    max_iter : int, optional, default 200
        Maximum number of iterations. The solver iterates until convergence
        (determined by 'tol') or this number of iterations.

    random_state : int or RandomState, optional, default None
        State or seed for random number generator.

    shuffle : bool, optional, default True
        Whether to shuffle samples in each iteration. Only used when
        solver='sgd' or 'adam'.

    tol : float, optional, default 1e-4
        Tolerance for the optimization. When the loss or score is not
        improving by at least tol for two consecutive iterations, unless
        `learning_rate` is set to 'adaptive', convergence is considered to
        be reached and training stops.

    learning_rate_init : double, optional, default 0.001
        The initial learning rate used. It controls the step-size
        in updating the weights. Only used when solver='sgd' or 'adam'.

    power_t : double, optional, default 0.5
        The exponent for inverse scaling learning rate.
        It is used in updating effective learning rate when the
        learning_rate is set to 'invscaling'. Only used when solver='sgd'.

    verbose : bool, optional, default False
        Whether to print progress messages to stdout.

    warm_start : bool, optional, default False
        When set to True, reuse the solution of the previous
        call to fit as initialization, otherwise, just erase the
        previous solution.

    momentum : float, default 0.9
        Momentum for gradient descent update. Should be between 0 and 1.
        Only used when solver='sgd'.

    nesterovs_momentum : boolean, default True
        Whether to use Nesterov's momentum. Only used when solver='sgd' and
        momentum > 0.

    early_stopping : bool, default False
        Whether to use early stopping to terminate training when validation
        score is not improving. If set to true, it will automatically set
        aside 10% of training data as validation and terminate training when
        validation score is not improving by at least tol for two
        consecutive epochs.
        Only effective when solver='sgd' or 'adam'

    validation_fraction : float, optional, default 0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if early_stopping is True

    beta_1 : float, optional, default 0.9
        Exponential decay rate for estimates of first moment vector in adam,
        should be in [0, 1). Only used when solver='adam'

    beta_2 : float, optional, default 0.999
        Exponential decay rate for estimates of second moment vector in
        adam, should be in [0, 1). Only used when solver='adam'

    epsilon : float, optional, default 1e-8
        Value for numerical stability in adam. Only used when solver='adam'

    Attributes
    ----------
    `loss_` : float
        The current loss computed with the loss function.

    `coefs_` : list, length n_layers - 1
        The ith element in the list represents the weight matrix
        corresponding to layer i.

    `intercepts_` : list, length n_layers - 1
        The ith element in the list represents the bias vector corresponding
        to layer i + 1.

    n_iter_ : int,
        The number of iterations the solver has ran.

    n_layers_ : int
        Number of layers.

    `n_outputs_` : int
        Number of outputs.

    `out_activation_` : string
        Name of the output activation function.

    Notes
    -----
    MLPRegressor trains iteratively since at each time step
    the partial derivatives of the loss function with respect to the model
    parameters are computed to update the parameters.
    It can also have a regularization term added to the loss function
    that shrinks model parameters to prevent overfitting.
    This implementation works with data represented as dense and sparse
    numpy arrays of floating point values.

    References
    ----------
    Hinton, Geoffrey E.
        "Connectionist learning procedures." Artificial intelligence 40.1
        (1989): 185-234.

    Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of
        training deep feedforward neural networks." International Conference
        on Artificial Intelligence and Statistics. 2010.

    He, Kaiming, et al. "Delving deep into rectifiers: Surpassing
        human-level performance on imagenet classification." arXiv preprint
        arXiv:1502.01852 (2015).

    Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
        optimization." arXiv preprint arXiv:1412.6980 (2014).
    """
    def __init__(self, hidden_layer_sizes=(100,), activation="relu",
                 solver='adam', alpha=0.0001,
                 batch_size='auto', learning_rate="constant",
                 learning_rate_init=0.001,
                 power_t=0.5, max_iter=200, shuffle=True,
                 random_state=None, tol=1e-4,
                 verbose=False, warm_start=False, momentum=0.9,
                 nesterovs_momentum=True, early_stopping=False,
                 validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8):
        # Forward everything to the shared base class, pinning the loss
        # to squared error for regression.
        super(MLPRegressor, self).__init__(
            hidden_layer_sizes=hidden_layer_sizes,
            activation=activation, solver=solver, alpha=alpha,
            batch_size=batch_size, learning_rate=learning_rate,
            learning_rate_init=learning_rate_init, power_t=power_t,
            max_iter=max_iter, loss='squared_loss', shuffle=shuffle,
            random_state=random_state, tol=tol, verbose=verbose,
            warm_start=warm_start, momentum=momentum,
            nesterovs_momentum=nesterovs_momentum,
            early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)

    def predict(self, X):
        """Predict using the multi-layer perceptron model.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The input data.

        Returns
        -------
        y : array-like, shape (n_samples, n_outputs)
            The predicted values.
        """
        check_is_fitted(self, "coefs_")
        y_pred = self._predict(X)
        # Flatten single-output predictions to the conventional 1-D shape.
        return y_pred.ravel() if y_pred.shape[1] == 1 else y_pred

    def _validate_input(self, X, y, incremental):
        """Validate X and y for regression (numeric, possibly multi-output)."""
        X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                         multi_output=True, y_numeric=True)
        # Collapse a single-column 2-D target to 1-D, warning the caller.
        if y.ndim == 2 and y.shape[1] == 1:
            y = column_or_1d(y, warn=True)
        return X, y
|
{
"content_hash": "b777e7233ce2e488453c7f338a76a3a1",
"timestamp": "",
"source": "github",
"line_count": 1267,
"max_line_length": 79,
"avg_line_length": 39.431728492501975,
"alnum_prop": 0.5855884707766214,
"repo_name": "giorgiop/scikit-learn",
"id": "9f1d1320d196e2fd3abe307940b1bad5ddec1d12",
"size": "49960",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sklearn/neural_network/multilayer_perceptron.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "416843"
},
{
"name": "C++",
"bytes": "140261"
},
{
"name": "Makefile",
"bytes": "1630"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6856728"
},
{
"name": "Shell",
"bytes": "15594"
}
],
"symlink_target": ""
}
|
from players.player_reinforce_1 import *
# PLAYER REINFORCE
class player_reinforce_1A(player_reinforce_1):
    """REINFORCE player using a single observation frame and a small
    fully-connected policy network with a softmax output over actions."""

    NUM_FRAMES = 1
    LEARNING_RATE = 1e-4
    REWARD_DISCOUNT = 0.99

    def __init__(self):
        player_reinforce_1.__init__(self)

    def process(self, obsv):
        """Stack the last NUM_FRAMES stored observations along axis 1."""
        frames = [self.obsv_list[i] for i in range(self.NUM_FRAMES)]
        return np.stack(frames, axis=1)

    def network(self):
        """Declare the policy network: observation placeholder, two hidden
        fully-connected ReLU layers, softmax output over actions."""
        self.brain.addInput(shape=[None, self.obsv_shape[0], self.NUM_FRAMES],
                            name='Observation')
        self.brain.setLayerDefaults(type=tb.layers.fully,
                                    activation=tb.activs.relu)
        self.brain.addLayer(out_channels=64, input='Observation')
        self.brain.addLayer(out_channels=64)
        self.brain.addLayer(out_channels=self.num_actions,
                            activation=tb.activs.softmax,
                            name='Output')
|
{
"content_hash": "5112d5fcf5a53bab9157e7225e940f1b",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 97,
"avg_line_length": 27.642857142857142,
"alnum_prop": 0.5271317829457365,
"repo_name": "NiloFreitas/Deep-Reinforcement-Learning",
"id": "a56e9a5bdd9400c6ce042b835142d1f0e0a9b27d",
"size": "1161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reinforcement/players/player_reinforce_1A.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "181725"
},
{
"name": "Python",
"bytes": "254224"
},
{
"name": "Shell",
"bytes": "530"
}
],
"symlink_target": ""
}
|
'''
Written by Thilina Rajapakse
https://github.com/ThilinaRajapakse/BERT_binary_text_classification/blob/master/converter.py
'''
from __future__ import absolute_import, division, print_function
import csv
import os
import sys
import logging
logger = logging.getLogger()
csv.field_size_limit(2147483647) # Increase CSV reader's field limit incase we have long text.
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Construct an InputExample.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For
                single-sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second
                sequence. Only must be specified for sequence-pair tasks.
            label: (Optional) string. The label of the example. This should
                be specified for train and dev examples, but not for test
                examples.
        """
        self.guid, self.text_a = guid, text_a
        self.text_b, self.label = text_b, label
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated-values file into a list of row lists."""
        with open(input_file, "r", encoding="utf-8") as f:
            rows = csv.reader(f, delimiter="\t", quotechar=quotechar)
            if sys.version_info[0] == 2:
                # Python 2: decode each cell from UTF-8 explicitly.
                return [[unicode(cell, 'utf-8') for cell in row]
                        for row in rows]
            return [row for row in rows]
class BinaryClassificationProcessor(DataProcessor):
    """Processor for a binary classification dataset laid out as TSV files."""

    def get_train_examples(self, data_dir):
        """See base class."""
        rows = self._read_tsv(os.path.join(data_dir, "train.tsv"))
        return self._create_examples(rows, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        rows = self._read_tsv(os.path.join(data_dir, "dev.tsv"))
        return self._create_examples(rows, "dev")

    def get_labels(self, output_mode):
        """See base class.

        Note that I (TU) have edited this to permit
        regression."""
        # Regression has no discrete label set; classification is binary.
        if output_mode == 'regression':
            return [None]
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets.

        Text comes from column 3, the label from column 1."""
        return [InputExample(guid="%s-%s" % (set_type, idx),
                             text_a=row[3], text_b=None, label=row[1])
                for idx, row in enumerate(lines)]
|
{
"content_hash": "a036b3b36cdafebff45df5eb6876b7e4",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 94,
"avg_line_length": 34.649484536082475,
"alnum_prop": 0.6084498661112764,
"repo_name": "tedunderwood/fiction",
"id": "cd75bce19b52b72df4e01602c80ce9ef9f5243fa",
"size": "3361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bert/tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "202591"
},
{
"name": "Python",
"bytes": "436754"
},
{
"name": "R",
"bytes": "20921"
},
{
"name": "Rebol",
"bytes": "263"
},
{
"name": "Shell",
"bytes": "1127"
}
],
"symlink_target": ""
}
|
from devito.data import FULL
__all__ = ['make_clause_reduction']
def make_clause_reduction(symbols):
    """Build an OpenMP/OpenACC-style ``reduction(+:...)`` clause string.

    Indexed symbols are rendered with one bracketed bound per dimension;
    plain symbols are rendered by their string form.
    """
    rendered = []
    for sym in symbols:
        if not sym.is_Indexed:
            rendered.append(str(sym))
            continue
        func = sym.function
        bounds = []
        for idx, dim in zip(sym.indices, func.dimensions):
            if idx.is_Number:
                bounds.append('[%s]' % idx)
            else:
                # Languages such as OpenMP and OpenACC expect a range
                # as input to a reduction clause, such as
                # `reduction(+:f[0:f_vec->size[1]])`
                bounds.append('[0:%s]' % func._C_get_field(FULL, dim).size)
        rendered.append('%s%s' % (sym.name, ''.join(bounds)))
    return 'reduction(+:%s)' % ','.join(rendered)
|
{
"content_hash": "94d6a228085b348781bd5366dd7c658b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 74,
"avg_line_length": 34.34782608695652,
"alnum_prop": 0.47341772151898737,
"repo_name": "opesci/devito",
"id": "a986f2255e75fe2362ec99f531dba7a4d190675f",
"size": "790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devito/passes/iet/languages/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "812"
},
{
"name": "Python",
"bytes": "1683413"
},
{
"name": "Shell",
"bytes": "3900"
}
],
"symlink_target": ""
}
|
from livestreamer.compat import str, bytes, parse_qs
from livestreamer.plugins import Plugin, PluginError, NoStreamsError
from livestreamer.stream import HTTPStream, StreamType
from livestreamer.utils import urlget, verifyjson
import re
import json
class Youtube(Plugin):
    """Plugin extracting live HTTP streams from YouTube pages."""

    @classmethod
    def can_handle_url(self, url):
        return "youtube.com" in url

    def _get_stream_info(self, url):
        """Fetch *url* and extract the player-config JSON embedded in the
        page. Returns the parsed dict, or None when no config is found.

        Raises PluginError when the embedded JSON cannot be parsed."""
        res = urlget(url)
        data = res.text
        config = None

        # Two known page layouts embed the player config differently; the
        # second match (when present) takes precedence. Raw strings so the
        # regex escapes are not mangled by Python string escaping.
        match = re.search(r"'PLAYER_CONFIG': (.+)\n.+}\);", data)
        if match:
            config = match.group(1)

        match = re.search(r"yt.playerConfig = (.+)\;\n", data)
        if match:
            config = match.group(1)

        if config:
            try:
                parsed = json.loads(config)
            except ValueError as err:
                raise PluginError(("Unable to parse config JSON: {0})").format(err))

            return parsed

    def _parse_stream_map(self, streammap):
        """Split the comma-separated, URL-encoded stream map into a list of
        query-string dicts (each value is a list, per parse_qs)."""
        streams = []

        for stream_qs in streammap.split(","):
            stream = parse_qs(stream_qs)
            streams.append(stream)

        return streams

    def _parse_format_map(self, formatsmap):
        """Map itag -> vertical-resolution label, e.g. '22' -> '720p'."""
        formats = {}

        if len(formatsmap) == 0:
            return formats

        for format in formatsmap.split(","):
            s = format.split("/")
            (w, h) = s[1].split("x")
            formats[s[0]] = h + "p"

        return formats

    def _get_streams(self, type):
        """Build a quality -> HTTPStream mapping for a live YouTube page.

        Raises NoStreamsError when the page has no player config or is not
        a live playback."""
        if type not in (None, StreamType.HTTP):
            return {}

        info = self._get_stream_info(self.url)

        if not info:
            raise NoStreamsError(self.url)

        args = verifyjson(info, "args")

        if "live_playback" not in args or args["live_playback"] == "0":
            raise NoStreamsError(self.url)

        streams = {}

        uestreammap = verifyjson(args, "url_encoded_fmt_stream_map")
        fmtlist = verifyjson(args, "fmt_list")

        streammap = self._parse_stream_map(uestreammap)
        formatmap = self._parse_format_map(fmtlist)

        for streaminfo in streammap:
            if not ("url" in streaminfo and "sig" in streaminfo):
                continue

            stream = HTTPStream(self.session, streaminfo["url"][0],
                                params=dict(signature=streaminfo["sig"][0]))

            if streaminfo["itag"][0] in formatmap:
                quality = formatmap[streaminfo["itag"][0]]
            else:
                # BUG FIX: parse_qs values are lists; taking the first
                # element gives a hashable string key. The original used
                # the whole list, which raises TypeError as a dict key.
                quality = streaminfo["quality"][0]

            streams[quality] = stream

        return streams

__plugin__ = Youtube
|
{
"content_hash": "41b053ea8f5be555867512a62dbfdebd",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 84,
"avg_line_length": 27.568421052631578,
"alnum_prop": 0.5574646811760214,
"repo_name": "derekzhang79/livestreamer",
"id": "20fb1a985bd6a3d2e0bf9c3e125ee2d96b7eb0ad",
"size": "2619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/livestreamer/plugins/youtube.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
}
|
'''
Setup script for pyramid_mongo_scaffolds.
'''
from setuptools import setup, find_packages

# Runtime dependencies: pinned to the pyramid 1.4 series.
requires = [
    'pyramid>=1.4,<1.5',
]

# Entry points exposing one pcreate scaffold per supported MongoDB ODM.
scaffolds = """
[pyramid.scaffold]
mongoengine = pyramid_mongo_scaffolds:PyramidMongoengineTemplate
mongokit = pyramid_mongo_scaffolds:PyramidMongokitTemplate
ming = pyramid_mongo_scaffolds:PyramidMingTemplate
mongoalchemy = pyramid_mongo_scaffolds:PyramidMongoAlchemyTemplate
"""

# Trove classifiers for PyPI.
classifiers = [
    "Programming Language :: Python",
    "Framework :: Pyramid",
    "Topic :: Internet :: WWW/HTTP",
    "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
]

setup(
    name='pyramid_mongo_scaffolds',
    version='0.0.1',
    description='A bunch of scaffolds of pyramid and mongo ODMs',
    classifiers=classifiers,
    author='Mukund K',
    author_email='mukund.kri@gmail.com',
    # NOTE(review): 'url' should be a real homepage URL, not placeholder text.
    url='My github page',
    keywords='pyramid mongodb mongo scaffold',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=requires,
    entry_points=scaffolds,
)
|
{
"content_hash": "4ce9fa9cfd313175972a0e38f80d8971",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 66,
"avg_line_length": 25.476190476190474,
"alnum_prop": 0.6869158878504673,
"repo_name": "mukund-kri/pyramid-mongo-scaffolds",
"id": "83b83c84e61b27684b5721cfbdae073daa9cbdd4",
"size": "1070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24060"
},
{
"name": "Python",
"bytes": "4228"
}
],
"symlink_target": ""
}
|
import parser as p
storeUrl = '/tmp/view-source_https___www.ulmart.ru.html'
# storeUrl = 'https://ulmart.ru'
mainMenuClass = 'li.b-list__item.b-dropdown.b-dropdown_catalog-menu.dropdown.dropdown_catalog-menu > a'
shopId = 0
def product_process(url: str, params: dict):
    """Process a single product page.

    BUG FIX: the original used ``hasattr(params, 'page')``, which is always
    False for a plain dict (keys are not attributes), so ``page`` was
    unconditionally 0. Use a key lookup with a default instead.
    """
    page = params.get('page', 0)
def page_process(url: str, params: dict):
    # Placeholder: intended to walk one catalog listing page (called from
    # the __main__ block below with {'page': 0}). Not yet implemented.
    pass
if __name__ == '__main__':
    # Register the store and obtain a parser over the locally saved page.
    shopId = p.insert_store(name='Ulmart', url=storeUrl)
    _ = p.get_parser('/tmp/view-source_https___www.ulmart.ru.html')
    # Select the top-level catalog menu entries.
    # NOTE(review): the 'sity' cookie value '281' is defined by the parser
    # module / target site — confirm its semantics before changing.
    menu = _.find(mainMenuClass, cookies={'sity': '281'})
    if len(menu) < 1:
        print('err')
        exit(1)
    for i in menu:
        # Only follow anchors whose href is a catalog URL.
        if 'href' in i.keys() and i.get('href').find('/catalog/') == 0:
            page_process(i.get('href'), {'page': 0,})
|
{
"content_hash": "628a804833c4a279ad283e539984bf6a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 103,
"avg_line_length": 26,
"alnum_prop": 0.607940446650124,
"repo_name": "StixoTvorec/py-try",
"id": "77fa5c8477d7f168a6dbdd4203a3dd4ff2823fc6",
"size": "850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Parsers/Ulmart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "500827"
}
],
"symlink_target": ""
}
|
import rrd_config as C
import re
import subprocess
import os
# Collect UPS metrics from `apcaccess` output and push them into ups.rrd.
values = [None] * len(C.Ups._fields)

apc = subprocess.Popen(['apcaccess'], stdout=subprocess.PIPE)
while True:
    line = apc.stdout.readline()
    if not line:
        break
    for i in range(0, len(C.Ups._fields)):
        if line.startswith(C.Ups._fields[i]):
            # BUG FIX: escape the decimal point. The original pattern used a
            # bare '.', which matches any character, so a value like
            # '230 4' could be captured whole instead of just '230'.
            m = re.search(r'.*:\s+(\d+(?:\.\d+|))', line)
            if m:
                values[i] = m.group(1)

# Every configured field must have been found in the apcaccess output.
assert None not in values

# Feed all values atomically into the RRD; fail loudly on a non-zero exit.
assert subprocess.Popen(['rrdtool', 'update', os.path.join(C.rrd_path, 'ups.rrd'),
                        '--template', ':'.join(C.Ups._fields), '--',
                        'N:' + ':'.join(values)]).wait() == 0
|
{
"content_hash": "ae41722d08f22cf6aa45f0694bd3a322",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 82,
"avg_line_length": 29.09090909090909,
"alnum_prop": 0.571875,
"repo_name": "rain87/pc-health",
"id": "a77d1114e72318b4e7c14b86a60b992134b3055f",
"size": "659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "collect_ups.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1762"
},
{
"name": "Python",
"bytes": "20384"
},
{
"name": "Shell",
"bytes": "1043"
}
],
"symlink_target": ""
}
|
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
# Remaining imports.
import analysis
import ast
import data
import optparse
import nparser
import plankton
import re
import schedule
import token
# Encapsulates the compilation of an individual module.
class ModuleCompile(object):

    def __init__(self, manifest_file):
        self.manifest_file = manifest_file
        self.module = None

    def process(self):
        """Parse the manifest, parse each listed source file into the
        module, then run scope analysis over the result."""
        # Read the manifest.
        manifest = self.parse_manifest()
        path = manifest.get_path()
        self.module = ast.Module(path.get_name())
        root = os.path.dirname(self.manifest_file)
        # Scan through and load the source files.
        for file in manifest.get_sources():
            filename = os.path.join(root, file)
            self.parse_source_file(filename)
        # Do post-processing.
        analysis.scope_analyze(self.module)

    def parse_manifest(self):
        """Parse and return the module manifest."""
        # Use a context manager so the file handle is closed promptly
        # instead of relying on garbage collection.
        with open(self.manifest_file, "rt") as handle:
            source = handle.read()
        tokens = token.tokenize(source)
        return nparser.ModuleParser(tokens).parse_module_manifest()

    def parse_source_file(self, name):
        """Parse a single source file into the current module."""
        with open(name, "rt") as handle:
            source = handle.read()
        tokens = token.tokenize(source)
        nparser.Parser(tokens, self.module, name).parse_program()

    def add_to_library(self, library):
        """Add this module's unbound form to the given library."""
        unbound = self.module.as_unbound_module()
        library.add_module(unbound.path, unbound)
# Encapsulates the compilation of source files into a library.
class LibraryCompile(object):

    def __init__(self, options):
        self.options = options
        self.library = data.Library()

    def run(self):
        self.compile_modules()
        self.write_output()

    def compile_modules(self):
        """Compile each module manifest and collect it into the library."""
        for module_manifest in self.options["modules"]:
            module = ModuleCompile(module_manifest)
            module.process()
            module.add_to_library(self.library)

    def write_output(self):
        """Serialize the library with plankton and write it to 'out'."""
        blob = plankton.Encoder().encode(self.library)
        # Context manager guarantees the output file is flushed and closed
        # even when the write raises.
        with open(self.options['out'], 'wb') as handle:
            handle.write(blob)
# Encapsulates stats relating to the main script.
class Main(object):
    """Command-line driver for the compiler script.

    Parses arguments, optionally runs as a plankton stream filter, and
    otherwise schedules parsing/compilation/output of the requested units.
    NOTE: this file uses Python 2 syntax (print statements)."""

    def __init__(self):
        self.options = None    # raw plankton options object
        self.flags = None      # flag namespace extracted from options
        self.scheduler = schedule.TaskScheduler()

    # Parses the script arguments, storing the values in the appropriate fields.
    def parse_arguments(self):
        self.options = plankton.options.parse(sys.argv[1:])
        self.flags = self.options.get_flags()
        compile = self.flags.compile
        if compile:
            self.compile_flags = compile
        else:
            self.compile_flags = None

    # If the filter option is set, filters input and return True. Otherwise
    # returns False.
    def run_filter(self):
        if not self.flags.filter:
            return False
        # Lines of the form "p64/<base64>" are decoded (or disassembled
        # when --disass is set); everything else is echoed through.
        pattern = re.compile(r'^p64/([a-zA-Z0-9=+/]+)$')
        for line in sys.stdin:
            match = pattern.match(line.strip())
            if match:
                code = match.group(1)
                decoder = plankton.Decoder({})
                if self.flags.disass:
                    print decoder.base64disassemble(code)
                else:
                    data = decoder.base64decode(code)
                    print plankton.stringify(data)
            else:
                print line
        return True

    # Main entry-point.
    def run(self):
        self.parse_arguments()
        if self.run_filter():
            return
        # First load the units to compile without actually doing it.
        self.schedule_files()
        self.schedule_libraries()
        # Then compile everything in the right order.
        self.scheduler.run()

    # Processes any --file arguments. These are used by the nunit tests.
    def schedule_files(self):
        files = self.flags.files or []
        for filename in files:
            source = open(filename, "rt").read()
            tokens = token.tokenize(source)
            module = ast.Module(filename)
            nparser.Parser(tokens, module).parse_program()
            self.schedule_for_compile(module)
            self.schedule_for_output(module)

    def schedule_libraries(self):
        # Only meaningful when compile flags carry a build_library request.
        if not self.compile_flags or not "build_library" in self.compile_flags:
            return
        process = LibraryCompile(self.compile_flags["build_library"])
        process.run()

    # Schedules a unit for compilation at the appropriate time relative to any
    # of its dependencies.
    def schedule_for_compile(self, unit):
        # Analysis doesn't depend on anything else so we can just go ahead and get
        # that out of the way.
        analysis.scope_analyze(unit)

    # Schedules the present program of the given unit to be output to stdout when
    # all the prerequisites for doing so have been run.
    def schedule_for_output(self, unit):
        program = unit.get_present_program()
        self.output_value(program)

    def run_parse_input(self, inputs, parse_thunk):
        # Parse each raw input expression into a unit and schedule it.
        for expr in inputs:
            tokens = token.tokenize(expr)
            unit = parse_thunk(tokens)
            # Implicitly import the core module into the oldest stage. There needs to
            # better model for this but for now it helps make builtin methods slightly
            # less magic.
            unit.get_oldest_stage().add_import(data.Path(['core']))
            self.schedule_for_compile(unit)
            self.schedule_for_output(unit)

    def output_value(self, value):
        # Write either to stdout or to the --out file, base64-wrapped when
        # the --base64 flag is set.
        if self.flags.out is None:
            out = sys.stdout
        else:
            out = open(self.flags.out, "wb")
        encoder = plankton.Encoder()
        if self.flags.base64:
            print "p64/%s" % encoder.base64encode(value)
        else:
            out.write(encoder.encode(value))
# Script entry point.
if __name__ == '__main__':
    Main().run()
|
{
"content_hash": "ca5cbd9ceb3a4df3b70bef037201721c",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 80,
"avg_line_length": 29.61111111111111,
"alnum_prop": 0.6744840525328331,
"repo_name": "tundra/neutrino",
"id": "b15abb600b17205172e0496737c7a9a12c6b128a",
"size": "5445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/neutrino/main.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "929089"
},
{
"name": "C++",
"bytes": "384606"
},
{
"name": "Groff",
"bytes": "109315"
},
{
"name": "Nemerle",
"bytes": "38670"
},
{
"name": "Objective-C",
"bytes": "27585"
},
{
"name": "Python",
"bytes": "156047"
},
{
"name": "Shell",
"bytes": "1441"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
# URL routing (pre-Django-1.8 `patterns` style): home view by dotted path,
# blog and twitch apps mounted under namespaces, plus the admin site.
urlpatterns = patterns('',
    url(r'^$', 'CloudBroWeb.views.home', name='home'),
    url(r'^blog/', include('blog.urls', namespace='blog')),
    url(r'^twitch/', include('twitch.urls', namespace='twitch')),
    url(r'^admin/', include(admin.site.urls)),
)
|
{
"content_hash": "e09dde8b013f80a64df574ff681f4464",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 84,
"avg_line_length": 49.111111111111114,
"alnum_prop": 0.5158371040723982,
"repo_name": "nicka101/CloudBroWeb",
"id": "8f568ce4d8ed5804231d95b1e906136e248b69fb",
"size": "442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CloudBroWeb/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "262876"
},
{
"name": "HTML",
"bytes": "13493"
},
{
"name": "JavaScript",
"bytes": "7244"
},
{
"name": "Python",
"bytes": "16023"
}
],
"symlink_target": ""
}
|
import sys
from ovirtcli.infrastructure.options import OvirtCliOptionParser
from ovirtcli.infrastructure.context import OvirtCliExecutionContext
from ovirtcli.infrastructure.object import create
from ovirtcli.shell.engineshell import EngineShell
def main():
    """Entry point: parse CLI options and start the interactive shell."""
    # Parse the command line:
    option_parser = create(OvirtCliOptionParser)
    parsed_opts, args = option_parser.parse_args()

    # Convert the options to a plain dictionary so the rest of the code
    # is decoupled from optparse specifics.
    opts = vars(parsed_opts)

    # Build the execution context and hand it to the command interpreter.
    context = OvirtCliExecutionContext(opts=opts, args=args)
    EngineShell(context).onecmd_loop()
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
|
{
"content_hash": "52968b1f3611bb95f4cf8b49578812f8",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 74,
"avg_line_length": 29.76,
"alnum_prop": 0.7379032258064516,
"repo_name": "oVirt/ovirt-engine-cli",
"id": "ac7d5ced2198f04e03f1e960bdfe458059be6214",
"size": "1336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ovirtcli/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "870"
},
{
"name": "Python",
"bytes": "365026"
},
{
"name": "Shell",
"bytes": "309"
}
],
"symlink_target": ""
}
|
"""Gather conmats."""
import re
import numpy as np
import itertools as iter
from itertools import combinations
from mne.viz import circular_layout, plot_connectivity_circle
def _atoi(text):
"""Get digit."""
return int(text) if text.isdigit() else text
def natural_keys(text):
    """alist.sort(key=natural_keys) sorts in human order.

    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    # Digit runs become ints so '10' sorts after '2'; non-digit chunks stay
    # as strings (the helper conversion is inlined here).
    chunks = re.split(r'(\d+)', str(text))
    return [int(chunk) if chunk.isdigit() else chunk for chunk in chunks]
def return_full_mat(mat, elec_labels, all_elec_labels):
    """Embed *mat* (indexed by elec_labels) into a full matrix indexed by
    all_elec_labels.

    Entries for electrodes absent from elec_labels remain NaN, as does the
    diagonal. When the two triangular parts of *mat* differ (directed
    input), the matrix is symmetrized by adding its transpose first.
    """
    n_labs = len(elec_labels)
    assert len(mat.shape) == 2 and mat.shape[0] == mat.shape[1], (
        "Error mat shape = {} should be a 2D squared ndarray "
        "(matrix)".format(mat.shape))
    assert n_labs == mat.shape[0] and n_labs == mat.shape[1], (
        "Error, both mat dimension {} {} should be the same as "
        "elec_labels {}".format(mat.shape[0], mat.shape[1],
                                len(elec_labels)))

    # if undirected (values are not the same on both triangular parts)
    lower_sum = np.sum(mat[np.tril_indices(mat.shape[0], k=-1)])
    upper_sum = np.sum(mat[np.triu_indices(mat.shape[0], k=1)])
    if lower_sum != upper_sum:
        mat = mat + np.transpose(mat)

    # building full_mat from all_elec_labels
    n_all = len(all_elec_labels)
    full_mat = np.empty((n_all, n_all))
    full_mat[:] = np.nan

    for lab_a, lab_b in iter.permutations(all_elec_labels, 2):
        if lab_a not in elec_labels or lab_b not in elec_labels:
            continue
        row_all = all_elec_labels.index(lab_a)
        col_all = all_elec_labels.index(lab_b)
        row = elec_labels.index(lab_a)
        col = elec_labels.index(lab_b)
        full_mat[row_all, col_all] = mat[row, col]

    return full_mat
def plot_tab_circular_connectivity(list_list_conmat, all_elec_labels,
                                   plot_filename, coh_low_thresh=0.0,
                                   coh_high_thresh=1.0,
                                   color_bar='gist_rainbow', column_labels=[],
                                   row_labels=[]):
    """Plot a grid of circular connectivity diagrams and save it to file.

    Rows of the grid correspond to entries of list_list_conmat (sessions),
    columns to the windows inside each entry; each cell is one connectivity
    circle over all_elec_labels, with values colored between
    coh_low_thresh and coh_high_thresh. The colorbar is drawn only on the
    last column of each row. The figure is written to plot_filename.

    NOTE(review): the mutable default arguments (column_labels=[],
    row_labels=[]) are only read here, but consider None defaults if this
    function is ever changed to modify them.
    """
    import matplotlib.pyplot as plt
    nb_lines = len(list_list_conmat)
    print(nb_lines)
    print(len(list_list_conmat[0]))
    # All rows must have the same number of windows (columns).
    if len(list_list_conmat) != 1:
        for i, j in combinations(list(range(nb_lines)), 2):
            assert len(list_list_conmat[i]) == len(list_list_conmat[j]), (
                "Error, not all the same length {} != "
                "{}".format(len(list_list_conmat[i]),
                            len(list_list_conmat[j])))
    nb_cols = len(list_list_conmat[0])
    # for i in
    # print list_list_conmat[0][0].shape
    # Angles
    # Node layout: all electrodes on a circle, split into two groups at the
    # half-way boundary.
    bounds = [0, len(all_elec_labels) / 2]
    all_node_angles = circular_layout(all_elec_labels,
                                      node_order=all_elec_labels, start_pos=90,
                                      group_boundaries=bounds)
    # print all_node_angles
    fig, axes = plt.subplots(nrows=nb_lines, ncols=nb_cols,
                             figsize=(4 * nb_cols, 4 * nb_lines),
                             facecolor='black')
    # fig = plt.figure(num=None, figsize=(4*nb_cols, 4*nb_lines),
    # facecolor='black')
    # Default labels when none were supplied.
    if len(column_labels) == 0:
        column_labels = ['Column {}'.format(col) for col in range(nb_cols)]
    if len(row_labels) == 0:
        row_labels = ['Line {}'.format(row) for row in range(nb_lines)]
    assert len(column_labels) == nb_cols, (
        "Error, specifying invalid number of column labels")
    assert len(row_labels) == nb_lines, (
        "Error, specifying invalid number of line labels")
    for index_sess, list_conmat in enumerate(list_list_conmat):
        print(len(list_conmat))
        for index_win, np_all_mean_con_mats in enumerate(list_conmat):
            print(np_all_mean_con_mats.shape)
            # Shared keyword arguments for every circle in the grid; the
            # subplot index places the circle at (row=session, col=window).
            kw = dict(textcolor="black", facecolor="white", n_lines=None,
                      node_angles=all_node_angles, fontsize_names=15,
                      show=False, colormap=color_bar, vmin=coh_low_thresh,
                      vmax=coh_high_thresh, fig=fig,
                      subplot=(nb_lines,
                               nb_cols, 1 + index_win + nb_cols * index_sess))
            # Only the last window of each row carries the colorbar.
            if index_win == len(list_conmat) - 1:
                fig, ax = plot_connectivity_circle(np_all_mean_con_mats,
                                                   all_elec_labels,
                                                   colorbar_size=0.5, **kw)
                # (nb_lines,nb_cols,1+index_win+nb_cols*index_sess))
            else:
                fig, ax = plot_connectivity_circle(np_all_mean_con_mats,
                                                   all_elec_labels,
                                                   colorbar=False, **kw)
            # Row label on the first column, column title on the first row.
            if index_win == 0:
                ax.set_ylabel(row_labels[index_sess],
                              rotation=0, size='large', fontsize=25)
            if index_sess == 0:
                ax.set_title(column_labels[index_win],
                             fontdict={'fontsize': 25})
    # saving
    print(plot_filename)
    fig.savefig(plot_filename, facecolor='white')
    plt.close(fig)
    # fig1.close()
    del fig
|
{
"content_hash": "9e6885e5b1174c3a0493c201dbd41f86",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 79,
"avg_line_length": 35.42384105960265,
"alnum_prop": 0.534679379323238,
"repo_name": "neuropycon/ephypype",
"id": "ee4dfed5af1c994dd443d0e1f0cf21d50f4df545",
"size": "5349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ephypype/gather/gather_conmats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "262974"
}
],
"symlink_target": ""
}
|
import os
# Example-project Django settings (development defaults; not for production).
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

# MySQL connection for the example project; TEST_NAME is the database the
# test runner creates.
DATABASES = {
    "default": {
        "ENGINE": 'django.db.backends.mysql',
        "NAME": 'election',
        "USER": 'electionUser',
        "PASSWORD": 'electionPass',
        "HOST": '',
        "PORT": '',
        "TEST_NAME": 'election_test',
    },
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# NOTE(review): depends on the current working directory at import time —
# the process must be started from the project root for paths to resolve.
WEB_ROOT = os.getcwd()

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

STATIC_ROOT = WEB_ROOT + "/static/"
STATIC_URL = '/static/'

# Make this unique, and don't share it with anybody.
# NOTE(review): hard-coded key is acceptable only for this example project.
SECRET_KEY = 'sh)ip(^)gy+0!n83ayuk599b1()40-^%m*!$4e*ube61w#8fpi'

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)

ROOT_URLCONF = 'example_project.urls'
LOGIN_URL = "/account/"

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    WEB_ROOT + '/templates/',
)

INSTALLED_APPS = (
    'django_elect',
    'dal',
    'dal_select2',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.staticfiles',
)
|
{
"content_hash": "a65bcd574b36f9349d7dd6a125c17315",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 88,
"avg_line_length": 25.897435897435898,
"alnum_prop": 0.6732673267326733,
"repo_name": "MasonM/django-elect",
"id": "95c9c059f37c438aa7fb71bc956f67b78e3a7a78",
"size": "2060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_project/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "542"
},
{
"name": "HTML",
"bytes": "6940"
},
{
"name": "JavaScript",
"bytes": "501"
},
{
"name": "Python",
"bytes": "92122"
}
],
"symlink_target": ""
}
|
import sys
def caller_module(level: int = 2, sys=sys):
    """This function is taken from Pyramid Web Framework - ``pyramid.path.caller_module``."""
    frame_globals = sys._getframe(level).f_globals
    name = frame_globals.get('__name__') or '__main__'
    return sys.modules[name]


def caller_package(level: int = 2, caller_module=caller_module):
    """This function is taken from Pyramid Web Framework - ``pyramid.path.caller_package``."""
    # caller_module in arglist for tests
    module = caller_module(level + 1)
    filename = getattr(module, '__file__', '')
    # A module whose file is an __init__ is itself a package.
    if ('__init__.py' in filename) or ('__init__$py' in filename):  # empty at >>>
        return module
    # Go up one level to get package
    return sys.modules[module.__name__.rsplit('.', 1)[0]]
|
{
"content_hash": "8fb4483fa19eb43ce4fed57e9c6e7eb6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 94,
"avg_line_length": 38.72727272727273,
"alnum_prop": 0.6314553990610329,
"repo_name": "avanov/solo",
"id": "8ddc035a706c109df5a7d1bb3d54f7a686517a99",
"size": "852",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "solo/configurator/path.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "273"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "162984"
},
{
"name": "RAML",
"bytes": "903"
}
],
"symlink_target": ""
}
|
import os
if os.getenv("GEVENT") == "true":
from easypy.gevent import apply_patch
apply_patch()
import logging
import logging.config
from easypy import logging as easypy_logging
logging.addLevelName(logging.WARN, "WARN") # instead of "WARNING", so that it takes less space...
logging.addLevelName(logging.NOTSET, "NA") # instead of "NOTSET", so that it takes less space...
if not issubclass(logging.Logger, easypy_logging.ContextLoggerMixin):
logging.Logger.__bases__ = logging.Logger.__bases__ + (easypy_logging.ContextLoggerMixin,)
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s|%(process)2s:%(threadName)-25s|%(name)-40s|%(levelname)-5s|%(funcName)-30s |%(message)s')
|
{
"content_hash": "37aa4a8c20abfc726a77af6c8eda36ac",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 150,
"avg_line_length": 43.9375,
"alnum_prop": 0.7354196301564723,
"repo_name": "weka-io/easypy",
"id": "f64359fb6bb52f978037944484a68213670f7279",
"size": "703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "466469"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class CmaxValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``sunburst.marker.cmax`` property."""

    def __init__(self, plotly_name="cmax", parent_name="sunburst.marker", **kwargs):
        # Pop the overridable defaults out of kwargs so they are not passed
        # to the base validator twice.
        edit_type = kwargs.pop("edit_type", "plot")
        implied_edits = kwargs.pop("implied_edits", {"cauto": False})
        super(CmaxValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs,
        )
|
{
"content_hash": "ffc7ee72022b55b18fb8396ef191fd03",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 84,
"avg_line_length": 39.083333333333336,
"alnum_prop": 0.6140724946695096,
"repo_name": "plotly/plotly.py",
"id": "2966493d56a12043d33383e61eb47b8249caef73",
"size": "469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/sunburst/marker/_cmax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from awx.api.views import (
WorkflowJobNodeList,
WorkflowJobNodeDetail,
WorkflowJobNodeSuccessNodesList,
WorkflowJobNodeFailureNodesList,
WorkflowJobNodeAlwaysNodesList,
WorkflowJobNodeCredentialsList,
)
# (regex pattern, view class, route name) for every workflow-job-node endpoint.
_routes = (
    (r'^$', WorkflowJobNodeList, 'workflow_job_node_list'),
    (r'^(?P<pk>[0-9]+)/$', WorkflowJobNodeDetail, 'workflow_job_node_detail'),
    (r'^(?P<pk>[0-9]+)/success_nodes/$', WorkflowJobNodeSuccessNodesList, 'workflow_job_node_success_nodes_list'),
    (r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobNodeFailureNodesList, 'workflow_job_node_failure_nodes_list'),
    (r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobNodeAlwaysNodesList, 'workflow_job_node_always_nodes_list'),
    (r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobNodeCredentialsList, 'workflow_job_node_credentials_list'),
)
urls = [url(pattern, view.as_view(), name=name) for pattern, view, name in _routes]
__all__ = ['urls']
|
{
"content_hash": "9fed00f2b8ba79ff7647cc63409deba6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 132,
"avg_line_length": 45.18181818181818,
"alnum_prop": 0.704225352112676,
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"id": "809ee515f06fa03deeb770bf8143170ebaa94d05",
"size": "1053",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/github.com/ansible/awx/awx/api/urls/workflow_job_node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from datetime import datetime
from unittest.mock import Mock
import pytest
from airflow import settings
from airflow.models import DAG
from airflow.models.baseoperator import BaseOperator
from airflow.operators.dummy import DummyOperator
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.utils import timezone
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.trigger_rule import TriggerRule
from tests.models import DEFAULT_DATE
from tests.test_utils.db import clear_db_runs
@pytest.fixture
def get_task_instance(session, dag_maker):
    """Factory fixture: build a one-task DAG run and return its task instance."""
    def _get_task_instance(trigger_rule=TriggerRule.ALL_SUCCESS, state=None, upstream_task_ids=None):
        with dag_maker(session=session):
            task = BaseOperator(
                task_id='test_task', trigger_rule=trigger_rule, start_date=datetime(2015, 1, 1)
            )
            if upstream_task_ids:
                # Fake upstream relationships without creating real tasks.
                task._upstream_task_ids.update(upstream_task_ids)
        dag_run = dag_maker.create_dagrun()
        task_instance = dag_run.task_instances[0]
        task_instance.task = task
        return task_instance
    return _get_task_instance
class TestTriggerRuleDep:
    """Unit tests for ``TriggerRuleDep``.

    Most tests call ``_evaluate_trigger_rule`` directly with hand-picked
    upstream state counts (successes / skipped / failed / upstream_failed /
    done) and assert on the resulting dep statuses; the final test exercises
    ``_get_states_count_upstream_ti`` against a real DAG run.
    """
    def test_no_upstream_tasks(self, get_task_instance):
        """
        If the TI has no upstream TIs then there is nothing to check and the dep is passed
        """
        ti = get_task_instance(TriggerRule.ALL_DONE, State.UP_FOR_RETRY)
        assert TriggerRuleDep().is_met(ti=ti)
    def test_always_tr(self, get_task_instance):
        """
        The always trigger rule should always pass this dep
        """
        ti = get_task_instance(TriggerRule.ALWAYS, State.UP_FOR_RETRY)
        assert TriggerRuleDep().is_met(ti=ti)
    def test_one_success_tr_success(self, get_task_instance):
        """
        One-success trigger rule success
        """
        ti = get_task_instance(TriggerRule.ONE_SUCCESS, State.UP_FOR_RETRY)
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=1,
                skipped=2,
                failed=2,
                upstream_failed=2,
                done=2,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        # No statuses yielded means the dep passed.
        assert len(dep_statuses) == 0
    def test_one_success_tr_failure(self, get_task_instance):
        """
        One-success trigger rule failure
        """
        ti = get_task_instance(TriggerRule.ONE_SUCCESS)
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=0,
                skipped=2,
                failed=2,
                upstream_failed=2,
                done=2,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 1
        assert not dep_statuses[0].passed
    def test_one_failure_tr_failure(self, get_task_instance):
        """
        One-failure trigger rule failure
        """
        ti = get_task_instance(TriggerRule.ONE_FAILED)
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=2,
                skipped=0,
                failed=0,
                upstream_failed=0,
                done=2,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 1
        assert not dep_statuses[0].passed
    def test_one_failure_tr_success(self, get_task_instance):
        """
        One-failure trigger rule success
        """
        ti = get_task_instance(TriggerRule.ONE_FAILED)
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=0,
                skipped=2,
                failed=2,
                upstream_failed=0,
                done=2,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 0
        # An upstream_failed count should satisfy ONE_FAILED as well.
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=0,
                skipped=2,
                failed=0,
                upstream_failed=2,
                done=2,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 0
    def test_all_success_tr_success(self, get_task_instance):
        """
        All-success trigger rule success
        """
        ti = get_task_instance(TriggerRule.ALL_SUCCESS, upstream_task_ids=["FakeTaskID"])
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=1,
                skipped=0,
                failed=0,
                upstream_failed=0,
                done=1,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 0
    def test_all_success_tr_failure(self, get_task_instance):
        """
        All-success trigger rule failure
        """
        ti = get_task_instance(TriggerRule.ALL_SUCCESS, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"])
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=1,
                skipped=0,
                failed=1,
                upstream_failed=0,
                done=2,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 1
        assert not dep_statuses[0].passed
    def test_all_success_tr_skip(self, get_task_instance):
        """
        All-success trigger rule fails when some upstream tasks are skipped.
        """
        ti = get_task_instance(TriggerRule.ALL_SUCCESS, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"])
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=1,
                skipped=1,
                failed=0,
                upstream_failed=0,
                done=2,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 1
        assert not dep_statuses[0].passed
    def test_all_success_tr_skip_flag_upstream(self, get_task_instance):
        """
        All-success trigger rule fails when some upstream tasks are skipped. The state of the ti
        should be set to SKIPPED when flag_upstream_failed is True.
        """
        ti = get_task_instance(TriggerRule.ALL_SUCCESS, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"])
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=1,
                skipped=1,
                failed=0,
                upstream_failed=0,
                done=2,
                flag_upstream_failed=True,
                session=Mock(),
            )
        )
        assert len(dep_statuses) == 1
        assert not dep_statuses[0].passed
        assert ti.state == State.SKIPPED
    def test_none_failed_tr_success(self, get_task_instance):
        """
        All success including skip trigger rule success
        """
        ti = get_task_instance(TriggerRule.NONE_FAILED, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"])
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=1,
                skipped=1,
                failed=0,
                upstream_failed=0,
                done=2,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 0
    def test_none_failed_tr_skipped(self, get_task_instance):
        """
        All success including all upstream skips trigger rule success
        """
        ti = get_task_instance(TriggerRule.NONE_FAILED, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"])
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=0,
                skipped=2,
                failed=0,
                upstream_failed=0,
                done=2,
                flag_upstream_failed=True,
                session=Mock(),
            )
        )
        assert len(dep_statuses) == 0
        # NONE_FAILED with all upstreams skipped leaves the TI state untouched.
        assert ti.state == State.NONE
    def test_none_failed_tr_failure(self, get_task_instance):
        """
        All success including skip trigger rule failure
        """
        ti = get_task_instance(
            TriggerRule.NONE_FAILED, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID", "FailedFakeTaskID"]
        )
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=1,
                skipped=1,
                failed=1,
                upstream_failed=0,
                done=3,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 1
        assert not dep_statuses[0].passed
    def test_none_failed_min_one_success_tr_success(self, get_task_instance):
        """
        All success including skip trigger rule success
        """
        ti = get_task_instance(
            TriggerRule.NONE_FAILED_MIN_ONE_SUCCESS, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"]
        )
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=1,
                skipped=1,
                failed=0,
                upstream_failed=0,
                done=2,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 0
    def test_none_failed_min_one_success_tr_skipped(self, get_task_instance):
        """
        All success including all upstream skips trigger rule success
        """
        ti = get_task_instance(
            TriggerRule.NONE_FAILED_MIN_ONE_SUCCESS, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"]
        )
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=0,
                skipped=2,
                failed=0,
                upstream_failed=0,
                done=2,
                flag_upstream_failed=True,
                session=Mock(),
            )
        )
        assert len(dep_statuses) == 0
        # Unlike NONE_FAILED, this rule marks the TI SKIPPED when everything skipped.
        assert ti.state == State.SKIPPED
    def test_none_failed_min_one_success_tr_failure(self, session, get_task_instance):
        """
        All success including skip trigger rule failure
        """
        ti = get_task_instance(
            TriggerRule.NONE_FAILED_MIN_ONE_SUCCESS,
            upstream_task_ids=["FakeTaskID", "OtherFakeTaskID", "FailedFakeTaskID"],
        )
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=1,
                skipped=1,
                failed=1,
                upstream_failed=0,
                done=3,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 1
        assert not dep_statuses[0].passed
    def test_all_failed_tr_success(self, get_task_instance):
        """
        All-failed trigger rule success
        """
        ti = get_task_instance(TriggerRule.ALL_FAILED, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"])
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=0,
                skipped=0,
                failed=2,
                upstream_failed=0,
                done=2,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 0
    def test_all_failed_tr_failure(self, get_task_instance):
        """
        All-failed trigger rule failure
        """
        ti = get_task_instance(TriggerRule.ALL_FAILED, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"])
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=2,
                skipped=0,
                failed=0,
                upstream_failed=0,
                done=2,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 1
        assert not dep_statuses[0].passed
    def test_all_done_tr_success(self, get_task_instance):
        """
        All-done trigger rule success
        """
        ti = get_task_instance(TriggerRule.ALL_DONE, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"])
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=2,
                skipped=0,
                failed=0,
                upstream_failed=0,
                done=2,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 0
    def test_all_done_tr_failure(self, get_task_instance):
        """
        All-done trigger rule failure
        """
        ti = get_task_instance(TriggerRule.ALL_DONE, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID"])
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=1,
                skipped=0,
                failed=0,
                upstream_failed=0,
                done=1,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 1
        assert not dep_statuses[0].passed
    def test_none_skipped_tr_success(self, get_task_instance):
        """
        None-skipped trigger rule success
        """
        ti = get_task_instance(
            TriggerRule.NONE_SKIPPED, upstream_task_ids=["FakeTaskID", "OtherFakeTaskID", "FailedFakeTaskID"]
        )
        with create_session() as session:
            dep_statuses = tuple(
                TriggerRuleDep()._evaluate_trigger_rule(
                    ti=ti,
                    successes=2,
                    skipped=0,
                    failed=1,
                    upstream_failed=0,
                    done=3,
                    flag_upstream_failed=False,
                    session=session,
                )
            )
            assert len(dep_statuses) == 0
            # with `flag_upstream_failed` set to True
            dep_statuses = tuple(
                TriggerRuleDep()._evaluate_trigger_rule(
                    ti=ti,
                    successes=0,
                    skipped=0,
                    failed=3,
                    upstream_failed=0,
                    done=3,
                    flag_upstream_failed=True,
                    session=session,
                )
            )
            assert len(dep_statuses) == 0
    def test_none_skipped_tr_failure(self, get_task_instance):
        """
        None-skipped trigger rule failure
        """
        ti = get_task_instance(TriggerRule.NONE_SKIPPED, upstream_task_ids=["FakeTaskID", "SkippedTaskID"])
        with create_session() as session:
            dep_statuses = tuple(
                TriggerRuleDep()._evaluate_trigger_rule(
                    ti=ti,
                    successes=1,
                    skipped=1,
                    failed=0,
                    upstream_failed=0,
                    done=2,
                    flag_upstream_failed=False,
                    session=session,
                )
            )
            assert len(dep_statuses) == 1
            assert not dep_statuses[0].passed
            # with `flag_upstream_failed` set to True
            dep_statuses = tuple(
                TriggerRuleDep()._evaluate_trigger_rule(
                    ti=ti,
                    successes=1,
                    skipped=1,
                    failed=0,
                    upstream_failed=0,
                    done=2,
                    flag_upstream_failed=True,
                    session=session,
                )
            )
            assert len(dep_statuses) == 1
            assert not dep_statuses[0].passed
            # Fail until all upstream tasks have completed execution
            dep_statuses = tuple(
                TriggerRuleDep()._evaluate_trigger_rule(
                    ti=ti,
                    successes=0,
                    skipped=0,
                    failed=0,
                    upstream_failed=0,
                    done=0,
                    flag_upstream_failed=False,
                    session=session,
                )
            )
            assert len(dep_statuses) == 1
            assert not dep_statuses[0].passed
    def test_unknown_tr(self, get_task_instance):
        """
        Unknown trigger rules should cause this dep to fail
        """
        ti = get_task_instance()
        ti.task.trigger_rule = "Unknown Trigger Rule"
        dep_statuses = tuple(
            TriggerRuleDep()._evaluate_trigger_rule(
                ti=ti,
                successes=1,
                skipped=0,
                failed=0,
                upstream_failed=0,
                done=1,
                flag_upstream_failed=False,
                session="Fake Session",
            )
        )
        assert len(dep_statuses) == 1
        assert not dep_statuses[0].passed
    def test_get_states_count_upstream_ti(self):
        """
        Test the helper ``_get_states_count_upstream_ti`` both as a unit and via
        ``DagRun.update_state``, using a real diamond-shaped DAG run.
        """
        from airflow.ti_deps.dep_context import DepContext
        get_states_count_upstream_ti = TriggerRuleDep._get_states_count_upstream_ti
        session = settings.Session()
        now = timezone.utcnow()
        dag = DAG('test_dagrun_with_pre_tis', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='A')
            op2 = DummyOperator(task_id='B')
            op3 = DummyOperator(task_id='C')
            op4 = DummyOperator(task_id='D')
            op5 = DummyOperator(task_id='E', trigger_rule=TriggerRule.ONE_FAILED)
            op1.set_downstream([op2, op3]) # op1 >> op2, op3
            op4.set_upstream([op3, op2]) # op3, op2 >> op4
            op5.set_upstream([op2, op3, op4]) # (op2, op3, op4) >> op5
        clear_db_runs()
        dag.clear()
        dr = dag.create_dagrun(
            run_id='test_dagrun_with_pre_tis', state=State.RUNNING, execution_date=now, start_date=now
        )
        ti_op1 = dr.get_task_instance(op1.task_id, session)
        ti_op2 = dr.get_task_instance(op2.task_id, session)
        ti_op3 = dr.get_task_instance(op3.task_id, session)
        ti_op4 = dr.get_task_instance(op4.task_id, session)
        ti_op5 = dr.get_task_instance(op5.task_id, session)
        ti_op1.task = op1
        ti_op2.task = op2
        ti_op3.task = op3
        ti_op4.task = op4
        ti_op5.task = op5
        ti_op1.set_state(state=State.SUCCESS, session=session)
        ti_op2.set_state(state=State.FAILED, session=session)
        ti_op3.set_state(state=State.SUCCESS, session=session)
        ti_op4.set_state(state=State.SUCCESS, session=session)
        ti_op5.set_state(state=State.SUCCESS, session=session)
        session.commit()
        # check handling with cases that tasks are triggered from backfill with no finished tasks
        finished_tasks = DepContext().ensure_finished_tasks(ti_op2.dag_run, session)
        # Tuples are (successes, skipped, failed, upstream_failed, done).
        assert get_states_count_upstream_ti(finished_tasks=finished_tasks, ti=ti_op2) == (1, 0, 0, 0, 1)
        finished_tasks = dr.get_task_instances(state=State.finished, session=session)
        assert get_states_count_upstream_ti(finished_tasks=finished_tasks, ti=ti_op4) == (1, 0, 1, 0, 2)
        assert get_states_count_upstream_ti(finished_tasks=finished_tasks, ti=ti_op5) == (2, 0, 1, 0, 3)
        dr.update_state()
        assert State.SUCCESS == dr.state
|
{
"content_hash": "efa464849c0a0cce63018f636a2cd982",
"timestamp": "",
"source": "github",
"line_count": 597,
"max_line_length": 109,
"avg_line_length": 34.482412060301506,
"alnum_prop": 0.5184591469931021,
"repo_name": "apache/incubator-airflow",
"id": "bbdb84679cb1b950ad035afaa24cf216f0bdc6b7",
"size": "21373",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/ti_deps/deps/test_trigger_rule_dep.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69070"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "283783"
},
{
"name": "JavaScript",
"bytes": "1387552"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5482822"
},
{
"name": "Shell",
"bytes": "40957"
}
],
"symlink_target": ""
}
|
import uuid
import pytest
from django_q.brokers import get_broker
from django_q.cluster import Cluster
from django_q.conf import Conf
from django_q.monitor import get_ids, info, monitor
from django_q.status import Stat
from django_q.tasks import async_task
@pytest.mark.django_db
def test_monitor(monkeypatch):
    """Exercise monitor() against a live local cluster.

    Starts a Cluster, runs one monitor pass, and checks that the started
    cluster shows up in the returned stats with a positive uptime and empty
    queues. Then verifies that monitor() reports broker lock sizes via the
    ORM broker.
    """
    cluster_id = uuid.uuid4()
    # A random, unknown cluster id yields the sentinel (empty) Stat.
    assert Stat.get(pid=0, cluster_id=cluster_id).sentinel == 0
    c = Cluster()
    c.start()
    stats = monitor(run_once=True)
    assert get_ids() is True
    c.stop()
    assert len(stats) > 0
    found_c = False
    for stat in stats:
        if stat.cluster_id == c.cluster_id:
            found_c = True
            assert stat.uptime() > 0
            assert stat.empty_queues() is True
            break
    assert found_c
    # test lock size: one dequeued-but-unacknowledged task holds one lock
    monkeypatch.setattr(Conf, "ORM", "default")
    b = get_broker("monitor_test")
    b.enqueue("test")
    b.dequeue()
    assert b.lock_size() == 1
    monitor(run_once=True, broker=b)
    b.delete_queue()
@pytest.mark.django_db
def test_info():
    """info() should render cleanly as tasks accumulate."""
    # Render with no tasks, after one task, then after many more.
    for extra_tasks in (0, 1, 24):
        for _ in range(extra_tasks):
            do_sync()
        info()
def do_sync():
    """Run the countdown task synchronously and persist its result."""
    options = dict(sync=True, save=True)
    async_task("django_q.tests.tasks.countdown", 1, **options)
|
{
"content_hash": "93806a83e875e2f6d0717bcf227c8ad9",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 73,
"avg_line_length": 23.673076923076923,
"alnum_prop": 0.6295694557270511,
"repo_name": "Koed00/django-q",
"id": "a7a7980bb3e2cea3d79bb50917f370a3bc8b385f",
"size": "1231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_q/tests/test_monitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "197496"
}
],
"symlink_target": ""
}
|
import numpy as np
from .backends import CSVBackend
try:
    # Python 2/3 compatibility shim: "basestring" only exists on Python 2.
    basestring
except NameError:
    # Python 3: every text string is a "str".
    stringtype = str
else:
    # Python 2: "basestring" covers both "str" and "unicode".
    stringtype = basestring
__all__ = ["postprocess", "make_plots"]
def postprocess(backend=None,
                temperature=1.0, cut=0, compression_assert=None,
                resample_log_X=0,
                compression_bias_min=1, compression_scatter=0,
                resample=0,
                plot=False, plot_params=None):
    """Post-process a DNest4 run and estimate the evidence.

    Reads levels/samples/sample_info from ``backend`` (a path string or a
    backend object; defaults to the current directory as a CSVBackend),
    estimates log(Z), the information H and the effective sample size, writes
    weights/posterior samples/stats back to the backend, and returns the
    stats dict.

    :param temperature: divide log-likelihoods by this (annealed evidence).
    :param cut: fraction (0-1) of initial samples to discard as burn-in.
    :param compression_assert: if given, force a fixed compression per level.
    :param resample_log_X: number of Monte-Carlo jitters of the level X values
        used to propagate compression uncertainty (0 disables jittering).
    :param resample: if nonzero, draw ``resample * mean(N_eff)`` posterior
        samples and write them to the backend.
    :param plot: if True, call :func:`make_plots` with ``plot_params``.
    """
    # Deal with filename inputs.
    if backend is None:
        backend = "."
    if isinstance(backend, stringtype):
        backend = CSVBackend(backend)
    # Unpack the backend's data.
    levels = backend.levels
    samples = backend.samples
    sample_info = backend.sample_info
    # Remove regularisation from levels if we asked for it.
    if compression_assert is not None:
        levels = np.array(levels)
        levels["log_X"][1:] = \
            -np.cumsum(compression_assert*np.ones(len(levels) - 1))
    # Remove burn-in.
    if cut > 0:
        samples, sample_info = remove_burnin(samples, sample_info, cut)
    # Flatten (iteration, particle) axes into one sample axis if needed.
    if len(sample_info.shape) > 1:
        samples, sample_info = subsample_particles(samples, sample_info)
    # Check dimensions.
    assert len(samples) == len(sample_info), "dimension mismatch"
    # Estimate the X values for the samples by interpolating from the levels,
    # optionally repeating with jittered level compressions.
    if resample_log_X:
        resample_count = resample_log_X
    else:
        resample_count = 1
    log_z = np.empty(resample_count)
    h = np.empty(resample_count)
    n_eff = np.empty(resample_count)
    log_post = np.empty((resample_count, len(sample_info)))
    for i in range(resample_count):
        # If requested, jitter the Xs of the levels.
        if resample_log_X:
            levels_2 = np.array(levels)
            comp = -np.diff(levels_2["log_X"])
            comp *= np.random.uniform(compression_bias_min, 1.0)
            comp *= np.exp(compression_scatter*np.random.randn(len(comp)))
            levels_2["log_X"][1:] = -comp
            levels_2["log_X"] = np.cumsum(levels_2["log_X"])
        else:
            levels_2 = levels
        sample_log_X = interpolate_samples(levels_2, sample_info,
                                           resample=resample_log_X)
        if i == 0:
            backend.write_sample_log_X(sample_log_X)
        log_z[i], h[i], n_eff[i], log_post[i] = compute_stats(
            levels_2, sample_info, sample_log_X,
            temperature=temperature,
        )
    # Average the posterior weights over the jitter realisations (in log space).
    log_post = logsumexp(log_post, axis=0) - np.log(resample_count)
    backend.write_weights(np.exp(log_post))
    if resample:
        new_samples = generate_posterior_samples(
            samples, log_post, int(resample * np.mean(n_eff))
        )
        backend.write_posterior_samples(new_samples)
        log_post = np.zeros(len(new_samples))
    else:
        new_samples = samples
    # Compute the final stats based on resampling.
    stats = dict(
        log_Z=np.mean(log_z), log_Z_std=np.std(log_z),
        H=np.mean(h), H_std=np.std(h),
        N_eff=np.mean(n_eff), N_eff_std=np.std(n_eff),
    )
    backend.write_stats(stats)
    # Make the plots if requested.
    if plot:
        if plot_params is None:
            plot_params = dict()
        make_plots(backend, **plot_params)
    return stats
def logsumexp(x, axis=None):
mx = np.max(x, axis=axis)
return np.log(np.sum(np.exp(x - mx), axis=axis)) + mx
def remove_burnin(samples, sample_info, nburn):
    """Drop the first ``nburn`` fraction (0-1) of samples and their info rows."""
    start_samples = int(nburn * len(samples))
    start_info = int(nburn * len(sample_info))
    return samples[start_samples:], sample_info[start_info:]
def subsample_particles(samples, sample_info):
    """Flatten per-iteration particle axes into a single sample axis.

    A 2-D ``samples`` with 1-D ``sample_info`` is already flat and is
    returned untouched; otherwise both arrays are reshaped so that every
    (iteration, particle) pair becomes one row.
    """
    # Already flat: nothing to do.
    if len(samples.shape) == 2 and len(sample_info.shape) == 1:
        return samples, sample_info
    if len(sample_info.shape) != 2:
        raise ValueError("invalid dimensions")
    # The particle axes must agree between the two arrays.
    if samples.shape[1] != sample_info.shape[1]:
        raise ValueError("dimension mismatch")
    total = np.prod(sample_info.shape)
    return samples.reshape((total, -1)), sample_info.reshape(total)
def interpolate_samples(levels, sample_info, resample=False):
    """Assign an estimated log(X) (prior mass) to every sample.

    Each sample is first assigned to the level it sits in (by likelihood,
    with tiebreakers), then placed between that level's X boundaries —
    either deterministically spaced (default) or uniformly re-sampled in X
    when ``resample`` is truthy.
    """
    # Work out the level assignments. This looks horrifying because we need
    # to take tiebreakers into account; if two levels (or samples) have
    # exactly the same likelihood, then the tiebreaker decides the assignment.
    lev, order = 0, 0
    assign = np.empty(len(sample_info), dtype=int)
    argsort = np.empty(len(sample_info), dtype=int)
    # Levels get negative indices (-1..-n) so they sort distinguishably from
    # samples (non-negative indices) in the merged, likelihood-sorted stream.
    l_set = zip(levels["log_likelihood"], levels["tiebreaker"],
                -np.arange(1, len(levels)+1))
    s_set = zip(sample_info["log_likelihood"], sample_info["tiebreaker"],
                range(len(sample_info)))
    for ll, _, ind in sorted(list(l_set) + list(s_set)):
        if ind < 0:
            # Crossing a level boundary: everything after belongs to it.
            lev = -ind - 1
            continue
        assign[ind] = lev
        argsort[ind] = order
        order += 1
    # Loop over levels and place the samples within each level.
    sample_log_X = np.empty(len(sample_info))
    x_min = np.exp(np.append(levels["log_X"][1:], -np.inf))
    x_max = np.exp(levels["log_X"])
    dx = x_max - x_min
    for i, lev in enumerate(levels):
        # Use the level assignments to get a mask of sample IDs in the correct
        # order.
        m = assign == i
        inds = np.arange(len(sample_info))[m][np.argsort(argsort[m])]
        if resample:
            # Re-sample the points uniformly---in X---between the level
            # boundaries.
            sample_log_X[inds] = np.sort(np.log(
                np.random.uniform(x_min[i], x_max[i], size=len(inds))
            ))[::-1]
        else:
            # Place the samples uniformly---in X not log(X)---between the
            # level boundaries.
            N = len(inds)
            # FIXME: there are two options here and we're using the backwards
            # compatible one but the other might be better. Need to think
            # about it further. It won't matter as the number of samples gets
            # large.
            n = ((np.arange(1, N+1)) / (N+1))[::-1]
            # n = ((np.arange(N) + 0.5) / N)[::-1]
            sample_log_X[inds] = np.log(x_min[i] + dx[i] * n)
    return sample_log_X
def compute_stats(levels, sample_info, sample_log_X, temperature=1.0):
    """Estimate log(Z), information H, effective sample size and weights.

    Merges level boundaries and samples on the log(X) axis and applies the
    trapezoid rule in log space. Returns ``(log_z, h, n_eff, log_post)``
    where ``log_post`` are normalised log posterior weights per sample.
    """
    # Use the log(X) estimates for the levels and the samples to estimate
    # log(Z) using the trapezoid rule.
    log_x = np.append(levels["log_X"], sample_log_X)
    log_y = np.append(levels["log_likelihood"], sample_info["log_likelihood"])
    # samp_inds maps each merged entry back to its sample (-1 for levels).
    samp_inds = np.append(-np.ones(len(levels), dtype=int),
                          np.arange(len(sample_info)))
    is_samp = np.append(
        np.zeros(len(levels), dtype=bool),
        np.ones(len(sample_info), dtype=bool)
    )
    inds = np.argsort(log_x)
    log_x = log_x[inds]
    log_y = log_y[inds] / temperature
    samp_inds = samp_inds[inds]
    is_samp = is_samp[inds]
    # Extend to X=0.
    log_x = np.append(-np.inf, log_x)
    log_y = np.append(log_y[0], log_y)
    # Compute log(exp(L_k+1) + exp(L_k)) using logsumexp rules...
    d_log_y = log_y[1:] - log_y[:-1]
    log_y_mean = np.log(0.5) + np.log(1+np.exp(d_log_y)) + log_y[:-1]
    # ...and log(exp(log(X_k+1)) + exp(log(X_k))) using logsumexp rules.
    log_x_diff = np.log(1. - np.exp(log_x[:-1] - log_x[1:])) + log_x[1:]
    # Then from the trapezoid rule:
    #   log(Z) = log(0.5) + logsumexp(log_x_diff + log_y_mean)
    log_p = log_x_diff + log_y_mean
    log_z = logsumexp(log_p)
    log_p -= log_z
    # Compute the sample posterior weights. These are equal to:
    #   w_k = L_k / (0.5 * (X_k+1 - X_k-1)) / Z
    # but we'll recompute Z not using the levels just to be safe.
    log_prior = np.log(0.5) + np.logaddexp(log_x_diff[1:], log_x_diff[:-1])
    log_post = np.array(sample_info["log_likelihood"])
    # NOTE(review): the two masks below are offset by one; this looks
    # intentional (prior intervals are between merged entries) but verify
    # against the C++ DNest4 postprocess before touching it.
    log_post[samp_inds[samp_inds >= 0]] += log_prior[samp_inds[:-1] >= 0]
    log_post -= logsumexp(log_post)
    # Compute the information and effective sample size.
    h = -log_z + np.sum(np.exp(log_post) * sample_info["log_likelihood"])
    n_eff = np.exp(-np.sum(np.exp(log_post)*log_post))
    return log_z, h, n_eff, log_post
def generate_posterior_samples(samples, log_weights, N):
    """Draw ``N`` samples (with replacement) according to the log-weights."""
    probs = np.exp(log_weights - logsumexp(log_weights))
    indices = np.random.choice(np.arange(len(samples)), size=int(N), p=probs)
    return samples[indices]
def make_plots(backend, **plot_params):
    """Build the standard diagnostic figures and return them keyed by name.

    Returns a dict with keys ``"levels"``, ``"compression"`` and
    ``"log_X_log_L"`` mapping to matplotlib figures.

    ``postprocess`` forwards its ``plot_params`` dict here as keyword
    arguments; previously the signature accepted none, so any non-empty
    ``plot_params`` raised a TypeError. The individual plot helpers take no
    options yet, so unknown options are accepted and ignored for now.
    """
    figs = dict()
    figs["levels"] = make_levels_plot(backend)
    figs["compression"] = make_compression_plot(backend)
    figs["log_X_log_L"] = make_log_X_log_L_plot(backend)
    return figs
def make_levels_plot(backend):
    """Plot each saved sample's level assignment against iteration."""
    import matplotlib.pyplot as pl
    figure, axis = pl.subplots(1, 1)
    axis.plot(backend.sample_info["level_assignment"], color="k")
    axis.set_xlabel("Iterations")
    axis.set_ylabel("Level")
    return figure
def make_compression_plot(backend):
    """Plot per-level compression and MH acceptance fraction.

    Top panel: log-compression between successive levels, with reference
    lines at -1 and -log(10). Bottom panel: accepts/tries for levels that
    recorded at least one try.
    """
    import matplotlib.pyplot as pl
    fig, axes = pl.subplots(2, 1, sharex=True)
    levels = backend.levels
    ax = axes[0]
    ax.plot(np.diff(levels["log_X"]), color="k")
    ax.axhline(-1., color="g")
    ax.axhline(-np.log(10.), color="g", linestyle="--")
    # Positional args work on every matplotlib version; the old `ymax=`
    # keyword was deprecated in matplotlib 3.0 and later removed.
    ax.set_ylim(None, 0.05)
    ax.set_ylabel("Compression")
    ax = axes[1]
    # Only levels with at least one try have a defined acceptance fraction.
    m = levels["tries"] > 0
    ax.plot(np.arange(len(levels))[m],
            levels[m]["accepts"]/levels[m]["tries"],
            "ko-")
    ax.set_ylabel("MH Acceptance")
    ax.set_xlabel("level")
    ax.set_ylim([0.0, 1.0])
    return fig
def make_log_X_log_L_plot(backend):
    """Plot log(L) and posterior weight against log(X)."""
    import matplotlib.pyplot as pl
    fig, axes = pl.subplots(2, 1, sharex=True)
    levels = backend.levels
    sample_info = backend.sample_info
    sample_log_X = backend.sample_log_X
    weights = backend.weights
    top = axes[0]
    top.plot(sample_log_X.flatten(), sample_info["log_likelihood"].flatten(),
             "k.", label="Samples")
    top.plot(levels["log_X"][1:], levels["log_likelihood"][1:], "g.",
             label="Levels")
    top.legend(numpoints=1, loc="lower left")
    top.set_ylabel("log(L)")
    top.set_title("log(Z) = {0}".format(backend.stats["log_Z"]))
    # Use all plotted logl values to set the y range: clip the bottom 10%
    # and pad 5% on each side.
    combined_logl = np.sort(np.hstack([
        sample_info["log_likelihood"],
        levels["log_likelihood"][1:],
    ]))
    lower = combined_logl[int(0.1 * combined_logl.size)]
    upper = combined_logl[-1]
    pad = 0.05 * (upper - lower)
    top.set_ylim([lower - pad, upper + pad])
    bottom = axes[1]
    bottom.plot(sample_log_X, weights, "k.")
    bottom.set_ylabel("posterior weight")
    bottom.set_xlabel("log(X)")
    return fig
|
{
"content_hash": "21aab05e3265aa72cbab4ba3d6eb6a3d",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 78,
"avg_line_length": 33.17272727272727,
"alnum_prop": 0.5939526810998447,
"repo_name": "eggplantbren/DNest4",
"id": "3bd3329111a463324928e954d1f593df372f923f",
"size": "10972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/dnest4/analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1164"
},
{
"name": "C++",
"bytes": "156462"
},
{
"name": "Cython",
"bytes": "8961"
},
{
"name": "Julia",
"bytes": "608"
},
{
"name": "Makefile",
"bytes": "6720"
},
{
"name": "Python",
"bytes": "85447"
},
{
"name": "R",
"bytes": "779"
},
{
"name": "Shell",
"bytes": "83"
},
{
"name": "TeX",
"bytes": "106502"
}
],
"symlink_target": ""
}
|
import warnings
import numpy as np
import scipy.stats as sps
from .abstract_transformation import AbstractTransformation
from ..utils import priors
from ..utils.param import Param as Hyperparameter
def truncate_inputs(func):
    """
    Decorator function.
    Clamps the wrapped function's inputs into [0, 1] before calling it.
    This is to prevent small rounding errors from making the beta cdf and pdf
    go crazy. If the inputs genuinely live outside of [0,1] then we obviously
    don't want to hide that, so a warning is emitted whenever clamping occurs.
    """
    def wrapper(cls_instance, inputs, *args):
        clamped = inputs.copy()
        below = clamped < 0
        if np.any(below):
            warnings.warn('BetaWarp encountered negative values: %s' % clamped[below])
            clamped[below] = 0.0
        above = clamped > 1
        if np.any(above):
            warnings.warn('BetaWarp encountered values above 1: %s' % clamped[above])
            clamped[above] = 1.0
        return func(cls_instance, clamped, *args)
    return wrapper
class BetaWarp(AbstractTransformation):
    """Input warping through the Beta CDF, one (alpha, beta) pair per dimension.

    ``forward_pass`` maps [0, 1] inputs through the Beta CDF;
    ``backward_pass`` multiplies incoming gradients by the Beta PDF at the
    inputs seen in the last forward pass.
    """

    def __init__(self, num_dims, alpha=None, beta=None, name="BetaWarp"):
        self.name = name
        self.num_dims = num_dims
        default_alpha = Hyperparameter(
            initial_value = np.ones(num_dims),
            prior = priors.LognormalTophat(1.5,0.1,10),
            name = 'alpha'
        )
        default_beta = Hyperparameter(
            initial_value = np.ones(num_dims),
            prior = priors.LognormalTophat(1.5,0.1,10),
            name = 'beta'
        )
        # Fall back to the default hyperparameters when none were supplied.
        self.alpha = default_alpha if alpha is None else alpha
        self.beta = default_beta if beta is None else beta
        assert self.alpha.value.shape[0] == self.num_dims and self.beta.value.shape[0] == self.num_dims

    @property
    def hypers(self):
        """The tunable hyperparameters of this transformation."""
        return (self.alpha, self.beta)

    @truncate_inputs
    def forward_pass(self, inputs):
        # Keep the (clamped) inputs around for the backward pass.
        self._inputs = inputs
        return sps.beta.cdf(inputs, self.alpha.value, self.beta.value)

    def backward_pass(self, V):
        grad = sps.beta.pdf(self._inputs, self.alpha.value, self.beta.value)
        # The Beta pdf can be infinite/NaN at the boundaries; neutralize those.
        grad[np.logical_not(np.isfinite(grad))] = 1.0
        return grad*V
|
{
"content_hash": "5f73a492e101bcc582a016d09eab72b7",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 103,
"avg_line_length": 31.19178082191781,
"alnum_prop": 0.6078173034694774,
"repo_name": "fmaguire/BayeHem",
"id": "2afed4c738e4409b4280511bfc910e68abccaae6",
"size": "11907",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Spearmint/spearmint/transformations/beta_warp.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "10914"
},
{
"name": "Batchfile",
"bytes": "23271"
},
{
"name": "C",
"bytes": "3837551"
},
{
"name": "C++",
"bytes": "33218177"
},
{
"name": "CSS",
"bytes": "1556"
},
{
"name": "Groff",
"bytes": "59793"
},
{
"name": "HTML",
"bytes": "365248"
},
{
"name": "IDL",
"bytes": "14"
},
{
"name": "Java",
"bytes": "13433"
},
{
"name": "Lua",
"bytes": "23713"
},
{
"name": "M4",
"bytes": "19951"
},
{
"name": "Makefile",
"bytes": "118962"
},
{
"name": "Objective-C",
"bytes": "8790"
},
{
"name": "Perl",
"bytes": "272990"
},
{
"name": "Python",
"bytes": "6200881"
},
{
"name": "QML",
"bytes": "593"
},
{
"name": "R",
"bytes": "4898"
},
{
"name": "Shell",
"bytes": "270614"
},
{
"name": "TeX",
"bytes": "8434"
},
{
"name": "XSLT",
"bytes": "759"
},
{
"name": "Yacc",
"bytes": "18910"
}
],
"symlink_target": ""
}
|
from neutron.openstack.common import log as logging
from quark import network_strategy
STRATEGY = network_strategy.STRATEGY
LOG = logging.getLogger(__name__)
class UnmanagedDriver(object):
    """Unmanaged network driver.

    Returns a bridge...

    All operations are effectively no-ops that log their invocation; port
    calls return minimal dicts so callers receive a well-formed result.
    """
    def __init__(self):
        self.load_config()

    def load_config(self):
        LOG.info("load_config")

    @classmethod
    def get_name(cls):
        """Return the strategy name identifying this driver."""
        return "UNMANAGED"

    def get_connection(self):
        LOG.info("get_connection")

    def create_network(self, context, network_name, tags=None,
                       network_id=None, **kwargs):
        # Lazy %-style logging args: the message is only formatted when the
        # INFO level is actually emitted (the original built the string
        # eagerly with the % operator on every call).
        LOG.info("create_network %s %s %s", context, network_name, tags)

    def delete_network(self, context, network_id):
        LOG.info("delete_network %s", network_id)

    def diag_network(self, context, network_id, **kwargs):
        LOG.info("diag_network %s", network_id)
        return {}

    def create_port(self, context, network_id, port_id, **kwargs):
        LOG.info("create_port %s %s %s", context.tenant_id, network_id,
                 port_id)
        # Resolve which bridge the network strategy maps this network to.
        bridge_name = STRATEGY.get_network(context, network_id)["bridge"]
        return {"uuid": port_id, "bridge": bridge_name}

    def update_port(self, context, port_id, **kwargs):
        LOG.info("update_port %s %s", context.tenant_id, port_id)
        return {"uuid": port_id}

    def delete_port(self, context, port_id, **kwargs):
        LOG.info("delete_port %s %s", context.tenant_id, port_id)

    def diag_port(self, context, network_id, **kwargs):
        LOG.info("diag_port %s", network_id)
        return {}

    def create_security_group(self, context, group_name, **group):
        LOG.info("Creating security profile %s for tenant %s",
                 group_name, context.tenant_id)

    def delete_security_group(self, context, group_id, **kwargs):
        LOG.info("Deleting security profile %s for tenant %s",
                 group_id, context.tenant_id)

    def update_security_group(self, context, group_id, **kwargs):
        LOG.info("Updating security profile %s for tenant %s",
                 group_id, context.tenant_id)

    def create_security_group_rule(self, context, group_id, rule):
        LOG.info("Creating security rule on group %s for tenant %s",
                 group_id, context.tenant_id)

    def delete_security_group_rule(self, context, group_id, rule):
        LOG.info("Deleting security rule on group %s for tenant %s",
                 group_id, context.tenant_id)
|
{
"content_hash": "a04004ae096083fb344a796d4dc2d8e6",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 73,
"avg_line_length": 35.270270270270274,
"alnum_prop": 0.5950191570881226,
"repo_name": "jkoelker/quark",
"id": "35fdc982a780624bc365bbac3aa5a74d1837a028",
"size": "3224",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "quark/drivers/unmanaged.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "466498"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.apps import AppConfig
class HomeAppConfig(AppConfig):
    """Django application configuration for the ``home_app`` package."""
    name = 'home_app'
|
{
"content_hash": "d539612531b68ba7fa3bd1c77f0daf9c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 18.714285714285715,
"alnum_prop": 0.7480916030534351,
"repo_name": "shahjalalh/homely",
"id": "ec60a3ef7c7e83c926f5bfd0fe36084ae10f8ecf",
"size": "131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "home_app/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "174476"
},
{
"name": "HTML",
"bytes": "15415"
},
{
"name": "JavaScript",
"bytes": "77525"
},
{
"name": "Python",
"bytes": "7697"
}
],
"symlink_target": ""
}
|
"""The data layer used during training to train a R*CNN network.
AttributesRoIDataLayer implements a Caffe Python layer.
"""
import caffe
from fast_rcnn.config import cfg
from attr_roi_data_layer.minibatch import get_minibatch
import numpy as np
import yaml
from multiprocessing import Process, Queue
# import pdb
class AttributesRoIDataLayer(caffe.Layer):
    """R*CNN data layer used for training.

    Feeds minibatches (image data, primary/secondary RoIs and labels) to
    the net, optionally prefetching them in a background BlobFetcher
    process when cfg.TRAIN.USE_PREFETCH is set.
    """
    def _shuffle_roidb_inds(self):
        """Randomly permute the training roidb."""
        self._perm = np.random.permutation(np.arange(len(self._roidb)))
        self._cur = 0
    def _get_next_minibatch_inds(self):
        """Return the roidb indices for the next minibatch."""
        # Reshuffle once the cursor would run past the end of the roidb.
        if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
            self._shuffle_roidb_inds()
        db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
        self._cur += cfg.TRAIN.IMS_PER_BATCH
        return db_inds
    def _get_next_minibatch(self):
        """Return the blobs to be used for the next minibatch.

        If cfg.TRAIN.USE_PREFETCH is True, then blobs will be computed in a
        separate process and made available through self._blob_queue.
        """
        if cfg.TRAIN.USE_PREFETCH:
            return self._blob_queue.get()
        else:
            db_inds = self._get_next_minibatch_inds()
            minibatch_db = [self._roidb[i] for i in db_inds]
            return get_minibatch(minibatch_db, self._num_classes)
    def set_roidb(self, roidb):
        """Set the roidb to be used by this layer during training."""
        self._roidb = roidb
        self._shuffle_roidb_inds()
        if cfg.TRAIN.USE_PREFETCH:
            self._blob_queue = Queue(10)
            self._prefetch_process = BlobFetcher(self._blob_queue,
                                                 self._roidb,
                                                 self._num_classes)
            self._prefetch_process.start()
            # Terminate the child process when the parent exits.
            def cleanup():
                # print() form works under both Python 2 and 3
                # (the original used a Python-2-only print statement).
                print('Terminating BlobFetcher')
                self._prefetch_process.terminate()
                self._prefetch_process.join()
            import atexit
            atexit.register(cleanup)
    def setup(self, bottom, top):
        """Setup the RoIDataLayer."""
        # Parse the layer parameter string, which must be valid YAML.
        # safe_load avoids arbitrary Python object construction from the
        # prototxt-supplied parameter string (yaml.load is unsafe and,
        # in modern PyYAML, requires an explicit Loader).
        layer_params = yaml.safe_load(self.param_str_)
        self._num_classes = layer_params['num_classes']
        self._name_to_top_map = {
            'data': 0,
            'rois': 1,
            'secondary_rois': 2,
            'labels': 3}
        # data blob: holds a batch of N images, each with 3 channels
        # The height and width (100 x 100) are dummy values
        top[0].reshape(1, 3, 100, 100)
        # rois blob: holds R regions of interest, each is a 5-tuple
        # (n, x1, y1, x2, y2) specifying an image batch index n and a
        # rectangle (x1, y1, x2, y2)
        top[1].reshape(1, 5)
        # secondary rois blob: holds R*C regions of interest, where C is
        # the number of secondary regions
        # (n, x1, y1, x2, y2) specifying an image batch
        top[2].reshape(cfg.TRAIN.CONTEXT_NUM_ROIS, 5)
        # labels blob: R categorical labels in [0, ..., K] for K foreground
        # classes plus background
        top[3].reshape(1, self._num_classes)
    def forward(self, bottom, top):
        """Get blobs and copy them into this layer's top blob vector."""
        blobs = self._get_next_minibatch()
        # items() instead of Python-2-only iteritems(); both are iterable
        # so behavior is unchanged.
        for blob_name, blob in blobs.items():
            top_ind = self._name_to_top_map[blob_name]
            # Reshape net's input blobs
            top[top_ind].reshape(*(blob.shape))
            # Copy data into net's input blobs
            top[top_ind].data[...] = blob.astype(np.float32, copy=False)
    def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass
    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        pass
class BlobFetcher(Process):
    """Experimental class for prefetching blobs in a separate process."""
    def __init__(self, queue, roidb, num_classes):
        """Store fetch state and seed the RNG.

        :param queue: multiprocessing.Queue the fetched blobs are put onto
        :param roidb: list of roidb entries to sample minibatches from
        :param num_classes: class count, forwarded to get_minibatch
        """
        super(BlobFetcher, self).__init__()
        self._queue = queue
        self._roidb = roidb
        self._num_classes = num_classes
        self._perm = None
        self._cur = 0
        self._shuffle_roidb_inds()
        # fix the random seed for reproducibility
        # NOTE(review): the seed is set *after* the initial shuffle above,
        # so the first permutation is not reproducible -- confirm intent.
        np.random.seed(cfg.RNG_SEED)
    def _shuffle_roidb_inds(self):
        """Randomly permute the training roidb."""
        # TODO(rbg): remove duplicated code
        self._perm = np.random.permutation(np.arange(len(self._roidb)))
        self._cur = 0
    def _get_next_minibatch_inds(self):
        """Return the roidb indices for the next minibatch."""
        # TODO(rbg): remove duplicated code
        if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
            self._shuffle_roidb_inds()
        db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
        self._cur += cfg.TRAIN.IMS_PER_BATCH
        return db_inds
    def run(self):
        """Fetch minibatches forever, pushing each one onto the queue."""
        # print() form is valid under both Python 2 and 3 (the original
        # used a Python-2-only print statement).
        print('BlobFetcher started')
        while True:
            db_inds = self._get_next_minibatch_inds()
            minibatch_db = [self._roidb[i] for i in db_inds]
            blobs = get_minibatch(minibatch_db, self._num_classes)
            self._queue.put(blobs)
|
{
"content_hash": "1ff34d66c985cfddfd34f56e84b01205",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 75,
"avg_line_length": 36.79865771812081,
"alnum_prop": 0.5898230895495167,
"repo_name": "gkioxari/RstarCNN",
"id": "983d3fc4692ccbe212296c511989b36c516a7880",
"size": "5954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/attr_roi_data_layer/layer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "56"
},
{
"name": "Matlab",
"bytes": "1878"
},
{
"name": "Python",
"bytes": "245433"
}
],
"symlink_target": ""
}
|
"""
This module contains convenience functions for getting a coordinate object
for a named object by querying SESAME and getting the first returned result.
Note that this is intended to be a convenience, and is very simple. If you
need precise coordinates for an object you should find the appropriate
reference for that measurement and input the coordinates manually.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# Standard library
import os
import re
import socket
# Astropy
from ..extern.six.moves import urllib
from .. import units as u
from .sky_coordinate import SkyCoord
from ..utils import data
from ..utils.state import ScienceState
__all__ = ["get_icrs_coordinates"]
class sesame_url(ScienceState):
    """
    The URL(s) to Sesame's web-queryable database.
    """
    # Main Sesame host first, vizier mirror second; tried in order.
    _value = ["http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame/",
              "http://vizier.cfa.harvard.edu/viz-bin/nph-sesame/"]
    @classmethod
    def validate(cls, value):
        # TODO: Implement me -- currently accepts any value unchecked.
        return value
class sesame_database(ScienceState):
    """
    This specifies the default database that SESAME will query when
    using the name resolve mechanism in the coordinates
    subpackage. Default is to search all databases, but this can be
    'all', 'simbad', 'ned', or 'vizier'.
    """
    _value = 'all'
    @classmethod
    def validate(cls, value):
        """Accept only one of the known database names."""
        known_databases = ('all', 'simbad', 'ned', 'vizier')
        if value in known_databases:
            return value
        raise ValueError("Unknown database '{0}'".format(value))
class NameResolveError(Exception):
    """Raised when an object name cannot be resolved to coordinates."""
    pass
def _parse_response(resp_data):
"""
Given a string response from SESAME, parse out the coordinates by looking
for a line starting with a J, meaning ICRS J2000 coordinates.
Parameters
----------
resp_data : str
The string HTTP response from SESAME.
Returns
-------
ra : str
The string Right Ascension parsed from the HTTP response.
dec : str
The string Declination parsed from the HTTP response.
"""
pattr = re.compile(r"%J\s*([0-9\.]+)\s*([\+\-\.0-9]+)")
matched = pattr.search(resp_data.decode('utf-8'))
if matched is None:
return None, None
else:
ra, dec = matched.groups()
return ra, dec
def get_icrs_coordinates(name):
    """
    Retrieve an ICRS object by using an online name resolving service to
    retrieve coordinates for the specified name. By default, this will
    search all available databases until a match is found. If you would like
    to specify the database, use the science state
    ``astropy.coordinates.name_resolve.sesame_database``. You can also
    specify a list of servers to use for querying Sesame using the science
    state ``astropy.coordinates.name_resolve.sesame_url``. This will try
    each one in order until a valid response is returned. By default, this
    list includes the main Sesame host and a mirror at vizier. The
    configuration item `astropy.utils.data.Conf.remote_timeout` controls the
    number of seconds to wait for a response from the server before giving
    up.

    Parameters
    ----------
    name : str
        The name of the object to get coordinates for, e.g. ``'M42'``.

    Returns
    -------
    coord : `astropy.coordinates.ICRS` object
        The object's coordinates in the ICRS frame.

    Raises
    ------
    NameResolveError
        If no server responds or the name is not found.
    """
    database = sesame_database.get()
    # The web API just takes the first letter of the database name
    db = database.upper()[0]
    # Make sure we don't have duplicates in the url list
    urls = []
    domains = []
    for url in sesame_url.get():
        domain = urllib.parse.urlparse(url).netloc
        # Check for duplicates
        if domain not in domains:
            domains.append(domain)
            # Add the query to the end of the url, add to url list
            fmt_url = os.path.join(url, "{db}?{name}")
            fmt_url = fmt_url.format(name=urllib.parse.quote(name), db=db)
            urls.append(fmt_url)
    for url in urls:
        try:
            # Retrieve ascii name resolve data from CDS
            resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout)
            resp_data = resp.read()
            break
        except urllib.error.URLError as e:
            # This catches a timeout error, see:
            # http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python
            if isinstance(e.reason, socket.timeout):
                # If it was a timeout, try with the next URL
                continue
            else:
                # NOTE(review): this branch handles *any* non-timeout
                # URLError, yet the message reports a timeout -- confirm
                # the wording is intentional.
                raise NameResolveError(
                    "Unable to retrieve coordinates for name '{0}'; "
                    "connection timed out".format(name))
        except socket.timeout:
            # There are some cases where urllib2 does not catch socket.timeout
            # especially while receiving response data on an already previously
            # working request
            raise NameResolveError(
                "Unable to retrieve coordinates for name '{0}'; connection "
                "timed out".format(name))
    # All Sesame URL's timed out...
    # (for/else: this runs only when the loop never hit ``break``, i.e.
    # every URL was skipped due to a timeout.)
    else:
        raise NameResolveError("All Sesame queries timed out. Unable to "
                               "retrieve coordinates.")
    ra, dec = _parse_response(resp_data)
    if ra is None and dec is None:
        if db == "A":
            err = "Unable to find coordinates for name '{0}'".format(name)
        else:
            err = "Unable to find coordinates for name '{0}' in database {1}"\
                .format(name, database)
        raise NameResolveError(err)
    # Return SkyCoord object
    sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')
    return sc
|
{
"content_hash": "43473d0f119cd4d95ca9f3ec831776c8",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 91,
"avg_line_length": 33.2,
"alnum_prop": 0.6316695352839932,
"repo_name": "joergdietrich/astropy",
"id": "90d6d66d4147e8ae2c845bc43138b45530f5efad",
"size": "5875",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astropy/coordinates/name_resolve.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "366874"
},
{
"name": "C++",
"bytes": "1825"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Jupyter Notebook",
"bytes": "62553"
},
{
"name": "Python",
"bytes": "7616749"
},
{
"name": "Shell",
"bytes": "425"
},
{
"name": "TeX",
"bytes": "778"
}
],
"symlink_target": ""
}
|
import os
# Point Django at the project settings module; setdefault keeps any value
# already present in the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoWebProject.settings")
# This application object is used by the development server
# as well as any WSGI server configured to use this file.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
{
"content_hash": "7e7ad1f08b0f4c885620b903f55a7625",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 76,
"avg_line_length": 41.714285714285715,
"alnum_prop": 0.8047945205479452,
"repo_name": "nayak16/TartanHacks-2015",
"id": "75bf82329649b3e957bc2a44d312d34dad67966b",
"size": "292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "124143"
},
{
"name": "HTML",
"bytes": "24932"
},
{
"name": "JavaScript",
"bytes": "16334"
},
{
"name": "Python",
"bytes": "28732"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from RSA import gen_keys
from RSA import crypt
import json
import os
import getpass
import argparse
import base64
from builtins import input
class Session(object):
    """A local RSA identity: key pair plus named contacts.

    State is persisted as JSON in ``~/.azeroth`` under the keys
    "public", "private" and "contacts".
    """
    def __init__(self, load=True):
        """Load an existing config, or create one, unless load is False."""
        self.public = None
        self.private = None
        self.contacts = None
        self.config_location = os.path.expanduser("~/.azeroth")
        if load is True:
            if os.path.exists(self.config_location):
                self.load_config()
            else:
                self.create_config()
    def load_config(self):
        """Populate keys and contacts from the JSON config file."""
        with open(self.config_location,"r") as f:
            config = json.load(f)
        # passphrase = getpass.getpass("Please enter your passphrase: ")
        # if len(passphrase) != 10:
        #     raise Exception("Passphrase is wrong length")
        passphrase = ""
        self.public = config["public"]
        # The private key is the (currently empty) passphrase prefix plus
        # the stored remainder -- see the disabled passphrase scheme above.
        self.private = passphrase+config["private"]
        self.contacts = config["contacts"]
    def create_config(self):
        """Generate a fresh key pair and write a brand-new config file.

        NOTE(review): the file's contacts are reset to {} but
        ``self.contacts`` is not updated here (it stays None on first
        run) -- confirm whether that is intended.
        """
        self.public, self.private = gen_keys.gen_keys()
        # passphrase = self.private[:10]
        # priv_save = self.private[10:]
        # print("Your passphrase is:\n{0}\n\nYour public key is:\n{1}".format(str(passphrase), str(self.public)))
        priv_save = self.private
        print("Your public key is:\n{0}".format(str(self.public)))
        config = {"public":str(self.public), "private":str(priv_save), "contacts":{}}
        with open(self.config_location, "w") as f:
            json.dump(config, f)
    def save_config(self):
        """Rewrite the config file, refreshing only the contacts entry."""
        with open(self.config_location,"r") as f:
            config = json.load(f)
        config["contacts"] = self.contacts
        with open(self.config_location, "w") as f:
            json.dump(config, f)
    def add_contact(self, contact_name, contact_public):
        """Store a contact's public key under the given name and persist."""
        self.contacts[contact_name] = contact_public
        self.save_config()
    def encrypt(self, contact_name, message):
        """Encrypt ``message`` for a contact and sign it with our key.

        Returns a base64-encoded JSON payload holding "message" and
        "signature".
        """
        signature = crypt.create_signature(self.public, self.private, message)
        contact_key = self.contacts[contact_name]
        M = crypt.encrypt(contact_key, message)
        payload = {"message" : M, "signature" : signature}
        return base64.b64encode(json.dumps(payload).encode("utf-8")).decode("utf-8")
    def decrypt(self, contact_name, encoded):
        """Decode and decrypt a payload, verifying its signature if possible.

        ``contact_name`` may be None, in which case (or when the payload
        lacks a signature) a warning is printed instead of validating.
        Raises a generic Exception when the payload is not valid JSON or
        has no "message" key.
        """
        decompressed = base64.b64decode(encoded.encode("utf-8")).decode("utf-8")
        try:
            payload = json.loads(decompressed)
        except ValueError:
            raise Exception("Message contents are invalid")
        if "message" not in payload:
            raise Exception("Message contents are invalid")
        decoded = crypt.decrypt(self.public, self.private, payload["message"])
        if contact_name is None or "signature" not in payload:
            print("WARNING: No signature validation")
        else:
            contact_key = self.contacts[contact_name]
            if crypt.verify_signature(contact_key, payload["signature"], decoded):
                print("Message validated by signature")
            else:
                print("WARNING: Message signature could not be validated!")
        return decoded
if __name__=="__main__":
    parser = argparse.ArgumentParser(description="Send and receive RSA encrypted messages")
    parser.add_argument("--new", action="store_true", help="Generate a new identity")
    parser.add_argument("--add", action="store_true", help="Add a new contact")
    parser.add_argument("--send", action="store_true", help="Send a message to an existing contact")
    parser.add_argument("--receive", action="store_true", help="Receive a message from an existing contact")
    args = parser.parse_args()
    # BUG FIX: the original condition also read args.receive_no_validate and
    # args.test, neither of which is defined by the parser above, so invoking
    # the script with no flags raised AttributeError instead of exiting
    # quietly. Only the four defined flags are tested now.
    if args.new or args.add or args.send or args.receive:
        # --new skips loading so a fresh identity is generated below.
        s = Session(load=(not args.new))
        if args.new:
            print("Generating new configuration...")
            s.create_config()
            print("")
        if args.add:
            contact_name = input("Enter contact name: ")
            contact_public = input("Enter contact public key: ")
            s.add_contact(contact_name, contact_public)
            print("Contact added")
            print("")
        if args.send:
            contact_name = input("Enter contact name: ")
            message = input("Enter message to send: ")
            encoded = s.encrypt(contact_name, message)
            print("Encoded message:\n{0}".format(str(encoded)))
            print("")
        if args.receive:
            contact_name = input("Enter contact name: ")
            encoded = input("Enter received message: ")
            message = s.decrypt(contact_name, encoded)
            print("Received message:\n{0}".format(str(message)))
            print("")
|
{
"content_hash": "e23d13a1991ba188e2ee8889229bb5bd",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 113,
"avg_line_length": 37.664,
"alnum_prop": 0.6166100254885302,
"repo_name": "DocSohl/Azeroth",
"id": "6303dc7d33ace0195e4d89a7420dd747c69ed5b5",
"size": "4708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10911"
}
],
"symlink_target": ""
}
|
import time
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.powermax import utils
LOG = logging.getLogger(__name__)
# Link-state string -- presumably the value the REST API reports for a
# write-disabled SnapVX target; confirm against the REST client.
WRITE_DISABLED = "Write Disabled"
# SnapVX unlink retry cadence: poll every UNLINK_INTERVAL seconds, give up
# after UNLINK_RETRIES attempts (used by _unlink_volume).
UNLINK_INTERVAL = 15
UNLINK_RETRIES = 30
class PowerMaxProvision(object):
    """Provisioning Class for Dell EMC PowerMax volume drivers.

    It supports VMAX 3, All Flash and PowerMax arrays.
    """
def __init__(self, rest):
self.utils = utils.PowerMaxUtils()
self.rest = rest
    def create_storage_group(
            self, array, storagegroup_name, srp, slo, workload,
            extra_specs, do_disable_compression=False):
        """Create a new storage group.

        :param array: the array serial number
        :param storagegroup_name: the group name (String)
        :param srp: the SRP (String)
        :param slo: the SLO (String)
        :param workload: the workload (String)
        :param extra_specs: additional info
        :param do_disable_compression: disable compression flag
        :returns: storagegroup - storage group object
        """
        start_time = time.time()
        # Lock on (storage group, array) so a concurrent create of the same
        # group becomes a no-op via the existence check below.
        @coordination.synchronized("emc-sg-{storage_group}-{array}")
        def do_create_storage_group(storage_group, array):
            # Check if storage group has been recently created
            storagegroup = self.rest.get_storage_group(
                array, storagegroup_name)
            if storagegroup is None:
                storagegroup = self.rest.create_storage_group(
                    array, storage_group, srp, slo, workload, extra_specs,
                    do_disable_compression)
                LOG.debug("Create storage group took: %(delta)s H:MM:SS.",
                          {'delta': self.utils.get_time_delta(start_time,
                                                              time.time())})
                LOG.info("Storage group %(sg)s created successfully.",
                         {'sg': storagegroup_name})
            else:
                LOG.info("Storage group %(sg)s already exists.",
                         {'sg': storagegroup_name})
            return storagegroup
        return do_create_storage_group(storagegroup_name, array)
    def create_volume_from_sg(self, array, volume_name, storagegroup_name,
                              volume_size, extra_specs, rep_info=None):
        """Create a new volume in the given storage group.

        :param array: the array serial number
        :param volume_name: the volume name -- string
        :param storagegroup_name: the storage group name
        :param volume_size: volume size -- string
        :param extra_specs: extra specifications
        :param rep_info: replication session info dict -- optional
        :returns: volume info -- dict
        """
        # Serialise volume creation within the same storage group.
        @coordination.synchronized("emc-sg-{storage_group}-{array}")
        def do_create_volume_from_sg(storage_group, array):
            start_time = time.time()
            if rep_info and rep_info.get('initial_device_list', False):
                # Snapshot the group's current device list into rep_info --
                # presumably so the caller can identify the device created
                # below by diffing; confirm against the caller.
                local_device_list = self.rest.get_volume_list(
                    extra_specs['array'],
                    {'storageGroupId': storagegroup_name})
                rep_info['initial_device_list'] = local_device_list
            volume_dict = self.rest.create_volume_from_sg(
                array, volume_name, storage_group,
                volume_size, extra_specs, rep_info)
            LOG.debug("Create volume from storage group "
                      "took: %(delta)s H:MM:SS.",
                      {'delta': self.utils.get_time_delta(start_time,
                                                          time.time())})
            return volume_dict
        return do_create_volume_from_sg(storagegroup_name, array)
def delete_volume_from_srp(self, array, device_id, volume_name):
"""Delete a volume from the srp.
:param array: the array serial number
:param device_id: the volume device id
:param volume_name: the volume name
"""
start_time = time.time()
LOG.debug("Delete volume %(volume_name)s with device id %(dev)s "
"from srp.", {'volume_name': volume_name,
'dev': device_id})
self.rest.delete_volume(array, device_id)
LOG.debug("Delete volume took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(
start_time, time.time())})
    def create_volume_snapvx(self, array, source_device_id,
                             snap_name, extra_specs, ttl=0):
        """Create a snapVx of a volume.

        :param array: the array serial number
        :param source_device_id: source volume device id
        :param snap_name: the snapshot name
        :param extra_specs: the extra specifications
        :param ttl: time to live in hours, defaults to 0
        """
        # All SnapVX operations on a given source device share the
        # "emc-snapvx-<device>" lock, so this cannot race unlink/delete.
        @coordination.synchronized("emc-snapvx-{src_device_id}")
        def do_create_volume_snap(src_device_id):
            start_time = time.time()
            LOG.debug("Create Snap Vx snapshot of: %(source)s.",
                      {'source': src_device_id})
            self.rest.create_volume_snap(
                array, snap_name, src_device_id, extra_specs, ttl)
            LOG.debug("Create volume snapVx took: %(delta)s H:MM:SS.",
                      {'delta': self.utils.get_time_delta(start_time,
                                                          time.time())})
        do_create_volume_snap(source_device_id)
    def create_volume_replica(
            self, array, source_device_id, target_device_id,
            snap_name, extra_specs, create_snap=False, copy_mode=False):
        """Create a snap vx of a source and copy to a target.

        :param array: the array serial number
        :param source_device_id: source volume device id
        :param target_device_id: target volume device id
        :param snap_name: the name for the snap shot
        :param extra_specs: extra specifications
        :param create_snap: Flag for create snapvx
        :param copy_mode: If copy mode should be used for SnapVX target links
        """
        start_time = time.time()
        if create_snap:
            # We are creating a temporary snapshot. Specify a ttl of 1 hour
            self.create_volume_snapvx(array, source_device_id,
                                      snap_name, extra_specs, ttl=1)
        # Link source to target
        # (serialised on the source device's SnapVX lock)
        @coordination.synchronized("emc-snapvx-{src_device_id}")
        def do_modify_volume_snap(src_device_id):
            self.rest.modify_volume_snap(
                array, src_device_id, target_device_id, snap_name,
                extra_specs, link=True, copy=copy_mode)
        do_modify_volume_snap(source_device_id)
        LOG.debug("Create element replica took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(start_time,
                                                      time.time())})
    def unlink_snapvx_tgt_volume(
            self, array, target_device_id, source_device_id, snap_name,
            extra_specs, snap_id, loop=True):
        """Unlink a snapshot from its target volume.

        :param array: the array serial number
        :param source_device_id: source volume device id
        :param target_device_id: target volume device id
        :param snap_name: the name for the snap shot
        :param extra_specs: extra specifications
        :param snap_id: the unique snap id of the SnapVX
        :param loop: if looping call is required for handling retries
        """
        # Hold the source device's SnapVX lock while breaking the link.
        @coordination.synchronized("emc-snapvx-{src_device_id}")
        def do_unlink_volume(src_device_id):
            LOG.debug("Break snap vx link relationship between: %(src)s "
                      "and: %(tgt)s.",
                      {'src': src_device_id, 'tgt': target_device_id})
            self._unlink_volume(array, src_device_id, target_device_id,
                                snap_name, extra_specs, snap_id=snap_id,
                                list_volume_pairs=None, loop=loop)
        do_unlink_volume(source_device_id)
    def _unlink_volume(
            self, array, source_device_id, target_device_id, snap_name,
            extra_specs, snap_id=None, list_volume_pairs=None, loop=True):
        """Unlink a target volume from its source volume.

        :param array: the array serial number
        :param source_device_id: the source device id
        :param target_device_id: the target device id
        :param snap_name: the snap name
        :param extra_specs: extra specifications
        :param snap_id: the unique snap id of the SnapVX
        :param list_volume_pairs: list of volume pairs, optional
        :param loop: if looping call is required for handling retries
        :returns: return code
        """
        def _unlink_vol():
            """Called at an interval until the synchronization is finished.

            :raises: loopingcall.LoopingCallDone
            """
            retries = kwargs['retries']
            try:
                kwargs['retries'] = retries + 1
                if not kwargs['modify_vol_success']:
                    self.rest.modify_volume_snap(
                        array, source_device_id, target_device_id, snap_name,
                        extra_specs, snap_id=snap_id, unlink=True,
                        list_volume_pairs=list_volume_pairs)
                    kwargs['modify_vol_success'] = True
            except exception.VolumeBackendAPIException:
                # The unlink can fail transiently (e.g. while the copy is
                # still in flight); swallow and let the next tick retry.
                pass
            if kwargs['retries'] > UNLINK_RETRIES:
                LOG.error("_unlink_volume failed after %(retries)d "
                          "tries.", {'retries': retries})
                # 30 becomes the timer's final return value below.
                raise loopingcall.LoopingCallDone(retvalue=30)
            if kwargs['modify_vol_success']:
                raise loopingcall.LoopingCallDone()
        if not loop:
            # Single-shot mode: one attempt, exceptions propagate to caller.
            self.rest.modify_volume_snap(
                array, source_device_id, target_device_id, snap_name,
                extra_specs, snap_id=snap_id, unlink=True,
                list_volume_pairs=list_volume_pairs)
        else:
            # Retry mode: poll every UNLINK_INTERVAL seconds until success
            # or UNLINK_RETRIES attempts have been made.
            kwargs = {'retries': 0,
                      'modify_vol_success': False}
            timer = loopingcall.FixedIntervalLoopingCall(_unlink_vol)
            rc = timer.start(interval=UNLINK_INTERVAL).wait()
            return rc
    def delete_volume_snap(self, array, snap_name,
                           source_device_ids, snap_id=None, restored=False):
        """Delete a snapVx snapshot of a volume.

        :param array: the array serial number
        :param snap_name: the snapshot name
        :param source_device_ids: the source device ids
        :param snap_id: the unique snap id of the SnapVX
        :param restored: Flag to indicate if restored session is being deleted
        """
        # Lock on the (first) source device so the delete cannot race other
        # SnapVX operations for that device.
        @coordination.synchronized("emc-snapvx-{src_device_id}")
        def do_delete_volume_snap(src_device_id):
            LOG.debug("Delete SnapVx: %(snap_name)s for source %(src)s and "
                      "devices %(devs)s.",
                      {'snap_name': snap_name, 'src': src_device_id,
                       'devs': source_device_ids})
            self.rest.delete_volume_snap(
                array, snap_name, source_device_ids, snap_id=snap_id,
                restored=restored)
        # source_device_ids may be a single id or a list; the first entry
        # is used for locking and for the snap-id lookup.
        device_id = source_device_ids[0] if isinstance(
            source_device_ids, list) else source_device_ids
        if snap_id is None:
            snap_id = self.rest.get_snap_id(array, device_id, snap_name)
        do_delete_volume_snap(device_id)
    def is_restore_complete(self, array, source_device_id,
                            snap_name, snap_id, extra_specs):
        """Check and wait for a restore to complete.

        Polls _is_restore_complete every extra_specs interval seconds,
        giving up after the configured number of retries.

        :param array: the array serial number
        :param source_device_id: source device id
        :param snap_name: snapshot name
        :param snap_id: unique snap id
        :param extra_specs: extra specification
        :returns: bool
        """
        def _wait_for_restore():
            """Called at an interval until the restore is finished.

            :raises: loopingcall.LoopingCallDone
            :raises: VolumeBackendAPIException
            """
            retries = kwargs['retries']
            try:
                kwargs['retries'] = retries + 1
                if not kwargs['wait_for_restore_called']:
                    if self._is_restore_complete(
                            array, source_device_id, snap_name, snap_id):
                        kwargs['wait_for_restore_called'] = True
            except Exception:
                exception_message = (_("Issue encountered waiting for "
                                       "restore."))
                LOG.exception(exception_message)
                raise exception.VolumeBackendAPIException(
                    message=exception_message)
            if kwargs['wait_for_restore_called']:
                raise loopingcall.LoopingCallDone()
            if kwargs['retries'] > int(extra_specs[utils.RETRIES]):
                LOG.error("_wait_for_restore failed after %(retries)d "
                          "tries.", {'retries': retries})
                # Give up: the retry budget becomes the final return value.
                raise loopingcall.LoopingCallDone(
                    retvalue=int(extra_specs[utils.RETRIES]))
        kwargs = {'retries': 0,
                  'wait_for_restore_called': False}
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_restore)
        rc = timer.start(interval=int(extra_specs[utils.INTERVAL])).wait()
        return rc
def _is_restore_complete(
self, array, source_device_id, snap_name, snap_id):
"""Helper function to check if restore is complete.
:param array: the array serial number
:param source_device_id: source device id
:param snap_name: the snapshot name
:param snap_id: unique snap id
:returns: restored -- bool
"""
restored = False
snap_details = self.rest.get_volume_snap(
array, source_device_id, snap_name, snap_id)
if snap_details:
linked_devices = snap_details.get("linkedDevices", [])
for linked_device in linked_devices:
if ('targetDevice' in linked_device and
source_device_id == linked_device['targetDevice']):
if ('state' in linked_device and
linked_device['state'] == "Restored"):
restored = True
return restored
def delete_temp_volume_snap(self, array, snap_name,
source_device_id, snap_id):
"""Delete the temporary snapshot created for clone operations.
There can be instances where the source and target both attempt to
delete a temp snapshot simultaneously, so we must lock the snap and
then double check it is on the array.
:param array: the array serial number
:param snap_name: the snapshot name
:param source_device_id: the source device id
:param snap_id: the unique snap id of the SnapVX
"""
snapvx = self.rest.get_volume_snap(
array, source_device_id, snap_name, snap_id)
if snapvx:
self.delete_volume_snap(
array, snap_name, source_device_id, snap_id=snap_id,
restored=False)
    def delete_volume_snap_check_for_links(
            self, array, snap_name, source_devices, extra_specs, snap_id):
        """Check if a snap has any links before deletion.

        If a snapshot has any links, break the replication relationship
        before deletion.

        :param array: the array serial number
        :param snap_name: the snapshot name
        :param source_devices: the source device ids
        :param extra_specs: the extra specifications
        :param snap_id: the unique snap id of the SnapVX
        """
        list_device_pairs = []
        if not isinstance(source_devices, list):
            source_devices = [source_devices]
        for source_device in source_devices:
            LOG.debug("Check for linked devices to SnapVx: %(snap_name)s "
                      "for volume %(vol)s.",
                      {'vol': source_device, 'snap_name': snap_name})
            linked_list = self.rest.get_snap_linked_device_list(
                array, source_device, snap_name, snap_id)
            if len(linked_list) == 1:
                # Exactly one target: collect the pair so all such unlinks
                # can be batched into a single call below.
                target_device = linked_list[0]['targetDevice']
                list_device_pairs.append((source_device, target_device))
            else:
                for link in linked_list:
                    # If a single source volume has multiple targets,
                    # we must unlink each target individually
                    target_device = link['targetDevice']
                    self._unlink_volume(
                        array, source_device, target_device, snap_name,
                        extra_specs, snap_id=snap_id)
        if list_device_pairs:
            self._unlink_volume(
                array, "", "", snap_name, extra_specs, snap_id=snap_id,
                list_volume_pairs=list_device_pairs)
        if source_devices:
            self.delete_volume_snap(
                array, snap_name, source_devices, snap_id, restored=False)
    def extend_volume(self, array, device_id, new_size, extra_specs,
                      rdf_group=None):
        """Extend a volume.

        :param array: the array serial number
        :param device_id: the volume device id
        :param new_size: the new size (GB)
        :param extra_specs: the extra specifications
        :param rdf_group: the rdf group number, if required
        :returns: status_code
        """
        start_time = time.time()
        if rdf_group:
            # Serialise extends of replicated volumes per RDF group so
            # concurrent operations on the same group cannot interleave.
            @coordination.synchronized('emc-rg-{rdf_group}')
            def _extend_replicated_volume(rdf_group):
                self.rest.extend_volume(array, device_id,
                                        new_size, extra_specs, rdf_group)
            _extend_replicated_volume(rdf_group)
        else:
            self.rest.extend_volume(array, device_id, new_size, extra_specs)
        LOG.debug("Extend PowerMax/VMAX volume took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(start_time,
                                                      time.time())})
    def get_srp_pool_stats(self, array, array_info):
        """Get the srp capacity stats.

        :param array: the array serial number
        :param array_info: the array dict; must contain key 'srpName'
        :returns: total_capacity_gb
        :returns: remaining_capacity_gb
        :returns: subscribed_capacity_gb
        :returns: array_reserve_percent
        """
        # Defaults returned when any capacity key is missing below.
        total_capacity_gb = 0
        remaining_capacity_gb = 0
        subscribed_capacity_gb = 0
        array_reserve_percent = 0
        srp = array_info['srpName']
        LOG.debug(
            "Retrieving capacity for srp %(srpName)s on array %(array)s.",
            {'srpName': srp, 'array': array})
        srp_details = self.rest.get_srp_by_name(array, srp)
        if not srp_details:
            LOG.error("Unable to retrieve srp instance of %(srpName)s on "
                      "array %(array)s.",
                      {'srpName': srp, 'array': array})
            return 0, 0, 0, 0
        try:
            srp_capacity = srp_details['srp_capacity']
            # Payload reports TB figures; * units.Ki converts to GB.
            total_capacity_gb = srp_capacity['usable_total_tb'] * units.Ki
            try:
                used_capacity_gb = srp_capacity['usable_used_tb'] * units.Ki
                remaining_capacity_gb = float(
                    total_capacity_gb - used_capacity_gb)
            except KeyError:
                LOG.error("Unable to retrieve remaining_capacity_gb.")
            subscribed_capacity_gb = (
                srp_capacity['subscribed_total_tb'] * units.Ki)
            array_reserve_percent = srp_details['reserved_cap_percent']
        except KeyError:
            # A missing key leaves any stats not yet assigned at their
            # zero defaults; the values set before the failure are kept.
            pass
        return (total_capacity_gb, remaining_capacity_gb,
                subscribed_capacity_gb, array_reserve_percent)
def verify_slo_workload(
self, array, slo, workload, is_next_gen=None, array_model=None):
"""Check if SLO and workload values are valid.
:param array: the array serial number
:param slo: Service Level Object e.g bronze
:param workload: workload e.g DSS
:param is_next_gen: can be None
:returns: boolean
"""
is_valid_slo, is_valid_workload = False, False
if workload and workload.lower() == 'none':
workload = None
if not workload:
is_valid_workload = True
if slo and slo.lower() == 'none':
slo = None
if is_next_gen or is_next_gen is None:
array_model, is_next_gen = self.rest.get_array_model_info(
array)
valid_slos = self.rest.get_slo_list(array, is_next_gen, array_model)
valid_workloads = self.rest.get_workload_settings(array, is_next_gen)
for valid_slo in valid_slos:
if slo == valid_slo:
is_valid_slo = True
break
for valid_workload in valid_workloads:
if workload == valid_workload:
is_valid_workload = True
break
if not slo:
is_valid_slo = True
if workload:
is_valid_workload = False
if not is_valid_slo:
LOG.error(
"SLO: %(slo)s is not valid. Valid values are: "
"%(valid_slos)s.", {'slo': slo, 'valid_slos': valid_slos})
if not is_valid_workload:
LOG.warning(
"Workload: %(workload)s is not valid. Valid values are "
"%(valid_workloads)s. Note you cannot "
"set a workload without an SLO.",
{'workload': workload, 'valid_workloads': valid_workloads})
return is_valid_slo, is_valid_workload
def get_slo_workload_settings_from_storage_group(
self, array, sg_name):
"""Get slo and workload settings from a storage group.
:param array: the array serial number
:param sg_name: the storage group name
:returns: storage group slo settings
"""
slo = 'NONE'
workload = 'NONE'
storage_group = self.rest.get_storage_group(array, sg_name)
if storage_group:
try:
slo = storage_group['slo']
workload = 'NONE' if self.rest.is_next_gen_array(array) else (
storage_group['workload'])
except KeyError:
pass
else:
exception_message = (_(
"Could not retrieve storage group %(sg_name)s. ") %
{'sg_name': sg_name})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
message=exception_message)
return '%(slo)s+%(workload)s' % {'slo': slo, 'workload': workload}
    @coordination.synchronized('emc-rg-{rdf_group}')
    def break_rdf_relationship(self, array, device_id, sg_name,
                               rdf_group, rep_extra_specs, state):
        """Break the rdf relationship between a pair of devices.

        Resuming replication after suspending is necessary where this
        function is called from. Doing so in here will disrupt the ability
        to perform further actions on the RDFG without suspending again.

        :param array: the array serial number
        :param device_id: the source device id
        :param sg_name: storage group
        :param rdf_group: the rdf group number
        :param rep_extra_specs: replication extra specs
        :param state: the state of the rdf pair
        """
        LOG.info("Suspending RDF group %(rdf)s to delete source device "
                 "%(dev)s RDF pair.", {'rdf': rdf_group, 'dev': device_id})
        # Wait out an in-flight sync before touching the pair.
        if state.lower() == utils.RDF_SYNCINPROG_STATE:
            self.rest.wait_for_rdf_pair_sync(
                array, rdf_group, device_id, rep_extra_specs)
        # The group must be suspended before the pair can be deleted.
        if state.lower() != utils.RDF_SUSPENDED_STATE:
            self.rest.srdf_suspend_replication(
                array, sg_name, rdf_group, rep_extra_specs)
        self.rest.srdf_delete_device_pair(array, rdf_group, device_id)
def get_or_create_volume_group(self, array, group, extra_specs):
"""Get or create a volume group.
Sometimes it may be necessary to recreate a volume group on the
backend - for example, when the last member volume has been removed
from the group, but the cinder group object has not been deleted.
:param array: the array serial number
:param group: the group object
:param extra_specs: the extra specifications
:returns: group name
"""
vol_grp_name = self.utils.update_volume_group_name(group)
return self.get_or_create_group(array, vol_grp_name, extra_specs)
def get_or_create_group(self, array, group_name, extra_specs):
"""Get or create a generic volume group.
:param array: the array serial number
:param group_name: the group name
:param extra_specs: the extra specifications
:returns: group name
"""
storage_group = self.rest.get_storage_group(array, group_name)
if not storage_group:
self.create_volume_group(array, group_name, extra_specs)
return group_name
def create_volume_group(self, array, group_name, extra_specs):
"""Create a generic volume group.
:param array: the array serial number
:param group_name: the name of the group
:param extra_specs: the extra specifications
:returns: volume_group
"""
return self.create_storage_group(array, group_name,
None, None, None, extra_specs)
def create_group_replica(
self, array, source_group, snap_name, extra_specs):
"""Create a replica (snapVx) of a volume group.
:param array: the array serial number
:param source_group: the source group name
:param snap_name: the name for the snap shot
:param extra_specs: extra specifications
"""
LOG.debug("Creating Snap Vx snapshot of storage group: %(srcGroup)s.",
{'srcGroup': source_group})
# Create snapshot
self.rest.create_storagegroup_snap(
array, source_group, snap_name, extra_specs)
    def delete_group_replica(self, array, snap_name, source_group_name):
        """Delete a group snapshot (all of its snap ids).

        :param array: the array serial number
        :param snap_name: the name for the snap shot
        :param source_group_name: the source group name
        """
        LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s "
                  "snapshot: %(snap_name)s.",
                  {'srcGroup': source_group_name, 'snap_name': snap_name})
        snap_id_list = self.rest.get_storage_group_snap_id_list(
            array, source_group_name, snap_name)
        if snap_id_list:
            if not self.rest.is_snap_id:
                # NOTE(review): presumably generation-numbered snaps are
                # deleted highest-first so earlier generation numbers stay
                # stable -- confirm rest.is_snap_id semantics.
                snap_id_list.sort(reverse=True)
            for snap_id in snap_id_list:
                self.rest.delete_storagegroup_snap(
                    array, source_group_name, snap_name, snap_id,
                    force=True)
        else:
            LOG.debug("Unable to get snap ids for: %(srcGroup)s.",
                      {'srcGroup': source_group_name})
    def link_and_break_replica(self, array, source_group_name,
                               target_group_name, snap_name, extra_specs,
                               list_volume_pairs, delete_snapshot=False,
                               snap_id=None):
        """Links a group snap and breaks the relationship.

        :param array: the array serial
        :param source_group_name: the source group name
        :param target_group_name: the target group name
        :param snap_name: the snapshot name
        :param extra_specs: extra specifications
        :param list_volume_pairs: the list of (source, target) device pairs
        :param delete_snapshot: delete snapshot flag
        :param snap_id: the unique snapVx identifier
        """
        LOG.debug("Linking Snap Vx snapshot: source group: %(srcGroup)s "
                  "targetGroup: %(tgtGroup)s.",
                  {'srcGroup': source_group_name,
                   'tgtGroup': target_group_name})
        # Link the snapshot
        self.rest.modify_volume_snap(
            array, None, None, snap_name, extra_specs, snap_id=snap_id,
            link=True, list_volume_pairs=list_volume_pairs)
        # Unlink the snapshot
        LOG.debug("Unlinking Snap Vx snapshot: source group: %(srcGroup)s "
                  "targetGroup: %(tgtGroup)s.",
                  {'srcGroup': source_group_name,
                   'tgtGroup': target_group_name})
        self._unlink_volume(
            array, None, None, snap_name, extra_specs, snap_id=snap_id,
            list_volume_pairs=list_volume_pairs)
        # Delete the snapshot if necessary
        if delete_snapshot:
            LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s "
                      "snapshot: %(snap_name)s.",
                      {'srcGroup': source_group_name,
                       'snap_name': snap_name})
            # Only the source side of each pair owns the snapshot.
            source_devices = [a for a, b in list_volume_pairs]
            self.delete_volume_snap(array, snap_name, source_devices)
    def revert_volume_snapshot(self, array, source_device_id,
                               snap_name, snap_id, extra_specs):
        """Revert a volume snapshot.

        :param array: the array serial number
        :param source_device_id: device id of the source
        :param snap_name: snapvx snapshot name
        :param snap_id: the unique snap identifier
        :param extra_specs: the extra specifications
        :raises: VolumeBackendAPIException if the restore fails
        """
        start_time = time.time()
        try:
            self.rest.modify_volume_snap(
                array, source_device_id, "", snap_name, extra_specs,
                snap_id=snap_id, restore=True)
        except exception.VolumeBackendAPIException as ex:
            # A revert attempted while a linked copy is still in flight
            # gets a specific, user-actionable message.
            if utils.REVERT_SS_EXC in ex.message:
                exception_message = _(
                    "Link must be fully copied for this operation to proceed. "
                    "Please reset the volume state from error to available "
                    "and wait for awhile before attempting another "
                    "revert to snapshot operation. You may want to delete "
                    "the latest snapshot taken in this revert to snapshot "
                    "operation, as you will only be able to revert to the "
                    "last snapshot.")
            else:
                exception_message = (_(
                    "Revert to snapshot failed with exception "
                    "%(e)s.") % {'e': ex})
            raise exception.VolumeBackendAPIException(
                message=exception_message)
        LOG.debug("Restore volume snapshot took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(start_time,
                                                      time.time())})
|
{
"content_hash": "5be97c83d6108a5eb46f1b46b2bca9ae",
"timestamp": "",
"source": "github",
"line_count": 731,
"max_line_length": 79,
"avg_line_length": 43.224350205198355,
"alnum_prop": 0.567142450232617,
"repo_name": "openstack/cinder",
"id": "638fc4c1eecdfc3cbde86552b49c0d653e778b65",
"size": "32247",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/dell_emc/powermax/provision.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "259"
},
{
"name": "Mako",
"bytes": "976"
},
{
"name": "Python",
"bytes": "25078349"
},
{
"name": "Shell",
"bytes": "6456"
},
{
"name": "Smarty",
"bytes": "67595"
}
],
"symlink_target": ""
}
|
from collections import Counter
from typing import Dict, List, Optional, Union
from tensorflow.keras.callbacks import Callback as KerasCallback
from ray.air import session
from ray.train.tensorflow import TensorflowCheckpoint
from ray.util.annotations import PublicAPI
class _Callback(KerasCallback):
    """Base class for AIR's Keras callbacks.

    Subclasses implement ``_handle``; it is invoked for every Keras hook
    whose name (minus the ``on_`` prefix) appears in ``self._on``.
    """

    _allowed = [
        "batch_begin",
        "batch_end",
        "epoch_begin",
        "epoch_end",
        "train_batch_begin",
        "train_batch_end",
        "test_batch_begin",
        "test_batch_end",
        "predict_batch_begin",
        "predict_batch_end",
        "train_begin",
        "train_end",
        "test_begin",
        "test_end",
        "predict_begin",
        "predict_end",
    ]

    def __init__(self, on: Union[str, List[str]] = "validation_end"):
        super(_Callback, self).__init__()
        hooks = on if isinstance(on, list) else [on]
        if not all(hook in self._allowed for hook in hooks):
            raise ValueError(
                "Invalid trigger time selected: {}. Must be one of {}".format(
                    hooks, self._allowed
                )
            )
        self._on = hooks

    def _handle(self, logs: Dict, when: str):
        raise NotImplementedError

    def _dispatch(self, when: str, logs: Dict) -> None:
        """Forward *logs* to ``_handle`` iff hook *when* was requested."""
        if when in self._on:
            self._handle(logs, when)

    def on_batch_begin(self, batch, logs=None):
        self._dispatch("batch_begin", logs)

    def on_batch_end(self, batch, logs=None):
        self._dispatch("batch_end", logs)

    def on_epoch_begin(self, epoch, logs=None):
        self._dispatch("epoch_begin", logs)

    def on_epoch_end(self, epoch, logs=None):
        self._dispatch("epoch_end", logs)

    def on_train_batch_begin(self, batch, logs=None):
        self._dispatch("train_batch_begin", logs)

    def on_train_batch_end(self, batch, logs=None):
        self._dispatch("train_batch_end", logs)

    def on_test_batch_begin(self, batch, logs=None):
        self._dispatch("test_batch_begin", logs)

    def on_test_batch_end(self, batch, logs=None):
        self._dispatch("test_batch_end", logs)

    def on_predict_batch_begin(self, batch, logs=None):
        self._dispatch("predict_batch_begin", logs)

    def on_predict_batch_end(self, batch, logs=None):
        self._dispatch("predict_batch_end", logs)

    def on_train_begin(self, logs=None):
        self._dispatch("train_begin", logs)

    def on_train_end(self, logs=None):
        self._dispatch("train_end", logs)

    def on_test_begin(self, logs=None):
        self._dispatch("test_begin", logs)

    def on_test_end(self, logs=None):
        self._dispatch("test_end", logs)

    def on_predict_begin(self, logs=None):
        self._dispatch("predict_begin", logs)

    def on_predict_end(self, logs=None):
        self._dispatch("predict_end", logs)
@PublicAPI(stability="beta")
class Callback(_Callback):
    """
    Keras callback for Ray AIR reporting and checkpointing.

    You can use this in both TuneSession and TrainSession.

    Example:
        .. code-block:: python

            ############# Using it in TrainSession ###############
            from ray.air.integrations.keras import Callback

            def train_loop_per_worker():
                strategy = tf.distribute.MultiWorkerMirroredStrategy()
                with strategy.scope():
                    model = build_model()
                    # model.compile(...)
                model.fit(dataset_shard, callbacks=[Callback()])

    Args:
        metrics: Metrics to report. If this is a list, each item describes
            the metric key reported to Keras, and it will be reported under
            the same name. If this is a dict, each key will be the name
            reported and the respective value will be the metric key
            reported to Keras. If this is None, all Keras logs will be
            reported.
        on: When to report metrics. Must be one of
            the Keras event hooks (less the ``on_``), e.g.
            "train_begin", or "predict_end". Defaults to "epoch_end".
        frequency: Checkpoint frequency. If this is an integer `n`,
            checkpoints are saved every `n` times each hook was called. If
            this is a list, it specifies the checkpoint frequencies for each
            hook individually.
    """

    def __init__(
        self,
        metrics: Optional[Union[str, List[str], Dict[str, str]]] = None,
        on: Union[str, List[str]] = "epoch_end",
        frequency: Union[int, List[int]] = 1,
    ):
        if isinstance(frequency, list):
            # Per-hook frequencies must line up one-to-one with `on`.
            if not isinstance(on, list) or len(frequency) != len(on):
                raise ValueError(
                    "If you pass a list for checkpoint frequencies, the `on` "
                    "parameter has to be a list with the same length."
                )
        self._frequency = frequency
        super(Callback, self).__init__(on)
        self._metrics = metrics
        # Counts how often each hook has fired; drives checkpoint cadence.
        self._counter = Counter()

    def _handle(self, logs: Dict, when: Optional[str] = None):
        self._counter[when] += 1
        if isinstance(self._frequency, list):
            index = self._on.index(when)
            freq = self._frequency[index]
        else:
            freq = self._frequency
        checkpoint = None
        # freq == 0 disables checkpointing for this hook.
        if freq > 0 and self._counter[when] % freq == 0:
            checkpoint = TensorflowCheckpoint.from_model(self.model)
        if not self._metrics:
            report_dict = logs
        else:
            report_dict = {}
            for key in self._metrics:
                if isinstance(self._metrics, dict):
                    metric = self._metrics[key]
                else:
                    metric = key
                # NOTE(review): raises KeyError when a requested metric is
                # absent from the Keras `logs` dict -- confirm intended.
                report_dict[key] = logs[metric]
        session.report(report_dict, checkpoint=checkpoint)
|
{
"content_hash": "ed39ca75393d687aa92c568e8e52ef0b",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 78,
"avg_line_length": 33.518324607329845,
"alnum_prop": 0.563730084348641,
"repo_name": "ray-project/ray",
"id": "92759ac6fa91a6213df1a72b53a76925d7f29119",
"size": "6402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/air/integrations/keras.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
}
|
from thumbor.filters import BaseFilter, filter_method
from thumbor.ext.filters import _saturation
class Filter(BaseFilter):
    """Thumbor filter that adjusts the colour saturation of an image."""

    @filter_method(BaseFilter.DecimalNumber)
    def saturation(self, change):
        """Apply a saturation *change* to the engine's current image.

        :param change: decimal adjustment forwarded to the C extension
        """
        # Work on raw RGB pixel bytes; the compiled _saturation module
        # performs the per-pixel transformation.
        mode, data = self.engine.image_data_as_rgb()
        imgdata = _saturation.apply(mode, change, data)
        self.engine.set_image_data(imgdata)
|
{
"content_hash": "a5f05a7e5d002b031f0355c3c12990d1",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 55,
"avg_line_length": 35.8,
"alnum_prop": 0.723463687150838,
"repo_name": "gi11es/thumbor",
"id": "3e4acaed8b9e593c30cc36a8430c17d567889fe6",
"size": "610",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "thumbor/filters/saturation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "58039"
},
{
"name": "JavaScript",
"bytes": "409"
},
{
"name": "Makefile",
"bytes": "9946"
},
{
"name": "Python",
"bytes": "557993"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext_lazy as _
from django.views.generic import (View, ListView, DetailView, CreateView,
UpdateView, DeleteView)
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.http import HttpResponse
from manifest.accounts.views import LoginRequiredMixin
from filizver.topic.models import Topic
from filizver.entry.models import Entry
from filizver.topic.forms import TopicForm
from filizver.entry.forms import EntryForm
from filizver.branch.forms import BranchForm
from filizver.entry.plugins import EntryPoint
class EntryList(ListView):
    """List all entries, with related objects pre-fetched."""
    queryset = Entry.objects.select_related().all()
    template_name = "entry/_entry_list.html"
class EntryDetail(ListView):
    """Entry detail view.

    NOTE(review): identical to EntryList (same base class, queryset and
    list template) -- looks like a copy-paste placeholder; confirm whether
    a DetailView with a detail template was intended.
    """
    queryset = Entry.objects.select_related().all()
    template_name = "entry/_entry_list.html"
class EntryCreate(CreateView, LoginRequiredMixin):
    """Create an entry through the plugin named in the POST data.

    NOTE(review): mixins are conventionally listed before the view class
    (``LoginRequiredMixin, CreateView``) so their dispatch override runs
    -- verify the login check actually takes effect in this order.
    """
    template_name = "entry/_entry_create.html"

    def post(self, request, *args, **kwargs):
        # Resolve the plugin class from the submitted 'plugin' key, let it
        # build the entry, then redirect back to the entry's topic.
        plugin = EntryPoint.get_model(request.POST.get('plugin')).get_plugin()
        entry = plugin.create(request)
        return redirect(entry.topic)
class EntryDelete(DeleteView, LoginRequiredMixin):
    """Delete an entry and return to its topic page.

    NOTE(review): as in EntryCreate, the mixin is listed after the view
    class -- verify the login requirement is enforced.
    """
    model = Entry

    def get_success_url(self):
        # After deletion, go back to the topic the entry belonged to.
        return self.object.topic.get_absolute_url()
class EntrySort(DetailView, UpdateView, LoginRequiredMixin):
    """Persist a drag-and-drop ordering of a topic's entries.

    Expects POST data ``entry[]`` as an ordered list of entry pks.
    """
    model = Topic
    template_name = 'topic/_topic_entries.html'
    # NOTE(review): class-level mutable 'context' appears unused in this
    # view -- confirm before removing.
    context = {}

    def post(self, request, *args, **kwargs):
        # Stored positions are 1-based.
        for position, pk in enumerate(request.POST.getlist('entry[]')):
            Entry.objects.filter(pk=pk).update(position=position+1)
        return super(EntrySort, self).post(request, *args, **kwargs)
|
{
"content_hash": "5f74958126f4c3b7036ca0b0273e303f",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 78,
"avg_line_length": 31.464285714285715,
"alnum_prop": 0.720771850170261,
"repo_name": "ozgurgunes/django-filizver",
"id": "c636265afcfa90f97638d659828c517765064701",
"size": "1786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filizver/_apps/entry/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "216424"
},
{
"name": "Python",
"bytes": "136231"
},
{
"name": "Shell",
"bytes": "5100"
}
],
"symlink_target": ""
}
|
import logging, os, sys, tempfile, shutil
def findInPath(fileName, path=os.environ['PATH']):
    """Return the full path of fileName found on path, or None.

    On Windows/cygwin an ``.exe`` suffix is also tried in each directory
    before moving to the next one.

    :param fileName: bare file name to look for
    :param path: ``os.pathsep``-separated search path; defaults to the
        process PATH as captured at import time
    :returns: path of the first match, or None when not found
    """
    # NOTE: the default is evaluated once at import time; callers that
    # change os.environ['PATH'] afterwards must pass `path` explicitly.
    candidates = [fileName]
    if sys.platform in ('cygwin', 'win32'):
        candidates.append(fileName + ".exe")
    for directory in path.split(os.pathsep):
        for name in candidates:
            candidate = os.path.join(directory, name)
            if os.path.isfile(candidate):
                return candidate
    return None
# ---- Logging ----
CONSOLE_LOG_LEVEL = logging.INFO
FILE_LOG_LEVEL = logging.INFO

# ---- Test-run defaults (overridden by command-line options) ----
RUN_TEST = None
LOAD_TEST = None
SERVER_HTTP_PORT = 4444
PLATFORM = sys.platform
# Root of the windmill package (parent of this conf/ directory).
WINDMILL_PATH = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
JS_PATH = os.path.join(WINDMILL_PATH, 'html')
SAVES_PATH = None
EXTENSIONS_DIR = None
DISABLE_JS_COMPRESS = False
TEST_URL = 'http://tutorial.getwindmill.com/'
FORWARDING_TEST_URL = None
USECODE = False
EXIT_ON_DONE = False
CONTINUE_ON_FAILURE = False
ENABLE_PDB = False
BROWSER_DEBUGGING = False

# ---- Which browsers to launch ----
START_FIREFOX = False
START_IE = False
START_SAFARI = False
START_CHROME = False

# ---- JavaScript test discovery ----
JAVASCRIPT_TEST_DIR = None
JAVASCRIPT_TEST_FILTER = None
JAVASCRIPT_TEST_PHASE = None
SCRIPT_APPEND_ONLY = False

# Browser prefs
# MOZILLA_COMMAND = None
INSTALL_FIREBUG = None
SAFARI_BINARY = None
SAFARI_COMMAND = None

# Mozilla prefs
# MOZILLA_CREATE_NEW_PROFILE = True
#
# MOZILLA_PROFILE_PATH = tempfile.mkdtemp(suffix='.windmill')
# Platform-specific browser binaries and network tooling.
if PLATFORM == 'darwin':
    NETWORK_INTERFACE_NAME = None
    # firefoxApp = os.path.join('Applications', 'Firefox.app')
    # firefoxDir = os.path.join(os.path.expanduser('~/'), firefoxApp)
    #
    # if not os.path.isdir(firefoxDir):
    #     firefoxDir = os.path.join('/', firefoxApp)
    #
    # MOZILLA_DEFAULT_PROFILE = os.path.join(firefoxDir, 'Contents', 'MacOS', 'defaults', 'profile')
    # MOZILLA_BINARY = os.path.join(firefoxDir, 'Contents', 'MacOS', 'firefox-bin')
    SAFARI_BINARY = '/Applications/Safari.app/Contents/MacOS/Safari'
    CHROME_BINARY = '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
    # NOTE: 'commands' is Python 2 only -- this module assumes a py2
    # runtime (see the print statements in the __main__ guard below).
    import distutils.version, commands
    OS_VERSION = distutils.version.StrictVersion(commands.getoutput('sw_vers -productVersion'))
    leopard = distutils.version.StrictVersion('10.5')
    # TODO: Consider using "/usr/bin/env networksetup"
    if OS_VERSION >= leopard:
        NETWORKSETUP_BINARY = '/usr/sbin/networksetup'
    else:
        # Pre-Leopard ships networksetup inside the ARDAgent bundle.
        networksetup_base = '/System/Library/CoreServices/RemoteManagement/ARDAgent.app/Contents/Support/'
        if os.path.isfile(os.path.join(networksetup_base, 'networksetup')):
            NETWORKSETUP_BINARY = os.path.join(networksetup_base, 'networksetup')
        elif os.path.isfile(os.path.join(networksetup_base, 'networksetup-panther')):
            NETWORKSETUP_BINARY = os.path.join(networksetup_base, 'networksetup-panther')
elif PLATFORM == 'linux2':
    #Get Chrome bin for linux
    chromebin = findInPath('google-chrome')
    if chromebin and os.path.isfile(chromebin):
        CHROME_BINARY=chromebin
    # firefoxBin = findInPath('firefox')
    #
    # if firefoxBin is not None and os.path.isfile(firefoxBin):
    #     MOZILLA_BINARY = firefoxBin
    #
    # for path in ('/usr/lib/iceweasel/defaults/profile',
    #              '/usr/share/firefox/defaults/profile',
    #              '/usr/lib/mozilla-firefox/defaults/profile',):
    #     if os.path.isdir(path):
    #         MOZILLA_DEFAULT_PROFILE = path
elif PLATFORM in ('cygwin', 'win32'):
    if sys.platform == 'cygwin':
        program_files = os.environ['PROGRAMFILES']
    else:
        program_files = os.environ['ProgramFiles']
    IE_BINARY = os.path.join(program_files, 'Internet Explorer', 'iexplore.exe')
    if os.path.isfile(os.path.join(program_files, 'Safari', 'Safari.exe')):
        SAFARI_BINARY = os.path.join(program_files, 'Safari', 'Safari.exe')
    if os.path.isfile(os.path.join(os.environ['USERPROFILE'], 'Local Settings', 'Application Data', 'Google', 'Chrome', 'Application', 'chrome.exe')):
        CHROME_BINARY = os.path.join(os.environ['USERPROFILE'], 'Local Settings', 'Application Data', 'Google', 'Chrome', 'Application', 'chrome.exe')
    # firefoxBin = findInPath('firefox')
    #
    # if firefoxBin is None:
    #     try:
    #         firefoxBin = os.path.join(os.environ['ProgramFiles'], 'Mozilla Firefox', 'firefox.exe')
    #     except:
    #         firefoxBin = None
    #
    # if firefoxBin is not None and os.path.isfile(firefoxBin):
    #     firefoxDir = os.path.dirname(firefoxBin)
    #
    #     MOZILLA_BINARY = firefoxBin
    #     MOZILLA_DEFAULT_PROFILE = os.path.join(firefoxDir, 'defaults', 'profile')
if __name__ == '__main__':
if '--test' in sys.argv:
print 'running on ', PLATFORM
print 'we are at ', WINDMILL_PATH
print 'our JS is at ', JS_PATH
print 'firefox is at ', MOZILLA_BINARY
print 'default profile is at', MOZILLA_DEFAULT_PROFILE
|
{
"content_hash": "ae0e2a049788b945dc30b533003f11a1",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 150,
"avg_line_length": 37.84444444444444,
"alnum_prop": 0.63671951458211,
"repo_name": "windmill/windmill",
"id": "8f425b4bd64e7d852bc8258337d26ccabb0bc402",
"size": "5806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "windmill/conf/global_settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "76128"
},
{
"name": "CSS",
"bytes": "113500"
},
{
"name": "HTML",
"bytes": "226277"
},
{
"name": "JavaScript",
"bytes": "1065858"
},
{
"name": "Makefile",
"bytes": "2367"
},
{
"name": "PHP",
"bytes": "4708"
},
{
"name": "Python",
"bytes": "575202"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
}
|
from .model import Swagger
from .visitor import OpenAPIResolveVisitor
from ...parser import Parser
class OpenAPIParser(Parser):
    """Parser that turns an OpenAPI v3 schema into a Swagger component."""

    @staticmethod
    def parse(schema):
        """Unmarshal *schema* and resolve its internal references."""
        swagger = Swagger.unmarshal(schema)
        swagger.accept(OpenAPIResolveVisitor(schema))
        return swagger
|
{
"content_hash": "a0e13c620762f4eeaea89fb922fc2e38",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 55,
"avg_line_length": 24.916666666666668,
"alnum_prop": 0.7357859531772575,
"repo_name": "pennsignals/aptos",
"id": "2257a982d9c3c675be31beee253f11dfed7d01dd",
"size": "299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aptos/swagger/v3/parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "52368"
}
],
"symlink_target": ""
}
|
from sqlalchemy import Column, Table, ForeignKey, types
from sqlalchemy.ext.mutable import Mutable
#from werkzeug import generate_password_hash, check_password_hash
from geoalchemy2 import Geometry
from flask.ext.login import UserMixin
from .constants import *
from ..extensions import db
from ..utils import get_current_time, SEX_TYPE, STRING_LEN
'''
Author, location, datetime, status, expiration date,
keywords (classification, food, labor, commute), short text,
long HTML allowing pictures (stored the way a BBS post is stored), and so on.
-> bidders: many-to-many
-> conversations: one-to-many
-> deal: one-to-one
messages: all others
'''
class Chat(db.Model):
    """A conversation thread attached to a post.

    Links the post's author (poster) with a second user (replier);
    individual messages reference the chat via ``Message.in_chat``.
    """
    id = Column(db.Integer, primary_key=True)
    # FK to the post this conversation is about.
    post = Column(db.Integer, db.ForeignKey('posts.id'))
    poster = Column(db.Integer, db.ForeignKey('users.id'))
    replier = Column(db.Integer, db.ForeignKey('users.id'))
class Message(db.Model):
    """A single message belonging to a Chat."""
    id = Column(db.Integer, primary_key=True)
    in_chat = Column(db.Integer, db.ForeignKey('chat.id'))
    # NOTE(review): 'content' is declared as an integer FK to users.id,
    # which looks like a copy-paste slip -- a message-body column was
    # probably intended.  Confirm against callers before changing.
    content = Column(db.Integer, db.ForeignKey('users.id'))
|
{
"content_hash": "3751166e01ffdfebc0e72540ca7df325",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 65,
"avg_line_length": 33.029411764705884,
"alnum_prop": 0.7257346393588602,
"repo_name": "wandonye/vshare",
"id": "682a9fc26dc3f920697094870a6e4e392b5b21b8",
"size": "1154",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/lib/vshare/chat/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18470"
},
{
"name": "HTML",
"bytes": "59212"
},
{
"name": "JavaScript",
"bytes": "2108"
},
{
"name": "Python",
"bytes": "143266"
}
],
"symlink_target": ""
}
|
import logging, os, shutil, tarfile
from app.modules.common.pipeline import *
from app.modules.common.task import *
from app.modules.common.errors import *
from app.modules.common.network import *
log = logging.getLogger(__name__)
class DeployTomcatTask(Task):
    '''Deploy a Tomcat instance and an application into it.

    Required context keys:
        tomcat_source_path   -- path to the tomcat source archive
        tomcat_deploy_path   -- target directory for the deployment
        tomcat_http_port     -- HTTP port for the new instance
        tomcat_debug_port    -- remote-debug port
        tomcat_shotdwon_port -- shutdown port (key name kept as-is)
        app_source_path      -- path to the application to deploy
        appname              -- application name (used in messages)
    '''

    def __init__(self, context=None):
        # Avoid the shared mutable-default pitfall while preserving the
        # previous behaviour of defaulting to an empty context.
        Task.__init__(self, 'DeployTomcatTask',
                      {} if context is None else context)

    def __do_execute__(self, task_input):
        """Validate context paths/ports and stage the tomcat copy.

        :raises TaskExcecuteError: when a source path is missing or a
            required port is unavailable.
        """
        # Explicit import: 'time' was previously only available through a
        # wildcard import from app.modules.common.*.
        import time

        log.info("DeployTomcatTask:Entered execute()")
        tomcat_source_path = self.context['tomcat_source_path']
        tomcat_deploy_path = self.context['tomcat_deploy_path']
        tomcat_http_port = self.context['tomcat_http_port']
        tomcat_debug_port = self.context['tomcat_debug_port']
        tomcat_shotdwon_port = self.context['tomcat_shotdwon_port']
        app_source_path = self.context['app_source_path']
        appname = self.context['appname']

        if not os.path.exists(tomcat_source_path):
            raise TaskExcecuteError(
                '', 'not found tomcat source path[%s].' % tomcat_source_path)
        if not os.path.exists(app_source_path):
            raise TaskExcecuteError(
                '', 'not found app[%s] source path[%s].'
                    % (appname, app_source_path))
        if not os.path.exists(tomcat_deploy_path):
            os.makedirs(tomcat_deploy_path)

        # NOTE(review): is_open() returning False is treated as "port
        # already used" -- the condition and the message read
        # contradictorily; confirm the helper's semantics.
        # (%s formatting also tolerates integer port values, which the
        # old '+' string concatenation did not.)
        if not is_open('127.0.0.1', tomcat_http_port):
            raise TaskExcecuteError(
                '', 'port[%s] already used.' % tomcat_http_port)
        if not is_open('127.0.0.1', tomcat_shotdwon_port):
            raise TaskExcecuteError(
                '', 'port[%s] already used.' % tomcat_shotdwon_port)
        if not is_open('127.0.0.1', tomcat_debug_port):
            raise TaskExcecuteError(
                '', 'port[%s] already used.' % tomcat_debug_port)

        # NOTE(review): this copies the archive onto the deploy path but
        # never extracts it; the bare `tarfile` expression that used to
        # sit here was a no-op leftover and has been removed.
        shutil.copyfile(tomcat_source_path, tomcat_deploy_path)
        time.sleep(5)
        log.info("DeployTomcatTask:Exit execute()")
|
{
"content_hash": "c5dc8a9978c23132ee19b4acc3ff9274",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 107,
"avg_line_length": 34.25,
"alnum_prop": 0.629683698296837,
"repo_name": "ssls/beetle-agent",
"id": "d232619abcc4e17bb657b58e9b0add94516bce93",
"size": "2071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/modules/apps/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14909"
},
{
"name": "Mako",
"bytes": "1637"
},
{
"name": "Python",
"bytes": "234058"
}
],
"symlink_target": ""
}
|
"""Support for Clementine Music Player as media player."""
from datetime import timedelta
import logging
import time
from clementineremote import ClementineRemote
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_HOST,
CONF_NAME,
CONF_PORT,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

DEFAULT_NAME = "Clementine Remote"
DEFAULT_PORT = 5500

# Poll the player state every 5 seconds.
SCAN_INTERVAL = timedelta(seconds=5)

# Feature bitmask advertised by ClementineDevice.
SUPPORT_CLEMENTINE = (
    SUPPORT_PAUSE
    | SUPPORT_VOLUME_STEP
    | SUPPORT_PREVIOUS_TRACK
    | SUPPORT_VOLUME_SET
    | SUPPORT_NEXT_TRACK
    | SUPPORT_SELECT_SOURCE
    | SUPPORT_PLAY
)

# Platform configuration: host is mandatory; token/name/port optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_ACCESS_TOKEN): cv.positive_int,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Clementine platform."""
    remote = ClementineRemote(
        config.get(CONF_HOST),
        config.get(CONF_PORT),
        config.get(CONF_ACCESS_TOKEN),
        reconnect=True,
    )
    add_entities([ClementineDevice(remote, config[CONF_NAME])])
class ClementineDevice(MediaPlayerDevice):
    """Representation of Clementine Player.

    State is polled from a ClementineRemote client in update(); commands
    are forwarded to the same client. Clementine reports volume on a
    0-100 scale; Home Assistant uses 0..1.
    """

    def __init__(self, client, name):
        """Initialize the Clementine device.

        client: connected ClementineRemote instance.
        name: friendly name for the entity.
        """
        self._client = client
        self._name = name
        self._muted = False
        self._volume = 0.0
        # Volume (0-100, Clementine's scale) to restore when unmuting.
        self._unmuted_volume = 0.0
        self._track_id = 0
        self._last_track_id = 0
        self._track_name = ""
        self._track_artist = ""
        self._track_album_name = ""
        self._state = None

    def update(self):
        """Retrieve the latest data from the Clementine Player."""
        try:
            client = self._client

            if client.state == "Playing":
                self._state = STATE_PLAYING
            elif client.state == "Paused":
                self._state = STATE_PAUSED
            elif client.state == "Disconnected":
                self._state = STATE_OFF
            else:
                self._state = STATE_PAUSED

            # No message from the player for over 40 seconds: consider it off.
            if client.last_update and (time.time() - client.last_update > 40):
                self._state = STATE_OFF

            self._volume = float(client.volume) if client.volume else 0.0

            if client.current_track:
                self._track_id = client.current_track["track_id"]
                self._track_name = client.current_track["title"]
                self._track_artist = client.current_track["track_artist"]
                self._track_album_name = client.current_track["track_album"]
        except Exception:
            # Mark the entity off, but re-raise so the failure is logged upstream.
            self._state = STATE_OFF
            raise

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        return self._volume / 100.0

    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return self._muted

    @property
    def source(self):
        """Return current source name."""
        source_name = "Unknown"
        client = self._client
        if client.active_playlist_id in client.playlists:
            source_name = client.playlists[client.active_playlist_id]["name"]
        return source_name

    @property
    def source_list(self):
        """List of available input sources."""
        source_names = [s["name"] for s in self._client.playlists.values()]
        return source_names

    def select_source(self, source):
        """Select input source."""
        client = self._client
        sources = [s for s in client.playlists.values() if s["name"] == source]
        # Only switch when the name identifies exactly one playlist.
        if len(sources) == 1:
            client.change_song(sources[0]["id"], 0)

    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return MEDIA_TYPE_MUSIC

    @property
    def media_title(self):
        """Title of current playing media."""
        return self._track_name

    @property
    def media_artist(self):
        """Artist of current playing media, music track only."""
        return self._track_artist

    @property
    def media_album_name(self):
        """Album name of current playing media, music track only."""
        return self._track_album_name

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_CLEMENTINE

    @property
    def media_image_hash(self):
        """Hash value for media image (the current track id)."""
        if self._client.current_track:
            return self._client.current_track["track_id"]
        return None

    async def async_get_media_image(self):
        """Fetch media image of current playing image."""
        if self._client.current_track:
            image = bytes(self._client.current_track["art"])
            return (image, "image/png")
        return None, None

    def volume_up(self):
        """Volume up the media player (one step = 4 of 100)."""
        newvolume = min(self._client.volume + 4, 100)
        self._client.set_volume(newvolume)

    def volume_down(self):
        """Volume down media player (one step = 4 of 100)."""
        newvolume = max(self._client.volume - 4, 0)
        self._client.set_volume(newvolume)

    def mute_volume(self, mute):
        """Mute or unmute, restoring the previous volume on unmute.

        Fixes the previous behavior that ignored the *mute* flag and
        always set the volume to 0 with no way back.
        """
        if mute:
            # Remember the current level so unmuting can restore it.
            self._unmuted_volume = self._volume
            self._client.set_volume(0)
        else:
            self._client.set_volume(int(self._unmuted_volume))
        self._muted = mute

    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        self._client.set_volume(int(100 * volume))

    def media_play_pause(self):
        """Simulate play pause media player."""
        if self._state == STATE_PLAYING:
            self.media_pause()
        else:
            self.media_play()

    def media_play(self):
        """Send play command."""
        self._state = STATE_PLAYING
        self._client.play()

    def media_pause(self):
        """Send media pause command to media player."""
        self._state = STATE_PAUSED
        self._client.pause()

    def media_next_track(self):
        """Send next track command."""
        self._client.next()

    def media_previous_track(self):
        """Send the previous track command."""
        self._client.previous()
|
{
"content_hash": "78dea083da2ad84455041973ffda3a7c",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 84,
"avg_line_length": 28.858974358974358,
"alnum_prop": 0.6035835924774174,
"repo_name": "Teagan42/home-assistant",
"id": "9e05b831359fdd8be79d75fe5ebd02b2205bec02",
"size": "6753",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/clementine/media_player.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from tests.test_mixin.models import Account, Contact, Proxy2Contact
from salesforce.backend.test_helpers import current_user, uid_version as uid
def refresh(obj):
    """Re-fetch *obj* by primary key from the database it was loaded from."""
    model = type(obj)
    return model.objects.using(obj._state.db).get(pk=obj.pk)
class MixinTest(TestCase):
    """Checks for abstract-mixin inheritance and proxy models."""

    databases = '__all__'

    def test_mixin(self):
        """Test that mixins from abstract classes work and also proxy models."""
        # One field comes from each ancestor in the abstract mixin chain.
        account = Account(name='sf_test account' + uid, description='experimental')
        account.save()
        account = refresh(account)
        contact = Contact(first_name='sf_test', last_name='my', account=account)
        contact.save()
        try:
            contact = refresh(contact)
            # Foreign keys must resolve both to and from the mixed-in model.
            self.assertEqual(contact.account.owner.username, current_user)
            matches = Contact.objects.filter(account__name='sf_test account' + uid)
            self.assertGreaterEqual(len(matches), 1)
            repr(contact.__dict__)
            repr(account.__dict__)
            # A proxy model two levels deep must pick up the concrete db_table.
            self.assertEqual(Proxy2Contact.objects.get(pk=contact.pk).pk, contact.pk)
        finally:
            contact.delete()
            account.delete()
|
{
"content_hash": "5c41d9f51caa0f9d2a8104fad99c7cca",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 115,
"avg_line_length": 43.44736842105263,
"alnum_prop": 0.6535433070866141,
"repo_name": "hynekcer/django-salesforce",
"id": "849ddaa077b7bd4622e582603175b6d412c23389",
"size": "1651",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/test_mixin/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1161"
},
{
"name": "Python",
"bytes": "507498"
},
{
"name": "Shell",
"bytes": "8401"
}
],
"symlink_target": ""
}
|
"""completed bool on logs
Revision ID: 1455d9c95734
Revises: 1ecc5d85f815
Create Date: 2015-09-25 13:34:37.684253
"""
# revision identifiers, used by Alembic.
revision = '1455d9c95734'
down_revision = '1ecc5d85f815'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply: add logs.completed (indexed) and school FKs on staff tables."""
    ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): the foreign keys are created with name=None, so the backend
    # assigns auto-generated names -- see the matching drop_constraint(None, ...)
    # problem flagged in downgrade().
    op.create_foreign_key(None, 'administrators', 'schools', ['school_id'], ['id'])
    op.create_foreign_key(None, 'coaches', 'schools', ['school_id'], ['id'])
    op.add_column('logs', sa.Column('completed', sa.Boolean(), nullable=True))
    op.create_index(op.f('ix_logs_completed'), 'logs', ['completed'], unique=False)
    op.create_foreign_key(None, 'teachers', 'schools', ['school_id'], ['id'])
    ### end Alembic commands ###
def downgrade():
    """Revert: drop logs.completed, its index, and the school foreign keys."""
    ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): drop_constraint with name=None raises on most backends;
    # the auto-generated constraint names likely need to be filled in here
    # before this downgrade can actually run -- confirm against the target DB.
    op.drop_constraint(None, 'teachers', type_='foreignkey')
    op.drop_index(op.f('ix_logs_completed'), table_name='logs')
    op.drop_column('logs', 'completed')
    op.drop_constraint(None, 'coaches', type_='foreignkey')
    op.drop_constraint(None, 'administrators', type_='foreignkey')
    ### end Alembic commands ###
|
{
"content_hash": "b25420525b0e33f3fa8a6865d6ffc2bf",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 83,
"avg_line_length": 35.029411764705884,
"alnum_prop": 0.670864819479429,
"repo_name": "jeffthemaximum/jeffPD",
"id": "594cb70d670b22d35463d042436490029ff3030e",
"size": "1191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/1455d9c95734_completed_bool_on_logs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "307884"
},
{
"name": "HTML",
"bytes": "54277"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "74812"
}
],
"symlink_target": ""
}
|
"""Tests for py_builtins module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from tensorflow.python.autograph.operators import data_structures
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class PyBuiltinsTest(test.TestCase):
  """Checks autograph's builtin overloads on both plain Python and tensor args."""
  def test_abs(self):
    # Plain Python value falls through to the normal builtin behavior.
    self.assertEqual(py_builtins.abs_(-1), 1)
    with self.test_session() as sess:
      t = py_builtins.abs_(constant_op.constant(-1))
      self.assertEqual(sess.run(t), 1)
      t = py_builtins.abs_(constant_op.constant([-1, 2, -3]))
      self.assertAllEqual(sess.run(t), [1, 2, 3])
  def test_float(self):
    self.assertEqual(py_builtins.float_(10), 10.0)
    self.assertEqual(py_builtins.float_('10.0'), 10.0)
    with self.test_session() as sess:
      t = py_builtins.float_(constant_op.constant(1, dtype=dtypes.int64))
      self.assertEqual(sess.run(t), 1.0)
      # String tensors are parsed, not just cast.
      st = py_builtins.float_(constant_op.constant('1.0'))
      self.assertEqual(sess.run(st), 1.0)
  def test_int(self):
    self.assertEqual(py_builtins.int_(10.0), 10)
    self.assertEqual(py_builtins.int_('11', 2), 3)
    with self.test_session() as sess:
      t = py_builtins.int_(constant_op.constant(1, dtype=dtypes.float64))
      self.assertEqual(sess.run(t), 1)
      st = py_builtins.int_(constant_op.constant('1'))
      self.assertEqual(sess.run(st), 1)
      # Base 10 is accepted for string tensors.
      st = py_builtins.int_(constant_op.constant('1'), 10)
      self.assertEqual(sess.run(st), 1)
  def test_int_unsupported_base(self):
    # Non-decimal bases are only supported for plain Python strings.
    t = constant_op.constant(1, dtype=dtypes.float64)
    with self.assertRaises(NotImplementedError):
      py_builtins.int_(t, 2)
  def test_len(self):
    self.assertEqual(py_builtins.len_([1, 2, 3]), 3)
    with self.test_session() as sess:
      t = py_builtins.len_(constant_op.constant([[1], [2], [3]]))
      self.assertEqual(t, 3)
      ta = py_builtins.len_(tensor_array_ops.TensorArray(dtypes.int32, size=5))
      self.assertEqual(sess.run(ta), 5)
      tl = py_builtins.len_(data_structures.tf_tensor_list_new([3, 4, 5]))
      self.assertEqual(sess.run(tl), 3)
  def test_len_scalar(self):
    # len() of a rank-0 tensor is rejected, mirroring Python semantics.
    with self.assertRaises(ValueError):
      py_builtins.len_(constant_op.constant(1))
  def test_len_dynamic_shape(self):
    with self.test_session() as sess:
      p = array_ops.placeholder(dtype=dtypes.int32, shape=None)
      t = py_builtins.len_(p)
      self.assertEqual(sess.run(t, {p: [1, 2, 3]}), 3)
      # Feeding a scalar is only detectable at run time with unknown shape.
      with self.assertRaises(errors_impl.InvalidArgumentError):
        t = py_builtins.len_(p)
        sess.run(t, {p: 1})
  def test_print_tensors(self):
    try:
      # Capture stdout so the print op's side effect can be asserted.
      out_capturer = six.StringIO()
      sys.stdout = out_capturer
      with self.test_session() as sess:
        sess.run(py_builtins.print_(constant_op.constant('test message'), 1))
      self.assertEqual(out_capturer.getvalue(), 'test message 1\n')
    finally:
      sys.stdout = sys.__stdout__
  def test_print_complex(self):
    try:
      out_capturer = six.StringIO()
      sys.stdout = out_capturer
      with self.test_session() as sess:
        sess.run(
            py_builtins.print_(constant_op.constant('test message'), [1, 2]))
      self.assertEqual(out_capturer.getvalue(), 'test message [1, 2]\n')
    finally:
      sys.stdout = sys.__stdout__
  def test_range(self):
    self.assertListEqual(list(py_builtins.range_(3)), [0, 1, 2])
    self.assertListEqual(list(py_builtins.range_(1, 3)), [1, 2])
    self.assertListEqual(list(py_builtins.range_(2, 0, -1)), [2, 1])
  def test_range_tensor(self):
    # Any tensor argument switches range_ to producing a tensor result.
    with self.test_session() as sess:
      r = py_builtins.range_(constant_op.constant(3))
      self.assertAllEqual(sess.run(r), [0, 1, 2])
      r = py_builtins.range_(1, constant_op.constant(3))
      self.assertAllEqual(sess.run(r), [1, 2])
      r = py_builtins.range_(2, 0, constant_op.constant(-1))
      self.assertAllEqual(sess.run(r), [2, 1])
if __name__ == '__main__':
  test.main()  # run this module's tests via the TF test runner
|
{
"content_hash": "4a5ffe11d696699426d54e3a7023f60b",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 79,
"avg_line_length": 36.24786324786325,
"alnum_prop": 0.6571563310539967,
"repo_name": "AnishShah/tensorflow",
"id": "a021263ffa8ae29dd04527823b2e3f372acc2f4d",
"size": "4930",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/autograph/operators/py_builtins_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "337393"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "48452986"
},
{
"name": "CMake",
"bytes": "195768"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1210238"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "834103"
},
{
"name": "Jupyter Notebook",
"bytes": "2584246"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52618"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40782103"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "458367"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
import datetime
from flask import Markup, url_for
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, Date, Text
from sqlalchemy.orm import relationship
from app import db
from flask.ext.appbuilder.models.mixins import AuditMixin, BaseMixin, FileColumn, ImageColumn
from flask.ext.appbuilder.filemanager import ImageManager
from flask.ext.appbuilder import Model
class PersonGroup(Model):
    """A named group that persons can belong to (see Person.person_group)."""
    id = Column(Integer, primary_key=True)
    name = Column(String(50), unique = True, nullable=False)
    address = Column(String(264))
    phone1 = Column(String(20))
    phone2 = Column(String(20))
    taxid = Column(Integer)
    notes = Column(Text())
    def __repr__(self):
        # Rendered wherever the group is shown as a string (e.g. select widgets).
        return self.name
class Person(Model):
    """A contact with personal/business details, optionally in a PersonGroup."""

    id = Column(Integer, primary_key=True)
    name = Column(String(150), unique=True, nullable=False)
    address = Column(String(564))
    birthday = Column(Date)
    # Image column configured with a 30x30 thumbnail and a 300x300 full size.
    photo = Column(ImageColumn(thumbnail_size=(30, 30, True), size=(300, 300, True)))
    personal_phone = Column(String(20))
    personal_celphone = Column(String(20))
    personal_email = Column(String(64))
    notes = Column(Text())
    business_function = Column(String(64))
    business_phone = Column(String(20))
    business_celphone = Column(String(20))
    business_email = Column(String(64))
    person_group_id = Column(Integer, ForeignKey('person_group.id'))
    person_group = relationship("PersonGroup")

    def _photo_anchor(self, src, img_class):
        """Build the <a><img></a> markup linking to this person's detail view.

        src: image URL ('//:0' renders an empty placeholder).
        img_class: CSS classes applied to the <img> element.
        """
        return Markup('<a href="' + url_for('PersonModelView.show', pk=str(self.id)) +
                      '" class="thumbnail"><img src="' + src +
                      '" alt="Photo" class="' + img_class + '"></a>')

    def photo_img(self):
        """Markup for the full-size photo, or an empty placeholder if unset."""
        if self.photo:
            return self._photo_anchor(ImageManager().get_url(self.photo),
                                      'img-rounded img-responsive')
        return self._photo_anchor('//:0', 'img-responsive')

    def photo_img_thumbnail(self):
        """Markup for the photo thumbnail, or an empty placeholder if unset."""
        if self.photo:
            return self._photo_anchor(ImageManager().get_url_thumbnail(self.photo),
                                      'img-rounded img-responsive')
        return self._photo_anchor('//:0', 'img-responsive')
|
{
"content_hash": "7800e3fd2d8c6d8f4ca74941d2354ef4",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 104,
"avg_line_length": 42.61666666666667,
"alnum_prop": 0.6171294485725459,
"repo_name": "zhounanshu/Flask-AppBuilder",
"id": "c713a0dd50451f5ef64629935f11aa89cb4a2227",
"size": "2557",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/quickimages/app/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "47287"
},
{
"name": "HTML",
"bytes": "79464"
},
{
"name": "JavaScript",
"bytes": "159467"
},
{
"name": "Python",
"bytes": "434821"
},
{
"name": "Shell",
"bytes": "627"
}
],
"symlink_target": ""
}
|
"""
HVAC channels module for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/integrations/zha/
"""
from __future__ import annotations
import asyncio
from collections import namedtuple
from typing import Any
from zigpy.exceptions import ZigbeeException
from zigpy.zcl.clusters import hvac
from zigpy.zcl.foundation import Status
from homeassistant.core import callback
from .. import registries, typing as zha_typing
from ..const import (
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_OP,
SIGNAL_ATTR_UPDATED,
)
from ..helpers import retryable_req
from .base import ZigbeeChannel
AttributeUpdateRecord = namedtuple("AttributeUpdateRecord", "attr_id, attr_name, value")
REPORT_CONFIG_CLIMATE = (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 25)
REPORT_CONFIG_CLIMATE_DEMAND = (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 5)
REPORT_CONFIG_CLIMATE_DISCRETE = (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 1)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(hvac.Dehumidification.cluster_id)
class Dehumidification(ZigbeeChannel):
    """Dehumidification channel (no behavior beyond the base ZigbeeChannel)."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(hvac.Fan.cluster_id)
class FanChannel(ZigbeeChannel):
    """Fan channel: exposes and controls the HVAC Fan cluster's fan_mode."""
    # Attribute id of the cluster's primary value ("fan_mode").
    _value_attribute = 0
    REPORT_CONFIG = ({"attr": "fan_mode", "config": REPORT_CONFIG_OP},)
    @property
    def fan_mode(self) -> int | None:
        """Return current fan mode (from the cluster's attribute cache)."""
        return self.cluster.get("fan_mode")
    async def async_set_speed(self, value) -> None:
        """Set the speed of the fan by writing the fan_mode attribute."""
        try:
            await self.cluster.write_attributes({"fan_mode": value})
        except ZigbeeException as ex:
            # Best effort: log and bail rather than propagate radio errors.
            self.error("Could not set speed: %s", ex)
            return
    async def async_update(self) -> None:
        """Retrieve latest state, bypassing the attribute cache."""
        await self.get_attribute_value("fan_mode", from_cache=False)
    @callback
    def attribute_updated(self, attrid: int, value: Any) -> None:
        """Handle attribute update from fan cluster."""
        attr_name = self.cluster.attributes.get(attrid, [attrid])[0]
        self.debug(
            "Attribute report '%s'[%s] = %s", self.cluster.name, attr_name, value
        )
        # Only fan_mode changes are forwarded to listening entities.
        if attr_name == "fan_mode":
            self.async_send_signal(
                f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", attrid, attr_name, value
            )
@registries.ZIGBEE_CHANNEL_REGISTRY.register(hvac.Pump.cluster_id)
class Pump(ZigbeeChannel):
    """Pump channel (no behavior beyond the base ZigbeeChannel)."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(hvac.Thermostat.cluster_id)
class ThermostatChannel(ZigbeeChannel):
    """Thermostat channel.

    Mirrors the cluster's thermostat attributes into private shadow
    attributes (``_local_temp`` etc.) and exposes them via properties.
    Setpoint defaults below are in hundredths of a degree Celsius
    (e.g. 3200 == 32C, per the inline comments in __init__).
    """
    def __init__(
        self, cluster: zha_typing.ZigpyClusterType, ch_pool: zha_typing.ChannelPoolType
    ) -> None:
        """Init Thermostat channel instance."""
        super().__init__(cluster, ch_pool)
        # Attributes read during channel init; True = may be served from the
        # attribute cache, False = always read fresh from the device
        # (see async_initialize_channel_specific).
        self._init_attrs = {
            "abs_min_heat_setpoint_limit": True,
            "abs_max_heat_setpoint_limit": True,
            "abs_min_cool_setpoint_limit": True,
            "abs_max_cool_setpoint_limit": True,
            "ctrl_seqe_of_oper": False,
            "local_temp": False,
            "max_cool_setpoint_limit": True,
            "max_heat_setpoint_limit": True,
            "min_cool_setpoint_limit": True,
            "min_heat_setpoint_limit": True,
            "occupancy": False,
            "occupied_cooling_setpoint": False,
            "occupied_heating_setpoint": False,
            "pi_cooling_demand": False,
            "pi_heating_demand": False,
            "running_mode": False,
            "running_state": False,
            "system_mode": False,
            "unoccupied_heating_setpoint": False,
            "unoccupied_cooling_setpoint": False,
        }
        # Fallback absolute limits, used until the device reports its own.
        self._abs_max_cool_setpoint_limit = 3200  # 32C
        self._abs_min_cool_setpoint_limit = 1600  # 16C
        self._ctrl_seqe_of_oper = 0xFF
        self._abs_max_heat_setpoint_limit = 3000  # 30C
        self._abs_min_heat_setpoint_limit = 700  # 7C
        self._running_mode = None
        self._max_cool_setpoint_limit = None
        self._max_heat_setpoint_limit = None
        self._min_cool_setpoint_limit = None
        self._min_heat_setpoint_limit = None
        self._local_temp = None
        self._occupancy = None
        self._occupied_cooling_setpoint = None
        self._occupied_heating_setpoint = None
        self._pi_cooling_demand = None
        self._pi_heating_demand = None
        self._running_state = None
        self._system_mode = None
        self._unoccupied_cooling_setpoint = None
        self._unoccupied_heating_setpoint = None
        # Attribute reporting configuration, consumed by configure_reporting().
        self._report_config = [
            {"attr": "local_temp", "config": REPORT_CONFIG_CLIMATE},
            {"attr": "occupied_cooling_setpoint", "config": REPORT_CONFIG_CLIMATE},
            {"attr": "occupied_heating_setpoint", "config": REPORT_CONFIG_CLIMATE},
            {"attr": "unoccupied_cooling_setpoint", "config": REPORT_CONFIG_CLIMATE},
            {"attr": "unoccupied_heating_setpoint", "config": REPORT_CONFIG_CLIMATE},
            {"attr": "running_mode", "config": REPORT_CONFIG_CLIMATE},
            {"attr": "running_state", "config": REPORT_CONFIG_CLIMATE_DEMAND},
            {"attr": "system_mode", "config": REPORT_CONFIG_CLIMATE},
            {"attr": "occupancy", "config": REPORT_CONFIG_CLIMATE_DISCRETE},
            {"attr": "pi_cooling_demand", "config": REPORT_CONFIG_CLIMATE_DEMAND},
            {"attr": "pi_heating_demand", "config": REPORT_CONFIG_CLIMATE_DEMAND},
        ]
    @property
    def abs_max_cool_setpoint_limit(self) -> int:
        """Absolute maximum cooling setpoint."""
        return self._abs_max_cool_setpoint_limit
    @property
    def abs_min_cool_setpoint_limit(self) -> int:
        """Absolute minimum cooling setpoint."""
        return self._abs_min_cool_setpoint_limit
    @property
    def abs_max_heat_setpoint_limit(self) -> int:
        """Absolute maximum heating setpoint."""
        return self._abs_max_heat_setpoint_limit
    @property
    def abs_min_heat_setpoint_limit(self) -> int:
        """Absolute minimum heating setpoint."""
        return self._abs_min_heat_setpoint_limit
    @property
    def ctrl_seqe_of_oper(self) -> int:
        """Control Sequence of operations attribute."""
        return self._ctrl_seqe_of_oper
    @property
    def max_cool_setpoint_limit(self) -> int:
        """Maximum cooling setpoint (falls back to the absolute limit)."""
        if self._max_cool_setpoint_limit is None:
            return self.abs_max_cool_setpoint_limit
        return self._max_cool_setpoint_limit
    @property
    def min_cool_setpoint_limit(self) -> int:
        """Minimum cooling setpoint (falls back to the absolute limit)."""
        if self._min_cool_setpoint_limit is None:
            return self.abs_min_cool_setpoint_limit
        return self._min_cool_setpoint_limit
    @property
    def max_heat_setpoint_limit(self) -> int:
        """Maximum heating setpoint (falls back to the absolute limit)."""
        if self._max_heat_setpoint_limit is None:
            return self.abs_max_heat_setpoint_limit
        return self._max_heat_setpoint_limit
    @property
    def min_heat_setpoint_limit(self) -> int:
        """Minimum heating setpoint (falls back to the absolute limit)."""
        if self._min_heat_setpoint_limit is None:
            return self.abs_min_heat_setpoint_limit
        return self._min_heat_setpoint_limit
    @property
    def local_temp(self) -> int | None:
        """Thermostat temperature."""
        return self._local_temp
    @property
    def occupancy(self) -> int | None:
        """Is occupancy detected."""
        return self._occupancy
    @property
    def occupied_cooling_setpoint(self) -> int | None:
        """Temperature when room is occupied."""
        return self._occupied_cooling_setpoint
    @property
    def occupied_heating_setpoint(self) -> int | None:
        """Temperature when room is occupied."""
        return self._occupied_heating_setpoint
    @property
    def pi_cooling_demand(self) -> int:
        """Cooling demand."""
        return self._pi_cooling_demand
    @property
    def pi_heating_demand(self) -> int:
        """Heating demand."""
        return self._pi_heating_demand
    @property
    def running_mode(self) -> int | None:
        """Thermostat running mode."""
        return self._running_mode
    @property
    def running_state(self) -> int | None:
        """Thermostat running state, state of heat, cool, fan relays."""
        return self._running_state
    @property
    def system_mode(self) -> int | None:
        """System mode."""
        return self._system_mode
    @property
    def unoccupied_cooling_setpoint(self) -> int | None:
        """Temperature when room is not occupied."""
        return self._unoccupied_cooling_setpoint
    @property
    def unoccupied_heating_setpoint(self) -> int | None:
        """Temperature when room is not occupied."""
        return self._unoccupied_heating_setpoint
    @callback
    def attribute_updated(self, attrid, value):
        """Handle attribute update cluster."""
        attr_name = self.cluster.attributes.get(attrid, [attrid])[0]
        self.debug(
            "Attribute report '%s'[%s] = %s", self.cluster.name, attr_name, value
        )
        # Keep the local shadow attribute in sync, then notify listeners.
        setattr(self, f"_{attr_name}", value)
        self.async_send_signal(
            f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
            AttributeUpdateRecord(attrid, attr_name, value),
        )
    async def _chunk_attr_read(self, attrs, cached=False):
        """Read attributes in chunks of 4, updating shadow attrs and signaling."""
        chunk, attrs = attrs[:4], attrs[4:]
        while chunk:
            res, fail = await self.cluster.read_attributes(chunk, allow_cache=cached)
            self.debug("read attributes: Success: %s. Failed: %s", res, fail)
            for attr in chunk:
                # Attempted once: drop from the init set even if the read failed.
                self._init_attrs.pop(attr, None)
                if attr in fail:
                    continue
                if isinstance(attr, str):
                    setattr(self, f"_{attr}", res[attr])
                    self.async_send_signal(
                        f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
                        AttributeUpdateRecord(None, attr, res[attr]),
                    )
            chunk, attrs = attrs[:4], attrs[4:]
    async def configure_reporting(self):
        """Configure attribute reporting for a cluster.
        This also swallows DeliveryError exceptions that are thrown when
        devices are unreachable.
        """
        kwargs = {}
        # Cluster ids >= 0xFC00 need the manufacturer code on the request.
        if self.cluster.cluster_id >= 0xFC00 and self._ch_pool.manufacturer_code:
            kwargs["manufacturer"] = self._ch_pool.manufacturer_code
        chunk, rest = self._report_config[:4], self._report_config[4:]
        while chunk:
            attrs = {record["attr"]: record["config"] for record in chunk}
            try:
                res = await self.cluster.configure_reporting_multiple(attrs, **kwargs)
                self._configure_reporting_status(attrs, res[0])
            except (ZigbeeException, asyncio.TimeoutError) as ex:
                # Give up on the remaining chunks after the first failure.
                self.debug(
                    "failed to set reporting on '%s' cluster for: %s",
                    self.cluster.ep_attribute,
                    str(ex),
                )
                break
            chunk, rest = rest[:4], rest[4:]
    def _configure_reporting_status(
        self, attrs: dict[int | str, tuple], res: list | tuple
    ) -> None:
        """Parse configure reporting result."""
        if not isinstance(res, list):
            # assume default response
            self.debug(
                "attr reporting for '%s' on '%s': %s",
                attrs,
                self.name,
                res,
            )
            return
        # A single SUCCESS record means every requested attribute succeeded.
        if res[0].status == Status.SUCCESS and len(res) == 1:
            self.debug(
                "Successfully configured reporting for '%s' on '%s' cluster: %s",
                attrs,
                self.name,
                res,
            )
            return
        # Mixed result: split into succeeded and failed attribute names.
        failed = [
            self.cluster.attributes.get(r.attrid, [r.attrid])[0]
            for r in res
            if r.status != Status.SUCCESS
        ]
        attrs = {self.cluster.attributes.get(r, [r])[0] for r in attrs}
        self.debug(
            "Successfully configured reporting for '%s' on '%s' cluster",
            attrs - set(failed),
            self.name,
        )
        self.debug(
            "Failed to configure reporting for '%s' on '%s' cluster: %s",
            failed,
            self.name,
        )
    @retryable_req(delays=(1, 1, 3))
    async def async_initialize_channel_specific(self, from_cache: bool) -> None:
        """Initialize channel by reading all attributes in _init_attrs."""
        cached = [a for a, cached in self._init_attrs.items() if cached]
        uncached = [a for a, cached in self._init_attrs.items() if not cached]
        await self._chunk_attr_read(cached, cached=True)
        await self._chunk_attr_read(uncached, cached=False)
    async def async_set_operation_mode(self, mode) -> bool:
        """Set Operation mode; returns True on success."""
        if not await self.write_attributes({"system_mode": mode}):
            self.debug("couldn't set '%s' operation mode", mode)
            return False
        self._system_mode = mode
        self.debug("set system to %s", mode)
        return True
    async def async_set_heating_setpoint(
        self, temperature: int, is_away: bool = False
    ) -> bool:
        """Set heating setpoint (occupied, or unoccupied when is_away)."""
        if is_away:
            data = {"unoccupied_heating_setpoint": temperature}
        else:
            data = {"occupied_heating_setpoint": temperature}
        if not await self.write_attributes(data):
            self.debug("couldn't set heating setpoint")
            return False
        # Only update the local shadow once the device accepted the write.
        if is_away:
            self._unoccupied_heating_setpoint = temperature
        else:
            self._occupied_heating_setpoint = temperature
        self.debug("set heating setpoint to %s", temperature)
        return True
    async def async_set_cooling_setpoint(
        self, temperature: int, is_away: bool = False
    ) -> bool:
        """Set cooling setpoint (occupied, or unoccupied when is_away)."""
        if is_away:
            data = {"unoccupied_cooling_setpoint": temperature}
        else:
            data = {"occupied_cooling_setpoint": temperature}
        if not await self.write_attributes(data):
            self.debug("couldn't set cooling setpoint")
            return False
        if is_away:
            self._unoccupied_cooling_setpoint = temperature
        else:
            self._occupied_cooling_setpoint = temperature
        self.debug("set cooling setpoint to %s", temperature)
        return True
    async def get_occupancy(self) -> bool | None:
        """Get unreportable occupancy attribute; None if it can't be read."""
        try:
            res, fail = await self.cluster.read_attributes(["occupancy"])
            self.debug("read 'occupancy' attr, success: %s, fail: %s", res, fail)
            if "occupancy" not in res:
                return None
            self._occupancy = res["occupancy"]
            return bool(self.occupancy)
        except ZigbeeException as ex:
            # Implicitly returns None on radio errors.
            self.debug("Couldn't read 'occupancy' attribute: %s", ex)
    async def write_attributes(self, data, **kwargs):
        """Write attributes helper; returns True when all writes succeeded."""
        try:
            res = await self.cluster.write_attributes(data, **kwargs)
        except ZigbeeException as exc:
            self.debug("couldn't write %s: %s", data, exc)
            return False
        self.debug("wrote %s attrs, Status: %s", data, res)
        return self.check_result(res)
    @staticmethod
    def check_result(res: list) -> bool:
        """Normalize a write_attributes result into a single bool."""
        if not isinstance(res, list):
            return False
        # res[0] holds the per-attribute status records.
        return all(record.status == Status.SUCCESS for record in res[0])
@registries.ZIGBEE_CHANNEL_REGISTRY.register(hvac.UserInterface.cluster_id)
class UserInterface(ZigbeeChannel):
    """User interface (thermostat) channel; no behavior beyond the base."""
|
{
"content_hash": "27be89f43620385239f273589f9e5174",
"timestamp": "",
"source": "github",
"line_count": 444,
"max_line_length": 88,
"avg_line_length": 36.08783783783784,
"alnum_prop": 0.5932097609686077,
"repo_name": "sander76/home-assistant",
"id": "6b0cd9e5e28f8fa388b2cded25a60e805f885241",
"size": "16023",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zha/core/channels/hvac.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "36548768"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
import six
from syn.five import xrange
from nose.tools import assert_raises
from syn.type.a import (Type, ValuesType, MultiType, TypeType, AnyType,
TypeExtension, Set, Schema)
from syn.base_utils import is_hashable, feq
from syn.base_utils import ngzwarn, on_error, elog
from syn.globals import TEST_SAMPLES as SAMPLES
# Halve the global sample budget for this module, but keep at least one.
SAMPLES //= 2
SAMPLES = max(SAMPLES, 1)
ngzwarn(SAMPLES, 'SAMPLES')  # presumably warns when SAMPLES is not > 0 -- see syn.base_utils
#-------------------------------------------------------------------------------
# Type
def test_type():
    """The abstract Type base: equality, hashability, abstract hooks."""
    base = Type()
    assert base == Type()
    assert base != 1
    assert is_hashable(base)
    # Every template hook must raise NotImplementedError on the base class.
    for hook, args in [(base.check, (1,)),
                       (base.coerce, (1,)),
                       (base.display, ()),
                       (base.enumeration_value, (1,)),
                       (base.generate, ()),
                       (base.rst, ()),
                       (base.validate, (1,))]:
        assert_raises(NotImplementedError, hook, *args)
#-------------------------------------------------------------------------------
# AnyType
def test_anytype():
    """AnyType accepts every value and renders as 'any'."""
    anything = AnyType()
    assert AnyType() == anything
    anything.check(1)
    assert anything.coerce(1) == 1
    assert anything.display() == 'any'
    assert anything.rst() == 'any'
    anything.validate(1)
#-------------------------------------------------------------------------------
# TypeType
class Foo(object):
    """Test fixture: has validate() (requires value > 5) but no coerce()."""
    def __init__(self, value):
        self.value = value
    def validate(self):
        assert self.value > 5
class Bar(Foo):
    """Test fixture: Foo plus a classmethod coerce() that adds 1."""
    @classmethod
    def coerce(cls, value):
        return Bar(value + 1)
def test_typetype():
    """TypeType: wrapping plain classes, with coercion and validation hooks."""
    int_t = TypeType(int)
    assert int_t.type is int
    assert not int_t.call_coerce
    assert not int_t.call_validate
    assert int_t == TypeType(int)
    assert int_t != TypeType(float)

    int_t.check(1)
    assert_raises(TypeError, int_t.check, 1.2)
    assert int_t.query(1)
    assert not int_t.query(1.2)

    ok, err = int_t.query_exception(1)
    assert ok
    assert err is None
    ok, err = int_t.query_exception(1.2)
    assert not ok
    assert isinstance(err, TypeError)

    assert int_t.coerce(1.2) == 1
    assert_raises(TypeError, int_t.coerce, 'abc')
    assert int_t.display() == 'int'
    assert int_t.rst() == '*int*'
    int_t.validate(1)
    assert_raises(TypeError, int_t.validate, 1.2)

    # Foo defines validate() but no coerce() classmethod.
    foo_t = TypeType(Foo)
    assert foo_t.type is Foo
    assert not foo_t.call_coerce
    assert foo_t.call_validate
    foo_t.check(Foo(2))
    assert_raises(TypeError, foo_t.check, 2)
    coerced = foo_t.coerce(1)
    assert isinstance(coerced, Foo)
    assert coerced.value == 1
    assert_raises(TypeError, foo_t.validate, 6)
    assert_raises(AssertionError, foo_t.validate, Foo(5))
    assert foo_t.display() == 'Foo'
    assert foo_t.rst() == '*Foo*'
    foo_t.validate(Foo(6))

    # Bar defines both coerce() and (inherited) validate().
    bar_t = TypeType(Bar)
    assert bar_t.type is Bar
    assert bar_t.call_coerce
    assert bar_t.call_validate
    bar_t.check(Bar(2))
    assert_raises(TypeError, bar_t.check, Foo(2))
    coerced = bar_t.coerce(1)
    assert isinstance(coerced, Bar)
    assert coerced.value == 2
    assert_raises(TypeError, bar_t.validate, 6)
    assert_raises(AssertionError, bar_t.validate, Bar(5))
    bar_t.validate(Bar(6))
#-------------------------------------------------------------------------------
# ValuesType
def test_valuestype():
    """ValuesType restricts values to an explicit collection."""
    vals = ValuesType({1, 1.2, u'b'})
    assert vals == ValuesType([1, 1.2, u'b'])
    assert vals != ValuesType([1, 1.3, u'b'])

    for good in (1, 1.2, u'b'):
        vals.check(good)
    assert_raises(TypeError, vals.check, 2)

    assert vals.coerce(1) == 1
    assert_raises(TypeError, vals.coerce, 2)
    vals.validate(1)
    assert_raises(TypeError, vals.validate, 2)

    # Display order of a set-backed type is unspecified.
    pair = ValuesType({1, 1.2})
    assert pair.display() in ('[1, 1.2]', '[1.2, 1]')
    assert pair.rst() in ('[1, 1.2]', '[1.2, 1]')
    assert pair.display() == pair.rst()
#-------------------------------------------------------------------------------
# MultiType
def test_multitype():
    """MultiType: ordered alternatives are tried left to right."""
    import math

    pair = MultiType((int, float))
    assert pair == MultiType((int, float))
    assert pair != MultiType((int, str))
    assert pair.is_typelist
    assert pair.query(1)
    assert pair.query(1.2)
    assert not pair.query(u'a')
    assert pair.coerce(1.2) == 1
    assert pair.coerce(u'inf') == float(u'inf')
    assert_raises(TypeError, pair.coerce, u'abc')
    assert pair.display() == 'int | float'
    assert pair.rst() == '*int* | *float*'
    pair.validate(1)
    assert_raises(TypeError, pair.validate, u'abc')

    mixed = MultiType((int, Foo, ValuesType([math.pi, math.e])))
    assert not mixed.is_typelist
    assert mixed.query(1)
    assert mixed.query(Foo(2))
    assert mixed.query(math.pi)
    assert not mixed.query(3.4)
    assert mixed.coerce(1) == 1
    wrapped = mixed.coerce(u'abc')
    assert isinstance(wrapped, Foo)
    assert wrapped.value == u'abc'
    mixed.validate(1)
    mixed.validate(Foo(6))
    assert_raises(TypeError, mixed.validate, 3.4)
    assert_raises(AssertionError, mixed.validate, Foo(5))

    strings = MultiType(six.string_types)
    strings.validate('abc')
    strings.validate('abc')
    strings.validate(u'abc')
    assert_raises(TypeError, strings.validate, 3.4)
#-------------------------------------------------------------------------------
# Set
def test_set():
    """Set type backed by a syn set object (Range)."""
    from syn.sets.b import Range

    rng = Set(Range(1, 5))
    assert rng == Set(Range(1, 5))
    assert rng != Set(Range(0, 5))
    assert Type.dispatch(rng) is rng

    assert rng.query(1)
    assert not rng.query(0)
    rng.validate(1)
    assert_raises(TypeError, rng.validate, 0)
    assert rng.coerce(1) == 1
    assert_raises(TypeError, rng.coerce, 0)

    # Generated samples must fall inside the inclusive range [1, 5].
    expected = set(xrange(1, 6))
    for _ in xrange(SAMPLES):
        sample = rng.generate()
        with on_error(elog, expected.__contains__, (sample,)):
            assert sample in expected

    assert rng.display() == rng.rst() == '<Set>'
#-------------------------------------------------------------------------------
# Schema
def test_schema():
    """Schema type wrapping syn schema Sequences."""
    from syn.schema.b.sequence import Sequence
    from syn.type.a import List

    sch = Schema(Sequence(1, 2, 3))
    assert sch == Schema(Sequence(1, 2, 3))
    assert sch != Schema(Sequence(1, 3, 2))
    assert Type.dispatch(sch) is sch

    assert sch.query([1, 2, 3])
    assert not sch.query([1, 3, 2])
    sch.validate([1, 2, 3])
    assert_raises(TypeError, sch.validate, [1, 3, 2])
    assert sch.generate() == [1, 2, 3]
    assert sch.display() == sch.rst() == '<Schema>'
    assert sch.coerce(1) == 1

    typed = Schema(Sequence(int, float))
    assert typed.query([1, 2.3])
    assert not typed.query([1, 2])
    # generate() must produce something the schema itself accepts.
    assert typed.query(typed.generate())

    nested = Schema(Sequence(int, List(float)))
    assert not nested.query([1, 1.2])
    assert not nested.query([1, [1, 2]])
    assert nested.query([1, [1.2, 3.4]])
    assert nested.query([1, []])
    assert nested.query(nested.generate())
#-------------------------------------------------------------------------------
# dispatch_type
def test_dispatch_type():
    """Type.dispatch maps raw specifications to Type instances."""
    assert isinstance(Type.dispatch(None), AnyType)

    int_t = Type.dispatch(int)
    assert isinstance(int_t, TypeType)
    assert int_t.type is int

    multi = Type.dispatch((int, float))
    assert isinstance(multi, MultiType)
    assert multi.typelist == (int, float)

    values = Type.dispatch([1, 2])
    assert isinstance(values, ValuesType)
    assert values.values == [1, 2]

    strings = Type.dispatch(six.string_types)
    assert isinstance(strings, TypeType)
    strings.validate('abc')
    strings.validate(u'abc')
    assert_raises(TypeError, strings.validate, 1)

    # TypeExtension instances dispatch to themselves; the class dispatches
    # to a fresh instance.
    ext = TypeExtension()
    assert Type.dispatch(ext) is ext
    assert Type.dispatch(TypeExtension) is not TypeExtension
    assert isinstance(Type.dispatch(TypeExtension), TypeExtension)

    assert_raises(TypeError, Type.dispatch, 1)
    assert_raises(TypeError, Type.dispatch, b'abc')
    assert_raises(TypeError, Type.dispatch, u'abc')
#-------------------------------------------------------------------------------
# Test generation
def test_generation():
    """generate() produces values of the described type."""
    from syn.base_utils.rand import PRIMITIVE_TYPES
    from syn.types import Type as Type_

    samples = [AnyType().generate() for _ in xrange(SAMPLES)]
    if len(samples) > 2:
        assert any(item is not None for item in samples)

    class Foo(object): pass
    assert isinstance(AnyType().generate(types=[Foo]), tuple(PRIMITIVE_TYPES))

    class Bar(object):
        @classmethod
        def _generate(cls, **kwargs):
            return cls()
    # Registering a syn Type subclass enables generation for Bar.
    class BarType(Type_): type = Bar

    assert isinstance(TypeType(int).generate(), int)
    assert isinstance(TypeType(Bar).generate(), Bar)
    assert_raises(NotImplementedError, TypeType(Foo).generate)
    assert ValuesType([1, 2, 3]).generate() in {1, 2, 3}

    multi = MultiType([int, float])
    assert isinstance(multi.generate(), (int, float))
    assert isinstance(multi.generate(exclude_types=[float]), int)
#-------------------------------------------------------------------------------
# Test enumeration values
def test_enumeration_values():
    """enumeration_value(k) cycles deterministically through candidates."""
    assert TypeType(int).enumeration_value(0) == 0

    values = ValuesType([1, 2, 3])
    # Indices wrap around the value list.
    for idx, expected in ((0, 1), (1, 2), (2, 3), (3, 1)):
        assert values.enumeration_value(idx) == expected

    multi = MultiType([int, float])
    assert multi.enumeration_value(0) == 0
    assert feq(multi.enumeration_value(1), 0.1)
    assert multi.enumeration_value(2) == 2
    assert feq(multi.enumeration_value(3), 0.3)

    samples = [AnyType().enumeration_value(k) for k in xrange(SAMPLES)]
    if len(samples) > 2:
        assert any(item is not None for item in samples)

    class Foo(object): pass
    assert AnyType().enumeration_value(0, types=[Foo]) == 0
#-------------------------------------------------------------------------------
if __name__ == '__main__': # pragma: no cover
    # Run this module's tests through syn's own runner when executed directly.
    from syn.base_utils import run_all_tests
    run_all_tests(globals(), verbose=True, print_errors=False)
|
{
"content_hash": "be2c6c0a827c2bb60d937a86c6f57d74",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 80,
"avg_line_length": 27.568115942028985,
"alnum_prop": 0.561980864262433,
"repo_name": "mbodenhamer/syn",
"id": "9c7c66bfd8bd8d92b963b2f14a13506b6cf6434b",
"size": "9511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "syn/type/a/tests/test_type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3447"
},
{
"name": "Python",
"bytes": "571295"
},
{
"name": "Shell",
"bytes": "231"
}
],
"symlink_target": ""
}
|
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2014, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
# The gevent server adapter needs to patch some modules before they are imported
# This is why we parse the commandline parameters here but handle them later
if __name__ == '__main__':
    # Parse command line options early: if the gevent backend is requested,
    # the stdlib must be monkey-patched *before* the module imports below run
    # (see the note above this block).
    from optparse import OptionParser
    _cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
    _opt = _cmd_parser.add_option
    _opt("--version", action="store_true", help="show version number.")
    _opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
    _opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
    _opt("-p", "--plugin", action="append", help="install additional plugin/s.")
    _opt("--debug", action="store_true", help="start server in debug mode.")
    _opt("--reload", action="store_true", help="auto-reload on file changes.")
    _cmd_options, _cmd_args = _cmd_parser.parse_args()
    if _cmd_options.server and _cmd_options.server.startswith('gevent'):
        import gevent.monkey; gevent.monkey.patch_all()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, subprocess, sys, tempfile, threading, time, warnings
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from inspect import getargspec
from unicodedata import normalize
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from json import dumps as json_dumps, loads as json_lds
except ImportError:
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (resticts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a): raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it): return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
    """Convert anything to bytes; text values are encoded with *enc*."""
    if isinstance(s, unicode):
        return s.encode(enc)
    return bytes(s)
def touni(s, enc='utf8', err='strict'):
    """Convert anything to unicode text; bytes are decoded with *enc*/*err*."""
    if isinstance(s, bytes):
        return s.decode(enc, err)
    fallback = "" if s is None else s
    return unicode(s or fallback)
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
    from io import TextIOWrapper
    class NCTextIOWrapper(TextIOWrapper):
        # 'No-Close' TextIOWrapper: discarding the wrapper must not close the
        # underlying buffer (part of the py3.1 cgi.FieldStorage workaround
        # mentioned above).
        def close(self): pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
    """functools.update_wrapper that tolerates wrappers without writable
    attributes (e.g. instance methods, where functools breaks)."""
    try:
        functools.update_wrapper(wrapper, wrapped, *a, **ka)
    except AttributeError:
        pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, hard=False):
    """Emit a DeprecationWarning pointing at the caller's caller.

    The *hard* flag is currently unused (kept for interface stability).
    """
    warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data):
    """Coerce *data* into a list: containers are copied, a truthy scalar is
    wrapped, and falsy values yield an empty list."""
    if isinstance(data, (tuple, list, set, dict)):
        return list(data)
    if data:
        return [data]
    return []
class DictProperty(object):
    ''' Property that maps to a key in a local dict-like attribute.

        Used as a decorator factory: the decorated function becomes the lazy
        getter; reads are cached in ``getattr(obj, attr)[key]``.
    '''

    def __init__(self, attr, key=None, read_only=False):
        self.attr = attr
        self.key = key
        self.read_only = read_only

    def __call__(self, func):
        functools.update_wrapper(self, func, updated=[])
        self.getter = func
        self.key = self.key or func.__name__
        return self

    def __get__(self, obj, cls):
        if obj is None:
            return self
        storage = getattr(obj, self.attr)
        if self.key not in storage:
            # Compute once and memoize in the backing dict.
            storage[self.key] = self.getter(obj)
        return storage[self.key]

    def __set__(self, obj, value):
        if self.read_only:
            raise AttributeError("Read-Only property.")
        getattr(obj, self.attr)[self.key] = value

    def __delete__(self, obj):
        if self.read_only:
            raise AttributeError("Read-Only property.")
        del getattr(obj, self.attr)[self.key]
class cached_property(object):
    ''' A property that is only computed once per instance and then replaces
        itself with an ordinary attribute. Deleting the attribute resets the
        property. '''

    def __init__(self, func):
        self.__doc__ = getattr(func, '__doc__')
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            return self
        # Writing into obj.__dict__ shadows this (non-data) descriptor, so
        # subsequent reads bypass __get__ entirely.
        obj.__dict__[self.func.__name__] = value = self.func(obj)
        return value
class lazy_attribute(object):
    ''' A property that caches itself to the class object. '''

    def __init__(self, func):
        functools.update_wrapper(self, func, updated=[])
        self.getter = func

    def __get__(self, obj, cls):
        # First access computes the value and replaces this descriptor on the
        # class itself; later accesses hit the plain class attribute.
        value = self.getter(cls)
        setattr(cls, self.__name__, value)
        return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
    """ A base class for exceptions used by bottle. """


###############################################################################
# Routing ######################################################################
###############################################################################


class RouteError(BottleException):
    """ This is a base class for all routing related exceptions """


class RouteReset(BottleException):
    """ If raised by a plugin or request handler, the route is reset and all
        plugins are re-applied. """


class RouterUnknownModeError(RouteError):
    """ The router was asked to operate in an unsupported mode. """


class RouteSyntaxError(RouteError):
    """ The route parser found something not supported by this router. """


class RouteBuildError(RouteError):
    """ The route could not be built. """
def _re_flatten(p):
''' Turn all capturing groups in a regular expression pattern into
non-capturing groups. '''
if '(' not in p: return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)
class Router(object):
    ''' A Router is an ordered collection of route->target pairs. It is used to
        efficiently match WSGI requests against a number of routes and return
        the first target that satisfies the request. The target may be anything,
        usually a string, ID or callable object. A route consists of a path-rule
        and a HTTP method.
        The path-rule is either a static path (e.g. `/contact`) or a dynamic
        path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
        and details on the matching order are described in docs:`routing`.
    '''
    default_pattern = '[^/]+'
    default_filter = 're'
    #: The current CPython regexp implementation does not allow more
    #: than 99 matching groups per regular expression.
    _MAX_GROUPS_PER_PATTERN = 99

    def __init__(self, strict=False):
        self.rules = [] # All rules in order
        self._groups = {} # index of regexes to find them in dyna_routes
        self.builder = {} # Data structure for the url builder
        self.static = {} # Search structure for static routes
        self.dyna_routes = {}
        self.dyna_regexes = {} # Search structure for dynamic routes
        #: If true, static routes are no longer checked first.
        self.strict_order = strict
        # Built-in wildcard filters: each maps an optional config string to a
        # (regexp, to_python, to_url) triple; None means "no conversion".
        self.filters = {
            're': lambda conf:
                (_re_flatten(conf or self.default_pattern), None, None),
            'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
            'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
            'path': lambda conf: (r'.+?', None, None)}

    def add_filter(self, name, func):
        ''' Add a filter. The provided function is called with the configuration
        string as parameter and must return a (regexp, to_python, to_url) tuple.
        The first element is a string, the last two are callables or None. '''
        self.filters[name] = func

    # Matches both the old ':name#conf#' and the new '<name:filter:config>'
    # wildcard syntaxes. Group 1 collects preceding backslashes so escaped
    # wildcards can be detected in _itertokens().
    rule_syntax = re.compile('(\\\\*)'\
        '(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\
        '|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\
        '(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')

    def _itertokens(self, rule):
        ''' Split *rule* into a stream of (literal, None, None) prefixes and
            (name, filter, config) wildcard tokens. '''
        offset, prefix = 0, ''
        for match in self.rule_syntax.finditer(rule):
            prefix += rule[offset:match.start()]
            g = match.groups()
            if len(g[0])%2: # Escaped wildcard
                prefix += match.group(0)[len(g[0]):]
                offset = match.end()
                continue
            if prefix:
                yield prefix, None, None
            # Old-style tokens populate groups 1-3, new-style groups 4-6.
            name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
            yield name, filtr or 'default', conf or None
            offset, prefix = match.end(), ''
        if offset <= len(rule) or prefix:
            yield prefix+rule[offset:], None, None

    def add(self, rule, method, target, name=None):
        ''' Add a new rule or replace the target for an existing rule. '''
        anons = 0 # Number of anonymous wildcards found
        keys = [] # Names of keys
        pattern = '' # Regular expression pattern with named groups
        filters = [] # Lists of wildcard input filters
        builder = [] # Data structure for the URL builder
        is_static = True
        for key, mode, conf in self._itertokens(rule):
            if mode:
                is_static = False
                if mode == 'default': mode = self.default_filter
                mask, in_filter, out_filter = self.filters[mode](conf)
                if not key:
                    # Anonymous wildcard: match but do not capture by name.
                    pattern += '(?:%s)' % mask
                    key = 'anon%d' % anons
                    anons += 1
                else:
                    pattern += '(?P<%s>%s)' % (key, mask)
                    keys.append(key)
                if in_filter: filters.append((key, in_filter))
                builder.append((key, out_filter or str))
            elif key:
                pattern += re.escape(key)
                builder.append((None, key))
        self.builder[rule] = builder
        if name: self.builder[name] = builder
        if is_static and not self.strict_order:
            self.static.setdefault(method, {})
            self.static[method][self.build(rule)] = (target, None)
            return
        try:
            re_pattern = re.compile('^(%s)$' % pattern)
            re_match = re_pattern.match
        except re.error:
            raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))
        if filters:
            def getargs(path):
                url_args = re_match(path).groupdict()
                for name, wildcard_filter in filters:
                    try:
                        url_args[name] = wildcard_filter(url_args[name])
                    except ValueError:
                        raise HTTPError(400, 'Path has wrong format.')
                return url_args
        elif re_pattern.groupindex:
            def getargs(path):
                return re_match(path).groupdict()
        else:
            getargs = None
        flatpat = _re_flatten(pattern)
        whole_rule = (rule, flatpat, target, getargs)
        if (flatpat, method) in self._groups:
            if DEBUG:
                msg = 'Route <%s %s> overwrites a previously defined route'
                warnings.warn(msg % (method, rule), RuntimeWarning)
            self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule
        else:
            self.dyna_routes.setdefault(method, []).append(whole_rule)
            self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
        self._compile(method)

    def _compile(self, method):
        ''' (Re-)combine the dynamic rules for *method* into batched regexes
            of at most _MAX_GROUPS_PER_PATTERN alternatives each. '''
        all_rules = self.dyna_routes[method]
        comborules = self.dyna_regexes[method] = []
        maxgroups = self._MAX_GROUPS_PER_PATTERN
        for x in range(0, len(all_rules), maxgroups):
            some = all_rules[x:x+maxgroups]
            combined = (flatpat for (_, flatpat, _, _) in some)
            combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
            combined = re.compile(combined).match
            rules = [(target, getargs) for (_, _, target, getargs) in some]
            comborules.append((combined, rules))

    def build(self, _name, *anons, **query):
        ''' Build an URL by filling the wildcards in a rule. Leftover keyword
            arguments are appended as a query string. '''
        builder = self.builder.get(_name)
        if not builder: raise RouteBuildError("No route with that name.", _name)
        try:
            for i, value in enumerate(anons): query['anon%d'%i] = value
            url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
            return url if not query else url+'?'+urlencode(query)
        except KeyError:
            raise RouteBuildError('Missing URL argument: %r' % _e().args[0])

    def match(self, environ):
        ''' Return a (target, url_args) tuple or raise HTTPError(400/404/405). '''
        verb = environ['REQUEST_METHOD'].upper()
        path = environ['PATH_INFO'] or '/'
        target = None
        if verb == 'HEAD':
            # HEAD falls back to GET handlers.
            methods = ['PROXY', verb, 'GET', 'ANY']
        else:
            methods = ['PROXY', verb, 'ANY']
        for method in methods:
            if method in self.static and path in self.static[method]:
                target, getargs = self.static[method][path]
                return target, getargs(path) if getargs else {}
            elif method in self.dyna_regexes:
                for combined, rules in self.dyna_regexes[method]:
                    match = combined(path)
                    if match:
                        # lastindex identifies which alternative matched.
                        target, getargs = rules[match.lastindex - 1]
                        return target, getargs(path) if getargs else {}
        # No matching route found. Collect alternative methods for 405 response
        allowed = set([])
        nocheck = set(methods)
        for method in set(self.static) - nocheck:
            if path in self.static[method]:
                # BUGFIX: advertise the method that *would* have matched
                # (was 'allowed.add(verb)', which put the requested verb into
                # the Allow header instead of the permitted method).
                allowed.add(method)
        for method in set(self.dyna_regexes) - allowed - nocheck:
            for combined, rules in self.dyna_regexes[method]:
                match = combined(path)
                if match:
                    allowed.add(method)
        if allowed:
            allow_header = ",".join(sorted(allowed))
            raise HTTPError(405, "Method not allowed.", Allow=allow_header)
        # No matching route and no alternative method found. We give up
        raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
    ''' This class wraps a route callback along with route specific metadata and
        configuration and applies Plugins on demand. It is also responsible for
        turning an URL path rule into a regular expression usable by the Router.
    '''

    def __init__(self, app, rule, method, callback, name=None,
                 plugins=None, skiplist=None, **config):
        #: The application this route is installed to.
        self.app = app
        #: The path-rule string (e.g. ``/wiki/:page``).
        self.rule = rule
        #: The HTTP method as a string (e.g. ``GET``).
        self.method = method
        #: The original callback with no plugins applied. Useful for introspection.
        self.callback = callback
        #: The name of the route (if specified) or ``None``.
        self.name = name or None
        #: A list of route-specific plugins (see :meth:`Bottle.route`).
        self.plugins = plugins or []
        #: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
        self.skiplist = skiplist or []
        #: Additional keyword arguments passed to the :meth:`Bottle.route`
        #: decorator are stored in this dictionary. Used for route-specific
        #: plugin configuration and meta-data.
        self.config = ConfigDict().load_dict(config)

    @cached_property
    def call(self):
        ''' The route callback with all plugins applied. This property is
            created on demand and then cached to speed up subsequent requests.'''
        return self._make_callback()

    def reset(self):
        ''' Forget any cached values. The next time :attr:`call` is accessed,
            all plugins are re-applied. '''
        self.__dict__.pop('call', None)

    def prepare(self):
        ''' Do all on-demand work immediately (useful for debugging).'''
        self.call

    def all_plugins(self):
        ''' Yield all Plugins affecting this route. '''
        unique = set()
        # Route-specific plugins take precedence over app-wide ones; named
        # plugins are de-duplicated, and skiplist entries are filtered out.
        for p in reversed(self.app.plugins + self.plugins):
            if True in self.skiplist: break
            name = getattr(p, 'name', False)
            if name and (name in self.skiplist or name in unique): continue
            if p in self.skiplist or type(p) in self.skiplist: continue
            if name: unique.add(name)
            yield p

    def _make_callback(self):
        ''' Wrap the raw callback with every applicable plugin. '''
        callback = self.callback
        for plugin in self.all_plugins():
            try:
                if hasattr(plugin, 'apply'):
                    callback = plugin.apply(callback, self)
                else:
                    callback = plugin(callback)
            except RouteReset: # Try again with changed configuration.
                return self._make_callback()
        if not callback is self.callback:
            update_wrapper(callback, self.callback)
        return callback

    def get_undecorated_callback(self):
        ''' Return the callback. If the callback is a decorated function, try to
            recover the original function. '''
        func = self.callback
        func = getattr(func, '__func__' if py3k else 'im_func', func)
        closure_attr = '__closure__' if py3k else 'func_closure'
        while hasattr(func, closure_attr) and getattr(func, closure_attr):
            func = getattr(func, closure_attr)[0].cell_contents
        return func

    def get_callback_args(self):
        ''' Return a list of argument names the callback (most likely) accepts
            as keyword arguments. If the callback is a decorated function, try
            to recover the original function before inspection. '''
        return getargspec(self.get_undecorated_callback())[0]

    def get_config(self, key, default=None):
        ''' Lookup a config field and return its value, first checking the
            route.config, then route.app.config.'''
        # BUGFIX: was 'self.app.conifg' (typo), which raised AttributeError
        # whenever the key was absent from the route-local config.
        for conf in (self.config, self.app.config):
            if key in conf: return conf[key]
        return default

    def __repr__(self):
        cb = self.get_undecorated_callback()
        return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
    def __init__(self, catchall=True, autojson=True):
        """Create a new application.
        :param catchall: stored in ``config['catchall']``.
        :param autojson: if true, install a :class:`JSONPlugin` (stored in
            ``config['autojson']``).
        """
        #: A :class:`ConfigDict` for app specific configuration.
        self.config = ConfigDict()
        self.config._on_change = functools.partial(self.trigger_hook, 'config')
        self.config.meta_set('autojson', 'validate', bool)
        self.config.meta_set('catchall', 'validate', bool)
        self.config['catchall'] = catchall
        self.config['autojson'] = autojson
        #: A :class:`ResourceManager` for application files
        self.resources = ResourceManager()
        self.routes = [] # List of installed :class:`Route` instances.
        self.router = Router() # Maps requests to :class:`Route` instances.
        self.error_handler = {}
        # Core plugins
        self.plugins = [] # List of installed plugins.
        if self.config['autojson']:
            self.install(JSONPlugin())
        self.install(TemplatePlugin())
    #: If true, most exceptions are caught and returned as :exc:`HTTPError`
    catchall = DictProperty('config', 'catchall')
    # Hook names recognized by add_hook(); hooks listed in __hook_reversed
    # run in reverse registration order (see add_hook).
    __hook_names = 'before_request', 'after_request', 'app_reset', 'config'
    __hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
''' Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
'''
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
''' Remove a callback from a hook. '''
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
''' Trigger a hook and return a list of results. '''
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
    def mount(self, prefix, app, **options):
        ''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
            URL prefix. Example::
                root_app.mount('/admin/', admin_app)
            :param prefix: path prefix or `mount-point`. If it ends in a slash,
                that slash is mandatory.
            :param app: an instance of :class:`Bottle` or a WSGI application.
            All other parameters are passed to the underlying :meth:`route` call.
        '''
        segments = [p for p in prefix.split('/') if p]
        if not segments: raise ValueError('Empty path prefix.')
        path_depth = len(segments)
        def mountpoint_wrapper():
            try:
                # Move the mount prefix out of PATH_INFO while the inner
                # app handles the request.
                request.path_shift(path_depth)
                rs = HTTPResponse([])
                # Minimal WSGI start_response: record status/headers on the
                # outer HTTPResponse and hand back a write() callable.
                def start_response(status, headerlist, exc_info=None):
                    if exc_info:
                        try:
                            _raise(*exc_info)
                        finally:
                            exc_info = None
                    rs.status = status
                    for name, value in headerlist: rs.add_header(name, value)
                    return rs.body.append
                body = app(request.environ, start_response)
                # Anything written via start_response's write() comes first.
                if body and rs.body: body = itertools.chain(rs.body, body)
                rs.body = body or rs.body
                return rs
            finally:
                # Always restore the original path, even on error.
                request.path_shift(-path_depth)
        options.setdefault('skip', True)
        options.setdefault('method', 'PROXY')
        options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
        options['callback'] = mountpoint_wrapper
        self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
        if not prefix.endswith('/'):
            self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
''' Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. '''
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
''' Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. '''
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. '''
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.trigger_hook('app_reset')
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True
    def run(self, **kwargs):
        ''' Calls :func:`run` with the same parameters. '''
        # Delegates to the module-level run() helper with this app first.
        run(self, **kwargs)
    def match(self, environ):
        """ Search for a matching route and return a (:class:`Route` , urlargs)
            tuple. The second value is a dictionary with parameters extracted
            from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
        # Thin wrapper around this application's Router instance.
        return self.router.match(environ)
def get_url(self, routename, **kargs):
    """ Return a string that matches a named route.

        :param routename: the ``name`` given to the route at registration.
        :param kargs: values for the wildcards in the route rule.
    """
    # SCRIPT_NAME is the mount prefix of this application; normalize it to
    # exactly one trailing slash before joining.
    scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
    location = self.router.build(routename, **kargs).lstrip('/')
    return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
    ''' Add a route object, but do not change the :data:`Route.app`
        attribute.'''
    self.routes.append(route)
    self.router.add(route.rule, route.method, route, name=route.name)
    # In debug mode, prepare eagerly so configuration errors surface now.
    if DEBUG: route.prepare()
def route(self, path=None, method='GET', callback=None, name=None,
          apply=None, skip=None, **config):
    """ A decorator to bind a function to a request URL. Example::

            @app.route('/hello/:name')
            def hello(name):
                return 'Hello %s' % name

        The ``:name`` part is a wildcard. See :class:`Router` for syntax
        details.

        :param path: Request path or a list of paths to listen to. If no
          path is specified, it is automatically generated from the
          signature of the function.
        :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
          methods to listen to. (default: `GET`)
        :param callback: An optional shortcut to avoid the decorator
          syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
        :param name: The name for this route. (default: None)
        :param apply: A decorator or plugin or a list of plugins. These are
          applied to the route callback in addition to installed plugins.
        :param skip: A list of plugins, plugin classes or names. Matching
          plugins are not installed to this route. ``True`` skips all.

        Any additional keyword arguments are stored as route-specific
        configuration and passed to plugins (see :meth:`Plugin.apply`).
    """
    # Support bare ``@app.route`` usage: the decorated function arrives
    # in the ``path`` slot.
    if callable(path): path, callback = None, path
    plugins = makelist(apply)
    skiplist = makelist(skip)
    def decorator(callback):
        # TODO: Documentation and tests
        # A string callback is a lazy "module:member" import reference.
        if isinstance(callback, basestring): callback = load(callback)
        # Register one Route object per (rule, verb) combination.
        for rule in makelist(path) or yieldroutes(callback):
            for verb in makelist(method):
                verb = verb.upper()
                route = Route(self, rule, verb, callback, name=name,
                              plugins=plugins, skiplist=skiplist, **config)
                self.add_route(route)
        return callback
    return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
    """ Shortcut for :meth:`route` with the default ``GET`` method. """
    return self.route(path, method=method, **options)
def post(self, path=None, method='POST', **options):
    """ Shortcut for :meth:`route` with a ``POST`` method parameter. """
    return self.route(path, method=method, **options)
def put(self, path=None, method='PUT', **options):
    """ Shortcut for :meth:`route` with a ``PUT`` method parameter. """
    return self.route(path, method=method, **options)
def delete(self, path=None, method='DELETE', **options):
    """ Shortcut for :meth:`route` with a ``DELETE`` method parameter. """
    return self.route(path, method=method, **options)
def error(self, code=500):
    """ Decorator: register an output handler for the given HTTP error
        code (default: 500). The handler is returned unchanged. """
    def register(handler):
        self.error_handler[int(code)] = handler
        return handler
    return register
def default_error_handler(self, res):
    """ Render the built-in error page template for the :class:`HTTPError`
        instance *res* and return it as bytes. """
    return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
    """ Bind request/response, match a route and invoke its callback.
        Returns whatever the route produced, or an :class:`HTTPError`. """
    # Keep the raw (undecoded) path around for middleware/debugging.
    path = environ['bottle.raw_path'] = environ['PATH_INFO']
    if py3k:
        try:
            # On Python 3, WSGI servers hand PATH_INFO over as latin1-decoded
            # text; re-interpret the underlying bytes as UTF-8.
            environ['PATH_INFO'] = path.encode('latin1').decode('utf8')
        except UnicodeError:
            return HTTPError(400, 'Invalid path string. Expected UTF-8')
    try:
        environ['bottle.app'] = self
        request.bind(environ)
        response.bind()
        try:
            self.trigger_hook('before_request')
            route, args = self.router.match(environ)
            environ['route.handle'] = route
            environ['bottle.route'] = route
            environ['route.url_args'] = args
            return route.call(**args)
        finally:
            # after_request fires even when the callback raised.
            self.trigger_hook('after_request')
    except HTTPResponse:
        # HTTPResponse is raised for flow control (abort/redirect);
        # the exception instance IS the result.
        return _e()
    except RouteReset:
        # A plugin requested re-application; reset the route and retry.
        # ``route`` is bound here because RouteReset originates in route.call.
        route.reset()
        return self._handle(environ)
    except (KeyboardInterrupt, SystemExit, MemoryError):
        raise
    except Exception:
        if not self.catchall: raise
        stacktrace = format_exc()
        environ['wsgi.errors'].write(stacktrace)
        return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
    """ Try to convert the parameter into something WSGI compatible and set
        correct HTTP headers when possible.
        Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
        iterable of strings and iterable of unicodes
    """
    # Empty output is done here
    if not out:
        if 'Content-Length' not in response:
            response['Content-Length'] = 0
        return []
    # Join lists of byte or unicode strings. Mixed lists are NOT supported
    if isinstance(out, (tuple, list))\
    and isinstance(out[0], (bytes, unicode)):
        out = out[0][0:0].join(out)  # b'abc'[0:0] -> b''
    # Encode unicode strings
    if isinstance(out, unicode):
        out = out.encode(response.charset)
    # Byte Strings are just returned
    if isinstance(out, bytes):
        if 'Content-Length' not in response:
            response['Content-Length'] = len(out)
        return [out]
    # HTTPError or HTTPException (recursive, because they may wrap anything)
    # TODO: Handle these explicitly in handle() or make them iterable.
    if isinstance(out, HTTPError):
        out.apply(response)
        # Render through the registered (or default) error handler,
        # then recurse to cast whatever the handler returned.
        out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
        return self._cast(out)
    if isinstance(out, HTTPResponse):
        out.apply(response)
        return self._cast(out.body)
    # File-like objects.
    if hasattr(out, 'read'):
        if 'wsgi.file_wrapper' in request.environ:
            # Prefer the server-provided (often sendfile-based) wrapper.
            return request.environ['wsgi.file_wrapper'](out)
        elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
            return WSGIFileWrapper(out)
    # Handle Iterables. We peek into them to detect their inner type.
    try:
        iout = iter(out)
        first = next(iout)
        # Skip leading falsy chunks (e.g. empty strings) to reach content.
        while not first:
            first = next(iout)
    except StopIteration:
        return self._cast('')
    except HTTPResponse:
        first = _e()
    except (KeyboardInterrupt, SystemExit, MemoryError):
        raise
    except Exception:
        if not self.catchall: raise
        first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
    # These are the inner types allowed in iterator or generator objects.
    if isinstance(first, HTTPResponse):
        return self._cast(first)
    elif isinstance(first, bytes):
        # Re-attach the consumed first chunk to the rest of the iterator.
        new_iter = itertools.chain([first], iout)
    elif isinstance(first, unicode):
        encoder = lambda x: x.encode(response.charset)
        new_iter = imap(encoder, itertools.chain([first], iout))
    else:
        msg = 'Unsupported response type: %s' % type(first)
        return self._cast(HTTPError(500, msg))
    if hasattr(out, 'close'):
        # Make sure the original iterable is closed once exhausted.
        new_iter = _closeiter(new_iter, out.close)
    return new_iter
def wsgi(self, environ, start_response):
    """ The bottle WSGI-interface. """
    try:
        out = self._cast(self._handle(environ))
        # rfc2616 section 4.3: these responses MUST NOT include a body.
        if response._status_code in (100, 101, 204, 304)\
        or environ['REQUEST_METHOD'] == 'HEAD':
            if hasattr(out, 'close'): out.close()
            out = []
        start_response(response._status_line, response.headerlist)
        return out
    except (KeyboardInterrupt, SystemExit, MemoryError):
        raise
    except Exception:
        if not self.catchall: raise
        # Last-resort error page; only reached if error handling itself failed.
        err = '<h1>Critical error while processing request: %s</h1>' \
              % html_escape(environ.get('PATH_INFO', '/'))
        if DEBUG:
            err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
                   '<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
                   % (html_escape(repr(_e())), html_escape(format_exc()))
        environ['wsgi.errors'].write(err)
        headers = [('Content-Type', 'text/html; charset=UTF-8')]
        # Passing exc_info lets the server replace headers if possible.
        start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
        return [tob(err)]
def __call__(self, environ, start_response):
    ''' Each instance of :class:`Bottle` is a WSGI application. '''
    return self.wsgi(environ, start_response)
def __enter__(self):
    ''' Use this application as default for all module-level shortcuts. '''
    default_app.push(self)
    return self
def __exit__(self, exc_type, exc_value, traceback):
    ''' Restore the previous default application (see :meth:`__enter__`). '''
    default_app.pop()
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
    """ A wrapper for WSGI environment dictionaries that adds a lot of
        convenient access methods and properties. Most of them are read-only.

        Adding new attributes to a request actually adds them to the environ
        dictionary (as 'bottle.request.ext.<name>'). This is the recommended
        way to store and access request-specific data.
    """

    # NOTE(review): the parentheses do not make a tuple -- this is the plain
    # string 'environ', which __slots__ accepts as a single slot name.
    __slots__ = ('environ')

    #: Maximum size of memory buffer for :attr:`body` in bytes.
    MEMFILE_MAX = 102400

    def __init__(self, environ=None):
        """ Wrap a WSGI environ dictionary. """
        #: The wrapped WSGI environ dictionary. This is the only real attribute.
        #: All other attributes actually are read-only properties.
        self.environ = {} if environ is None else environ
        self.environ['bottle.request'] = self

    @DictProperty('environ', 'bottle.app', read_only=True)
    def app(self):
        ''' Bottle application handling this request. '''
        # Only reached when the environ key is missing; DictProperty returns
        # the cached environ value otherwise.
        raise RuntimeError('This request is not connected to an application.')

    @DictProperty('environ', 'bottle.route', read_only=True)
    def route(self):
        """ The bottle :class:`Route` object that matches this request. """
        raise RuntimeError('This request is not connected to a route.')

    @DictProperty('environ', 'route.url_args', read_only=True)
    def url_args(self):
        """ The arguments extracted from the URL. """
        raise RuntimeError('This request is not connected to a route.')

    @property
    def path(self):
        ''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
            broken clients and avoid the "empty path" edge case). '''
        return '/' + self.environ.get('PATH_INFO','').lstrip('/')

    @property
    def method(self):
        ''' The ``REQUEST_METHOD`` value as an uppercase string. '''
        return self.environ.get('REQUEST_METHOD', 'GET').upper()

    @DictProperty('environ', 'bottle.request.headers', read_only=True)
    def headers(self):
        ''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
            HTTP request headers. '''
        return WSGIHeaderDict(self.environ)

    def get_header(self, name, default=None):
        ''' Return the value of a request header, or a given default value. '''
        return self.headers.get(name, default)

    @DictProperty('environ', 'bottle.request.cookies', read_only=True)
    def cookies(self):
        """ Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
            decoded. Use :meth:`get_cookie` if you expect signed cookies. """
        cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values()
        return FormsDict((c.key, c.value) for c in cookies)

    def get_cookie(self, key, default=None, secret=None):
        """ Return the content of a cookie. To read a `Signed Cookie`, the
            `secret` must match the one used to create the cookie (see
            :meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
            cookie or wrong signature), return a default value. """
        value = self.cookies.get(key)
        if secret and value:
            dec = cookie_decode(value, secret)  # (key, value) tuple or None
            # Only accept the decoded value if the embedded key matches.
            return dec[1] if dec and dec[0] == key else default
        return value or default

    @DictProperty('environ', 'bottle.request.query', read_only=True)
    def query(self):
        ''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
            values are sometimes called "URL arguments" or "GET parameters", but
            not to be confused with "URL wildcards" as they are provided by the
            :class:`Router`. '''
        # Also cached under a second environ key for internal consumers.
        get = self.environ['bottle.get'] = FormsDict()
        pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
        for key, value in pairs:
            get[key] = value
        return get

    @DictProperty('environ', 'bottle.request.forms', read_only=True)
    def forms(self):
        """ Form values parsed from an `url-encoded` or `multipart/form-data`
            encoded POST or PUT request body. The result is returned as a
            :class:`FormsDict`. All keys and values are strings. File uploads
            are stored separately in :attr:`files`. """
        forms = FormsDict()
        for name, item in self.POST.allitems():
            if not isinstance(item, FileUpload):
                forms[name] = item
        return forms

    @DictProperty('environ', 'bottle.request.params', read_only=True)
    def params(self):
        """ A :class:`FormsDict` with the combined values of :attr:`query` and
            :attr:`forms`. File uploads are stored in :attr:`files`. """
        params = FormsDict()
        # Form values are added last, so they shadow query values on .get().
        for key, value in self.query.allitems():
            params[key] = value
        for key, value in self.forms.allitems():
            params[key] = value
        return params

    @DictProperty('environ', 'bottle.request.files', read_only=True)
    def files(self):
        """ File uploads parsed from `multipart/form-data` encoded POST or PUT
            request body. The values are instances of :class:`FileUpload`.
        """
        files = FormsDict()
        for name, item in self.POST.allitems():
            if isinstance(item, FileUpload):
                files[name] = item
        return files

    @DictProperty('environ', 'bottle.request.json', read_only=True)
    def json(self):
        ''' If the ``Content-Type`` header is ``application/json``, this
            property holds the parsed content of the request body. Only requests
            smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
            exhaustion. '''
        if 'application/json' in self.environ.get('CONTENT_TYPE', ''):
            return json_loads(self._get_body_string())
        return None

    def _iter_body(self, read, bufsize):
        # Yield the body in chunks, bounded by Content-Length.
        maxread = max(0, self.content_length)
        while maxread:
            part = read(min(maxread, bufsize))
            if not part: break
            yield part
            maxread -= len(part)

    def _iter_chunked(self, read, bufsize):
        # Decode a "Transfer-Encoding: chunked" body, yielding raw chunks.
        err = HTTPError(400, 'Error while parsing chunked transfer body.')
        rn, sem, bs = tob('\r\n'), tob(';'), tob('')
        while True:
            # Read the chunk-size line one byte at a time until CRLF.
            header = read(1)
            while header[-2:] != rn:
                c = read(1)
                header += c
                if not c: raise err
                if len(header) > bufsize: raise err
            # Chunk extensions (after ';') are ignored.
            size, _, _ = header.partition(sem)
            try:
                maxread = int(tonat(size.strip()), 16)
            except ValueError:
                raise err
            if maxread == 0: break  # last (zero-sized) chunk ends the body
            buff = bs
            while maxread > 0:
                if not buff:
                    buff = read(min(maxread, bufsize))
                part, buff = buff[:maxread], buff[maxread:]
                if not part: raise err
                yield part
                maxread -= len(part)
            # Each chunk must be terminated by CRLF.
            if read(2) != rn:
                raise err

    @DictProperty('environ', 'bottle.request.body', read_only=True)
    def _body(self):
        # Read the whole body into memory, spilling to a temporary file once
        # it grows beyond MEMFILE_MAX. The result replaces wsgi.input so the
        # body can be re-read later.
        body_iter = self._iter_chunked if self.chunked else self._iter_body
        read_func = self.environ['wsgi.input'].read
        body, body_size, is_temp_file = BytesIO(), 0, False
        for part in body_iter(read_func, self.MEMFILE_MAX):
            body.write(part)
            body_size += len(part)
            if not is_temp_file and body_size > self.MEMFILE_MAX:
                body, tmp = TemporaryFile(mode='w+b'), body
                body.write(tmp.getvalue())
                del tmp
                is_temp_file = True
        self.environ['wsgi.input'] = body
        body.seek(0)
        return body

    def _get_body_string(self):
        ''' Read body until content-length or MEMFILE_MAX into a string. Raise
            HTTPError(413) on requests that are too large. '''
        clen = self.content_length
        if clen > self.MEMFILE_MAX:
            raise HTTPError(413, 'Request to large')
        # Unknown length (-1): read one byte past the limit to detect overflow.
        if clen < 0: clen = self.MEMFILE_MAX + 1
        data = self.body.read(clen)
        if len(data) > self.MEMFILE_MAX:  # Fail fast
            raise HTTPError(413, 'Request to large')
        return data

    @property
    def body(self):
        """ The HTTP request body as a seek-able file-like object. Depending on
            :attr:`MEMFILE_MAX`, this is either a temporary file or a
            :class:`io.BytesIO` instance. Accessing this property for the first
            time reads and replaces the ``wsgi.input`` environ variable.
            Subsequent accesses just do a `seek(0)` on the file object. """
        self._body.seek(0)
        return self._body

    @property
    def chunked(self):
        ''' True if Chunked transfer encoding was used for the request body. '''
        return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower()

    #: An alias for :attr:`query`.
    GET = query

    @DictProperty('environ', 'bottle.request.post', read_only=True)
    def POST(self):
        """ The values of :attr:`forms` and :attr:`files` combined into a single
            :class:`FormsDict`. Values are either strings (form values) or
            instances of :class:`cgi.FieldStorage` (file uploads).
        """
        post = FormsDict()
        # We default to application/x-www-form-urlencoded for everything that
        # is not multipart and take the fast path (also: 3.1 workaround)
        if not self.content_type.startswith('multipart/'):
            pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
            for key, value in pairs:
                post[key] = value
            return post

        safe_env = {'QUERY_STRING':''}  # Build a safe environment for cgi
        for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
            if key in self.environ: safe_env[key] = self.environ[key]
        args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
        if py31:
            # cgi on 3.1 needs a text stream; wrap without closing the body.
            args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8',
                                         newline='\n')
        elif py3k:
            args['encoding'] = 'utf8'
        data = cgi.FieldStorage(**args)
        data = data.list or []
        for item in data:
            if item.filename:
                post[item.name] = FileUpload(item.file, item.name,
                                             item.filename, item.headers)
            else:
                post[item.name] = item.value
        return post

    @property
    def url(self):
        """ The full request URI including hostname and scheme. If your app
            lives behind a reverse proxy or load balancer and you get confusing
            results, make sure that the ``X-Forwarded-Host`` header is set
            correctly. """
        return self.urlparts.geturl()

    @DictProperty('environ', 'bottle.request.urlparts', read_only=True)
    def urlparts(self):
        ''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
            The tuple contains (scheme, host, path, query_string and fragment),
            but the fragment is always empty because it is not visible to the
            server. '''
        env = self.environ
        # Proxy headers (X-Forwarded-*) take precedence over server values.
        http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
        host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
        if not host:
            # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
            host = env.get('SERVER_NAME', '127.0.0.1')
            port = env.get('SERVER_PORT')
            # Omit the port when it is the scheme's default.
            if port and port != ('80' if http == 'http' else '443'):
                host += ':' + port
        path = urlquote(self.fullpath)
        return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')

    @property
    def fullpath(self):
        """ Request path including :attr:`script_name` (if present). """
        return urljoin(self.script_name, self.path.lstrip('/'))

    @property
    def query_string(self):
        """ The raw :attr:`query` part of the URL (everything in between ``?``
            and ``#``) as a string. """
        return self.environ.get('QUERY_STRING', '')

    @property
    def script_name(self):
        ''' The initial portion of the URL's `path` that was removed by a higher
            level (server or routing middleware) before the application was
            called. This script path is returned with leading and tailing
            slashes. '''
        script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
        return '/' + script_name + '/' if script_name else '/'

    def path_shift(self, shift=1):
        ''' Shift path segments from :attr:`path` to :attr:`script_name` and
            vice versa.

            :param shift: The number of path segments to shift. May be negative
                          to change the shift direction. (default: 1)
        '''
        script = self.environ.get('SCRIPT_NAME','/')
        # Writing via self[...] also clears the caches that depend on the keys.
        self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)

    @property
    def content_length(self):
        ''' The request body length as an integer. The client is responsible to
            set this header. Otherwise, the real length of the body is unknown
            and -1 is returned. In this case, :attr:`body` will be empty. '''
        return int(self.environ.get('CONTENT_LENGTH') or -1)

    @property
    def content_type(self):
        ''' The Content-Type header as a lowercase-string (default: empty). '''
        return self.environ.get('CONTENT_TYPE', '').lower()

    @property
    def is_xhr(self):
        ''' True if the request was triggered by a XMLHttpRequest. This only
            works with JavaScript libraries that support the `X-Requested-With`
            header (most of the popular libraries do). '''
        requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
        return requested_with.lower() == 'xmlhttprequest'

    @property
    def is_ajax(self):
        ''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
        return self.is_xhr

    @property
    def auth(self):
        """ HTTP authentication data as a (user, password) tuple. This
            implementation currently supports basic (not digest) authentication
            only. If the authentication happened at a higher level (e.g. in the
            front web-server or a middleware), the password field is None, but
            the user field is looked up from the ``REMOTE_USER`` environ
            variable. On any errors, None is returned. """
        basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
        if basic: return basic
        ruser = self.environ.get('REMOTE_USER')
        if ruser: return (ruser, None)
        return None

    @property
    def remote_route(self):
        """ A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This does only
            work if all proxies support the ```X-Forwarded-For`` header. Note
            that this information can be forged by malicious clients. """
        proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
        if proxy: return [ip.strip() for ip in proxy.split(',')]
        remote = self.environ.get('REMOTE_ADDR')
        return [remote] if remote else []

    @property
    def remote_addr(self):
        """ The client IP as a string. Note that this information can be forged
            by malicious clients. """
        route = self.remote_route
        return route[0] if route else None

    def copy(self):
        """ Return a new :class:`Request` with a shallow :attr:`environ` copy. """
        return Request(self.environ.copy())

    # Read-only dict-style access delegates straight to the environ.
    def get(self, value, default=None): return self.environ.get(value, default)
    def __getitem__(self, key): return self.environ[key]
    # Setting to "" first routes through __setitem__ so caches are cleared.
    def __delitem__(self, key): self[key] = ""; del(self.environ[key])
    def __iter__(self): return iter(self.environ)
    def __len__(self): return len(self.environ)
    def keys(self): return self.environ.keys()

    def __setitem__(self, key, value):
        """ Change an environ value and clear all caches that depend on it. """
        if self.environ.get('bottle.request.readonly'):
            raise KeyError('The environ dictionary is read-only.')

        self.environ[key] = value
        todelete = ()

        if key == 'wsgi.input':
            todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
        elif key == 'QUERY_STRING':
            todelete = ('query', 'params')
        elif key.startswith('HTTP_'):
            todelete = ('headers', 'cookies')

        # Drop the cached DictProperty values so they are re-computed.
        for key in todelete:
            self.environ.pop('bottle.request.'+key, None)

    def __repr__(self):
        return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)

    def __getattr__(self, name):
        ''' Search in self.environ for additional user defined attributes. '''
        try:
            var = self.environ['bottle.request.ext.%s'%name]
            # Honour the descriptor protocol for stored values.
            return var.__get__(self) if hasattr(var, '__get__') else var
        except KeyError:
            raise AttributeError('Attribute %r not defined.' % name)

    def __setattr__(self, name, value):
        # 'environ' is the only real attribute; everything else is stored
        # inside the environ under a namespaced key.
        if name == 'environ': return object.__setattr__(self, name, value)
        self.environ['bottle.request.ext.%s'%name] = value
def _hkey(s):
return s.title().replace('_','-')
class HeaderProperty(object):
    """ Descriptor that maps an attribute to one entry in ``obj.headers``.

        :param name: header name used as the dictionary key.
        :param reader: optional callable applied to the stored value on read.
        :param writer: callable applied to the value on write (default: str).
        :param default: value used when the header is missing.
    """

    def __init__(self, name, reader=None, writer=str, default=''):
        self.name, self.default = name, default
        self.reader, self.writer = reader, writer
        self.__doc__ = 'Current value of the %r header.' % name.title()

    def __get__(self, obj, cls):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        raw = obj.headers.get(self.name, self.default)
        if self.reader:
            return self.reader(raw)
        return raw

    def __set__(self, obj, value):
        obj.headers[self.name] = self.writer(value)

    def __delete__(self, obj):
        del obj.headers[self.name]
class BaseResponse(object):
    """ Storage class for a response body as well as headers and cookies.

        This class does support dict-like case-insensitive item-access to
        headers, but is NOT a dict. Most notably, iterating over a response
        yields parts of the body and not the headers.

        :param body: The response body as one of the supported types.
        :param status: Either an HTTP status code (e.g. 200) or a status line
                       including the reason phrase (e.g. '200 OK').
        :param headers: A dictionary or a list of name-value pairs.

        Additional keyword arguments are added to the list of headers.
        Underscores in the header name are replaced with dashes.
    """

    default_status = 200
    default_content_type = 'text/html; charset=UTF-8'

    # Header blacklist for specific response codes
    # (rfc2616 section 10.2.3 and 10.3.5)
    bad_headers = {
        204: set(('Content-Type',)),
        304: set(('Allow', 'Content-Encoding', 'Content-Language',
                  'Content-Length', 'Content-Range', 'Content-Type',
                  'Content-Md5', 'Last-Modified'))}

    def __init__(self, body='', status=None, headers=None, **more_headers):
        self._cookies = None
        # Maps title-cased header names to LISTS of values.
        self._headers = {}
        self.body = body
        self.status = status or self.default_status
        if headers:
            if isinstance(headers, dict):
                headers = headers.items()
            for name, value in headers:
                self.add_header(name, value)
        if more_headers:
            for name, value in more_headers.items():
                self.add_header(name, value)

    def copy(self, cls=None):
        ''' Returns a copy of self. '''
        cls = cls or BaseResponse
        assert issubclass(cls, BaseResponse)
        copy = cls()
        copy.status = self.status
        # Copy the value lists so the copies do not share mutable state.
        copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
        if self._cookies:
            copy._cookies = SimpleCookie()
            # Round-trip through the serialized form to get a deep copy.
            copy._cookies.load(self._cookies.output())
        return copy

    def __iter__(self):
        # Iterating a response yields body parts, not headers.
        return iter(self.body)

    def close(self):
        if hasattr(self.body, 'close'):
            self.body.close()

    @property
    def status_line(self):
        ''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
        return self._status_line

    @property
    def status_code(self):
        ''' The HTTP status code as an integer (e.g. 404).'''
        return self._status_code

    def _set_status(self, status):
        if isinstance(status, int):
            # Look up the canonical reason phrase; may be None for odd codes.
            code, status = status, _HTTP_STATUS_LINES.get(status)
        elif ' ' in status:
            status = status.strip()
            code = int(status.split()[0])
        else:
            raise ValueError('String status line without a reason phrase.')
        if not 100 <= code <= 999: raise ValueError('Status code out of range.')
        self._status_code = code
        # Fall back to "<code> Unknown" when no reason phrase is known.
        self._status_line = str(status or ('%d Unknown' % code))

    def _get_status(self):
        return self._status_line

    status = property(_get_status, _set_status, None,
        ''' A writeable property to change the HTTP response status. It accepts
            either a numeric code (100-999) or a string with a custom reason
            phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
            :data:`status_code` are updated accordingly. The return value is
            always a status string. ''')
    # Remove the accessor functions from the class namespace; only the
    # property remains visible.
    del _get_status, _set_status

    @property
    def headers(self):
        ''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
            view on the response headers. '''
        hdict = HeaderDict()
        # Share the underlying storage so writes through the view stick.
        hdict.dict = self._headers
        return hdict

    def __contains__(self, name): return _hkey(name) in self._headers
    def __delitem__(self, name): del self._headers[_hkey(name)]
    # Item access reads/writes the LAST value for a header name.
    def __getitem__(self, name): return self._headers[_hkey(name)][-1]
    def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]

    def get_header(self, name, default=None):
        ''' Return the value of a previously defined header. If there is no
            header with that name, return a default value. '''
        return self._headers.get(_hkey(name), [default])[-1]

    def set_header(self, name, value):
        ''' Create a new response header, replacing any previously defined
            headers with the same name. '''
        self._headers[_hkey(name)] = [str(value)]

    def add_header(self, name, value):
        ''' Add an additional response header, not removing duplicates. '''
        self._headers.setdefault(_hkey(name), []).append(str(value))

    def iter_headers(self):
        ''' Yield (header, value) tuples, skipping headers that are not
            allowed with the current response status code. '''
        return self.headerlist

    @property
    def headerlist(self):
        ''' WSGI conform list of (header, value) tuples. '''
        out = []
        headers = list(self._headers.items())
        # Guarantee a Content-Type header on every response.
        if 'Content-Type' not in self._headers:
            headers.append(('Content-Type', [self.default_content_type]))
        # Strip headers forbidden for this status code (see bad_headers).
        if self._status_code in self.bad_headers:
            bad_headers = self.bad_headers[self._status_code]
            headers = [h for h in headers if h[0] not in bad_headers]
        # Flatten the value lists into separate (name, value) pairs.
        out += [(name, val) for name, vals in headers for val in vals]
        if self._cookies:
            for c in self._cookies.values():
                out.append(('Set-Cookie', c.OutputString()))
        return out

    content_type = HeaderProperty('Content-Type')
    content_length = HeaderProperty('Content-Length', reader=int)
    expires = HeaderProperty('Expires',
        reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
        writer=lambda x: http_date(x))

    @property
    def charset(self, default='UTF-8'):
        """ Return the charset specified in the content-type header (default: utf8). """
        # NOTE(review): the ``default`` parameter on a @property getter can
        # never be passed by callers; it only serves as a local constant.
        if 'charset=' in self.content_type:
            return self.content_type.split('charset=')[-1].split(';')[0].strip()
        return default

    def set_cookie(self, name, value, secret=None, **options):
        ''' Create a new cookie or replace an old one. If the `secret` parameter is
            set, create a `Signed Cookie` (described below).

            :param name: the name of the cookie.
            :param value: the value of the cookie.
            :param secret: a signature key required for signed cookies.

            Additionally, this method accepts all RFC 2109 attributes that are
            supported by :class:`cookie.Morsel`, including:

            :param max_age: maximum age in seconds. (default: None)
            :param expires: a datetime object or UNIX timestamp. (default: None)
            :param domain: the domain that is allowed to read the cookie.
              (default: current domain)
            :param path: limits the cookie to a given path (default: current path)
            :param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side javascript to read this cookie
              (default: off, requires Python 2.6 or newer).

            If neither `expires` nor `max_age` is set (default), the cookie will
            expire at the end of the browser session (as soon as the browser
            window is closed).

            Signed cookies may store any pickle-able object and are
            cryptographically signed to prevent manipulation. Keep in mind that
            cookies are limited to 4kb in most browsers.

            Warning: Signed cookies are not encrypted (the client can still see
            the content) and not copy-protected (the client can restore an old
            cookie). The main intention is to make pickling and unpickling
            save, not to store secret information at client side.
        '''
        if not self._cookies:
            self._cookies = SimpleCookie()

        if secret:
            value = touni(cookie_encode((name, value), secret))
        elif not isinstance(value, basestring):
            raise TypeError('Secret key missing for non-string Cookie.')

        # Most browsers cap cookies at 4kb; refuse anything larger.
        if len(value) > 4096: raise ValueError('Cookie value to long.')
        self._cookies[name] = value

        for key, value in options.items():
            if key == 'max_age':
                if isinstance(value, timedelta):
                    value = value.seconds + value.days * 24 * 3600
            if key == 'expires':
                # Normalize datetime/timestamp values to an HTTP date string.
                if isinstance(value, (datedate, datetime)):
                    value = value.timetuple()
                elif isinstance(value, (int, float)):
                    value = time.gmtime(value)
                value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
            # Morsel keys use dashes (max-age), python identifiers use '_'.
            self._cookies[name][key.replace('_', '-')] = value

    def delete_cookie(self, key, **kwargs):
        ''' Delete a cookie. Be sure to use the same `domain` and `path`
            settings as used to create the cookie. '''
        # Overwrite with an empty, already-expired cookie.
        kwargs['max_age'] = -1
        kwargs['expires'] = 0
        self.set_cookie(key, '', **kwargs)

    def __repr__(self):
        out = ''
        for name, value in self.headerlist:
            out += '%s: %s\n' % (name.title(), value.strip())
        return out
def _local_property():
ls = threading.local()
def fget(self):
try: return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(self, value): ls.var = value
def fdel(self): del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
    ''' A thread-local subclass of :class:`BaseRequest` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`request`). If accessed during a
        request/response cycle, this instance always refers to the *current*
        request (even on a multithreaded server). '''
    #: Re-binding to a new environ simply re-runs __init__.
    bind = BaseRequest.__init__
    #: The environ (the only real attribute) is stored per-thread.
    environ = _local_property()
class LocalResponse(BaseResponse):
    ''' A thread-local subclass of :class:`BaseResponse` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`response`). Its attributes are used
        to build the HTTP response at the end of the request/response cycle.
    '''
    #: Re-binding (resetting) the response simply re-runs __init__.
    bind = BaseResponse.__init__
    # All mutable response state lives in thread-local storage so that
    # concurrent requests cannot interfere with each other.
    _status_line = _local_property()
    _status_code = _local_property()
    _cookies = _local_property()
    _headers = _local_property()
    body = _local_property()
#: Convenience aliases for the default request/response implementations.
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
    """ A :class:`Response` that doubles as an exception (it inherits
        :class:`BottleException`), so it can be raised inside a route
        callback to end request handling early. """

    def __init__(self, body='', status=None, headers=None, **more_headers):
        super(HTTPResponse, self).__init__(body, status, headers, **more_headers)

    def apply(self, response):
        """ Copy this response's state onto *response* (usually the
            thread-local response object). """
        response._status_code = self._status_code
        response._status_line = self._status_line
        response._headers = self._headers
        response._cookies = self._cookies
        response.body = self.body
class HTTPError(HTTPResponse):
    """ An :class:`HTTPResponse` that represents an error, optionally
        carrying the original exception and a formatted traceback. """
    default_status = 500

    def __init__(self, status=None, body=None, exception=None, traceback=None,
                 **options):
        #: The original exception instance, if any.
        self.exception = exception
        #: A preformatted traceback string, if any.
        self.traceback = traceback
        super(HTTPError, self).__init__(body, status, **options)
###############################################################################
# Plugins ######################################################################
###############################################################################
# Raised for plugin-related errors (e.g. conflicting or misconfigured plugins).
class PluginError(BottleException): pass
class JSONPlugin(object):
    ''' Serialize dict return values (and dict bodies of HTTPResponse
        instances) to JSON and set the Content-Type header accordingly. '''
    name = 'json'
    api = 2

    def __init__(self, json_dumps=json_dumps):
        self.json_dumps = json_dumps

    def apply(self, callback, route):
        serializer = self.json_dumps
        if not serializer:
            return callback

        def wrapper(*args, **kwargs):
            try:
                result = callback(*args, **kwargs)
            except HTTPError:
                result = _e()

            if isinstance(result, dict):
                # Attempt to serialize first; a failure raises *before* the
                # content type is changed.
                encoded = serializer(result)
                response.content_type = 'application/json'
                return encoded
            if isinstance(result, HTTPResponse) and isinstance(result.body, dict):
                result.body = serializer(result.body)
                result.content_type = 'application/json'
            return result

        return wrapper
class TemplatePlugin(object):
    ''' This plugin applies the :func:`view` decorator to all routes with a
        `template` config parameter. If the parameter is a tuple, the second
        element must be a dict with additional options (e.g. `template_engine`)
        or default variables for the template. '''
    name = 'template'
    api = 2

    def apply(self, callback, route):
        setting = route.config.get('template')
        # A (name, options) pair: pass the options dict as keyword arguments.
        if isinstance(setting, (tuple, list)) and len(setting) == 2:
            return view(setting[0], **setting[1])(callback)
        # A bare template name.
        if isinstance(setting, str):
            return view(setting)(callback)
        # No template configured: leave the callback untouched.
        return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
    def __init__(self, name, impmask):
        ''' Create a virtual package that redirects imports (see PEP 302). '''
        self.name = name
        #: Mask with one '%s' placeholder for the real module name.
        self.impmask = impmask
        # Register (or reuse) the virtual package module under `name`.
        self.module = sys.modules.setdefault(name, imp.new_module(name))
        self.module.__dict__.update({'__file__': __file__, '__path__': [],
                                    '__all__': [], '__loader__': self})
        # Install this object as a PEP 302 import hook.
        sys.meta_path.append(self)

    def find_module(self, fullname, path=None):
        # Only handle direct submodules of the virtual package.
        if '.' not in fullname: return
        packname = fullname.rsplit('.', 1)[0]
        if packname != self.name: return
        return self

    def load_module(self, fullname):
        if fullname in sys.modules: return sys.modules[fullname]
        modname = fullname.rsplit('.', 1)[1]
        # Import the real module and alias it under the virtual name.
        realname = self.impmask % modname
        __import__(realname)
        module = sys.modules[fullname] = sys.modules[realname]
        setattr(self.module, modname, module)
        module.__loader__ = self
        return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
    """ This dict stores multiple values per key, but behaves exactly like a
        normal dict in that it returns only the newest value for any given key.
        There are special methods available to access the full list of values.
    """

    def __init__(self, *a, **k):
        # Internal storage maps each key to a *list* of values.
        self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())

    def __len__(self): return len(self.dict)
    def __iter__(self): return iter(self.dict)
    def __contains__(self, key): return key in self.dict
    def __delitem__(self, key): del self.dict[key]
    # Plain item access returns the newest (last appended) value.
    def __getitem__(self, key): return self.dict[key][-1]
    def __setitem__(self, key, value): self.append(key, value)
    def keys(self): return self.dict.keys()

    if py3k:
        # Python 3: generators for the accessors, 2.x-style iter* aliases.
        def values(self): return (v[-1] for v in self.dict.values())
        def items(self): return ((k, v[-1]) for k, v in self.dict.items())
        def allitems(self):
            return ((k, v) for k, vl in self.dict.items() for v in vl)
        iterkeys = keys
        itervalues = values
        iteritems = items
        iterallitems = allitems

    else:
        # Python 2: lists for the plain accessors, generators for iter*.
        def values(self): return [v[-1] for v in self.dict.values()]
        def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
        def iterkeys(self): return self.dict.iterkeys()
        def itervalues(self): return (v[-1] for v in self.dict.itervalues())
        def iteritems(self):
            return ((k, v[-1]) for k, v in self.dict.iteritems())
        def iterallitems(self):
            return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
        def allitems(self):
            return [(k, v) for k, vl in self.dict.iteritems() for v in vl]

    def get(self, key, default=None, index=-1, type=None):
        ''' Return the most recent value for a key.

            :param default: The default value to be returned if the key is not
                   present or the type conversion fails.
            :param index: An index for the list of available values.
            :param type: If defined, this callable is used to cast the value
                    into a specific type. Exception are suppressed and result in
                    the default value to be returned.
        '''
        try:
            val = self.dict[key][index]
            return type(val) if type else val
        except Exception:
            # Missing key, bad index or a failed type conversion all fall
            # back to the default value.
            pass
        return default

    def append(self, key, value):
        ''' Add a new value to the list of values for this key. '''
        self.dict.setdefault(key, []).append(value)

    def replace(self, key, value):
        ''' Replace the list of values with a single value. '''
        self.dict[key] = [value]

    def getall(self, key):
        ''' Return a (possibly empty) list of values for a key. '''
        return self.dict.get(key) or []

    #: Aliases for WTForms to mimic other multi-dict APIs (Django)
    getone = get
    getlist = getall
class FormsDict(MultiDict):
    ''' This :class:`MultiDict` subclass is used to store request form data.
        Additionally to the normal dict-like item access methods (which return
        unmodified data as native strings), this container also supports
        attribute-like access to its values. Attributes are automatically de-
        or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
        attributes default to an empty string. '''

    #: Encoding used for attribute values.
    input_encoding = 'utf8'
    #: If true (default), unicode strings are first encoded with `latin1`
    #: and then decoded to match :attr:`input_encoding`.
    recode_unicode = True

    def _fix(self, s, encoding=None):
        if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
            # WSGI delivers latin1-decoded text; undo that and decode
            # using the real input encoding instead.
            return s.encode('latin1').decode(encoding or self.input_encoding)
        elif isinstance(s, bytes): # Python 2 WSGI
            return s.decode(encoding or self.input_encoding)
        else:
            return s

    def decode(self, encoding=None):
        ''' Returns a copy with all keys and values de- or recoded to match
            :attr:`input_encoding`. Some libraries (e.g. WTForms) want a
            unicode dictionary. '''
        copy = FormsDict()
        enc = copy.input_encoding = encoding or self.input_encoding
        # The copy holds already-decoded text; prevent a second recode.
        copy.recode_unicode = False
        for key, value in self.allitems():
            copy.append(self._fix(key, enc), self._fix(value, enc))
        return copy

    def getunicode(self, name, default=None, encoding=None):
        ''' Return the value as a unicode string, or the default. '''
        try:
            return self._fix(self[name], encoding)
        except (UnicodeError, KeyError):
            return default

    def __getattr__(self, name, default=unicode()):
        # Without this guard, pickle generates a cryptic TypeError:
        if name.startswith('__') and name.endswith('__'):
            return super(FormsDict, self).__getattr__(name)
        return self.getunicode(name, default=default)
class HeaderDict(MultiDict):
    """ A case-insensitive version of :class:`MultiDict` that defaults to
        replace the old value instead of appending it. """

    def __init__(self, *a, **ka):
        self.dict = {}
        if a or ka:
            self.update(*a, **ka)

    # All lookups normalize the key through _hkey() for case-insensitivity.

    def __contains__(self, key):
        return _hkey(key) in self.dict

    def __delitem__(self, key):
        del self.dict[_hkey(key)]

    def __getitem__(self, key):
        return self.dict[_hkey(key)][-1]

    def __setitem__(self, key, value):
        self.dict[_hkey(key)] = [str(value)]

    def append(self, key, value):
        self.dict.setdefault(_hkey(key), []).append(str(value))

    def replace(self, key, value):
        self.dict[_hkey(key)] = [str(value)]

    def getall(self, key):
        return self.dict.get(_hkey(key)) or []

    def get(self, key, default=None, index=-1):
        return MultiDict.get(self, _hkey(key), default, index)

    def filter(self, names):
        # Remove every listed header that is present.
        for name in (_hkey(n) for n in names):
            if name in self.dict:
                del self.dict[name]
class WSGIHeaderDict(DictMixin):
    ''' This dict-like class wraps a WSGI environ dict and provides convenient
        access to HTTP_* fields. Keys and values are native strings
        (2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
        environment contains non-native string values, these are de- or encoded
        using a lossless 'latin1' character set.

        The API will remain stable even on changes to the relevant PEPs.
        Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
        that uses non-native strings.)
    '''
    #: List of keys that do not have a ``HTTP_`` prefix.
    cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')

    def __init__(self, environ):
        self.environ = environ

    def _ekey(self, key):
        ''' Translate header field name to CGI/WSGI environ key. '''
        key = key.replace('-','_').upper()
        if key in self.cgikeys:
            return key
        return 'HTTP_' + key

    def raw(self, key, default=None):
        ''' Return the header value as is (may be bytes or unicode). '''
        return self.environ.get(self._ekey(key), default)

    def __getitem__(self, key):
        # tonat() (helper defined elsewhere in this file) converts to the
        # native str type; latin1 is lossless per the class docstring.
        return tonat(self.environ[self._ekey(key)], 'latin1')

    def __setitem__(self, key, value):
        raise TypeError("%s is read-only." % self.__class__)

    def __delitem__(self, key):
        raise TypeError("%s is read-only." % self.__class__)

    def __iter__(self):
        # Yield header names in HTTP form ('Content-Type' style).
        for key in self.environ:
            if key[:5] == 'HTTP_':
                yield key[5:].replace('_', '-').title()
            elif key in self.cgikeys:
                yield key.replace('_', '-').title()

    def keys(self): return [x for x in self]
    def __len__(self): return len(self.keys())
    def __contains__(self, key): return self._ekey(key) in self.environ
class ConfigDict(dict):
    ''' A dict-like configuration storage with additional support for
        namespaces, validators, meta-data, on_change listeners and more.
    '''

    __slots__ = ('_meta', '_on_change')

    def __init__(self):
        self._meta = {}
        # Hook called with (key, value) on every change; value is None for
        # deletions. Defaults to a no-op.
        self._on_change = lambda name, value: None

    def load_config(self, filename):
        ''' Load values from an *.ini style config file.

            If the config file contains sections, their names are used as
            namespaces for the values within. The two special sections
            ``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
        '''
        conf = ConfigParser()
        conf.read(filename)
        for section in conf.sections():
            for key, value in conf.items(section):
                if section not in ('DEFAULT', 'bottle'):
                    key = section + '.' + key
                self[key] = value
        return self

    def load_dict(self, source, namespace=''):
        ''' Load values from a dictionary structure. Nesting can be used to
            represent namespaces.

            >>> c = ConfigDict()
            >>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
            {'some.namespace.key': 'value'}
        '''
        for key, value in source.items():
            if not isinstance(key, str):
                raise TypeError('Key has type %r (not a string)' % type(key))
            nskey = (namespace + '.' + key).strip('.')
            if isinstance(value, dict):
                # Nested dicts extend the namespace prefix.
                self.load_dict(value, namespace=nskey)
            else:
                self[nskey] = value
        return self

    def update(self, *a, **ka):
        ''' If the first parameter is a string, all keys are prefixed with this
            namespace. Apart from that it works just as the usual dict.update().

            Example: ``update('some.namespace', key='value')`` '''
        prefix = ''
        if a and isinstance(a[0], str):
            prefix = a[0].strip('.') + '.'
            a = a[1:]
        for key, value in dict(*a, **ka).items():
            self[prefix+key] = value

    def setdefault(self, key, value):
        ''' Return the value for *key* if present; otherwise store *value*
            and return it (mirrors :meth:`dict.setdefault`). '''
        if key not in self:
            self[key] = value
        # BUGFIX: the previous version returned None, breaking the
        # dict.setdefault() contract of returning the stored value.
        return self[key]

    def __setitem__(self, key, value):
        if not isinstance(key, str):
            raise TypeError('Key has type %r (not a string)' % type(key))
        # Run the value through the registered 'filter' meta callable, if any.
        value = self.meta_get(key, 'filter', lambda x: x)(value)
        if key in self and self[key] is value:
            return  # Identical object already stored: skip the change hook.
        self._on_change(key, value)
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        self._on_change(key, None)
        dict.__delitem__(self, key)

    def meta_get(self, key, metafield, default=None):
        ''' Return the value of a meta field for a key. '''
        return self._meta.get(key, {}).get(metafield, default)

    def meta_set(self, key, metafield, value):
        ''' Set the meta field for a key to a new value. This triggers the
            on-change handler for existing keys. '''
        self._meta.setdefault(key, {})[metafield] = value
        if key in self:
            # Re-assign so filters/on_change see the (possibly new) meta.
            self[key] = self[key]

    def meta_list(self, key):
        ''' Return an iterable of meta field names defined for a key. '''
        return self._meta.get(key, {}).keys()
class AppStack(list):
    """ A stack-like list. Calling it returns the head of the stack. """

    def __call__(self):
        """ Return the application on top of the stack (the current default). """
        return self[-1]

    def push(self, value=None):
        """ Push a :class:`Bottle` application onto the stack. Anything that
            is not already a Bottle instance is replaced by a fresh one. """
        app = value if isinstance(value, Bottle) else Bottle()
        self.append(app)
        return app
class WSGIFileWrapper(object):
    ''' Iterate a file(-like) object in fixed-size chunks, mirroring the
        wrapped object's common file methods on the wrapper itself. '''

    def __init__(self, fp, buffer_size=1024*64):
        self.fp = fp
        self.buffer_size = buffer_size
        # Expose the wrapped file's methods directly on the wrapper
        # (e.g. fileno() so servers can probe for sendfile support).
        for name in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
            if hasattr(fp, name):
                setattr(self, name, getattr(fp, name))

    def __iter__(self):
        read, size = self.read, self.buffer_size
        while True:
            chunk = read(size)
            if not chunk:
                break
            yield chunk
class _closeiter(object):
    ''' This only exists to be able to attach a .close method to iterators that
        do not support attribute assignment (most of itertools). '''

    def __init__(self, iterator, close=None):
        self.iterator = iterator
        # makelist() (defined elsewhere in this file) presumably normalizes
        # None/a single callable/a list into a list of callbacks — confirm.
        self.close_callbacks = makelist(close)

    def __iter__(self):
        return iter(self.iterator)

    def close(self):
        # Invoke all registered close callbacks in order.
        for func in self.close_callbacks:
            func()
class ResourceManager(object):
''' This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
'''
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = open
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
''' Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
'''
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
''' Iterate over all existing files in all registered paths. '''
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
''' Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
returend. Symlinks are followed. The result is cached to speed up
future lookups. '''
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
''' Find a resource and return a file object, or raise IOError. '''
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
class FileUpload(object):

    def __init__(self, fileobj, name, filename, headers=None):
        ''' Wrapper for file uploads. '''
        #: Open file(-like) object (BytesIO buffer or temporary file)
        self.file = fileobj
        #: Name of the upload form field
        self.name = name
        #: Raw filename as sent by the client (may contain unsafe characters)
        self.raw_filename = filename
        #: A :class:`HeaderDict` with additional headers (e.g. content-type)
        self.headers = HeaderDict(headers) if headers else HeaderDict()

    content_type = HeaderProperty('Content-Type')
    content_length = HeaderProperty('Content-Length', reader=int, default=-1)

    @cached_property
    def filename(self):
        ''' Name of the file on the client file system, but normalized to ensure
            file system compatibility. An empty filename is returned as 'empty'.

            Only ASCII letters, digits, dashes, underscores and dots are
            allowed in the final filename. Accents are removed, if possible.
            Whitespace is replaced by a single dash. Leading or tailing dots
            or dashes are removed. The filename is limited to 255 characters.
        '''
        fname = self.raw_filename
        if not isinstance(fname, unicode):
            fname = fname.decode('utf8', 'ignore')
        # Decompose accented characters, then drop anything outside ASCII.
        fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII')
        # Strip any client-supplied directory part (also Windows-style '\\').
        fname = os.path.basename(fname.replace('\\', os.path.sep))
        fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
        fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
        return fname[:255] or 'empty'

    def _copy_file(self, fp, chunk_size=2**16):
        # Copy in chunks, then restore the original read position.
        read, write, offset = self.file.read, fp.write, self.file.tell()
        while 1:
            buf = read(chunk_size)
            if not buf: break
            write(buf)
        self.file.seek(offset)

    def save(self, destination, overwrite=False, chunk_size=2**16):
        ''' Save file to disk or copy its content to an open file(-like) object.
            If *destination* is a directory, :attr:`filename` is added to the
            path. Existing files are not overwritten by default (IOError).

            :param destination: File path, directory or file(-like) object.
            :param overwrite: If True, replace existing files. (default: False)
            :param chunk_size: Bytes to read at a time. (default: 64kb)
        '''
        if isinstance(destination, basestring): # Except file-likes here
            if os.path.isdir(destination):
                destination = os.path.join(destination, self.filename)
            if not overwrite and os.path.exists(destination):
                raise IOError('File exists.')
            with open(destination, 'wb') as fp:
                self._copy_file(fp, chunk_size)
        else:
            self._copy_file(destination, chunk_size)
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
    """ Aborts execution and causes a HTTP error. """
    raise HTTPError(code, text)
def redirect(url, code=None):
    """ Aborts execution and causes a 303 or 302 redirect, depending on
        the HTTP protocol version. """
    if not code:
        # 303 See Other only exists in HTTP/1.1; older clients get 302.
        code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
    # Preserve headers/cookies already set on the thread-local response.
    res = response.copy(cls=HTTPResponse)
    res.status = code
    res.body = ""
    # Resolve relative redirect targets against the current request URL.
    res.set_header('Location', urljoin(request.url, url))
    raise res
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
''' Yield chunks from a range in a file. No chunk is bigger than maxread.'''
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):
    """ Open a file in a safe way and return :exc:`HTTPResponse` with status
        code 200, 304, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
        ``Content-Length`` and ``Last-Modified`` headers are set if possible.
        Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
        requests.

        :param filename: Name or path of the file to send.
        :param root: Root path for file lookups. Should be an absolute directory
            path.
        :param mimetype: Defines the content-type header (default: guess from
            file extension)
        :param download: If True, ask the browser to open a `Save as...` dialog
            instead of opening the file with the associated program. You can
            specify a custom filename as a string. If not specified, the
            original filename is used (default: False).
        :param charset: The charset to use for files with a ``text/*``
            mime-type. (default: UTF-8)
    """
    root = os.path.abspath(root) + os.sep
    filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
    headers = dict()

    # Reject any resolved path that escapes the root directory (e.g. '..').
    if not filename.startswith(root):
        return HTTPError(403, "Access denied.")
    if not os.path.exists(filename) or not os.path.isfile(filename):
        return HTTPError(404, "File does not exist.")
    if not os.access(filename, os.R_OK):
        return HTTPError(403, "You do not have permission to access this file.")

    if mimetype == 'auto':
        mimetype, encoding = mimetypes.guess_type(filename)
        if encoding: headers['Content-Encoding'] = encoding

    if mimetype:
        # Append the charset to text/* types that do not already carry one.
        if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
            mimetype += '; charset=%s' % charset
        headers['Content-Type'] = mimetype

    if download:
        download = os.path.basename(filename if download == True else download)
        headers['Content-Disposition'] = 'attachment; filename="%s"' % download

    stats = os.stat(filename)
    headers['Content-Length'] = clen = stats.st_size
    lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
    headers['Last-Modified'] = lm

    # Honour If-Modified-Since: answer 304 when the file is unchanged.
    ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
    if ims:
        ims = parse_date(ims.split(";")[0].strip())
    if ims is not None and ims >= int(stats.st_mtime):
        headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
        return HTTPResponse(status=304, **headers)

    # HEAD requests get headers only; no body is opened or sent.
    body = '' if request.method == 'HEAD' else open(filename, 'rb')

    headers["Accept-Ranges"] = "bytes"
    ranges = request.environ.get('HTTP_RANGE')
    if 'HTTP_RANGE' in request.environ:
        ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
        if not ranges:
            return HTTPError(416, "Requested Range Not Satisfiable")
        # Only the first requested range is served (no multipart ranges).
        offset, end = ranges[0]
        headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
        headers["Content-Length"] = str(end-offset)
        if body: body = _file_iter_range(body, offset, end-offset)
        return HTTPResponse(body, status=206, **headers)
    return HTTPResponse(body, **headers)
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
    """ Change the debug level.
        There is only one debug level supported at the moment."""
    global DEBUG
    # Re-enable warnings that are hidden by default while debugging.
    if mode: warnings.simplefilter('default')
    DEBUG = bool(mode)
def http_date(value):
    # Accept date/datetime objects, numeric epochs, time tuples or
    # pre-formatted strings; return an RFC 1123 date string (GMT).
    if isinstance(value, (datedate, datetime)):
        value = value.utctimetuple()
    elif isinstance(value, (int, float)):
        value = time.gmtime(value)
    if not isinstance(value, basestring):
        value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
    return value
def parse_date(ims):
    """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
    try:
        parts = email.utils.parsedate_tz(ims)
        # mktime() treats the tuple as local *standard* time (isdst forced
        # to 0); subtracting time.timezone and the header's own UTC offset
        # yields a UTC epoch independent of the local zone.
        tz_offset = parts[9] or 0
        return time.mktime(parts[:8] + (0,)) - tz_offset - time.timezone
    except (TypeError, ValueError, IndexError, OverflowError):
        return None
def parse_auth(header):
    """ Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
    try:
        scheme, data = header.split(None, 1)
        if scheme.lower() != 'basic':
            return None
        # Decode 'user:password' from base64; split only on the first colon
        # so passwords may contain colons themselves.
        credentials = touni(base64.b64decode(tob(data)))
        user, pwd = credentials.split(':', 1)
        return user, pwd
    except (KeyError, ValueError):
        return None
def parse_range_header(header, maxlen=0):
    ''' Yield (start, end) ranges parsed from a HTTP Range header. Skip
        unsatisfiable ranges. The end index is non-inclusive. '''
    if not header or not header.startswith('bytes='):
        return
    for spec in header[6:].split(','):
        if '-' not in spec:
            continue
        first, last = spec.split('-', 1)
        try:
            if not first:       # bytes=-100 -> the final 100 bytes
                begin, stop = max(0, maxlen - int(last)), maxlen
            elif not last:      # bytes=100- -> everything from byte 100 on
                begin, stop = int(first), maxlen
            else:               # bytes=100-200 -> inclusive range 100..200
                begin, stop = int(first), min(int(last) + 1, maxlen)
            if 0 <= begin < stop <= maxlen:
                yield begin, stop
        except ValueError:
            pass
def _parse_qsl(qs):
    ''' Parse a query string into a list of (key, value) pairs. Accepts both
        '&' and ';' as separators; '+' decodes to a space. '''
    pairs = []
    for field in qs.replace(';', '&').split('&'):
        if not field:
            continue
        raw_key, _, raw_value = field.partition('=')
        key = urlunquote(raw_key.replace('+', ' '))
        value = urlunquote(raw_value.replace('+', ' '))
        pairs.append((key, value))
    return pairs
def _lscmp(a, b):
''' Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
    ''' Encode and sign a pickle-able object. Return a (byte) string '''
    # Format: b'!' + base64(hmac signature) + b'?' + base64(pickle payload).
    msg = base64.b64encode(pickle.dumps(data, -1))
    # NOTE(review): hmac.new() without digestmod relies on the historical
    # MD5 default; newer Python versions require an explicit digestmod.
    sig = base64.b64encode(hmac.new(tob(key), msg).digest())
    return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
    ''' Verify and decode an encoded string. Return an object or None.'''
    data = tob(data)
    if cookie_is_encoded(data):
        sig, msg = data.split(tob('?'), 1)
        # Constant-time comparison of stored vs. recomputed signature.
        if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
            # SECURITY: pickle.loads() can execute arbitrary code on crafted
            # input; it is only safe here because the HMAC check above
            # authenticates the payload first.
            return pickle.loads(base64.b64decode(msg))
    return None
def cookie_is_encoded(data):
    ''' Return True if the argument looks like a encoded cookie.'''
    # Matches the '!sig?msg' layout produced by cookie_encode().
    return bool(data.startswith(tob('!')) and tob('?') in data)
def html_escape(string):
    ''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
    # BUGFIX: the previous replacement strings were garbled so that each
    # character mapped to itself, making the function a no-op. Restore the
    # proper HTML entities. '&' must be replaced first so the entities
    # introduced by the later replacements are not double-escaped.
    return string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\
                 .replace('"', '&quot;').replace("'", '&#039;')
def html_quote(string):
    ''' Escape and quote a string to be used as an HTTP attribute.'''
    # BUGFIX: the previous replacement strings were garbled so newlines,
    # carriage returns and tabs mapped to plain whitespace. Restore the
    # numeric character references so the quoted value stays on one line.
    return '"%s"' % html_escape(string).replace('\n', '&#10;')\
                    .replace('\r', '&#13;').replace('\t', '&#9;')
def yieldroutes(func):
    """ Return a generator for routes that match the signature (name, args)
        of the func parameter. This may yield more than one route if the function
        takes optional keyword arguments. The output is best described by example::

          a()         -> '/a'
          b(x, y)     -> '/b/<x>/<y>'
          c(x, y=5)   -> '/c/<x>' and '/c/<x>/<y>'
          d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
    """
    spec = getargspec(func)
    names, defaults = spec[0], spec[3] or []
    required = len(names) - len(defaults)
    # Double underscores in the function name become path separators.
    route = '/' + func.__name__.replace('__', '/').lstrip('/')
    route += ''.join('/<%s>' % name for name in names[:required])
    yield route
    # Each optional argument adds one more, longer route variant.
    for name in names[required:]:
        route += '/<%s>' % name
        yield route
def path_shift(script_name, path_info, shift=1):
    ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.

        :return: The modified paths.
        :param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
        :param shift: The number of path fragments to shift. May be negative to
          change the shift direction. (default: 1)
    '''
    if shift == 0:
        return script_name, path_info
    frags = path_info.strip('/').split('/')
    script = script_name.strip('/').split('/')
    # split('/') on an empty string yields ['']; treat that as "no fragments".
    if frags == ['']: frags = []
    if script == ['']: script = []
    if 0 < shift <= len(frags):
        # Move the first `shift` PATH_INFO fragments onto SCRIPT_NAME.
        script = script + frags[:shift]
        frags = frags[shift:]
    elif -len(script) <= shift < 0:
        # Move the last `-shift` SCRIPT_NAME fragments back onto PATH_INFO.
        frags = script[shift:] + frags
        script = script[:shift]
    else:
        side = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
        raise AssertionError("Cannot shift. Nothing left from %s" % side)
    new_script_name = '/' + '/'.join(script)
    new_path_info = '/' + '/'.join(frags)
    # Preserve a trailing slash on PATH_INFO if one was present.
    if path_info.endswith('/') and frags:
        new_path_info += '/'
    return new_script_name, new_path_info
def auth_basic(check, realm="private", text="Access denied"):
    ''' Callback decorator to require HTTP auth (basic).
        TODO: Add route(check_auth=...) parameter. '''
    def decorator(func):
        def wrapper(*args, **kwargs):
            user, password = request.auth or (None, None)
            # Reject both missing credentials and failed checks with a 401
            # carrying the proper challenge header.
            if user is None or not check(user, password):
                err = HTTPError(401, text)
                err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
                return err
            return func(*args, **kwargs)
        return wrapper
    return decorator
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
    ''' Return a callable that relays calls to the current default app. '''
    @functools.wraps(getattr(Bottle, name))
    def wrapper(*a, **ka):
        # Resolve the default app at call time, not at import time.
        return getattr(app(), name)(*a, **ka)
    return wrapper
# Module-level shortcuts: each forwards to the method of the same name on
# the current default application (note: `url` maps to `get_url`).
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
    ''' Base class for server adapters: stores host, port and any extra
        options; subclasses implement :meth:`run`. '''
    quiet = False

    def __init__(self, host='127.0.0.1', port=8080, **options):
        self.host = host
        self.port = int(port)
        self.options = options

    def run(self, handler): # pragma: no cover
        pass

    def __repr__(self):
        args = ', '.join('%s=%s' % (k, repr(v))
                         for k, v in self.options.items())
        return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
    ''' Run the application once through wsgiref's CGIHandler. '''
    quiet = True
    def run(self, handler): # pragma: no cover
        from wsgiref.handlers import CGIHandler
        def fixed_environ(environ, start_response):
            # Make sure PATH_INFO is always present (defaults to '').
            environ.setdefault('PATH_INFO', '')
            return handler(environ, start_response)
        CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
    ''' FastCGI adapter based on the flup package. '''
    def run(self, handler): # pragma: no cover
        import flup.server.fcgi
        # Bind to host:port unless the caller supplied a bindAddress option.
        self.options.setdefault('bindAddress', (self.host, self.port))
        flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
    ''' Adapter for the stdlib wsgiref reference server. '''
    def run(self, app): # pragma: no cover
        from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
        from wsgiref.simple_server import make_server
        import socket

        class FixedHandler(WSGIRequestHandler):
            def address_string(self): # Prevent reverse DNS lookups please.
                return self.client_address[0]
            def log_request(*args, **kw):
                # No explicit 'self' parameter: the handler instance arrives
                # via *args, while 'self' below resolves (via closure) to the
                # enclosing ServerAdapter — that is where 'quiet' lives.
                if not self.quiet:
                    return WSGIRequestHandler.log_request(*args, **kw)

        handler_cls = self.options.get('handler_class', FixedHandler)
        server_cls = self.options.get('server_class', WSGIServer)

        if ':' in self.host: # Fix wsgiref for IPv6 addresses.
            if getattr(server_cls, 'address_family') == socket.AF_INET:
                # Subclass on the fly to switch the address family to IPv6.
                class server_cls(server_cls):
                    address_family = socket.AF_INET6

        srv = make_server(self.host, self.port, app, server_cls, handler_cls)
        srv.serve_forever()
class CherryPyServer(ServerAdapter):
    ''' Adapter for the CherryPy WSGI server. '''
    def run(self, handler): # pragma: no cover
        from cherrypy import wsgiserver
        self.options['bind_addr'] = (self.host, self.port)
        self.options['wsgi_app'] = handler

        # Pop the SSL options before calling the constructor; they are set
        # as attributes on the server object below instead.
        certfile = self.options.get('certfile')
        if certfile:
            del self.options['certfile']
        keyfile = self.options.get('keyfile')
        if keyfile:
            del self.options['keyfile']

        server = wsgiserver.CherryPyWSGIServer(**self.options)
        if certfile:
            server.ssl_certificate = certfile
        if keyfile:
            server.ssl_private_key = keyfile

        try:
            server.start()
        finally:
            # Always stop worker threads, even if start() raised.
            server.stop()
class WaitressServer(ServerAdapter):
    ''' Adapter for the waitress WSGI server. '''
    def run(self, handler):
        from waitress import serve
        serve(handler, host=self.host, port=self.port)
class PasteServer(ServerAdapter):
    ''' Adapter for the paste.httpserver WSGI server. '''
    def run(self, handler): # pragma: no cover
        from paste import httpserver
        from paste.translogger import TransLogger
        # Wrap the app in an access logger; console output follows `quiet`.
        handler = TransLogger(handler, setup_console_handler=(not self.quiet))
        httpserver.serve(handler, host=self.host, port=str(self.port),
                         **self.options)
class MeinheldServer(ServerAdapter):
    ''' Adapter for the meinheld server. '''
    def run(self, handler):
        from meinheld import server
        server.listen((self.host, self.port))
        server.run(handler)
class FapwsServer(ServerAdapter):
    """ Extremely fast webserver using libev. See http://www.fapws.org/ """
    def run(self, handler): # pragma: no cover
        import fapws._evwsgi as evwsgi
        from fapws import base, config
        port = self.port
        if float(config.SERVER_IDENT[-2:]) > 0.4:
            # fapws3 silently changed its API in 0.5
            port = str(port)
        evwsgi.start(self.host, port)
        # fapws3 never releases the GIL. Complain upstream. I tried. No luck.
        if 'BOTTLE_CHILD' in os.environ and not self.quiet:
            _stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
            _stderr(" (Fapws3 breaks python thread support)\n")
        evwsgi.set_base_module(base)
        def app(environ, start_response):
            # fapws runs single-process; advertise that in the environ.
            environ['wsgi.multiprocess'] = False
            return handler(environ, start_response)
        evwsgi.wsgi_cb(('', app))
        evwsgi.run()
class TornadoServer(ServerAdapter):
    """ The super hyped asynchronous server by facebook. Untested. """
    def run(self, handler): # pragma: no cover
        import tornado.wsgi, tornado.httpserver, tornado.ioloop
        # Wrap the synchronous WSGI app so tornado's async server can call it.
        wsgi_container = tornado.wsgi.WSGIContainer(handler)
        http_server = tornado.httpserver.HTTPServer(wsgi_container)
        http_server.listen(port=self.port, address=self.host)
        tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
    """ Adapter for Google App Engine. """
    quiet = True
    def run(self, handler):
        from google.appengine.ext.webapp import util
        # A main() function in the handler script enables 'App Caching'.
        # Make sure one exists; this _really_ improves performance.
        main_module = sys.modules.get('__main__')
        if main_module and not hasattr(main_module, 'main'):
            main_module.main = lambda: util.run_wsgi_app(handler)
        util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from twisted.web import server, wsgi
        from twisted.python.threadpool import ThreadPool
        from twisted.internet import reactor
        # WSGI apps are synchronous, so they run on a dedicated thread pool
        # that is torn down when the reactor shuts down.
        pool = ThreadPool()
        pool.start()
        reactor.addSystemEventTrigger('after', 'shutdown', pool.stop)
        resource = wsgi.WSGIResource(reactor, pool, handler)
        reactor.listenTCP(self.port, server.Site(resource), interface=self.host)
        reactor.run()
class DieselServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from diesel.protocols.wsgi import WSGIApplication
        # diesel binds the port itself and blocks inside run().
        WSGIApplication(handler, port=self.port).run()
class GeventServer(ServerAdapter):
    """ Untested. Options:

        * `fast` (default: False) uses libevent's http server, but has some
          issues: No streaming, no pipelining, no SSL.
        * See gevent.wsgi.WSGIServer() documentation for more options.
    """
    def run(self, handler):
        from gevent import wsgi, pywsgi, local
        # Without gevent.monkey.patch_all(), threading.local() is not
        # greenlet-local, so per-request state would leak between requests.
        if not isinstance(threading.local(), local.local):
            msg = "Bottle requires gevent.monkey.patch_all() (before import)"
            raise RuntimeError(msg)
        # Default to the pure-python pywsgi server unless fast=True was given.
        if not self.options.pop('fast', None): wsgi = pywsgi
        self.options['log'] = None if self.quiet else 'default'
        address = (self.host, self.port)
        server = wsgi.WSGIServer(address, handler, **self.options)
        if 'BOTTLE_CHILD' in os.environ:
            # Auto-reloader child: shut the server down cleanly on SIGINT.
            import signal
            signal.signal(signal.SIGINT, lambda s, f: server.stop())
        server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
    """ Adapter for the gevent-socketio server. """
    def run(self, handler):
        from socketio import server
        sio_server = server.SocketIOServer((self.host, self.port), handler,
                                           **self.options)
        sio_server.serve_forever()
class GunicornServer(ServerAdapter):
    """ Untested. See http://gunicorn.org/configure.html for options. """
    def run(self, handler):
        from gunicorn.app.base import Application
        settings = {'bind': "%s:%d" % (self.host, int(self.port))}
        settings.update(self.options)
        # Subclass Application just to inject our config dict and WSGI app.
        class _BottleGunicornApp(Application):
            def init(self, parser, opts, args):
                return settings
            def load(self):
                return handler
        _BottleGunicornApp().run()
class EventletServer(ServerAdapter):
    """ Untested.

        The ``log_output`` keyword only exists in newer eventlet releases;
        older versions raise TypeError and are served without it.
    """
    def run(self, handler):
        from eventlet import wsgi, listen
        # Bind the socket once and reuse it for the fallback call. The old
        # code called listen() a second time inside the except branch, which
        # could fail with EADDRINUSE because the first socket was still bound.
        sock = listen((self.host, self.port))
        try:
            wsgi.server(sock, handler, log_output=(not self.quiet))
        except TypeError:
            # Fallback, if we have old version of eventlet
            wsgi.server(sock, handler)
class RocketServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from rocket import Rocket
        interfaces = (self.host, self.port)
        rocket = Rocket(interfaces, 'wsgi', {'wsgi_app': handler})
        rocket.start()
class BjoernServer(ServerAdapter):
    """ Fast server written in C: https://github.com/jonashaag/bjoern """
    def run(self, handler):
        from bjoern import run as bjoern_run
        bjoern_run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
    """ Untested. """
    adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
    def run(self, handler):
        # Try each adapter in order and skip the ones whose backend package
        # is not installed (their run() imports it lazily).
        for adapter_cls in self.adapters:
            try:
                return adapter_cls(self.host, self.port, **self.options).run(handler)
            except ImportError:
                continue
#: Maps the names accepted by run(server=...) to their adapter classes.
server_names = {
    'cgi': CGIServer,
    'flup': FlupFCGIServer,
    'wsgiref': WSGIRefServer,
    'waitress': WaitressServer,
    'cherrypy': CherryPyServer,
    'paste': PasteServer,
    'fapws3': FapwsServer,
    'tornado': TornadoServer,
    'gae': AppEngineServer,
    'twisted': TwistedServer,
    'diesel': DieselServer,
    'meinheld': MeinheldServer,
    'gunicorn': GunicornServer,
    'eventlet': EventletServer,
    'gevent': GeventServer,
    'geventSocketIO':GeventSocketIOServer,
    'rocket': RocketServer,
    'bjoern' : BjoernServer,
    'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
    """ Import a module or fetch an object from a module.

        * ``package.module`` returns `module` as a module object.
        * ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
        * ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.

        The last form accepts not only function calls, but any type of
        expression. Keyword arguments passed to this function are available as
        local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``
    """
    if ':' in target:
        module, target = target.split(":", 1)
    else:
        module, target = target, None
    if module not in sys.modules:
        __import__(module)
    if not target:
        return sys.modules[module]
    # A plain identifier is a simple attribute lookup ...
    if target.isalnum():
        return getattr(sys.modules[module], target)
    # ... anything else is evaluated as an expression with the top-level
    # package available under its own name.
    package_name = module.split('.')[0]
    namespace[package_name] = sys.modules[package_name]
    return eval('%s.%s' % (module, target), namespace)
def load_app(target):
    """ Load a bottle application from a module and make sure that the import
        does not affect the current default application, but returns a separate
        application object. See :func:`load` for the target parameter. """
    global NORUN
    # Disable run() while importing, remembering the previous flag value.
    NORUN, nr_old = True, NORUN
    try:
        tmp = default_app.push() # Create a new "default application"
        rv = load(target)        # Import the target module
        # Prefer an explicitly returned callable over the implicit default app.
        return rv if callable(rv) else tmp
    finally:
        default_app.remove(tmp)  # Remove the temporary default application
        NORUN = nr_old
_debug = debug
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
        interval=1, reloader=False, quiet=False, plugins=None,
        debug=None, **kargs):
    """ Start a server instance. This method blocks until the server terminates.

        :param app: WSGI application or target string supported by
               :func:`load_app`. (default: :func:`default_app`)
        :param server: Server adapter to use. See :data:`server_names` keys
               for valid names or pass a :class:`ServerAdapter` subclass.
               (default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on
               all interfaces including the external one. (default: 127.0.0.1)
        :param port: Server port to bind to. Values below 1024 require root
               privileges. (default: 8080)
        :param reloader: Start auto-reloading server? (default: False)
        :param interval: Auto-reloader interval in seconds (default: 1)
        :param quiet: Suppress output to stdout and stderr? (default: False)
        :param plugins: Plugins to install on the application before serving.
        :param debug: If not None, set the global DEBUG flag accordingly.
        :param options: Options passed to the server adapter.
    """
    if NORUN: return
    # Reloader parent process: spawn a child copy of ourselves and keep a
    # lockfile alive as a heartbeat. The child exits with code 3 to request
    # a restart; any other exit code terminates the loop.
    if reloader and not os.environ.get('BOTTLE_CHILD'):
        try:
            lockfile = None
            fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
            os.close(fd) # We only need this file to exist. We never write to it
            while os.path.exists(lockfile):
                args = [sys.executable] + sys.argv
                environ = os.environ.copy()
                environ['BOTTLE_CHILD'] = 'true'
                environ['BOTTLE_LOCKFILE'] = lockfile
                p = subprocess.Popen(args, env=environ)
                while p.poll() is None: # Busy wait...
                    os.utime(lockfile, None) # I am alive!
                    time.sleep(interval)
                if p.poll() != 3:
                    if os.path.exists(lockfile): os.unlink(lockfile)
                    sys.exit(p.poll())
        except KeyboardInterrupt:
            pass
        finally:
            if os.path.exists(lockfile):
                os.unlink(lockfile)
        return
    # Normal (or reloader-child) code path: resolve app and server, then serve.
    try:
        if debug is not None: _debug(debug)
        app = app or default_app()
        if isinstance(app, basestring):
            app = load_app(app)
        if not callable(app):
            raise ValueError("Application is not callable: %r" % app)
        for plugin in plugins or []:
            app.install(plugin)
        # `server` may be a name, an import string, an adapter class or an
        # adapter instance; normalize step by step.
        if server in server_names:
            server = server_names.get(server)
        if isinstance(server, basestring):
            server = load(server)
        if isinstance(server, type):
            server = server(host=host, port=port, **kargs)
        if not isinstance(server, ServerAdapter):
            raise ValueError("Unknown or unsupported server: %r" % server)
        server.quiet = server.quiet or quiet
        if not server.quiet:
            _stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
            _stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
            _stderr("Hit Ctrl-C to quit.\n\n")
        if reloader:
            # Child side of the reloader: watch module files in the background
            # and exit with code 3 so the parent restarts us.
            lockfile = os.environ.get('BOTTLE_LOCKFILE')
            bgcheck = FileCheckerThread(lockfile, interval)
            with bgcheck:
                server.run(app)
            if bgcheck.status == 'reload':
                sys.exit(3)
        else:
            server.run(app)
    except KeyboardInterrupt:
        pass
    except (SystemExit, MemoryError):
        raise
    except:
        if not reloader: raise
        if not getattr(server, 'quiet', quiet):
            print_exc()
        # With the reloader active, crash-and-restart instead of dying.
        time.sleep(interval)
        sys.exit(3)
class FileCheckerThread(threading.Thread):
    ''' Interrupt main-thread as soon as a changed module file is detected,
        the lockfile gets deleted or gets too old. '''
    def __init__(self, lockfile, interval):
        threading.Thread.__init__(self)
        self.lockfile, self.interval = lockfile, interval
        #: Is one of 'reload', 'error' or 'exit'
        self.status = None
    def run(self):
        exists = os.path.exists
        mtime = lambda path: os.stat(path).st_mtime
        files = dict()
        # Snapshot the mtime of every currently loaded module file.
        for module in list(sys.modules.values()):
            path = getattr(module, '__file__', '')
            # Watch the source file, not the compiled .pyc/.pyo cache.
            if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
            if path and exists(path): files[path] = mtime(path)
        while not self.status:
            # A missing or stale lockfile means the parent process is gone.
            if not exists(self.lockfile)\
            or mtime(self.lockfile) < time.time() - self.interval - 5:
                self.status = 'error'
                thread.interrupt_main()
            for path, lmtime in list(files.items()):
                if not exists(path) or mtime(path) > lmtime:
                    self.status = 'reload'
                    thread.interrupt_main()
                    break
            time.sleep(self.interval)
    def __enter__(self):
        self.start()
    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.status: self.status = 'exit' # silent exit
        self.join()
        # Swallow the KeyboardInterrupt raised by interrupt_main() above.
        return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
    """ Raised by template adapters; rendered as a 500 Internal Server Error. """
    def __init__(self, message):
        HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
    """ Base class and minimal API for template adapters """
    extensions = ['tpl','html','thtml','stpl']  # tried in order by search()
    settings = {} #used in prepare()
    defaults = {} #used in render()
    def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
        """ Create a new template.
        If the source parameter (str or buffer) is missing, the name argument
        is used to guess a template filename. Subclasses can assume that
        self.source and/or self.filename are set. Both are strings.
        The lookup, encoding and settings parameters are stored as instance
        variables.
        The lookup parameter stores a list containing directory paths.
        The encoding parameter should be used to decode byte strings or files.
        The settings parameter contains a dict for engine-specific settings.
        """
        self.name = name
        # Accept file-like objects as source too.
        self.source = source.read() if hasattr(source, 'read') else source
        self.filename = source.filename if hasattr(source, 'filename') else None
        self.lookup = [os.path.abspath(x) for x in lookup]
        self.encoding = encoding
        self.settings = self.settings.copy() # Copy from class variable
        self.settings.update(settings) # Apply
        # Without inline source, resolve the name to a file via the lookup path.
        if not self.source and self.name:
            self.filename = self.search(self.name, self.lookup)
            if not self.filename:
                raise TemplateError('Template %s not found.' % repr(name))
        if not self.source and not self.filename:
            raise TemplateError('No template specified.')
        self.prepare(**self.settings)
    @classmethod
    def search(cls, name, lookup=[]):
        """ Search name in all directories specified in lookup.
        First without, then with common extensions. Return first hit. """
        if not lookup:
            depr('The template lookup path list should not be empty.', True) #0.12
            lookup = ['.']
        if os.path.isabs(name) and os.path.isfile(name):
            depr('Absolute template path names are deprecated.', True) #0.12
            return os.path.abspath(name)
        for spath in lookup:
            spath = os.path.abspath(spath) + os.sep
            fname = os.path.abspath(os.path.join(spath, name))
            # Refuse paths that escape the lookup directory (e.g. '../x').
            if not fname.startswith(spath): continue
            if os.path.isfile(fname): return fname
            for ext in cls.extensions:
                if os.path.isfile('%s.%s' % (fname, ext)):
                    return '%s.%s' % (fname, ext)
    @classmethod
    def global_config(cls, key, *args):
        ''' This reads or sets the global settings stored in class.settings. '''
        if args:
            cls.settings = cls.settings.copy() # Make settings local to class
            cls.settings[key] = args[0]
        else:
            return cls.settings[key]
    def prepare(self, **options):
        """ Run preparations (parsing, caching, ...).
        It should be possible to call this again to refresh a template or to
        update settings.
        """
        raise NotImplementedError
    def render(self, *args, **kwargs):
        """ Render the template with the specified local variables and return
        a single byte or unicode string. If it is a byte string, the encoding
        must match self.encoding. This method must be thread-safe!
        Local variables may be provided in dictionaries (args)
        or directly, as keywords (kwargs).
        """
        raise NotImplementedError
class MakoTemplate(BaseTemplate):
    """ Adapter for the Mako template engine. """
    def prepare(self, **options):
        from mako.template import Template
        from mako.lookup import TemplateLookup
        options.update({'input_encoding':self.encoding})
        options.setdefault('format_exceptions', bool(DEBUG))
        mako_lookup = TemplateLookup(directories=self.lookup, **options)
        if self.source:
            self.tpl = Template(self.source, lookup=mako_lookup, **options)
        else:
            self.tpl = Template(uri=self.name, filename=self.filename,
                                lookup=mako_lookup, **options)
    def render(self, *args, **kwargs):
        for dictarg in args:
            kwargs.update(dictarg)
        env = self.defaults.copy()
        env.update(kwargs)
        return self.tpl.render(**env)
class CheetahTemplate(BaseTemplate):
    """ Adapter for the Cheetah template engine. """
    def prepare(self, **options):
        from Cheetah.Template import Template
        # Thread-local variable store; render() fills it per call so the
        # shared template object stays thread-safe.
        self.context = threading.local()
        self.context.vars = {}
        options['searchList'] = [self.context.vars]
        if self.source:
            self.tpl = Template(source=self.source, **options)
        else:
            self.tpl = Template(file=self.filename, **options)
    def render(self, *args, **kwargs):
        for dictarg in args:
            kwargs.update(dictarg)
        self.context.vars.update(self.defaults)
        self.context.vars.update(kwargs)
        rendered = str(self.tpl)
        self.context.vars.clear()
        return rendered
class Jinja2Template(BaseTemplate):
    """ Adapter for the Jinja2 template engine. """
    def prepare(self, filters=None, tests=None, globals={}, **kwargs):
        from jinja2 import Environment, FunctionLoader
        env = Environment(loader=FunctionLoader(self.loader), **kwargs)
        if filters:
            env.filters.update(filters)
        if tests:
            env.tests.update(tests)
        if globals:
            env.globals.update(globals)
        self.env = env
        if self.source:
            self.tpl = env.from_string(self.source)
        else:
            self.tpl = env.get_template(self.filename)
    def render(self, *args, **kwargs):
        for dictarg in args:
            kwargs.update(dictarg)
        env = self.defaults.copy()
        env.update(kwargs)
        return self.tpl.render(**env)
    def loader(self, name):
        """ Resolve a template name via self.lookup and return its text. """
        fname = self.search(name, self.lookup)
        if not fname:
            return
        with open(fname, "rb") as f:
            return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
    """ Bottle's built-in stpl template engine. """
    def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka):
        self.cache = {}  # include()/rebase() cache: template name -> instance
        enc = self.encoding
        self._str = lambda x: touni(x, enc)
        self._escape = lambda x: escape_func(touni(x, enc))
        self.syntax = syntax
        if noescape:
            # Swap semantics: {{x}} becomes raw and {{!x}} becomes escaped.
            self._str, self._escape = self._escape, self._str
    @cached_property
    def co(self):
        # Compiled code object for the translated template (built lazily).
        return compile(self.code, self.filename or '<string>', 'exec')
    @cached_property
    def code(self):
        source = self.source or open(self.filename, 'rb').read()
        try:
            source, encoding = touni(source), 'utf8'
        except UnicodeError:
            depr('Template encodings other than utf8 are no longer supported.') #0.11
            source, encoding = touni(source, 'latin1'), 'latin1'
        parser = StplParser(source, encoding=encoding, syntax=self.syntax)
        code = parser.translate()
        self.encoding = parser.encoding
        return code
    def _rebase(self, _env, _name=None, **kwargs):
        # Called by rebase(); execute() performs the actual re-rendering.
        _env['_rebase'] = (_name, kwargs)
    def _include(self, _env, _name=None, **kwargs):
        env = _env.copy()
        env.update(kwargs)
        if _name not in self.cache:
            self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
        return self.cache[_name].execute(env['_stdout'], env)
    def execute(self, _stdout, kwargs):
        env = self.defaults.copy()
        env.update(kwargs)
        # Inject the runtime helpers referenced by the generated code.
        env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
            'include': functools.partial(self._include, env),
            'rebase': functools.partial(self._rebase, env), '_rebase': None,
            '_str': self._str, '_escape': self._escape, 'get': env.get,
            'setdefault': env.setdefault, 'defined': env.__contains__ })
        eval(self.co, env)
        if env.get('_rebase'):
            subtpl, rargs = env.pop('_rebase')
            rargs['base'] = ''.join(_stdout) #copy stdout
            del _stdout[:] # clear stdout
            return self._include(env, subtpl, **rargs)
        return env
    def render(self, *args, **kwargs):
        """ Render the template using keyword arguments as local variables. """
        env = {}; stdout = []
        for dictarg in args: env.update(dictarg)
        env.update(kwargs)
        self.execute(stdout, env)
        return ''.join(stdout)
class StplSyntaxError(TemplateError): pass
class StplParser(object):
    ''' Parser for stpl templates. Translates template source into python
        code (a string) that execute()s against an environment dict. '''
    _re_cache = {} #: Cache for compiled re patterns
    # This huge pile of voodoo magic splits python code into 8 different tokens.
    # 1: All kinds of python strings (trust me, it works)
    _re_tok = '((?m)[urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \
              '|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \
              '|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \
              '|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))'
    _re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later
    # 2: Comments (until end of line, but not the newline itself)
    _re_tok += '|(#.*)'
    # 3,4: Keywords that start or continue a python block (only start of line)
    _re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \
               '|^([ \\t]*(?:elif|else|except|finally)\\b)'
    # 5: Our special 'end' keyword (but only if it stands alone)
    _re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))'
    # 6: A customizable end-of-code-block template token (only end of line)
    _re_tok += '|(%(block_close)s[ \\t]*(?=$))'
    # 7: And finally, a single newline. The 8th token is 'everything else'
    _re_tok += '|(\\r?\\n)'
    # Match the start tokens of code areas in a template
    _re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))'
    # Match inline statements (may contain python strings)
    _re_inl = '%%(inline_start)s((?:%s|[^\'"\n]*?)+)%%(inline_end)s' % _re_inl
    default_syntax = '<% %> % {{ }}'
    def __init__(self, source, syntax=None, encoding='utf8'):
        self.source, self.encoding = touni(source, encoding), encoding
        self.set_syntax(syntax or self.default_syntax)
        self.code_buffer, self.text_buffer = [], []
        self.lineno, self.offset = 1, 0
        self.indent, self.indent_mod = 0, 0
    def get_syntax(self):
        ''' Tokens as a space separated string (default: <% %> % {{ }}) '''
        return self._syntax
    def set_syntax(self, syntax):
        # Compile (and cache) the three patterns for this token set.
        self._syntax = syntax
        self._tokens = syntax.split()
        if not syntax in self._re_cache:
            names = 'block_start block_close line_start inline_start inline_end'
            etokens = map(re.escape, self._tokens)
            pattern_vars = dict(zip(names.split(), etokens))
            patterns = (self._re_split, self._re_tok, self._re_inl)
            patterns = [re.compile(p%pattern_vars) for p in patterns]
            self._re_cache[syntax] = patterns
        self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
    syntax = property(get_syntax, set_syntax)
    def translate(self):
        """ Translate the template source into python code (a string). """
        if self.offset: raise RuntimeError('Parser is a one time instance.')
        while True:
            m = self.re_split.search(self.source[self.offset:])
            if m:
                # Everything before the code marker is plain text.
                text = self.source[self.offset:self.offset+m.start()]
                self.text_buffer.append(text)
                self.offset += m.end()
                if m.group(1): # Escape syntax
                    line, sep, _ = self.source[self.offset:].partition('\n')
                    self.text_buffer.append(m.group(2)+line+sep)
                    self.offset += len(line+sep)+1
                    continue
                self.flush_text()
                self.read_code(multiline=bool(m.group(4)))
            else: break
        self.text_buffer.append(self.source[self.offset:])
        self.flush_text()
        return ''.join(self.code_buffer)
    def read_code(self, multiline):
        """ Consume one code line (or a whole <% ... %> block). """
        code_line, comment = '', ''
        while True:
            m = self.re_tok.search(self.source[self.offset:])
            if not m:
                code_line += self.source[self.offset:]
                self.offset = len(self.source)
                self.write_code(code_line.strip(), comment)
                return
            code_line += self.source[self.offset:self.offset+m.start()]
            self.offset += m.end()
            _str, _com, _blk1, _blk2, _end, _cend, _nl = m.groups()
            if _str: # Python string
                code_line += _str
            elif _com: # Python comment (up to EOL)
                comment = _com
                if multiline and _com.strip().endswith(self._tokens[1]):
                    multiline = False # Allow end-of-block in comments
            elif _blk1: # Start-block keyword (if/for/while/def/try/...)
                code_line, self.indent_mod = _blk1, -1
                self.indent += 1
            elif _blk2: # Continue-block keyword (else/elif/except/...)
                code_line, self.indent_mod = _blk2, -1
            elif _end: # The non-standard 'end'-keyword (ends a block)
                self.indent -= 1
            elif _cend: # The end-code-block template token (usually '%>')
                if multiline: multiline = False
                else: code_line += _cend
            else: # \n
                self.write_code(code_line.strip(), comment)
                self.lineno += 1
                code_line, comment, self.indent_mod = '', '', 0
                if not multiline:
                    break
    def flush_text(self):
        """ Turn buffered text (with inline {{...}}) into a _printlist call. """
        text = ''.join(self.text_buffer)
        del self.text_buffer[:]
        if not text: return
        parts, pos, nl = [], 0, '\\\n'+'  '*self.indent
        for m in self.re_inl.finditer(text):
            prefix, pos = text[pos:m.start()], m.end()
            if prefix:
                parts.append(nl.join(map(repr, prefix.splitlines(True))))
            if prefix.endswith('\n'): parts[-1] += nl
            parts.append(self.process_inline(m.group(1).strip()))
        if pos < len(text):
            prefix = text[pos:]
            lines = prefix.splitlines(True)
            # A trailing double-backslash suppresses the final newline.
            if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
            elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
            parts.append(nl.join(map(repr, lines)))
        code = '_printlist((%s,))' % ', '.join(parts)
        self.lineno += code.count('\n')+1
        self.write_code(code)
    def process_inline(self, chunk):
        # {{!expr}} is emitted raw; {{expr}} is escaped.
        if chunk[0] == '!': return '_str(%s)' % chunk[1:]
        return '_escape(%s)' % chunk
    def write_code(self, line, comment=''):
        # Indent generated code to mirror the template's block nesting.
        code = '  ' * (self.indent+self.indent_mod)
        code += line.lstrip() + comment + '\n'
        self.code_buffer.append(code)
def template(*args, **kwargs):
    '''
    Get a rendered template as a string iterator.
    You can use a name, a filename or a template string as first parameter.
    Template rendering arguments can be passed as dictionaries
    or directly (as keyword arguments).
    '''
    tpl = args[0] if args else None
    adapter = kwargs.pop('template_adapter', SimpleTemplate)
    lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
    tplid = (id(lookup), tpl)
    # (Re-)build the cached template object on a cache miss or in DEBUG mode.
    if tplid not in TEMPLATES or DEBUG:
        settings = kwargs.pop('template_settings', {})
        if isinstance(tpl, adapter):
            TEMPLATES[tplid] = tpl
            if settings:
                TEMPLATES[tplid].prepare(**settings)
        else:
            # Heuristic: anything with template markers is inline source,
            # everything else is treated as a template name/filename.
            looks_like_source = ("\n" in tpl or "{" in tpl or
                                 "%" in tpl or '$' in tpl)
            if looks_like_source:
                TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
            else:
                TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
    if not TEMPLATES[tplid]:
        abort(500, 'Template (%s) not found' % tpl)
    for dictarg in args[1:]:
        kwargs.update(dictarg)
    return TEMPLATES[tplid].render(kwargs)
# Convenience wrappers that pre-select a specific template adapter.
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
    ''' Decorator: renders a template for a handler.
        The handler can control its behavior like that:

          - return a dict of template vars to fill out the template
          - return something other than a dict and the view decorator will not
            process the template, but return the handler result as is.
            This includes returning a HTTPResponse(dict) to get,
            for instance, JSON with autojson or other castfilters.
    '''
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            # No result at all: render the template with the defaults only.
            if result is None:
                return template(tpl_name, defaults)
            # A dict result supplies (and may override) template variables.
            if isinstance(result, (dict, DictMixin)):
                tplvars = defaults.copy()
                tplvars.update(result)
                return template(tpl_name, **tplvars)
            # Anything else is passed through untouched.
            return result
        return wrapper
    return decorator
# Adapter-specific view() wrappers. The template_adapter keyword travels
# through the view defaults into template(), which pops it back out.
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']  # Default template search path
TEMPLATES = {}  # Cache of prepared template objects, keyed by (id(lookup), tpl)
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
# NOTE: the assignments below extend httplib.responses in place.
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
#: Pre-formatted status lines such as '404 Not Found', keyed by status code.
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, HTTP_CODES, request, touni
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
if __name__ == '__main__':
    # Command line interface: ``python bottle.py [options] package.module:app``
    opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
    if opt.version:
        _stdout('Bottle %s\n'%__version__)
        sys.exit(0)
    if not args:
        parser.print_help()
        _stderr('\nError: No application specified.\n')
        sys.exit(1)
    sys.path.insert(0, '.')
    # Make the app's ``import bottle`` resolve to this running module.
    sys.modules.setdefault('bottle', sys.modules['__main__'])
    host, port = (opt.bind or 'localhost'), 8080
    # Split an optional port from --bind. The rfind() comparison keeps
    # bracketed IPv6 literals like [::1]:8080 intact.
    if ':' in host and host.rfind(']') < host.rfind(':'):
        host, port = host.rsplit(':', 1)
    host = host.strip('[]')
    run(args[0], host=host, port=int(port), server=opt.server,
        reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END
|
{
"content_hash": "771dff95ca78fb2480f88b859022eec0",
"timestamp": "",
"source": "github",
"line_count": 3567,
"max_line_length": 103,
"avg_line_length": 39.62573591253154,
"alnum_prop": 0.5808412041458841,
"repo_name": "Gehn/JustABlog",
"id": "4da3557f2f1e15c288d06b2162a9f9a5f53c406c",
"size": "141391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bottle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "176113"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
import traceback
import logging
from time import time
from datetime import datetime
from random import choice
from io import open
import click
import sqlparse
from prompt_toolkit import CommandLineInterface, Application, AbortAction
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.shortcuts import create_default_layout, create_eventloop
from prompt_toolkit.document import Document
from prompt_toolkit.filters import Always, HasFocus, IsDone
from prompt_toolkit.layout.processors import (HighlightMatchingBracketProcessor,
ConditionalProcessor)
from prompt_toolkit.history import FileHistory
from pygments.token import Token
from configobj import ConfigObj, ConfigObjError
from .packages.tabulate import tabulate, table_formats
from .packages.expanded import expanded_table
from .packages.special.main import (COMMANDS, NO_QUERY)
import mycli.packages.special as special
from .sqlcompleter import SQLCompleter
from .clitoolbar import create_toolbar_tokens_func
from .clistyle import style_factory
from .sqlexecute import SQLExecute
from .clibuffer import CLIBuffer
from .config import (write_default_config, load_config, get_mylogin_cnf_path,
open_mylogin_cnf)
from .key_bindings import mycli_bindings
from .encodingutils import utf8tounicode
from .lexer import MyCliLexer
from .__init__ import __version__
click.disable_unicode_literals_warning = True
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from pymysql import OperationalError
from collections import namedtuple
# Query tuples are used for maintaining history
Query = namedtuple('Query', ['query', 'successful', 'mutating'])
# Directory containing this module; used to locate bundled files (e.g. myclirc).
PACKAGE_ROOT = os.path.dirname(__file__)
class MyCli(object):
default_prompt = '\\t \\u@\\h:\\d> '
defaults_suffix = None
# In order of being loaded. Files lower in list override earlier ones.
cnf_files = [
'/etc/my.cnf',
'/etc/mysql/my.cnf',
'/usr/local/etc/my.cnf',
os.path.expanduser('~/.my.cnf')
]
    def __init__(self, sqlexecute=None, prompt=None,
            logfile=None, defaults_suffix=None, defaults_file=None,
            login_path=None):
        """Create a CLI instance and load user configuration.

        :param sqlexecute: optional pre-built SQLExecute instance.
        :param prompt: prompt format string; overrides my.cnf/config values.
        :param logfile: open file object for query logging, or None.
        :param defaults_suffix: suffix for mysql defaults group names.
        :param defaults_file: if given, read only this mysql config file.
        :param login_path: name of a ~/.mylogin.cnf login path section.
        """
        self.sqlexecute = sqlexecute
        self.logfile = logfile
        self.defaults_suffix = defaults_suffix
        self.login_path = login_path
        # self.cnf_files is a class variable that stores the list of mysql
        # config files to read in at launch.
        # If defaults_file is specified then override the class variable with
        # defaults_file.
        if defaults_file:
            self.cnf_files = [defaults_file]
        # Write a default ~/.myclirc if missing, then load the config.
        default_config = os.path.join(PACKAGE_ROOT, 'myclirc')
        write_default_config(default_config, '~/.myclirc')
        # Load config.
        c = self.config = load_config('~/.myclirc', default_config)
        self.multi_line = c['main'].as_bool('multi_line')
        self.destructive_warning = c['main'].as_bool('destructive_warning')
        self.key_bindings = c['main']['key_bindings']
        special.set_timing_enabled(c['main'].as_bool('timing'))
        self.table_format = c['main']['table_format']
        self.syntax_style = c['main']['syntax_style']
        self.cli_style = c['colors']
        self.wider_completion_menu = c['main'].as_bool('wider_completion_menu')
        self.logger = logging.getLogger(__name__)
        self.initialize_logging()
        # Prompt priority: constructor arg > my.cnf [prompt] > config > default.
        prompt_cnf = self.read_my_cnf_files(self.cnf_files, ['prompt'])['prompt']
        self.prompt_format = prompt or prompt_cnf or c['main']['prompt'] or \
                             self.default_prompt
        self.query_history = []
        # Initialize completer.
        smart_completion = c['main'].as_bool('smart_completion')
        completer = SQLCompleter(smart_completion)
        self.completer = completer
        # Register custom special commands.
        self.register_special_commands()
        # Load .mylogin.cnf if it exists.
        mylogin_cnf_path = get_mylogin_cnf_path()
        if mylogin_cnf_path:
            mylogin_cnf = open_mylogin_cnf(mylogin_cnf_path)
        if mylogin_cnf_path and mylogin_cnf:
            # .mylogin.cnf gets read last, even if defaults_file is specified.
            # NOTE(review): when no defaults_file was given this appends to the
            # class-level cnf_files list, shared across instances — confirm.
            self.cnf_files.append(mylogin_cnf)
        elif mylogin_cnf_path and not mylogin_cnf:
            # There was an error reading the login path file.
            print('Error: Unable to read login path file.')
    def register_special_commands(self):
        """Register mycli's custom backslash commands with the special module."""
        # \u — switch to another database.
        special.register_special_command(self.change_db, 'use',
                '\\u', 'Change to a new database.', aliases=('\\u',))
        # \r — reconnect (optionally to a different database).
        special.register_special_command(self.change_db, 'connect',
                '\\r', 'Reconnect to the database. Optional database argument.',
                aliases=('\\r', ))
        # \# — refresh auto-completion metadata from the server.
        special.register_special_command(self.refresh_dynamic_completions, 'rehash',
                '\\#', 'Refresh auto-completions.', arg_type=NO_QUERY, aliases=('\\#',))
        # \T — switch the output table format (case sensitive alias).
        special.register_special_command(self.change_table_format, 'tableformat',
                '\\T', 'Change Table Type.', aliases=('\\T',), case_sensitive=True)
        # \. — execute SQL from a file.
        special.register_special_command(self.execute_from_file, 'source', '\\. filename',
                'Execute commands from file.', aliases=('\\.',))
def change_table_format(self, arg, **_):
if not arg in table_formats():
msg = "Table type %s not yet implemented. Allowed types:" % arg
for table_type in table_formats():
msg += "\n\t%s" % table_type
yield (None, None, None, msg)
else:
self.table_format = arg
yield (None, None, None, "Changed table Type to %s" % self.table_format)
def change_db(self, arg, **_):
if arg is None:
self.sqlexecute.connect()
else:
self.sqlexecute.connect(database=arg)
yield (None, None, None, 'You are now connected to database "%s" as '
'user "%s"' % (self.sqlexecute.dbname, self.sqlexecute.user))
def execute_from_file(self, arg, **_):
if not arg:
message = 'Missing required argument, filename.'
return [(None, None, None, message)]
try:
with open(os.path.expanduser(arg), encoding='utf-8') as f:
query = f.read()
except IOError as e:
return [(None, None, None, str(e))]
return self.sqlexecute.run(query)
def initialize_logging(self):
log_file = self.config['main']['log_file']
log_level = self.config['main']['log_level']
level_map = {'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG
}
handler = logging.FileHandler(os.path.expanduser(log_file))
formatter = logging.Formatter(
'%(asctime)s (%(process)d/%(threadName)s) '
'%(name)s %(levelname)s - %(message)s')
handler.setFormatter(formatter)
root_logger = logging.getLogger('mycli')
root_logger.addHandler(handler)
root_logger.setLevel(level_map[log_level.upper()])
root_logger.debug('Initializing mycli logging.')
root_logger.debug('Log file %r.', log_file)
def connect_uri(self, uri):
uri = urlparse(uri)
database = uri.path[1:] # ignore the leading fwd slash
self.connect(database, uri.username, uri.password, uri.hostname,
uri.port)
def read_my_cnf_files(self, files, keys):
"""
Reads a list of config files and merges them. The last one will win.
:param files: list of files to read
:param keys: list of keys to retrieve
:returns: tuple, with None for missing keys.
"""
cnf = ConfigObj()
for _file in files:
try:
cnf.merge(ConfigObj(_file, interpolation=False))
except ConfigObjError as e:
self.logger.error('Error parsing %r.', _file)
self.logger.error('Recovering partially parsed config values.')
cnf.merge(e.config)
pass
sections = ['client']
if self.login_path and self.login_path != 'client':
sections.append(self.login_path)
if self.defaults_suffix:
sections.extend([sect + self.defaults_suffix for sect in sections])
def get(key):
result = None
for sect in cnf:
if sect in sections and key in cnf[sect]:
result = cnf[sect][key]
return result
return dict([(x, get(x)) for x in keys])
    def connect(self, database='', user='', passwd='', host='', port='',
                socket='', charset=''):
        """Resolve connection parameters and open the database connection.

        Precedence for every parameter: explicit argument > my.cnf value >
        hard-coded default. Prompts once for the password if the server
        reports "Access denied". Exits the process with status 1 when the
        connection cannot be established.
        """
        # Placeholder keys; read_my_cnf_files returns a value or None each.
        cnf = {'database': None,
               'user': None,
               'password': None,
               'host': None,
               'port': None,
               'socket': None,
               'default-character-set': None}
        cnf = self.read_my_cnf_files(self.cnf_files, cnf.keys())
        # Fall back to config values only if user did not specify a value.
        database = database or cnf['database']
        # An explicit host or port wins over any configured unix socket.
        if port or host:
            socket = ''
        else:
            socket = socket or cnf['socket']
        user = user or cnf['user'] or os.getenv('USER')
        host = host or cnf['host'] or 'localhost'
        port = int(port or cnf['port'] or 3306)
        passwd = passwd or cnf['password']
        charset = charset or cnf['default-character-set'] or 'utf8'
        # Connect to the database.
        try:
            try:
                sqlexecute = SQLExecute(database, user, passwd, host, port,
                                        socket, charset)
            except OperationalError as e:
                # Retry exactly once with an interactively prompted password.
                if ('Access denied for user' in e.args[1]):
                    passwd = click.prompt('Password', hide_input=True,
                                          show_default=False, type=str)
                    sqlexecute = SQLExecute(database, user, passwd, host, port,
                                            socket, charset)
                else:
                    raise e
        except Exception as e:  # Connecting to a database could fail.
            self.logger.debug('Database connection failed: %r.', e)
            self.logger.error("traceback: %r", traceback.format_exc())
            self.output(str(e), err=True, fg='red')
            exit(1)
        self.sqlexecute = sqlexecute
    def handle_editor_command(self, cli, document):
        """
        Editor command is any query that is prefixed or suffixed
        by a '\e'. The reason for a while loop is because a user
        might edit a query multiple times.
        For eg:
        "select * from \e"<enter> to edit it in vim, then come
        back to the prompt with the edited query "select * from
        blah where q = 'abc'\e" to edit it again.
        :param cli: CommandLineInterface
        :param document: Document
        :return: Document
        """
        while special.editor_command(document.text):
            filename = special.get_filename(document.text)
            sql, message = special.open_external_editor(filename,
                                                        sql=document.text)
            if message:
                # Something went wrong. Raise an exception and bail.
                raise RuntimeError(message)
            # Pre-fill the prompt buffer with the edited SQL and let the
            # user accept it or append another \e to edit again.
            cli.current_buffer.document = Document(sql, cursor_position=len(sql))
            document = cli.run(False)
            continue
        return document
    def run_cli(self):
        """Main REPL: build the prompt UI, then read/execute/render queries
        until the user exits (EOF, 'exit', 'quit', \\q or :q)."""
        sqlexecute = self.sqlexecute
        logger = self.logger
        original_less_opts = self.adjust_less_opts()
        self.set_pager_from_config()
        self.initialize_completions()
        completer = self.completer
        def set_key_bindings(value):
            # Anything other than 'vi' falls back to emacs bindings.
            if value not in ('emacs', 'vi'):
                value = 'emacs'
            self.key_bindings = value
        project_root = os.path.dirname(PACKAGE_ROOT)
        author_file = os.path.join(project_root, 'AUTHORS')
        sponsor_file = os.path.join(project_root, 'SPONSORS')
        key_binding_manager = mycli_bindings(get_key_bindings=lambda: self.key_bindings,
                                             set_key_bindings=set_key_bindings)
        print('Version:', __version__)
        print('Chat: https://gitter.im/dbcli/mycli')
        print('Mail: https://groups.google.com/forum/#!forum/mycli-users')
        print('Home: http://mycli.net')
        print('Thanks to the contributor -', thanks_picker([author_file, sponsor_file]))
        def prompt_tokens(cli):
            # Re-render the prompt each time so \d etc. stay current.
            return [(Token.Prompt, self.get_prompt(self.prompt_format))]
        get_toolbar_tokens = create_toolbar_tokens_func(lambda: self.key_bindings)
        layout = create_default_layout(lexer=MyCliLexer,
                                       reserve_space_for_menu=True,
                                       multiline=True,
                                       get_prompt_tokens=prompt_tokens,
                                       get_bottom_toolbar_tokens=get_toolbar_tokens,
                                       display_completions_in_columns=self.wider_completion_menu,
                                       extra_input_processors=[
                                           ConditionalProcessor(
                                               processor=HighlightMatchingBracketProcessor(chars='[](){}'),
                                               filter=HasFocus(DEFAULT_BUFFER) & ~IsDone()),
                                       ])
        buf = CLIBuffer(always_multiline=self.multi_line, completer=completer,
                        history=FileHistory(os.path.expanduser('~/.mycli-history')),
                        complete_while_typing=Always())
        application = Application(style=style_factory(self.syntax_style, self.cli_style),
                                  layout=layout, buffer=buf,
                                  key_bindings_registry=key_binding_manager.registry,
                                  on_exit=AbortAction.RAISE_EXCEPTION,
                                  ignore_case=True)
        cli = CommandLineInterface(application=application, eventloop=create_eventloop())
        try:
            while True:
                document = cli.run()
                special.set_expanded_output(False)
                # The reason we check here instead of inside the sqlexecute is
                # because we want to raise the Exit exception which will be
                # caught by the try/except block that wraps the
                # sqlexecute.run() statement.
                if quit_command(document.text):
                    raise EOFError
                try:
                    document = self.handle_editor_command(cli, document)
                except RuntimeError as e:
                    logger.error("sql: %r, error: %r", document.text, e)
                    logger.error("traceback: %r", traceback.format_exc())
                    self.output(str(e), err=True, fg='red')
                    continue
                if self.destructive_warning:
                    destroy = confirm_destructive_query(document.text)
                    if destroy is None:
                        pass  # Query was not destructive. Nothing to do here.
                    elif destroy is True:
                        self.output('Your call!')
                    else:
                        self.output('Wise choice!')
                        continue
                # Keep track of whether or not the query is mutating. In case
                # of a multi-statement query, the overall query is considered
                # mutating if any one of the component statements is mutating
                mutating = False
                try:
                    logger.debug('sql: %r', document.text)
                    if self.logfile:
                        self.logfile.write('\n# %s\n' % datetime.now())
                        self.logfile.write(document.text)
                        self.logfile.write('\n')
                    successful = False
                    start = time()
                    res = sqlexecute.run(document.text)
                    duration = time() - start
                    successful = True
                    output = []
                    total = 0
                    for title, cur, headers, status in res:
                        logger.debug("headers: %r", headers)
                        logger.debug("rows: %r", cur)
                        logger.debug("status: %r", status)
                        start = time()
                        # Confirm before rendering very large result sets.
                        threshold = 1000
                        if (is_select(status) and
                                cur and cur.rowcount > threshold):
                            self.output('The result set has more than %s rows.'
                                        % threshold, fg='red')
                            if not click.confirm('Do you want to continue?'):
                                self.output("Aborted!", err=True, fg='red')
                                break
                        output.extend(format_output(title, cur, headers,
                                                    status, self.table_format))
                        end = time()
                        total += end - start
                        mutating = mutating or is_mutating(status)
                except KeyboardInterrupt:
                    # Restart connection to the database
                    sqlexecute.connect()
                    logger.debug("cancelled query, sql: %r", document.text)
                    self.output("cancelled query", err=True, fg='red')
                except NotImplementedError:
                    self.output('Not Yet Implemented.', fg="yellow")
                except OperationalError as e:
                    logger.debug("Exception: %r", e)
                    reconnect = True
                    # 2003/2006/2013 are MySQL "connection lost" error codes.
                    if (e.args[0] in (2003, 2006, 2013)):
                        reconnect = click.prompt('Connection reset. Reconnect (Y/n)',
                                                 show_default=False, type=bool, default=True)
                        if reconnect:
                            logger.debug('Attempting to reconnect.')
                            try:
                                sqlexecute.connect()
                                logger.debug('Reconnected successfully.')
                                self.output('Reconnected!\nTry the command again.', fg='green')
                            except OperationalError as e:
                                logger.debug('Reconnect failed. e: %r', e)
                                self.output(str(e), err=True, fg='red')
                                continue  # If reconnection failed, don't proceed further.
                        else:  # If user chooses not to reconnect, don't proceed further.
                            continue
                    else:
                        logger.error("sql: %r, error: %r", document.text, e)
                        logger.error("traceback: %r", traceback.format_exc())
                        self.output(str(e), err=True, fg='red')
                except Exception as e:
                    logger.error("sql: %r, error: %r", document.text, e)
                    logger.error("traceback: %r", traceback.format_exc())
                    self.output(str(e), err=True, fg='red')
                else:
                    try:
                        self.output_via_pager('\n'.join(output))
                    except KeyboardInterrupt:
                        pass
                    if special.is_timing_enabled():
                        self.output('Command Time: %0.03fs' % duration)
                        self.output('Format Time: %0.03fs' % total)
                # Refresh the table names and column names if necessary.
                if need_completion_refresh(document.text):
                    self.refresh_dynamic_completions()
                query = Query(document.text, successful, mutating)
                self.query_history.append(query)
        except EOFError:
            self.output('Goodbye!')
        finally:  # Reset the less opts back to original.
            logger.debug('Restoring env var LESS to %r.', original_less_opts)
            os.environ['LESS'] = original_less_opts
            os.environ['PAGER'] = special.get_original_pager()
def output(self, text, **kwargs):
if self.logfile:
self.logfile.write(utf8tounicode(text))
self.logfile.write('\n')
click.secho(text, **kwargs)
def output_via_pager(self, text):
if self.logfile:
self.logfile.write(text)
self.logfile.write('\n')
click.echo_via_pager(text)
def adjust_less_opts(self):
less_opts = os.environ.get('LESS', '')
self.logger.debug('Original value for LESS env var: %r', less_opts)
os.environ['LESS'] = '-SRXF'
return less_opts
def set_pager_from_config(self):
cnf = self.read_my_cnf_files(self.cnf_files, ['pager'])
if cnf['pager']:
special.set_pager(cnf['pager'])
    def initialize_completions(self):
        """Seed the completer with static items, then load the dynamic ones."""
        completer = self.completer
        # special_commands
        completer.extend_special_commands(COMMANDS.keys())
        # Items to complete after the SHOW command.
        completer.extend_show_items(self.sqlexecute.show_candidates())
        return self.refresh_dynamic_completions()
    def refresh_dynamic_completions(self):
        """Reload schema-dependent completions from the live connection.

        Returns a single-message result list in the special-command shape.
        """
        sqlexecute = self.sqlexecute
        completer = self.completer
        # Drop everything previously learned before repopulating.
        completer.reset_completions()
        # databases
        completer.extend_database_names(sqlexecute.databases())
        # schemata - In MySQL Schema is the same as database. But for mycli
        # schemata will be the name of the current database.
        completer.extend_schemata(self.sqlexecute.dbname)
        completer.set_dbname(self.sqlexecute.dbname)
        # tables
        completer.extend_relations(sqlexecute.tables(), kind='tables')
        completer.extend_columns(sqlexecute.table_columns(), kind='tables')
        # users
        completer.extend_users(sqlexecute.users())
        # views
        #completer.extend_relations(sqlexecute.views(), kind='views')
        #completer.extend_columns(sqlexecute.view_columns(), kind='views')
        # functions
        completer.extend_functions(sqlexecute.functions())
        return [(None, None, None, 'Auto-completion refreshed.')]
    def get_completions(self, text, cursor_positition):
        """Return completions for text at the given cursor position.

        NOTE(review): the parameter name 'cursor_positition' is misspelled;
        left unchanged in case external callers pass it by keyword.
        """
        return self.completer.get_completions(
            Document(text=text, cursor_position=cursor_positition), None)
def get_prompt(self, string):
sqlexecute = self.sqlexecute
string = string.replace('\\u', sqlexecute.user or '(none)')
string = string.replace('\\h', sqlexecute.host or '(none)')
string = string.replace('\\d', sqlexecute.dbname or '(none)')
string = string.replace('\\t', sqlexecute.server_type()[0] or 'mycli')
string = string.replace('\\n', "\n")
return string
# Command-line entry point; options deliberately mirror the mysql client's.
@click.command()
@click.option('-h', '--host', envvar='MYSQL_HOST', help='Host address of the database.')
@click.option('-P', '--port', envvar='MYSQL_TCP_PORT', help='Port number to use for connection. Honors '
              '$MYSQL_TCP_PORT')
@click.option('-u', '--user', help='User name to connect to the database.')
@click.option('-S', '--socket', envvar='MYSQL_UNIX_PORT', help='The socket file to use for connection.')
@click.option('-p', '--password', 'password', envvar='MYSQL_PWD', type=str,
              help='Password to connect to the database')
@click.option('--pass', 'password', envvar='MYSQL_PWD', type=str,
              help='Password to connect to the database')
@click.option('-v', '--version', is_flag=True, help='Version of mycli.')
@click.option('-D', '--database', 'dbname', help='Database to use.')
@click.option('-R', '--prompt', 'prompt',
              help='Prompt format (Default: "{0}")'.format(
                  MyCli.default_prompt))
@click.option('-l', '--logfile', type=click.File(mode='a', encoding='utf-8'),
              help='Log every query and its results to a file.')
@click.option('--defaults-group-suffix', type=str,
              help='Read config group with the specified suffix.')
@click.option('--defaults-file', type=click.Path(),
              help='Only read default options from the given file')
@click.option('--login-path', type=str,
              help='Read this path from the login file.')
@click.argument('database', default='', nargs=1)
def cli(database, user, host, port, socket, password, dbname,
        version, prompt, logfile, defaults_group_suffix, defaults_file,
        login_path):
    # NOTE: intentionally no docstring — click would display it as help text.
    if version:
        print('Version:', __version__)
        sys.exit(0)
    mycli = MyCli(prompt=prompt, logfile=logfile,
                  defaults_suffix=defaults_group_suffix,
                  defaults_file=defaults_file, login_path=login_path)
    # Choose which ever one has a valid value.
    database = database or dbname
    # A URI-style argument (e.g. mysql://...) carries all connection info.
    if database and '://' in database:
        mycli.connect_uri(database)
    else:
        mycli.connect(database, user, password, host, port, socket)
    mycli.logger.debug('Launch Params: \n'
                       '\tdatabase: %r'
                       '\tuser: %r'
                       '\thost: %r'
                       '\tport: %r', database, user, host, port)
    mycli.run_cli()
def format_output(title, cur, headers, status, table_format):
    """Assemble the printable lines for one result set.

    Any of title/cur/status may be None and is then skipped entirely.
    """
    lines = []
    if title:  # Only print the title if it's not None.
        lines.append(title)
    if cur:
        headers = [utf8tounicode(x) for x in headers]
        # Expanded (\G-style) output is toggled through the special module.
        if special.is_expanded_output():
            lines.append(expanded_table(cur, headers))
        else:
            lines.append(tabulate(cur, headers, tablefmt=table_format,
                                  missingval='<null>'))
    if status:  # Only print the status if it's not None.
        lines.append(status)
    return lines
def need_completion_refresh(queries):
    """Determines if the completion needs a refresh by checking if the sql
    statement is an alter, create, drop or change db."""
    for query in sqlparse.split(queries):
        try:
            first_token = query.split()[0]
            # NOTE(review): only the FIRST statement of a multi-statement
            # string is ever inspected — the loop returns on iteration one.
            res = first_token.lower() in ('alter', 'create', 'use', '\\r',
                                          '\\u', 'connect', 'drop')
            return res
        except Exception:
            # Empty statement (no tokens): no refresh needed.
            return False
def is_mutating(status):
    """Return True when the status line's first word is a mutating verb."""
    if not status:
        return False
    # Keywords whose statements change data or schema.
    mutating_keywords = {'insert', 'update', 'delete', 'alter', 'create',
                         'drop', 'replace', 'truncate', 'load'}
    first_word = status.split(None, 1)[0]
    return first_word.lower() in mutating_keywords
def is_select(status):
    """Return True when the status line's first word is 'select'."""
    if not status:
        return False
    first_word = status.split(None, 1)[0]
    return first_word.lower() == 'select'
def confirm_destructive_query(queries):
    """Checks if the query is destructive and prompts the user to confirm.
    Returns:
    None if the query is non-destructive.
    True if the query is destructive and the user wants to proceed.
    False if the query is destructive and the user doesn't want to proceed.
    """
    destructive = set(['drop', 'shutdown'])
    queries = queries.strip()
    for query in sqlparse.split(queries):
        try:
            first_token = query.split()[0]
            if first_token.lower() in destructive:
                # Prompt once at the first destructive statement found.
                destroy = click.prompt("You're about to run a destructive command.\nDo you want to proceed? (y/n)",
                                       type=bool)
                return destroy
        except Exception:
            # Empty statement (no tokens) — cannot be destructive.
            return False
def quit_command(sql):
    """Return True if sql is one of the commands that exits mycli."""
    command = sql.strip()
    # 'exit'/'quit' are case-insensitive; '\q' and ':q' are literal.
    return (command.lower() in ('exit', 'quit')
            or command in ('\q', ':q'))
def thanks_picker(files=()):
    """Pick a random contributor name from AUTHORS/SPONSORS-style files.

    Candidate lines start with '*'; the name is the text after the first
    '*'. Bug fix: the original only kept the LAST file's lines (the
    `contents` variable was reassigned on every iteration) and raised
    NameError on an empty `files`. Now lines from every file are pooled,
    and None is returned when there are no candidates.

    :param files: iterable of file paths to draw names from.
    :return: a randomly chosen name, or None when no candidates exist.
    """
    candidates = []
    for filename in files:
        with open(filename) as f:
            for line in f:
                if line.startswith('*'):
                    candidates.append(line.split('*')[1].strip())
    if not candidates:
        return None
    return choice(candidates)
# Script entry point: dispatch to the click command.
if __name__ == "__main__":
    cli()
|
{
"content_hash": "2c3b72e31ff11ef23c96bd0653d5ff2b",
"timestamp": "",
"source": "github",
"line_count": 689,
"max_line_length": 115,
"avg_line_length": 41.471698113207545,
"alnum_prop": 0.5614544690977812,
"repo_name": "jinstrive/mycli",
"id": "640633c0e25feb94ab619f75c76eaf90d9c5cb5b",
"size": "28596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mycli/main.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "191550"
},
{
"name": "Shell",
"bytes": "529"
}
],
"symlink_target": ""
}
|
"""Presubmit script for ui/accessibility."""
import os, re
AX_IDL = 'ui/accessibility/ax_enums.idl'
AUTOMATION_IDL = 'chrome/common/extensions/api/automation.idl'
def InitialLowerCamelCase(unix_name):
  """Convert a unix_name like 'state_type' to lowerCamelCase ('stateType')."""
  parts = unix_name.split('_')
  camel_tail = [part.capitalize() for part in parts[1:]]
  return parts[0] + ''.join(camel_tail)
# Given a full path to an IDL file containing enum definitions,
# parse the file for enums and return a dict mapping the enum name
# to a list of values for that enum.
def GetEnumsFromFile(fullpath):
  """Parse enum definitions out of an IDL file.

  Args:
    fullpath: full path to an IDL file containing enum definitions.

  Returns:
    Dict mapping each enum name to the list of its value names.
  """
  enum_name = None
  enums = {}
  # Fix: read through a with-block so the file handle is always closed
  # (the original leaked it), and use raw strings for the regexes.
  with open(fullpath) as f:
    for line in f:
      # Strip out comments
      line = re.sub(r'//.*', '', line)
      # Look for lines of the form "enum ENUM_NAME {" and get the enum_name
      m = re.search(r'enum ([\w]+) {', line)
      if m:
        enum_name = m.group(1)
        continue
      # Look for a "}" character signifying the end of an enum
      if line.find('}') >= 0:
        enum_name = None
        continue
      if not enum_name:
        continue
      # Inside an enum definition: the first \w+ token is a value name.
      m = re.search(r'([\w]+)', line)
      if m:
        enums.setdefault(enum_name, [])
        enums[enum_name].append(m.group(1))
  return enums
def CheckMatchingEnum(ax_enums,
                      ax_enum_name,
                      automation_enums,
                      automation_enum_name,
                      errs,
                      output_api):
  """Check an ax enum and its automation mirror define the same values.

  Appends a PresubmitError to errs for each missing enum or value.
  """
  if ax_enum_name not in ax_enums:
    errs.append(output_api.PresubmitError(
        'Expected %s to have an enum named %s' % (AX_IDL, ax_enum_name)))
    return
  if automation_enum_name not in automation_enums:
    errs.append(output_api.PresubmitError(
        'Expected %s to have an enum named %s' % (
            AUTOMATION_IDL, automation_enum_name)))
    return
  expected_values = ax_enums[ax_enum_name]
  actual_values = automation_enums[automation_enum_name]
  # Every ax value must appear, camel-cased, in the automation enum.
  for value in expected_values:
    if InitialLowerCamelCase(value) not in actual_values:
      errs.append(output_api.PresubmitError(
          'Found %s.%s in %s, but did not find %s.%s in %s' % (
              ax_enum_name, value, AX_IDL,
              automation_enum_name, InitialLowerCamelCase(value),
              AUTOMATION_IDL)))
def CheckEnumsMatch(input_api, output_api):
  """Check that the ax and automation IDL enums are kept in sync.

  Returns a list of PresubmitError objects (empty when everything matches).
  """
  repo_root = input_api.change.RepositoryRoot()
  ax_enums = GetEnumsFromFile(os.path.join(repo_root, AX_IDL))
  automation_enums = GetEnumsFromFile(os.path.join(repo_root, AUTOMATION_IDL))
  errs = []
  CheckMatchingEnum(ax_enums, 'AXRole', automation_enums, 'RoleType', errs,
                    output_api)
  CheckMatchingEnum(ax_enums, 'AXState', automation_enums, 'StateType', errs,
                    output_api)
  CheckMatchingEnum(ax_enums, 'AXEvent', automation_enums, 'EventType', errs,
                    output_api)
  return errs
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook: run the enum consistency check on upload."""
  # Only bother when the accessibility IDL is part of the change.
  if AX_IDL in input_api.LocalPaths():
    return CheckEnumsMatch(input_api, output_api)
  return []
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit hook: run the enum consistency check on commit."""
  # Only bother when the accessibility IDL is part of the change.
  if AX_IDL in input_api.LocalPaths():
    return CheckEnumsMatch(input_api, output_api)
  return []
|
{
"content_hash": "b0a839d5b2382c99a58b284c99fcf4c5",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 34.68131868131868,
"alnum_prop": 0.6387832699619772,
"repo_name": "Samsung/ChromiumGStreamerBackend",
"id": "ef41ab093348f5b1dd6d8f57890d00785c688324",
"size": "3319",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ui/accessibility/PRESUBMIT.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from amqpstorm.management.basic import Basic
from amqpstorm.tests.utility import FakeHTTPClient
from amqpstorm.tests.utility import TestFramework
class BasicTests(TestFramework):
    """Tests for message retrieval through the management Basic API."""

    def test_basic_get_with_payload(self):
        """A 'payload' key in the POST response becomes the message body."""
        def fake_post(name):
            return [{'payload': name}]
        api = Basic(FakeHTTPClient(on_post=fake_post))
        messages = api.get(queue='test')
        self.assertEqual(messages[0].body, 'queues/%2F/test/get')

    def test_basic_get_with_body(self):
        """A 'body' key in the POST response becomes the message body."""
        def fake_post(name):
            return [{'body': name}]
        api = Basic(FakeHTTPClient(on_post=fake_post))
        messages = api.get(queue='test')
        self.assertEqual(messages[0].body, 'queues/%2F/test/get')
|
{
"content_hash": "f35ee3b5f2c5e8095b7d3947d1c62bb2",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 65,
"avg_line_length": 31.875,
"alnum_prop": 0.6640522875816993,
"repo_name": "eandersson/amqp-storm",
"id": "1e99da37404cf29b14333a406e90c8ae02380490",
"size": "765",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "amqpstorm/tests/unit/management/basic_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "159150"
}
],
"symlink_target": ""
}
|
"""Extracts information about symbols loaded from other .bzl files."""
import ast
from collections import namedtuple
# Fields: label — the .bzl file loaded from; symbol — the loaded symbol's
# name; alias — the local name it is bound to (None when not aliased).
LoadSymbol = namedtuple('LoadSymbol', ['label', 'symbol', 'alias'])
"""Information about a symbol loaded from another .bzl file."""
class LoadExtractorError(Exception):
    """Error raised by LoadExtractor when parsing or validation fails."""
    pass
class LoadExtractor(object):
    """Extracts information on symbols load()ed from other .bzl files."""

    def _extract_loads(self, bzl_file):
        """Walks the AST and extracts information on loaded symbols.

        Returns a list of LoadSymbol, one per symbol named in each
        top-level load() call whose arguments are all string literals.
        """
        load_symbols = []
        try:
            tree = None
            with open(bzl_file) as f:
                tree = ast.parse(f.read(), bzl_file)
            # (Fix: removed an unused `key = None` binding here.)
            for node in ast.iter_child_nodes(tree):
                # Only bare top-level expression statements can be load().
                if not isinstance(node, ast.Expr):
                    continue
                call = node.value
                if (not isinstance(call, ast.Call) or
                        not isinstance(call.func, ast.Name) or
                        call.func.id != 'load'):
                    continue
                args = []
                for arg in call.args:
                    if not isinstance(arg, ast.Str):
                        raise LoadExtractorError(
                            'Only string literals in load statements are supported.')
                    args.append(arg.s)
                kwargs = {}
                for keyword in call.keywords:
                    if not isinstance(keyword.value, ast.Str):
                        raise LoadExtractorError(
                            'Only string literals in load statements are supported.')
                    kwargs[keyword.arg] = keyword.value.s
                # First positional arg is the label; the rest are symbols.
                label = args[0]
                for arg in args[1:]:
                    load_symbols.append(LoadSymbol(label, arg, None))
                # Keyword args are alias=symbol pairs.
                for alias, symbol in kwargs.items():
                    load_symbols.append(LoadSymbol(label, symbol, alias))
        except IOError as e:
            # Best-effort: report unreadable files and return what we have.
            print("Failed to parse {0}: {1}".format(bzl_file, e.strerror))
        return load_symbols

    def _validate_loads(self, load_symbols):
        """Checks that there are no collisions from the extracted symbols."""
        symbols = set()
        for load in load_symbols:
            if load.alias:
                if load.alias in symbols:
                    raise LoadExtractorError(
                        "Load symbol conflict: %s (aliased from %s) loaded from %s" %
                        (load.alias, load.symbol, load.label))
                else:
                    symbols.add(load.alias)
            elif load.symbol in symbols:
                # Bug fix: report the conflicting symbol name. The original
                # interpolated load.alias, which is always None/falsy on
                # this branch, producing "conflict: None loaded from ...".
                raise LoadExtractorError(
                    "Load symbol conflict: %s loaded from %s" %
                    (load.symbol, load.label))
            else:
                symbols.add(load.symbol)

    def extract(self, bzl_file):
        """Extracts symbols loaded from other .bzl files.

        Walks the AST of the .bzl files and extracts information about symbols
        loaded from other .bzl files from load() calls. Then, validate the
        extracted symbols to check that all symbols are unique.

        Note that only load() calls where all arguments are string literals
        (ast.Str) are supported.

        Args:
          bzl_file: The .bzl file to extract load symbols from.
        Returns:
          List of LoadSymbol objects.
        """
        load_symbols = self._extract_loads(bzl_file)
        self._validate_loads(load_symbols)
        return load_symbols
|
{
"content_hash": "a9d99b7940bd099e0f01ea98d37cd162",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 75,
"avg_line_length": 32.45918367346939,
"alnum_prop": 0.6233888714240805,
"repo_name": "bazelbuild/skydoc",
"id": "e083a3d926fc80d7d6efe6eb8001375666d0b0f2",
"size": "3784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skydoc/load_extractor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16208"
},
{
"name": "HTML",
"bytes": "25410"
},
{
"name": "Python",
"bytes": "95857"
},
{
"name": "Shell",
"bytes": "9187"
},
{
"name": "Starlark",
"bytes": "42067"
}
],
"symlink_target": ""
}
|
def gpu_queue(options):
    """
    Queued up containers waiting for GPU resources

    Polls GpuUsage once per second until a GPU with enough free memory —
    or, for a 'dedicated' request, a GPU with no vent usage and no
    processes — is found, then starts the container on it.

    :param options: JSON string of docker run options; must contain
                    configs['gpu_options'] (and usually configs['devices']).
    :return: tuple (ok, error_message_or_None).
    """
    import docker
    import json
    import time
    from vent.helpers.meta import GpuUsage
    status = (False, None)
    print("gpu queue", str(options))
    print("gpu queue", str(GpuUsage(base_dir="/vent/",
                                    meta_dir="/vent")))
    options = json.loads(options)
    configs = options['configs']
    gpu_options = configs['gpu_options']
    devices = []
    print(str(configs['devices']))
    # device specified, remove all other devices
    if 'device' in gpu_options:
        dev = '/dev/nvidia' + gpu_options['device'] + ':/dev/nvidia'
        dev += gpu_options['device'] + ':rwm'
        if 'devices' in configs:
            d = list(configs['devices'])
            print(str(d))
            for device in d:
                print(dev + " compared to " + device)
                # Entries containing a digit are the numbered nvidia nodes;
                # the rest (nvidiactl etc.) are left alone.
                if any(str.isdigit(str(char)) for char in device):
                    if dev == device:
                        devices.append(device)
                    else:
                        print(dev + " doesn't match, removing: " + device)
                        configs['devices'].remove(device)
    else:
        d = configs['devices']
        for device in d:
            if any(str.isdigit(str(char)) for char in device):
                devices.append(device)
    # check if devices is still an empty list
    if not devices:
        status = (False, "no valid devices match the requested device")
        print(str(status))
        return status
    mem_needed = 0
    dedicated = False
    # need a gpu to itself
    if ('dedicated' in configs['gpu_options'] and
            configs['gpu_options']['dedicated'] == 'yes'):
        dedicated = True
    if 'mem_mb' in configs['gpu_options']:
        # TODO input error checking
        mem_needed = int(configs['gpu_options']['mem_mb'])
    print("mem_needed: ", mem_needed)
    print("dedicated: ", dedicated)
    device = None
    # Busy-wait (1s interval) until some candidate device satisfies the
    # memory/dedication constraints. NOTE(review): this loops forever if
    # no device ever qualifies — presumably bounded by the job runner.
    while not device:
        usage = GpuUsage(base_dir="/vent/", meta_dir="/vent")
        if usage[0]:
            usage = usage[1]
        else:
            return usage
        print(usage)
        # {"device": "0",
        # "mem_mb": "1024",
        # "dedicated": "yes",
        # "enabled": "yes"}
        for d in devices:
            # Extract the device number from '/dev/nvidiaN:...'.
            dev = str(d.split(":")[0].split('nvidia')[1])
            print(dev)
            # if the device is already dedicated, can't be used
            dedicated_gpus = usage['vent_usage']['dedicated']
            is_dedicated = False
            for gpu in dedicated_gpus:
                if dev in gpu:
                    is_dedicated = True
            print("is_dedicated: ", is_dedicated)
            if not is_dedicated:
                ram_used = 0
                if dev in usage['vent_usage']['mem_mb']:
                    ram_used = usage['vent_usage']['mem_mb'][dev]
                # check for vent usage/processes running
                if (dedicated and
                        dev not in usage['vent_usage']['mem_mb'] and
                        mem_needed <= usage[int(dev)]['global_memory'] and
                        not usage[int(dev)]['processes']):
                    device = dev
                # check for ram constraints
                elif mem_needed <= (usage[int(dev)]['global_memory'] - ram_used):
                    device = dev
        # TODO make this sleep incremental up to a point, potentially kill
        # after a set time configured from vent.cfg, outputting as it goes
        time.sleep(1)
    # lock jobs to a specific gpu (no shared GPUs for a single process) this is
    # needed to calculate if memory requested (but not necessarily in use)
    # would become oversubscribed
    # store which device was mapped
    options['labels']['vent.gpu.device'] = device
    gpu_device = '/dev/nvidia' + device + ':/dev/nvidia' + device + ':rwm'
    if 'devices' in configs:
        d = configs['devices']
        for dev in d:
            if any(str.isdigit(str(char)) for char in dev):
                if gpu_device != dev:
                    configs['devices'].remove(dev)
    # Start the container with the merged options/configs.
    try:
        d_client = docker.from_env()
        del options['configs']
        del configs['gpu_options']
        params = options.copy()
        params.update(configs)
        print(str(params))
        d_client.containers.run(**params)
        status = (True, None)
    except Exception as e:  # pragma: no cover
        status = (False, str(e))
    print(str(status))
    return status
def file_queue(path, template_path="/vent/", r_host="redis"):
    """
    Process a file that was added via the rq-worker: find every plugin in
    plugin_manifest.cfg whose settings match the new file, then either run
    the plugin's container directly or, for GPU-enabled plugins, enqueue a
    job for ``watch.gpu_queue``.

    @param path: queue-item string; everything after the first '_' is the
                 host path of the new file
    @param template_path: directory holding vent.cfg and plugin_manifest.cfg
    @param r_host: hostname of the redis server backing the rq queue
    @return: tuple (bool, result); on success result is the list of images
             that were started/queued, on failure the failed image set or
             the stringified exception
    """
    # imports are function-local on purpose: this function is executed as an
    # rq job, so it must be self-contained when unpickled by the worker
    import ConfigParser
    import ast
    import docker
    import json
    import requests
    import os
    import sys
    from redis import Redis
    from rq import Queue
    from subprocess import check_output, Popen, PIPE
    from string import punctuation
    status = (True, None)
    images = []
    configs = {}
    try:
        d_client = docker.from_env()
        # get the correct path for binding
        vent_config = ConfigParser.RawConfigParser()
        # preserve option-name case (the default optionxform lowercases keys)
        vent_config.optionxform = str
        vent_config.read(template_path+'vent.cfg')
        if (vent_config.has_section('main') and
                vent_config.has_option('main', 'files')):
            files = vent_config.get('main', 'files')
        else:
            files = '/'
        # deal with ~
        files = os.path.expanduser(files)
        chars = set(punctuation)
        chars.discard('/')
        chars.discard('_')
        chars.discard('-')
        file_name = ''
        # escape any funky symbols to allow users FREEDOM of directory name
        for char in files:
            if char in chars:
                if char == '\\':
                    file_name += '\\' + char
                else:
                    file_name += '\\\\' + char
            else:
                file_name += char
        files = file_name
        # queue items look like '<prefix>_<path>'; keep only the path part
        _, path = path.split('_', 1)
        directory = path.rsplit('/', 1)[0]
        # rebase the watched '/files' prefix onto the configured host dir
        path = path.replace('/files', files, 1)
        path_copy = path
        # read in configuration of plugins to get the ones that should run
        # against the path.
        # keep track of images that failed getting configurations for
        failed_images = set()
        config = ConfigParser.RawConfigParser()
        config.optionxform = str
        config.read(template_path+'plugin_manifest.cfg')
        sections = config.sections()
        name_maps = {}
        orig_path_d = {}
        path_cmd = {}
        labels_d = {}
        for section in sections:
            # reset per-plugin state; replay plugins may rewrite `path`
            path = path_copy
            orig_path = ''
            repo = config.get(section, 'repo')
            t_type = config.get(section, 'type')
            labels = {'vent-plugin': '', 'file': path, 'vent.section': section, 'vent.repo': repo, 'vent.type': t_type}
            image_name = config.get(section, 'image_name')
            link_name = config.get(section, 'link_name')
            # container names can't contain ':' or '/', so normalize them
            name_maps[link_name] = image_name.replace(':', '-').replace('/', '-')
            # doesn't matter if it's a repository or registry because both in manifest
            if config.has_option(section, 'groups'):
                if 'replay' in config.get(section, 'groups'):
                    try:
                        # read the vent.cfg file to grab the network-mapping
                        # specified. For replay_pcap
                        n_name = 'network-mapping'
                        n_map = []
                        if vent_config.has_section(n_name):
                            # make sure that the options aren't empty
                            if vent_config.options(n_name):
                                options = vent_config.options(n_name)
                                for option in options:
                                    if vent_config.get(n_name, option):
                                        n_map.append(vent_config.get(
                                            n_name, option))
                                # prepend the first mapping as an extra CLI
                                # argument; keep the bare path for binding
                                # NOTE(review): nesting reconstructed from a
                                # whitespace-stripped dump -- confirm against
                                # upstream watch.py
                                orig_path = path
                                path = str(n_map[0]) + " " + path
                    except Exception as e:  # pragma: no cover
                        failed_images.add(image_name)
                        status = (False, str(e))
            if config.has_option(section, 'service'):
                try:
                    # service settings become container labels verbatim
                    options_dict = json.loads(config.get(section, 'service'))
                    for option in options_dict:
                        value = options_dict[option]
                        labels[option] = value
                except Exception as e:  # pragma: no cover
                    failed_images.add(image_name)
                    status = (False, str(e))
            if config.has_option(section, 'settings'):
                try:
                    options_dict = json.loads(config.get(section, 'settings'))
                    in_base = directory == '/files'
                    # process base by default
                    process_file = in_base
                    # check if this tool shouldn't process the base by default
                    if 'process_base' in options_dict:
                        if options_dict['process_base'] == 'no':
                            process_file = False
                    # check if this tool should look at subdirs created by
                    # other tools' output
                    if 'process_from_tool' in options_dict and not in_base:
                        for tool in options_dict['process_from_tool'].split(','):
                            if tool.replace(' ', '-') in directory:
                                process_file = True
                    if 'ext_types' in options_dict and process_file:
                        ext_types = options_dict['ext_types'].split(',')
                        for ext_type in ext_types:
                            if path.endswith(ext_type):
                                # extension matched: schedule this image
                                images.append(image_name)
                                configs[image_name] = {}
                except Exception as e:  # pragma: no cover
                    failed_images.add(image_name)
                    status = (False, str(e))
            if image_name in configs:
                if config.has_option(section, 'docker'):
                    try:
                        options_dict = ast.literal_eval(config.get(section, 'docker'))
                        for option in options_dict:
                            try:
                                # values may themselves be python literals
                                configs[image_name][option] = ast.literal_eval(options_dict[option])
                            except Exception as e:  # pragma: no cover
                                # not a literal: keep the raw string value
                                configs[image_name][option] = options_dict[option]
                        if 'links' in configs[image_name]:
                            for link in configs[image_name]['links']:
                                if link in name_maps:
                                    # translate link_name keys to container names
                                    configs[image_name]['links'][name_maps[link]] = configs[image_name]['links'].pop(link)
                        # TODO network_mode
                        # TODO volumes_from
                        # TODO external services
                    except Exception as e:  # pragma: no cover
                        failed_images.add(image_name)
                        status = (False, str(e))
            if config.has_option(section, 'gpu') and image_name in configs:
                try:
                    options_dict = json.loads(config.get(section, 'gpu'))
                    if 'enabled' in options_dict:
                        enabled = options_dict['enabled']
                        if enabled == 'yes':
                            configs[image_name]['gpu_options'] = options_dict
                            labels['vent.gpu'] = 'yes'
                            if 'dedicated' in options_dict:
                                labels['vent.gpu.dedicated'] = options_dict['dedicated']
                            if 'device' in options_dict:
                                labels['vent.gpu.device'] = options_dict['device']
                            if 'mem_mb' in options_dict:
                                labels['vent.gpu.mem_mb'] = options_dict['mem_mb']
                            port = ''
                            host = ''
                            if (vent_config.has_section('nvidia-docker-plugin') and
                                    vent_config.has_option('nvidia-docker-plugin', 'port')):
                                port = vent_config.get('nvidia-docker-plugin', 'port')
                            else:
                                port = '3476'
                            if (vent_config.has_section('nvidia-docker-plugin') and
                                    vent_config.has_option('nvidia-docker-plugin', 'host')):
                                host = vent_config.get('nvidia-docker-plugin', 'host')
                            else:
                                # grab the default gateway
                                try:
                                    route = Popen(('/sbin/ip', 'route'),
                                                  stdout=PIPE)
                                    h = check_output(('awk', '/default/ {print$3}'),
                                                     stdin=route.stdout)
                                    route.wait()
                                    host = h.strip()
                                except Exception as e:  # pragma: no cover
                                    # best effort; host stays '' on failure
                                    pass
                            # ask nvidia-docker-plugin which docker cli flags
                            # are needed to expose the gpu to the container
                            nd_url = 'http://' + host + ':' + port + '/v1.0/docker/cli'
                            params = {'vol': 'nvidia_driver'}
                            try:
                                r = requests.get(nd_url, params=params)
                                if r.status_code == 200:
                                    options = r.text.split()
                                    for option in options:
                                        if option.startswith('--volume-driver='):
                                            configs[image_name]['volume_driver'] = option.split("=", 1)[1]
                                        elif option.startswith('--volume='):
                                            vol = option.split("=", 1)[1].split(":")
                                            if 'volumes' in configs[image_name]:
                                                # !! TODO handle if volumes is a list
                                                configs[image_name]['volumes'][vol[0]] = {'bind': vol[1],
                                                                                          'mode': vol[2]}
                                            else:
                                                configs[image_name]['volumes'] = {vol[0]:
                                                                                  {'bind': vol[1],
                                                                                   'mode': vol[2]}}
                                        elif option.startswith('--device='):
                                            dev = option.split("=", 1)[1]
                                            if 'devices' in configs[image_name]:
                                                configs[image_name]['devices'].append(dev +
                                                                                      ":" +
                                                                                      dev +
                                                                                      ":rwm")
                                            else:
                                                configs[image_name]['devices'] = [dev + ":" + dev + ":rwm"]
                                        else:
                                            # unable to parse option provided by
                                            # nvidia-docker-plugin
                                            pass
                            except Exception as e:  # pragma: no cover
                                failed_images.add(image_name)
                                status = (False, str(e))
                                print("Failure with nvidia-docker-plugin: " +
                                      str(e))
                except Exception as e:  # pragma: no cover
                    failed_images.add(image_name)
                    status = (False, str(e))
                    print("Unable to process gpu options: " + str(e))
            # remember per-image command, replay path and labels for phase 2
            path_cmd[image_name] = path
            orig_path_d[image_name] = orig_path
            labels_d[image_name] = labels
        # TODO get syslog address rather than hardcode
        # TODO add group label
        # TODO get group and name for syslog tag
        log_config = {'type': 'syslog',
                      'config': {'syslog-address': 'tcp://0.0.0.0:514',
                                 'syslog-facility': 'daemon',
                                 'tag': path.rsplit('.', 1)[-1]}}
        # setup gpu queue
        can_queue_gpu = True
        try:
            q = Queue(connection=Redis(host=r_host), default_timeout=86400)
        except Exception as e:  # pragma: no cover
            can_queue_gpu = False
            print("Unable to connect to redis: " + str(e))
        # start containers
        for image in images:
            if image not in failed_images:
                orig_path = orig_path_d[image]
                labels = labels_d[image]
                if orig_path:
                    # replay_pcap is special so we can't bind it like normal
                    # since the plugin takes in an additional argument
                    dir_path = orig_path.rsplit('/', 1)[0]
                else:
                    dir_path = path.rsplit('/', 1)[0]
                volumes = {dir_path: {'bind': dir_path, 'mode': 'rw'}}
                if 'volumes' in configs[image]:
                    for volume in volumes:
                        configs[image]['volumes'][volume] = volumes[volume]
                else:
                    configs[image]['volumes'] = volumes
                if 'vent.gpu' in labels and labels['vent.gpu'] == 'yes':
                    if can_queue_gpu:
                        # queue up containers requiring a gpu
                        q_str = json.dumps({'image': image,
                                            'command': path_cmd[image],
                                            'labels': labels,
                                            'detach': True,
                                            'remove': True,
                                            'log_config': log_config,
                                            'configs': configs[image]})
                        q.enqueue('watch.gpu_queue', q_str, ttl=2592000)
                    else:
                        failed_images.add(image)
                else:
                    # non-gpu container: run it directly
                    if 'gpu_options' in configs[image]:
                        del configs[image]['gpu_options']
                    print(str(configs[image]))
                    d_client.containers.run(image=image,
                                            command=path_cmd[image],
                                            labels=labels,
                                            detach=True,
                                            remove=True,
                                            log_config=log_config,
                                            **configs[image])
        if failed_images:
            status = (False, failed_images)
        else:
            status = (True, images)
    except Exception as e:  # pragma: no cover
        status = (False, str(e))
        print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
        print("Failed to process job: " + str(e))
        print(str(configs))
    print(str(status))
    return status
|
{
"content_hash": "88469a67d21bc80b22ef1a6b82aaa3b4",
"timestamp": "",
"source": "github",
"line_count": 440,
"max_line_length": 122,
"avg_line_length": 45.77045454545455,
"alnum_prop": 0.4334376086200904,
"repo_name": "bpagon13/vent",
"id": "92cea9690c5a3c72b29b312afa838eb2cb9b139f",
"size": "20139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vent/core/rq_worker/watch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "227"
},
{
"name": "Makefile",
"bytes": "4747"
},
{
"name": "Python",
"bytes": "434887"
},
{
"name": "Shell",
"bytes": "2103"
}
],
"symlink_target": ""
}
|
""" Cisco_IOS_XR_show_fpd_loc_ng_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR show\-fpd\-loc\-ng package operational data.
This module contains definitions
for the following management objects\:
show\-fpd\: Show hw\-module fpd
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
# NOTE(review): this class follows the YDK auto-generated binding layout
# (nested per-node classes paired with _meta_info/meta-table lookups) --
# prefer regenerating from the YANG model over hand-editing.
class ShowFpd(object):
    """
    Show hw\-module fpd

    .. attribute:: help_locations
        help location table
        **type**\: :py:class:`HelpLocations <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.HelpLocations>`

    .. attribute:: hw_module_fpd
        Display fpds on all locations \-show hw\-module fpd
        **type**\: :py:class:`HwModuleFpd <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.HwModuleFpd>`

    .. attribute:: hw_module_fpd_help_fpd
        Display help\-fpd \-show hw\-module fpd help\-fpd
        **type**\: :py:class:`HwModuleFpdHelpFpd <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.HwModuleFpdHelpFpd>`

    .. attribute:: location_help
        fpd upgradable locations
        **type**\: :py:class:`LocationHelp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.LocationHelp>`

    .. attribute:: locations
        location table
        **type**\: :py:class:`Locations <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.Locations>`

    .. attribute:: package
        gets fpd package info
        **type**\: :py:class:`Package <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.Package>`
    """

    # YANG module identity for this binding
    _prefix = 'show-fpd-loc-ng-oper'
    _revision = '2015-11-09'

    def __init__(self):
        # child containers are created eagerly and re-parented to self so
        # _common_path can be derived by walking up the parent chain
        self.help_locations = ShowFpd.HelpLocations()
        self.help_locations.parent = self
        self.hw_module_fpd = ShowFpd.HwModuleFpd()
        self.hw_module_fpd.parent = self
        self.hw_module_fpd_help_fpd = ShowFpd.HwModuleFpdHelpFpd()
        self.hw_module_fpd_help_fpd.parent = self
        self.location_help = ShowFpd.LocationHelp()
        self.location_help.parent = self
        self.locations = ShowFpd.Locations()
        self.locations.parent = self
        self.package = ShowFpd.Package()
        self.package.parent = self

    class Locations(object):
        """
        location table

        .. attribute:: location
            location
            **type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.Locations.Location>`
        """

        _prefix = 'show-fpd-loc-ng-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.location = YList()
            self.location.parent = self
            self.location.name = 'location'

        class Location(object):
            """
            location

            .. attribute:: location_name  <key>
                Fpd location
                **type**\: str
                **length:** 1..32

            .. attribute:: fpd
                Display fpds on given locations
                **type**\: list of :py:class:`Fpd <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.Locations.Location.Fpd>`
            """

            _prefix = 'show-fpd-loc-ng-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                # key leaf: identifies this list entry in the XPath
                self.location_name = None
                self.fpd = YList()
                self.fpd.parent = self
                self.fpd.name = 'fpd'

            class Fpd(object):
                """
                Display fpds on given locations

                .. attribute:: fpd_name  <key>
                    Fpd Name
                    **type**\: str
                    **length:** 1..32

                .. attribute:: fpd_info_detaile
                    fpd list with all details
                    **type**\: list of :py:class:`FpdInfoDetaile <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.Locations.Location.Fpd.FpdInfoDetaile>`
                """

                _prefix = 'show-fpd-loc-ng-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    # key leaf: identifies this list entry in the XPath
                    self.fpd_name = None
                    self.fpd_info_detaile = YList()
                    self.fpd_info_detaile.parent = self
                    self.fpd_info_detaile.name = 'fpd_info_detaile'

                class FpdInfoDetaile(object):
                    """
                    fpd list with all details

                    .. attribute:: card_name
                        Name of card on which fpd is located
                        **type**\: str

                    .. attribute:: fpd_name
                        fpd name
                        **type**\: str

                    .. attribute:: hw_version
                        hardware version
                        **type**\: str

                    .. attribute:: location
                        fpd location
                        **type**\: str

                    .. attribute:: programd_version
                        image programmed version
                        **type**\: str

                    .. attribute:: running_version
                        image running version
                        **type**\: str

                    .. attribute:: secure_boot_attr
                        secure boot attribute
                        **type**\: str

                    .. attribute:: status
                        status of the fpd
                        **type**\: str
                    """

                    _prefix = 'show-fpd-loc-ng-oper'
                    _revision = '2015-11-09'

                    def __init__(self):
                        self.parent = None
                        self.card_name = None
                        self.fpd_name = None
                        self.hw_version = None
                        self.location = None
                        self.programd_version = None
                        self.running_version = None
                        self.secure_boot_attr = None
                        self.status = None

                    @property
                    def _common_path(self):
                        # keyless list entry: path is derived from the parent
                        if self.parent is None:
                            raise YPYModelError('parent is not set . Cannot derive path.')

                        return self.parent._common_path +'/Cisco-IOS-XR-show-fpd-loc-ng-oper:fpd-info-detaile'

                    def is_config(self):
                        ''' Returns True if this instance represents config data else returns False '''
                        return False

                    def _has_data(self):
                        # a leaf counts as "set" when it is not None
                        if self.card_name is not None:
                            return True

                        if self.fpd_name is not None:
                            return True

                        if self.hw_version is not None:
                            return True

                        if self.location is not None:
                            return True

                        if self.programd_version is not None:
                            return True

                        if self.running_version is not None:
                            return True

                        if self.secure_boot_attr is not None:
                            return True

                        if self.status is not None:
                            return True

                        return False

                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
                        return meta._meta_table['ShowFpd.Locations.Location.Fpd.FpdInfoDetaile']['meta_info']

                @property
                def _common_path(self):
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')
                    if self.fpd_name is None:
                        raise YPYModelError('Key property fpd_name is None')

                    return self.parent._common_path +'/Cisco-IOS-XR-show-fpd-loc-ng-oper:fpd[Cisco-IOS-XR-show-fpd-loc-ng-oper:fpd-name = ' + str(self.fpd_name) + ']'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    if self.fpd_name is not None:
                        return True

                    if self.fpd_info_detaile is not None:
                        for child_ref in self.fpd_info_detaile:
                            if child_ref._has_data():
                                return True

                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
                    return meta._meta_table['ShowFpd.Locations.Location.Fpd']['meta_info']

            @property
            def _common_path(self):
                if self.location_name is None:
                    raise YPYModelError('Key property location_name is None')

                return '/Cisco-IOS-XR-show-fpd-loc-ng-oper:show-fpd/Cisco-IOS-XR-show-fpd-loc-ng-oper:locations/Cisco-IOS-XR-show-fpd-loc-ng-oper:location[Cisco-IOS-XR-show-fpd-loc-ng-oper:location-name = ' + str(self.location_name) + ']'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                if self.location_name is not None:
                    return True

                if self.fpd is not None:
                    for child_ref in self.fpd:
                        if child_ref._has_data():
                            return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
                return meta._meta_table['ShowFpd.Locations.Location']['meta_info']

        @property
        def _common_path(self):

            return '/Cisco-IOS-XR-show-fpd-loc-ng-oper:show-fpd/Cisco-IOS-XR-show-fpd-loc-ng-oper:locations'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            if self.location is not None:
                for child_ref in self.location:
                    if child_ref._has_data():
                        return True

            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
            return meta._meta_table['ShowFpd.Locations']['meta_info']

    class HwModuleFpd(object):
        """
        Display fpds on all locations \-show hw\-module
        fpd

        .. attribute:: fpd_info_detaile
            fpd list with all details
            **type**\: list of :py:class:`FpdInfoDetaile <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.HwModuleFpd.FpdInfoDetaile>`
        """

        _prefix = 'show-fpd-loc-ng-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.fpd_info_detaile = YList()
            self.fpd_info_detaile.parent = self
            self.fpd_info_detaile.name = 'fpd_info_detaile'

        class FpdInfoDetaile(object):
            """
            fpd list with all details

            .. attribute:: card_name
                Name of card on which fpd is located
                **type**\: str

            .. attribute:: fpd_name
                fpd name
                **type**\: str

            .. attribute:: hw_version
                hardware version
                **type**\: str

            .. attribute:: location
                fpd location
                **type**\: str

            .. attribute:: programd_version
                image programmed version
                **type**\: str

            .. attribute:: running_version
                image running version
                **type**\: str

            .. attribute:: secure_boot_attr
                secure boot attribute
                **type**\: str

            .. attribute:: status
                status of the fpd
                **type**\: str
            """

            _prefix = 'show-fpd-loc-ng-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.card_name = None
                self.fpd_name = None
                self.hw_version = None
                self.location = None
                self.programd_version = None
                self.running_version = None
                self.secure_boot_attr = None
                self.status = None

            @property
            def _common_path(self):

                return '/Cisco-IOS-XR-show-fpd-loc-ng-oper:show-fpd/Cisco-IOS-XR-show-fpd-loc-ng-oper:hw-module-fpd/Cisco-IOS-XR-show-fpd-loc-ng-oper:fpd-info-detaile'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                if self.card_name is not None:
                    return True

                if self.fpd_name is not None:
                    return True

                if self.hw_version is not None:
                    return True

                if self.location is not None:
                    return True

                if self.programd_version is not None:
                    return True

                if self.running_version is not None:
                    return True

                if self.secure_boot_attr is not None:
                    return True

                if self.status is not None:
                    return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
                return meta._meta_table['ShowFpd.HwModuleFpd.FpdInfoDetaile']['meta_info']

        @property
        def _common_path(self):

            return '/Cisco-IOS-XR-show-fpd-loc-ng-oper:show-fpd/Cisco-IOS-XR-show-fpd-loc-ng-oper:hw-module-fpd'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            if self.fpd_info_detaile is not None:
                for child_ref in self.fpd_info_detaile:
                    if child_ref._has_data():
                        return True

            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
            return meta._meta_table['ShowFpd.HwModuleFpd']['meta_info']

    class HelpLocations(object):
        """
        help location table

        .. attribute:: help_location
            location
            **type**\: list of :py:class:`HelpLocation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.HelpLocations.HelpLocation>`
        """

        _prefix = 'show-fpd-loc-ng-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.help_location = YList()
            self.help_location.parent = self
            self.help_location.name = 'help_location'

        class HelpLocation(object):
            """
            location

            .. attribute:: location_name  <key>
                Fpd location
                **type**\: str
                **length:** 1..32

            .. attribute:: help_fpd
                Display fpds on given locations
                **type**\: :py:class:`HelpFpd <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.HelpLocations.HelpLocation.HelpFpd>`
            """

            _prefix = 'show-fpd-loc-ng-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                # key leaf: identifies this list entry in the XPath
                self.location_name = None
                self.help_fpd = ShowFpd.HelpLocations.HelpLocation.HelpFpd()
                self.help_fpd.parent = self

            class HelpFpd(object):
                """
                Display fpds on given locations

                .. attribute:: fpd_name
                    Fpd name list
                    **type**\: list of :py:class:`FpdName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.HelpLocations.HelpLocation.HelpFpd.FpdName>`
                """

                _prefix = 'show-fpd-loc-ng-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.fpd_name = YList()
                    self.fpd_name.parent = self
                    self.fpd_name.name = 'fpd_name'

                class FpdName(object):
                    """
                    Fpd name list

                    .. attribute:: fpd_name
                        fpd name
                        **type**\: str

                    .. attribute:: location
                        fpd location
                        **type**\: str
                    """

                    _prefix = 'show-fpd-loc-ng-oper'
                    _revision = '2015-11-09'

                    def __init__(self):
                        self.parent = None
                        self.fpd_name = None
                        self.location = None

                    @property
                    def _common_path(self):
                        # keyless list entry: path is derived from the parent
                        if self.parent is None:
                            raise YPYModelError('parent is not set . Cannot derive path.')

                        return self.parent._common_path +'/Cisco-IOS-XR-show-fpd-loc-ng-oper:fpd-name'

                    def is_config(self):
                        ''' Returns True if this instance represents config data else returns False '''
                        return False

                    def _has_data(self):
                        if self.fpd_name is not None:
                            return True

                        if self.location is not None:
                            return True

                        return False

                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
                        return meta._meta_table['ShowFpd.HelpLocations.HelpLocation.HelpFpd.FpdName']['meta_info']

                @property
                def _common_path(self):
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')

                    return self.parent._common_path +'/Cisco-IOS-XR-show-fpd-loc-ng-oper:help-fpd'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    if self.fpd_name is not None:
                        for child_ref in self.fpd_name:
                            if child_ref._has_data():
                                return True

                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
                    return meta._meta_table['ShowFpd.HelpLocations.HelpLocation.HelpFpd']['meta_info']

            @property
            def _common_path(self):
                if self.location_name is None:
                    raise YPYModelError('Key property location_name is None')

                return '/Cisco-IOS-XR-show-fpd-loc-ng-oper:show-fpd/Cisco-IOS-XR-show-fpd-loc-ng-oper:help-locations/Cisco-IOS-XR-show-fpd-loc-ng-oper:help-location[Cisco-IOS-XR-show-fpd-loc-ng-oper:location-name = ' + str(self.location_name) + ']'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                if self.location_name is not None:
                    return True

                if self.help_fpd is not None and self.help_fpd._has_data():
                    return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
                return meta._meta_table['ShowFpd.HelpLocations.HelpLocation']['meta_info']

        @property
        def _common_path(self):

            return '/Cisco-IOS-XR-show-fpd-loc-ng-oper:show-fpd/Cisco-IOS-XR-show-fpd-loc-ng-oper:help-locations'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            if self.help_location is not None:
                for child_ref in self.help_location:
                    if child_ref._has_data():
                        return True

            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
            return meta._meta_table['ShowFpd.HelpLocations']['meta_info']

    class HwModuleFpdHelpFpd(object):
        """
        Display help\-fpd \-show hw\-module fpd help\-fpd

        .. attribute:: fpd_name
            Fpd name list
            **type**\: list of :py:class:`FpdName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.HwModuleFpdHelpFpd.FpdName>`
        """

        _prefix = 'show-fpd-loc-ng-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.fpd_name = YList()
            self.fpd_name.parent = self
            self.fpd_name.name = 'fpd_name'

        class FpdName(object):
            """
            Fpd name list

            .. attribute:: fpd_name
                fpd name
                **type**\: str

            .. attribute:: location
                fpd location
                **type**\: str
            """

            _prefix = 'show-fpd-loc-ng-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.fpd_name = None
                self.location = None

            @property
            def _common_path(self):

                return '/Cisco-IOS-XR-show-fpd-loc-ng-oper:show-fpd/Cisco-IOS-XR-show-fpd-loc-ng-oper:hw-module-fpd-help-fpd/Cisco-IOS-XR-show-fpd-loc-ng-oper:fpd-name'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                if self.fpd_name is not None:
                    return True

                if self.location is not None:
                    return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
                return meta._meta_table['ShowFpd.HwModuleFpdHelpFpd.FpdName']['meta_info']

        @property
        def _common_path(self):

            return '/Cisco-IOS-XR-show-fpd-loc-ng-oper:show-fpd/Cisco-IOS-XR-show-fpd-loc-ng-oper:hw-module-fpd-help-fpd'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            if self.fpd_name is not None:
                for child_ref in self.fpd_name:
                    if child_ref._has_data():
                        return True

            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
            return meta._meta_table['ShowFpd.HwModuleFpdHelpFpd']['meta_info']

    class Package(object):
        """
        gets fpd package info

        .. attribute:: fpd_pkg_data
            fpd pkg list
            **type**\: list of :py:class:`FpdPkgData <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.Package.FpdPkgData>`
        """

        _prefix = 'show-fpd-loc-ng-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.fpd_pkg_data = YList()
            self.fpd_pkg_data.parent = self
            self.fpd_pkg_data.name = 'fpd_pkg_data'

        class FpdPkgData(object):
            """
            fpd pkg list

            .. attribute:: card_type
                card type
                **type**\: str

            .. attribute:: fpd_desc
                fpd desc
                **type**\: str

            .. attribute:: fpd_ver
                fpd version
                **type**\: str

            .. attribute:: min_hw_ver
                minimum hw version
                **type**\: str

            .. attribute:: min_sw_ver
                minimum sw version
                **type**\: str

            .. attribute:: upgrade_method
                reload or not
                **type**\: str
            """

            _prefix = 'show-fpd-loc-ng-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.card_type = None
                self.fpd_desc = None
                self.fpd_ver = None
                self.min_hw_ver = None
                self.min_sw_ver = None
                self.upgrade_method = None

            @property
            def _common_path(self):

                return '/Cisco-IOS-XR-show-fpd-loc-ng-oper:show-fpd/Cisco-IOS-XR-show-fpd-loc-ng-oper:package/Cisco-IOS-XR-show-fpd-loc-ng-oper:fpd-pkg-data'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                if self.card_type is not None:
                    return True

                if self.fpd_desc is not None:
                    return True

                if self.fpd_ver is not None:
                    return True

                if self.min_hw_ver is not None:
                    return True

                if self.min_sw_ver is not None:
                    return True

                if self.upgrade_method is not None:
                    return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
                return meta._meta_table['ShowFpd.Package.FpdPkgData']['meta_info']

        @property
        def _common_path(self):

            return '/Cisco-IOS-XR-show-fpd-loc-ng-oper:show-fpd/Cisco-IOS-XR-show-fpd-loc-ng-oper:package'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            if self.fpd_pkg_data is not None:
                for child_ref in self.fpd_pkg_data:
                    if child_ref._has_data():
                        return True

            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
            return meta._meta_table['ShowFpd.Package']['meta_info']

    class LocationHelp(object):
        """
        fpd upgradable locations

        .. attribute:: location_name
            card location list
            **type**\: list of :py:class:`LocationName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_show_fpd_loc_ng_oper.ShowFpd.LocationHelp.LocationName>`
        """

        _prefix = 'show-fpd-loc-ng-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.location_name = YList()
            self.location_name.parent = self
            self.location_name.name = 'location_name'

        class LocationName(object):
            """
            card location list

            .. attribute:: location_name
                card location
                **type**\: str
            """

            _prefix = 'show-fpd-loc-ng-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.location_name = None

            @property
            def _common_path(self):

                return '/Cisco-IOS-XR-show-fpd-loc-ng-oper:show-fpd/Cisco-IOS-XR-show-fpd-loc-ng-oper:location-help/Cisco-IOS-XR-show-fpd-loc-ng-oper:location-name'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                if self.location_name is not None:
                    return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
                return meta._meta_table['ShowFpd.LocationHelp.LocationName']['meta_info']

        @property
        def _common_path(self):

            return '/Cisco-IOS-XR-show-fpd-loc-ng-oper:show-fpd/Cisco-IOS-XR-show-fpd-loc-ng-oper:location-help'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            if self.location_name is not None:
                for child_ref in self.location_name:
                    if child_ref._has_data():
                        return True

            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
            return meta._meta_table['ShowFpd.LocationHelp']['meta_info']

    @property
    def _common_path(self):

        return '/Cisco-IOS-XR-show-fpd-loc-ng-oper:show-fpd'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        if self.help_locations is not None and self.help_locations._has_data():
            return True

        if self.hw_module_fpd is not None and self.hw_module_fpd._has_data():
            return True

        if self.hw_module_fpd_help_fpd is not None and self.hw_module_fpd_help_fpd._has_data():
            return True

        if self.location_help is not None and self.location_help._has_data():
            return True

        if self.locations is not None and self.locations._has_data():
            return True

        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_show_fpd_loc_ng_oper as meta
        return meta._meta_table['ShowFpd']['meta_info']
|
{
"content_hash": "7e93871a46228df68009c391c9b7a689",
"timestamp": "",
"source": "github",
"line_count": 1048,
"max_line_length": 248,
"avg_line_length": 31.920801526717558,
"alnum_prop": 0.47965802768062654,
"repo_name": "111pontes/ydk-py",
"id": "b18c388ba69ebf9caeae70c0ae3b6a486afb557c",
"size": "33453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_show_fpd_loc_ng_oper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117948"
}
],
"symlink_target": ""
}
|
from flask import Flask
# Fix: the ``flask.ext`` import namespace was deprecated in Flask 0.11 and
# removed in Flask 1.0; extensions are imported under their own package name.
from flask_restplus import Api

# WSGI application object for the dmon-agent service.
app = Flask("dmon-agent")
# Swagger-enabled API wrapper; version/title/description feed the Swagger UI.
api = Api(app, version='0.0.6', title='DICE Monitoring Agent API',
          description="RESTful API for the DICE Monitoring Platform Agent (dmon-agent)",
          )
# changes the descriptor on the Swagger WUI and appends to api /dmon and then /v1
agent = api.namespace('agent', description='dmon agent operations')
|
{
"content_hash": "46a1fc530fed900155b8678e515d8d2a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 89,
"avg_line_length": 36.90909090909091,
"alnum_prop": 0.7142857142857143,
"repo_name": "igabriel85/IeAT-DICE-Repository",
"id": "2fb065c51850c140d246677c1d256c0ea77893b0",
"size": "406",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "dmon-agent/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "651922"
},
{
"name": "Ruby",
"bytes": "27604"
},
{
"name": "Shell",
"bytes": "74587"
}
],
"symlink_target": ""
}
|
import urwid
class Popup(urwid.WidgetWrap):
    """
    A popup menu rendered on top of another BoxWidget.

    Attributes:
        selected -- the item chosen with <RETURN>, or None while nothing
                    has been selected yet.
    """

    selected = None

    def __init__(self, menu_list, attr, pos, body):
        """
        menu_list -- a list of widgets with the menu entries
        attr -- a tuple (background, active_item) of attributes
        pos -- a tuple (x, y), position of the menu widget
        body -- widget displayed beneath the message widget
        """
        entries = list(menu_list)
        # The popup is as tall as the entry count and as wide as the
        # longest entry label (plus a 2-column margin for the overlay).
        height = len(menu_list)
        label_widths = [len(entry.original_widget.text) for entry in entries]
        width = max(label_widths) if label_widths else 0
        # Build the ListBox and overlay it centred on top of body.
        self._listbox = urwid.AttrWrap(urwid.ListBox(entries), attr[0])
        overlay = urwid.Overlay(self._listbox, body, 'center',
                                width + 2, 'middle', height)
        urwid.WidgetWrap.__init__(self, overlay)

    def keypress(self, size, key):
        """
        <RETURN> selects the focused item; every other key is passed on
        to the wrapped ListBox.
        """
        if key != "enter":
            return self._listbox.keypress(size, key)
        (widget, _focus) = self._listbox.get_focus()
        (text, _attrs) = widget.get_text()
        self.selected = text[1:]  # Get rid of the leading space...
|
{
"content_hash": "6a3c8cf934d00b77aa7960e7e279e2b3",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 77,
"avg_line_length": 32,
"alnum_prop": 0.571078431372549,
"repo_name": "toxinu/pyhn",
"id": "3a3a51d28de7d9f58a57ce154ec724a9fe5fe933",
"size": "1656",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pyhn/popup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54928"
}
],
"symlink_target": ""
}
|
from django.contrib import admin

from . import models

# Expose the spectral-indices app models in the Django admin site.
for _model in (models.SpectralIndicesTask,
               models.UserHistory,
               models.ToolInfo,
               models.ResultType):
    admin.site.register(_model)
|
{
"content_hash": "80705b4d0486a17d62d68b063b5fe0e5",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 47,
"avg_line_length": 31.285714285714285,
"alnum_prop": 0.8401826484018264,
"repo_name": "ceos-seo/data_cube_ui",
"id": "a9acd9fe8abae712417d9545e3b6a123f88d4530",
"size": "1224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/spectral_indices/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "402543"
},
{
"name": "GLSL",
"bytes": "167522"
},
{
"name": "HTML",
"bytes": "8002318"
},
{
"name": "JavaScript",
"bytes": "46178533"
},
{
"name": "PHP",
"bytes": "28128"
},
{
"name": "PLSQL",
"bytes": "14578"
},
{
"name": "Python",
"bytes": "908507"
},
{
"name": "Shell",
"bytes": "21979"
},
{
"name": "TSQL",
"bytes": "31758"
}
],
"symlink_target": ""
}
|
from iotronicclient.common import utils
from iotronicclient.v1 import board_shell
from iotronicclient.v1 import plugin_injection_shell
from iotronicclient.v1 import plugin_shell
COMMAND_MODULES = [
board_shell,
plugin_shell,
plugin_injection_shell,
]
def enhance_parser(parser, subparsers, cmd_mapper):
"""Enhance parser with API version specific options.
Take a basic (nonversioned) parser and enhance it with
commands and options specific for this version of API.
:param parser: top level parser
:param subparsers: top level parser's subparsers collection
where subcommands will go
"""
for command_module in COMMAND_MODULES:
utils.define_commands_from_module(subparsers, command_module,
cmd_mapper)
|
{
"content_hash": "4e48fb9744aa2dbf4f70055bc41f8cc3",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 69,
"avg_line_length": 32.76,
"alnum_prop": 0.6971916971916972,
"repo_name": "MDSLab/python-iotronicclient",
"id": "6589eef7884eb4d622b5a6eb21cad2955b7e659b",
"size": "1393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iotronicclient/v1/shell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "172299"
}
],
"symlink_target": ""
}
|
"""Tool for converting raw object detection data into the COCO format."""
import abc
import collections
import hashlib
import json
import os
from typing import Any, Generator, Iterable, Mapping, MutableMapping, Optional, Set, Tuple
import six
import tensorflow.compat.v1 as tf
import tensorflow_datasets.public_api as tfds
import image_utils
from object_detection.object_detection_data import bbox_utils
# The Type for a processed example. It is expected to contain the ID and the
# TFDS-compatible map.
ProcessedExample = Tuple[int, Mapping[str, Any]]
# Version string stamped into the generated TFDS dataset metadata.
_VERSION = '0.1.0'
class ObjectDetectionConfig(tfds.core.BuilderConfig, abc.ABC):
  """Base class describing the raw input of an object-detection dataset.

  A concrete subclass declares how many classes the dataset has, how its
  bounding boxes are encoded, which splits exist, and supplies a generator
  yielding per-example `dict` objects with the essential inputs needed to
  convert raw data into the Object Detection format.
  """

  @property
  @abc.abstractmethod
  def num_labels(self) -> int:
    """The number of distinct labels in the dataset."""
    raise NotImplementedError

  @property
  @abc.abstractmethod
  def bbox_format(self) -> bbox_utils.BBoxFormat:
    """Encoding of the raw bounding boxes; see bbox_utils for details."""
    raise NotImplementedError

  @property
  @abc.abstractmethod
  def supported_modes(self) -> Set[str]:
    """The modes this dataset provides.

    Returns:
      A `Set` drawn from {'train', 'test', 'validation'}.
    """
    raise NotImplementedError

  @abc.abstractmethod
  def example_generator(self, mode: str):
    """Yields the essential raw inputs for each example of `mode`.

    Args:
      mode: `str`, one of 'train', 'validation', 'test'.

    Yields:
      `dict` with the keys:
        'image_path_or_name': `str` path loadable with `tf.io.gfile.GFile`,
          or a bare file name (in which case 'image_fobj' must be given).
        'image_fobj': optional opened image (e.g. open(image_path, 'rb')).
          Required when 'image_path_or_name' is not a loadable path.
        'image_id': optional integer ID for the image. Generated when
          absent; generated IDs may not be consistent between runs.
        'bbox_info': list of bounding-box dicts, each with keys:
          'bbox': tuple in the format specified by `bbox_format`.
          'label': class label of the box, or its string representation.
          'label_id': optional, required semantics when 'label' is the
            string representation. Generated when absent; generated IDs
            may not be consistent between runs.
        'annotation_id': optional ID of the bounding-box annotation.
          Generated when absent; generated IDs may not be consistent
          between runs.
    """
    raise NotImplementedError
class ObjectDetectionBuilder(tfds.core.GeneratorBasedBuilder):
  """A TFDS Dataset Builder for Object Detection Datasets.

  This Builder processes TFRecords in a COCO style format given an
  implementation of ObjectDetectionConfig. It will also create a JSON file
  in the same format as COCO.

  Example usage:
  ```
  config = [implementation of ObjectDetectionConfig](...)
  dataset = ObjectDetectionBuilder(config=config)
  dataset.download_and_prepare()
  ```
  """

  VERSION = tfds.core.Version(_VERSION)

  def __init__(self,
               data_dir: Optional[str] = None,
               config: ObjectDetectionConfig = None,
               version: Optional[tfds.core.Version] = None,
               **kwargs):
    """Refer to `tensorflow_datasets.core.dataset_builder`.

    Args:
      data_dir: The directory used to save TFDS converted data.
      config: The ObjectDetectionConfig implementation.
      version: A TFDS version, if applicable.
      **kwargs: Keyword arguments passed to super.
    """
    super(ObjectDetectionBuilder, self).__init__(data_dir=data_dir,
                                                 config=config,
                                                 version=version,
                                                 **kwargs)
    # Maps string class labels to generated integer IDs.
    self._label_id_map = {}
    # Per-family monotonic counters backing _get_id().
    self._id_manager = collections.Counter()
    # Per-mode COCO-style dicts, filled during generation and written to
    # JSON files in download_and_prepare().
    self._json_dict = {}

  def _info(self) -> tfds.core.DatasetInfo:
    """Refer to `tensorflow_datasets.core.dataset_builder`."""
    # isinstance is the idiomatic (and subclass-safe) form of the previous
    # issubclass(type(...), ...) check.
    if not isinstance(self.builder_config, ObjectDetectionConfig):
      raise ValueError('Provided config is not the correct type. Please provide'
                       ' a config inheriting ObjectDetectionConfig.')
    n_labels = self.builder_config.num_labels
    return tfds.core.DatasetInfo(
        builder=self,
        features=tfds.features.FeaturesDict({
            'image': {
                # NOTE(review): uint8 caps height/width at 255 — confirm
                # whether int64 was intended for images larger than 255px.
                'height': tfds.features.Tensor(shape=(), dtype=tf.uint8),
                'width': tfds.features.Tensor(shape=(), dtype=tf.uint8),
                'filename': tfds.features.Text(),
                'source_id': tfds.features.Tensor(shape=(), dtype=tf.int64),
                'encoded': tfds.features.Image(encoding_format='jpeg'),
                'format': tfds.features.Text(),
                'key': {
                    'sha256': tfds.features.Text(),
                },
                'object': tfds.features.Sequence({
                    'bbox': tfds.features.BBoxFeature(),
                    'class': {
                        'text': tfds.features.Text(),
                        'label': tfds.features.ClassLabel(num_classes=n_labels),
                    }})
            }
        }))

  def _split_generators(
      self,
      dl_manager: tfds.download.DownloadManager
  ) -> Iterable[tfds.core.SplitGenerator]:
    """Defines the splits for TFDS builder."""
    # One SplitGenerator per supported mode, in canonical order
    # (train, validation, test) — same order as the previous if-chain.
    mode_splits = (('train', tfds.Split.TRAIN),
                   ('validation', tfds.Split.VALIDATION),
                   ('test', tfds.Split.TEST))
    return [
        tfds.core.SplitGenerator(name=split, gen_kwargs={'mode': mode})
        for mode, split in mode_splits
        if mode in self.builder_config.supported_modes
    ]

  def _get_id(self, id_family: str) -> int:
    """Simple ID generator based on a counter.

    This is a simple ID generator that assigns IDs based on the number of
    items counted.

    Args:
      id_family: The string representation of the 'family' of which to
        generate an id.

    Returns:
      The family member's ID.
    """
    res = self._id_manager[id_family]
    self._id_manager[id_family] += 1
    return res

  def _convert_raw_example(
      self,
      mode_dict: MutableMapping[str, Any],
      example: Mapping[str, Any]) -> ProcessedExample:
    """Converts the raw data in the example into a TFDS compatible format.

    Args:
      mode_dict: `defaultdict(list)` used to populate the COCO style JSON
        file.
      example: A `dict` as specified in ObjectDetectionConfig.

    Returns:
      A tuple consisting of image_id (`int`) and a `dict` for TFDS.

    Raises:
      ImageDecodingError if the example image is not formatted properly.
      InvalidBBoxError if the example bounding box is not formatted properly.
    """
    img_path = example['image_path_or_name']
    base_name = os.path.basename(img_path)
    # Only open the path when no file object was supplied: dict.get with a
    # call in the default argument would construct the GFile eagerly even
    # when 'image_fobj' is present (and img_path may be a bare file name).
    img_fobj = example.get('image_fobj')
    if img_fobj is None:
      img_fobj = tf.io.gfile.GFile(img_path, 'rb')
    img_bytes, img_shape = image_utils.image_to_jpeg(fobj=img_fobj,
                                                     filename=base_name)
    img_format = 'JPEG'
    key = hashlib.sha256(img_bytes.read()).hexdigest()
    img_bytes.seek(0)  # Rewind so the encoded image can be read again.
    bboxes = example['bbox_info']
    processed_bboxes = []
    img_height = img_shape[0]
    img_width = img_shape[1]
    img_id = example.get('image_id', self._get_id('image'))
    mode_dict['images'].append({
        'id': img_id,
        'width': img_width,
        'height': img_height,
    })
    for bbox_info in bboxes:
      annotations_bbox = bbox_info['bbox']
      bbox = bbox_utils.BBox(bbox=annotations_bbox,
                             fmt=self.builder_config.bbox_format,
                             img_width=img_width,
                             img_height=img_height)
      label = bbox_info['label']
      if isinstance(label, int):
        text = str(label)
      elif isinstance(label, six.string_types):
        text = label
        label = bbox_info.get('label_id', self._get_label_id(text))
      else:
        raise TypeError(
            'The provided label was not a string or int. Got: {}'.format(
                type(label)))
      if label >= self.builder_config.num_labels:
        raise ValueError('Provided label {} for {} is greater than '
                         'the number of classes specified. num_classes: '
                         '{}'.format(label,
                                     base_name,
                                     self.builder_config.num_labels))
      annotation_id = example.get('annotation_id', self._get_id('annotation'))
      bbox.convert(bbox_utils.BBoxFormat.NORMALIZED_MIN_MAX)
      xmin, xmax, ymin, ymax = bbox.as_tuple()
      # NOTE(review): the rebound value is unused afterwards; the call is
      # kept in case convert() mutates the box for the COCO JSON — confirm
      # against bbox_utils semantics.
      bbox = bbox.convert(bbox_utils.BBoxFormat.WIDTH_HEIGHT)
      mode_dict['annotations'].append({
          'id': annotation_id,
          'image_id': img_id,
          'category_id': label,
          'bbox': annotations_bbox,
      })
      processed_bboxes.append({
          'bbox': tfds.features.BBox(ymin=ymin,
                                     xmin=xmin,
                                     ymax=ymax,
                                     xmax=xmax),
          'class': {
              'text': text,
              'label': label,
          }
      })
    return img_id, {
        'image': {
            # Fixed: 'height' was previously set to img_width (height and
            # width were swapped relative to the COCO dict above).
            'height': img_height,
            'width': img_width,
            'filename': img_path,
            'source_id': img_id,
            'encoded': img_bytes,
            'format': img_format,
            'key': {
                'sha256': key,
            },
            'object': processed_bboxes,
        }
    }

  def _generate_examples(
      self, mode: str) -> Generator[ProcessedExample, None, None]:
    """Process specified examples into required TFDS outputs."""
    if mode not in self._json_dict:
      self._json_dict[mode] = collections.defaultdict(list)
    generator = self.builder_config.example_generator(mode)
    for example in generator:
      img_id, processed_example = self._convert_raw_example(
          self._json_dict[mode], example)
      yield img_id, processed_example

  def _get_label_id(self, label: str) -> int:
    """If the class label was not provided as an int, create the class id."""
    try:
      return self._label_id_map[label]
    except KeyError:
      label_id = self._get_id('label')
      self._label_id_map[label] = label_id
      return label_id

  def download_and_prepare(self, **kwargs) -> None:
    """Build the dataset, then dump one COCO-style JSON file per mode."""
    super(ObjectDetectionBuilder, self).download_and_prepare(**kwargs)
    categories_list = list(range(self.builder_config.num_labels))
    for mode in self.builder_config.supported_modes:
      self._json_dict[mode]['categories'] = categories_list
      json_path = os.path.join(self._data_dir, 'instances_{}.json'.format(mode))
      with open(json_path, 'w') as f:
        json.dump(self._json_dict[mode], f)
      tf.logging.info('Created JSON file {}'.format(json_path))
|
{
"content_hash": "fa6db3c628b3f85192dd2f33948957ba",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 90,
"avg_line_length": 35.854651162790695,
"alnum_prop": 0.5973731149667586,
"repo_name": "tensorflow/tpu",
"id": "9596c77a7f1239b108a6a9a95d66160814844821",
"size": "13023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/data_converter/object_detection/object_detection_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "754301"
},
{
"name": "Dockerfile",
"bytes": "2734"
},
{
"name": "Go",
"bytes": "226317"
},
{
"name": "Jupyter Notebook",
"bytes": "56231509"
},
{
"name": "Makefile",
"bytes": "2369"
},
{
"name": "Python",
"bytes": "3444271"
},
{
"name": "Shell",
"bytes": "21032"
},
{
"name": "Starlark",
"bytes": "164"
}
],
"symlink_target": ""
}
|
from functools import wraps
from django.contrib.auth.decorators import login_required as dj_login_required
from constance import config
def login_required(func):
    """
    Wrap *func* with Django's login_required, enforcing it only while the
    ENABLE_PERMISSION constance switch is turned on.
    """
    @wraps(func)
    def func_wrapper(*args, **kwargs):
        # Permission checking can be toggled at runtime via constance.
        if not config.ENABLE_PERMISSION:
            return func(*args, **kwargs)
        return dj_login_required(func)(*args, **kwargs)
    return func_wrapper
|
{
"content_hash": "9f7e71b3c833957749b5509895fcce2b",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 28.357142857142858,
"alnum_prop": 0.6775818639798489,
"repo_name": "ctripcorp/tars",
"id": "fab835843e0cc4e64ab76d8321603034f7c46130",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tars/surface/decorators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "372246"
},
{
"name": "Dockerfile",
"bytes": "188"
},
{
"name": "HTML",
"bytes": "175411"
},
{
"name": "JavaScript",
"bytes": "1190261"
},
{
"name": "Makefile",
"bytes": "1731"
},
{
"name": "Python",
"bytes": "305797"
},
{
"name": "Shell",
"bytes": "12737"
}
],
"symlink_target": ""
}
|
import asposebarcodecloud
from asposebarcodecloud.BarcodeApi import BarcodeApi
from asposebarcodecloud.BarcodeApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
import ConfigParser
# NOTE(review): Python 2 script (print statements, ConfigParser module).
config = ConfigParser.ConfigParser()
# Credentials and output folder come from a shared properties file.
config.readfp(open(r'../../data/config.properties'))
apiKey = config.get('AppConfig', 'api_key')
appSid = config.get('AppConfig', 'app_sid')
out_folder = config.get('AppConfig', 'out_folder')
data_folder = "../../data/" #resource data folder
#ExStart:1
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Barcode API SDK
api_client = asposebarcodecloud.ApiClient.ApiClient(apiKey, appSid, True)
barcodeApi = BarcodeApi(api_client);
#Set the barcode file name created on server
name = "sample-barcode"
#Set Text to encode inside barcode
text = "Aspose.BarCode"
#Set Barcode Symbology
#NOTE: 'type' and 'format' shadow Python builtins; kept for API parity.
type = "Code128"
#Set Generated Barcode Image Format
format = "PNG"
#Set Location of the code
codeLocation = "Above"
#Set if checksum will be added to barcode image
enableChecksum = "Yes"
try:
#invoke Aspose.BarCode Cloud SDK API to generate barcode with checksum and save in cloud storage
response = barcodeApi.PutBarcodeGenerateFile(name, file= None, text=text, type=type, format=format, enableChecksum=enableChecksum, codeLocation=codeLocation)
if response.Status == "OK":
#download generated barcode from cloud storage
response = storageApi.GetDownload(Path=name)
outfilename = out_folder + name + "." + format
#stream the downloaded image to disk chunk by chunk
with open(outfilename, 'wb') as f:
for chunk in response.InputStream:
f.write(chunk)
except ApiException as ex:
#surface API failures without aborting the interpreter
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
#ExEnd:1
|
{
"content_hash": "a61db0b08bca5178b8924b9ffda0e88a",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 161,
"avg_line_length": 33.078125,
"alnum_prop": 0.6939064714218234,
"repo_name": "farooqsheikhpk/Aspose_BarCode_Cloud",
"id": "0be9b77501ab9c06fc30a71206ff5a51835b2ff5",
"size": "2117",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Examples/Python/generating-saving/cloud-storage/generate-barcode-with-checksum-option.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "56179"
},
{
"name": "JavaScript",
"bytes": "46992"
},
{
"name": "Objective-C",
"bytes": "71718"
},
{
"name": "PHP",
"bytes": "49676"
},
{
"name": "Python",
"bytes": "60095"
},
{
"name": "Ruby",
"bytes": "302"
}
],
"symlink_target": ""
}
|
from statsmodels.regression.linear_model import OLS
from sklearn import cross_validation
import numpy as N
def get_balanced_folds(y, nfolds, pthresh=0.8):
    """
    Find a cross-validation split whose folds are balanced in their
    distributions of the y values, using an ANOVA across CV folds
    (see Kohavi, 1995). Random splits are redrawn until the F-test
    p-value of the fold-membership design exceeds pthresh.
    """
    nsubs = len(y)
    # Keep redrawing random fold assignments until one is balanced enough.
    while True:
        cv = cross_validation.KFold(n=nsubs, n_folds=nfolds, shuffle=True)
        design = N.zeros((nsubs, nfolds))  # fold-membership indicator matrix
        for fold_idx, (train, test) in enumerate(cv):
            design[test, fold_idx] = 1
        fit = OLS(y - N.mean(y), design).fit()
        if fit.f_pvalue > pthresh:
            return cv
|
{
"content_hash": "6fe3637255ab91d009b465b3f6078486",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 70,
"avg_line_length": 25.774193548387096,
"alnum_prop": 0.6220275344180225,
"repo_name": "poldracklab/poldracklab-base",
"id": "c105a3be73ecc53656e68c113e8a505ef9485f96",
"size": "801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "machine-learning/get_balanced_folds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5450"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import collections
from django import forms
from django.core.exceptions import ValidationError
from django.forms.utils import ErrorList
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.html import format_html, format_html_join
import six
from .base import Block, DeclarativeSubBlocksMetaclass
from .utils import js_dict
__all__ = ['BaseStructBlock', 'StructBlock', 'StructValue']
class BaseStructBlock(Block):
"""A block composed of a fixed set of named child blocks."""
class Meta:
default = {}
template = "wagtailadmin/blocks/struct.html"
def __init__(self, local_blocks=None, **kwargs):
"""Collect child blocks from class-level base_blocks plus local_blocks."""
self._constructor_kwargs = kwargs
super(BaseStructBlock, self).__init__(**kwargs)
self.child_blocks = self.base_blocks.copy() # create a local (shallow) copy of base_blocks so that it can be supplemented by local_blocks
if local_blocks:
for name, block in local_blocks:
block.set_name(name)
self.child_blocks[name] = block
# Cache each child's JS initializer so js_initializer() can skip children without one.
self.child_js_initializers = {}
for name, block in self.child_blocks.items():
js_initializer = block.js_initializer()
if js_initializer is not None:
self.child_js_initializers[name] = js_initializer
self.dependencies = self.child_blocks.values()
def get_default(self):
"""
Any default value passed in the constructor or self.meta is going to be a dict
rather than a StructValue; for consistency, we need to convert it to a StructValue
for StructBlock to work with
"""
return StructValue(self, self.meta.default.items())
def js_initializer(self):
# skip JS setup entirely if no children have js_initializers
if not self.child_js_initializers:
return None
return "StructBlock(%s)" % js_dict(self.child_js_initializers)
@property
def media(self):
# JS that wires up the child initializers emitted by js_initializer().
return forms.Media(js=['wagtailadmin/js/blocks/struct.js'])
def render_form(self, value, prefix='', errors=None):
"""Render the edit form, distributing validation errors to child blocks."""
if errors:
if len(errors) > 1:
# We rely on StructBlock.clean throwing a single ValidationError with a specially crafted
# 'params' attribute that we can pull apart and distribute to the child blocks
raise TypeError('StructBlock.render_form unexpectedly received multiple errors')
error_dict = errors.as_data()[0].params
else:
error_dict = {}
child_renderings = [
block.render_form(value.get(name, block.get_default()), prefix="%s-%s" % (prefix, name),
errors=error_dict.get(name))
for name, block in self.child_blocks.items()
]
list_items = format_html_join('\n', "<li>{0}</li>", [
[child_rendering]
for child_rendering in child_renderings
])
if self.label:
return format_html('<div class="struct-block"><label>{0}</label> <ul>{1}</ul></div>', self.label, list_items)
else:
return format_html('<div class="struct-block"><ul>{0}</ul></div>', list_items)
def value_from_datadict(self, data, files, prefix):
"""Assemble a StructValue from the submitted form data."""
return StructValue(self, [
(name, block.value_from_datadict(data, files, '%s-%s' % (prefix, name)))
for name, block in self.child_blocks.items()
])
def clean(self, value):
"""Clean each child value, collecting per-child errors into one ValidationError."""
result = [] # build up a list of (name, value) tuples to be passed to the StructValue constructor
errors = {}
for name, val in value.items():
try:
result.append((name, self.child_blocks[name].clean(val)))
except ValidationError as e:
errors[name] = ErrorList([e])
if errors:
# The message here is arbitrary - StructBlock.render_form will suppress it
# and delegate the errors contained in the 'params' dict to the child blocks instead
raise ValidationError('Validation error in StructBlock', params=errors)
return StructValue(self, result)
def to_python(self, value):
# recursively call to_python on children and return as a StructValue
return StructValue(self, [
(
name,
(child_block.to_python(value[name]) if name in value else child_block.get_default())
# NB the result of get_default is NOT passed through to_python, as it's expected
# to be in the block's native type already
)
for name, child_block in self.child_blocks.items()
])
def get_prep_value(self, value):
# recursively call get_prep_value on children and return as a plain dict
return dict([
(name, self.child_blocks[name].get_prep_value(val))
for name, val in value.items()
])
def get_searchable_content(self, value):
"""Gather searchable text from every child block."""
content = []
for name, block in self.child_blocks.items():
content.extend(block.get_searchable_content(value.get(name, block.get_default())))
return content
def deconstruct(self):
"""
Always deconstruct StructBlock instances as if they were plain StructBlocks with all of the
field definitions passed to the constructor - even if in reality this is a subclass of StructBlock
with the fields defined declaratively, or some combination of the two.
This ensures that the field definitions get frozen into migrations, rather than leaving a reference
to a custom subclass in the user's models.py that may or may not stick around.
"""
path = 'wagtail.wagtailcore.blocks.StructBlock'
args = [self.child_blocks.items()]
kwargs = self._constructor_kwargs
return (path, args, kwargs)
class StructBlock(six.with_metaclass(DeclarativeSubBlocksMetaclass, BaseStructBlock)):
# Concrete StructBlock: BaseStructBlock plus declarative sub-block collection
# provided by the metaclass.
pass
@python_2_unicode_compatible  # provide equivalent __unicode__ and __str__ methods on Py2
class StructValue(collections.OrderedDict):
    """An ordered mapping of the child values belonging to a StructBlock."""

    def __init__(self, block, *args):
        super(StructValue, self).__init__(*args)
        self.block = block

    def __str__(self):
        # Rendering is delegated to the owning block.
        return self.block.render(self)

    @cached_property
    def bound_blocks(self):
        """Child blocks bound to their current values, keyed by name."""
        pairs = [
            (child_name, child_block.bind(self.get(child_name)))
            for child_name, child_block in self.block.child_blocks.items()
        ]
        return collections.OrderedDict(pairs)
|
{
"content_hash": "dd21262328d805209cccc2daa8d0fe2d",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 146,
"avg_line_length": 38.06395348837209,
"alnum_prop": 0.629295860699557,
"repo_name": "stevenewey/wagtail",
"id": "94a73a563e67042faced81214e4e1f0096e1e43a",
"size": "6547",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wagtail/wagtailcore/blocks/struct_block.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "149334"
},
{
"name": "HTML",
"bytes": "239663"
},
{
"name": "JavaScript",
"bytes": "84658"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "1378786"
},
{
"name": "Shell",
"bytes": "15249"
}
],
"symlink_target": ""
}
|
import unittest
import pyxb.binding.datatypes as xsd
class Test_decimal (unittest.TestCase):
def testRange (self):
self.fail("Datatype decimal test not implemented")
# Run the placeholder suite when executed directly.
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "c6b4dcd557d476c2e33207f7f29da82c",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 58,
"avg_line_length": 25.22222222222222,
"alnum_prop": 0.6872246696035242,
"repo_name": "jonfoster/pyxb1",
"id": "c0e71d85ebc8eb0667e988b4492ecf12e269f25f",
"size": "227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/datatypes/totest-decimal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1564427"
},
{
"name": "Shell",
"bytes": "18946"
}
],
"symlink_target": ""
}
|
import mechanize
import urllib
import re
import sys
################################################################################
# Constants
################################################################################
### URLs
MAIN_URL = 'http://www.daconline.unicamp.br/altmatr/menupublico.do'
# Struts endpoint; %s slots: token, course, class, undergrad sem., grad sem., year
REQUEST_URL = 'http://www.daconline.unicamp.br/altmatr/conspub_matriculadospordisciplinaturma.do?org.apache.struts.taglib.html.TOKEN=%s&txtDisciplina=%s&txtTurma=%s&cboSubG=%s&cboSubP=%s&cboAno=%s&btnAcao=Continuar'
### Regex patterns
TOKEN_PATTERN = 'var token = "((?P<token>[0-9a-f]{32,32}))";'
DISCIPLINE_PATTERN = 'Disciplina:</span> (?P<disciplina>[A-Za-z][A-Za-z ][0-9]{3}) (?P<turma>[A-Za-z0-9]) - (?P<materia>.+)</td>'
PROFESSOR_PATTERN = 'Docente:</span> (?P<professor>.+)</td>'
RA_PATTERN = '<td height="18" bgcolor="white" align="center" class="corpo" width="80">([0-9]+)</td>'
NAME_PATTERN = '<td height="18" bgcolor="white" width="270" align="left" class="corpo"> (.+)</td>'
################################################################################
# Functions
################################################################################
def get_students(info):
"""
Fetch the student list and class information for a given course.
Takes as parameter a dictionary of the following form:
info = {
'course': 'MC868', # Course code
'classes': 'A', # or 'AB', 'XYWZ'
'year': '2013', # Offering year
'semester': '2',
'type': 'undergrad' # or 'grad'
}
Returns a list with the following tuple for each class:
(
'MC868', # Course code
'A', # Class identifier
'Linguagens Formais e Autômatos', # Course name
'Arnaldo Vieira Moura', # Name of the professor in charge
[ list of (ra, name) tuples for each enrolled student ]
)
"""
# Unpack the course information
course = info["course"]
classes = info["classes"]
year = info["year"]
if info["type"] == "undergrad":
undergrad = info["semester"]
grad = "0"
elif info["type"] == "grad":
grad = "2" + info["semester"]
undergrad = "0"
else:
sys.stderr.write('dac_parser: Tipo %s Inválido.\n' % (info['type']))
return None
# Open the DAC public query page
mech = mechanize.Browser()
f = mech.open(MAIN_URL)
site = f.read()
# Look for the DAC session token
token_pattern = re.compile(TOKEN_PATTERN)
matches = re.search(token_pattern, site)
if matches == None:
sys.stderr.write("dac_parser: Não foi possível acessar o site da DAC.\n")
sys.exit(1)
token = matches.group("token")
# Initialize the list of classes
result = []
# Iterate over the classes, collecting their students' data
for cls in classes:
# URL the enrollment requests are sent to
url = REQUEST_URL % (token, course, cls,undergrad, grad, year)
# Open the page containing the students' information
f = mech.open(url)
site = f.read()
# Extract information via regex
# Professor name
matches = re.search(PROFESSOR_PATTERN, site)
if matches == None:
# Invalid class (no professor in charge)
sys.stderr.write("dac_parser: Turma %s Inválida.\n" % (course+cls))
continue
prof = matches.group("professor").strip()
# Course name
matches = re.search(DISCIPLINE_PATTERN, site)
if matches == None:
# Invalid class (course name not found)
sys.stderr.write("dac_parser: Turma %s Inválida.\n" % (course+cls))
continue
disc = matches.group("disciplina").strip()
class_id = matches.group("turma").strip()
disc_name = matches.group("materia").strip()
# List of enrolled students
ra_list = re.findall(RA_PATTERN, site)
names = re.findall(NAME_PATTERN, site)
# Empty class, discard it
if len(names) == 0:
sys.stderr.write("dac_parser: Turma %s Inválida.\n" % (course+cls))
continue
# Parsing error
if len(names) != len(ra_list):
sys.stderr.write("dac_parser: Problema lendo alunos da Turma %s.\n" % (course+cls))
continue
# Build the list of (ra, name) tuples for each student
# enrolled in the class
students = []
for i in range(len(ra_list)):
students.append( (ra_list[i], (names[i]).strip()) )
result.append( (disc, class_id, disc_name, prof, students) )
return result
|
{
"content_hash": "69239c330eabeaa7772244e28810feb2",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 215,
"avg_line_length": 37.604651162790695,
"alnum_prop": 0.5611214182642754,
"repo_name": "cacounicamp/gda",
"id": "d1cb0611f45cee1dae985b72070f19ba6f44f157",
"size": "5126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/dac_parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
'''
Script used to query Wikipedia for the summary of an object.

Usage: python query_wikipedia.py "<query string>"
Tokenizes and POS-tags the query, then prints the Wikipedia summary
for the first noun found in it.

NOTE: written for Python 2 (print statements, implicit bytes output).
'''
import sys
import wikipedia
import nltk
def main():
    # Check that we have the right number of arguments: exactly one
    # query string besides the script name.
    if (len(sys.argv) != 2):
        print 'Incorrect number of arguments; please pass in only one string that contains the query'
        exit()
    # Get the noun from the query (uses the first noun it finds for now)
    tokens = nltk.word_tokenize(sys.argv[1])
    tagged = nltk.pos_tag(tokens)  # list of (token, POS-tag) pairs
    # Find first noun in query (any tag starting with 'N': NN, NNS,
    # NNP, ...) and print its Wikipedia summary, then stop.
    for tag in tagged:
        if tag[1][0] == 'N':
            print wikipedia.summary(tag[0]).encode('utf-8')
            return
if __name__ == '__main__':
    main()
|
{
"content_hash": "a5787b456973145b824c4afc90849e11",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 95,
"avg_line_length": 25.307692307692307,
"alnum_prop": 0.682370820668693,
"repo_name": "christopher18/Celsearch",
"id": "af91a026133b1be1420f6839e26701ff6e7f6cfd",
"size": "658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/celSearch/api/scripts/query_wikipedia.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "13117"
},
{
"name": "JavaScript",
"bytes": "3015"
},
{
"name": "Python",
"bytes": "1756"
}
],
"symlink_target": ""
}
|
import json
from unittest import mock
import os
import pytest
import tempfile
import zipfile
from django.conf import settings
from django.forms import ValidationError
from django.utils.encoding import force_text
from waffle.testutils import override_switch
from olympia import amo
from olympia.addons.models import Category
from olympia.addons.utils import (
build_static_theme_xpi_from_lwt, build_webext_dictionary_from_legacy,
get_addon_recommendations, get_addon_recommendations_invalid,
get_creatured_ids, get_featured_ids, is_outcome_recommended,
TAAR_LITE_FALLBACK_REASON_EMPTY, TAAR_LITE_FALLBACK_REASON_TIMEOUT,
TAAR_LITE_FALLBACKS, TAAR_LITE_OUTCOME_CURATED,
TAAR_LITE_OUTCOME_REAL_FAIL, TAAR_LITE_OUTCOME_REAL_SUCCESS,
TAAR_LITE_FALLBACK_REASON_INVALID,
verify_mozilla_trademark)
from olympia.amo.tests import (
AMOPaths, TestCase, addon_factory, collection_factory, user_factory)
from olympia.bandwagon.models import FeaturedCollection
from olympia.constants.categories import CATEGORIES_BY_ID
@pytest.mark.django_db
@pytest.mark.parametrize('name, allowed, email, content_optmzn_waffle', (
    # First with the content optimization waffle off:
    # Regular name, obviously always allowed
    ('Fancy new Add-on', True, 'foo@bar.com', False),
    # We allow the 'for ...' postfix to be used
    ('Fancy new Add-on for Firefox', True, 'foo@bar.com', False),
    ('Fancy new Add-on for Mozilla', True, 'foo@bar.com', False),
    # But only the postfix
    ('Fancy new Add-on for Firefox Browser', False, 'foo@bar.com', False),
    ('For Firefox fancy new add-on', False, 'foo@bar.com', False),
    # But users with @mozilla.com or @mozilla.org email addresses
    # are allowed
    ('Firefox makes everything better', False, 'bar@baz.com', False),
    ('Firefox makes everything better', True, 'foo@mozilla.com', False),
    ('Firefox makes everything better', True, 'foo@mozilla.org', False),
    ('Mozilla makes everything better', True, 'foo@mozilla.com', False),
    ('Mozilla makes everything better', True, 'foo@mozilla.org', False),
    # A few more test-cases...
    ('Firefox add-on for Firefox', False, 'foo@bar.com', False),
    ('Firefox add-on for Firefox', True, 'foo@mozilla.com', False),
    ('Foobarfor Firefox', False, 'foo@bar.com', False),
    ('Better Privacy for Firefox!', True, 'foo@bar.com', False),
    ('Firefox awesome for Mozilla', False, 'foo@bar.com', False),
    ('Firefox awesome for Mozilla', True, 'foo@mozilla.org', False),
    # And with the content optimization waffle on:
    # Regular name, obviously always allowed
    ('Fancy new Add-on', True, 'foo@bar.com', True),
    # We don't allow the 'for ...' postfix to be used anymore
    ('Fancy new Add-on for Firefox', False, 'foo@bar.com', True),
    ('Fancy new Add-on for Mozilla', False, 'foo@bar.com', True),
    # Or the postfix
    ('Fancy new Add-on for Firefox Browser', False, 'foo@bar.com', True),
    ('For Firefox fancy new add-on', False, 'foo@bar.com', True),
    # But users with @mozilla.com or @mozilla.org email addresses
    # are allowed
    ('Firefox makes everything better', False, 'bar@baz.com', True),
    ('Firefox makes everything better', True, 'foo@mozilla.com', True),
    ('Firefox makes everything better', True, 'foo@mozilla.org', True),
    ('Mozilla makes everything better', True, 'foo@mozilla.com', True),
    ('Mozilla makes everything better', True, 'foo@mozilla.org', True),
    ('Fancy new Add-on for Firefox', True, 'foo@mozilla.org', True),
    ('Fancy new Add-on for Mozilla', True, 'foo@mozilla.org', True),
    # A few more test-cases...
    ('Firefox add-on for Firefox', False, 'foo@bar.com', True),
    ('Firefox add-on for Firefox', True, 'foo@mozilla.com', True),
    ('Foobarfor Firefox', False, 'foo@bar.com', True),
    ('Better Privacy for Firefox!', False, 'foo@bar.com', True),
    ('Firefox awesome for Mozilla', False, 'foo@bar.com', True),
    ('Firefox awesome for Mozilla', True, 'foo@mozilla.org', True),
))
def test_verify_mozilla_trademark(name, allowed, email, content_optmzn_waffle):
    """Trademarked names ('Firefox', 'Mozilla') are rejected with a
    ValidationError unless the author has a @mozilla.com/.org address;
    with the 'content-optimization' waffle off, a bare 'for Firefox/
    Mozilla' postfix is also tolerated for everyone."""
    user = user_factory(email=email)
    with override_switch('content-optimization', active=content_optmzn_waffle):
        if not allowed:
            with pytest.raises(ValidationError) as exc:
                verify_mozilla_trademark(name, user)
            assert exc.value.message == (
                'Add-on names cannot contain the Mozilla or Firefox '
                'trademarks.'
            )
        else:
            verify_mozilla_trademark(name, user)
class TestGetFeaturedIds(TestCase):
    """Tests for get_featured_ids filtering by app, type and locale."""

    fixtures = ['addons/featured', 'bandwagon/featured_collections',
                'base/addon_3615', 'base/collections', 'base/featured',
                'base/users']

    # Featured add-on ids from the fixtures, grouped by locale bucket.
    no_locale = (1001, 1003, 2464, 7661, 15679)
    en_us_locale = (3481,)
    all_locales = no_locale + en_us_locale
    no_locale_type_one = (1001, 1003, 2464, 7661)

    def setUp(self):
        super().setUp()

    def test_by_app(self):
        ids = get_featured_ids(amo.FIREFOX)
        assert set(ids) == set(self.all_locales)

    def test_by_type(self):
        ids = get_featured_ids(amo.FIREFOX, 'xx', 1)
        assert set(ids) == set(self.no_locale_type_one)

    def test_by_locale(self):
        assert set(get_featured_ids(amo.FIREFOX)) == set(self.all_locales)
        assert set(get_featured_ids(amo.FIREFOX, 'xx')) == set(self.no_locale)
        expected = set(self.no_locale + self.en_us_locale)
        assert set(get_featured_ids(amo.FIREFOX, 'en-US')) == expected

    def test_locale_shuffle(self):
        # The locale-specific add-ons must be shuffled to the front.
        ids = get_featured_ids(amo.FIREFOX, 'en-US')
        assert (ids[0],) == self.en_us_locale
class TestGetCreaturedIds(TestCase):
    """Tests for get_creatured_ids (featured add-ons within a category)."""

    fixtures = ['addons/featured', 'bandwagon/featured_collections',
                'base/addon_3615', 'base/collections', 'base/featured',
                'base/users']

    category_id = 22
    no_locale = (1001,)
    en_us_locale = (3481,)

    def setUp(self):
        super().setUp()

    def _feature_extra_addon_for_android(self, locale=None):
        # Feature a fresh add-on from the same category, but in a
        # collection for a different application (Android): it must
        # never show up in the results.
        extra_addon = addon_factory(
            category=Category.objects.get(pk=self.category_id))
        collection = collection_factory()
        collection.add_addon(extra_addon)
        create_kwargs = {
            'application': amo.ANDROID.id, 'collection': collection}
        if locale is not None:
            create_kwargs['locale'] = locale
        FeaturedCollection.objects.create(**create_kwargs)

    def test_by_category_static(self):
        category = CATEGORIES_BY_ID[self.category_id]
        assert set(get_creatured_ids(category, None)) == set(self.no_locale)

    def test_by_category_dynamic(self):
        category = Category.objects.get(pk=self.category_id)
        assert set(get_creatured_ids(category, None)) == set(self.no_locale)

    def test_by_category_id(self):
        result = get_creatured_ids(self.category_id, None)
        assert set(result) == set(self.no_locale)

    def test_by_category_app(self):
        self._feature_extra_addon_for_android()
        result = get_creatured_ids(self.category_id, None)
        assert set(result) == set(self.no_locale)

    def test_by_locale(self):
        result = get_creatured_ids(self.category_id, 'en-US')
        assert set(result) == set(self.no_locale + self.en_us_locale)

    def test_by_category_app_and_locale(self):
        self._feature_extra_addon_for_android(locale='en-US')
        result = get_creatured_ids(self.category_id, 'en-US')
        assert set(result) == set(self.no_locale + self.en_us_locale)

    def test_shuffle(self):
        # The locale-matching add-on must be shuffled to the front.
        ids = get_creatured_ids(self.category_id, 'en-US')
        assert (ids[0],) == self.en_us_locale
class TestGetAddonRecommendations(TestCase):
    """Tests for the TAAR-lite recommendation helpers."""

    def setUp(self):
        patcher = mock.patch(
            'olympia.addons.utils.call_recommendation_server')
        self.recommendation_server_mock = patcher.start()
        self.addCleanup(patcher.stop)

        self.a101 = addon_factory(id=101, guid='101@mozilla')
        for addon_id in (102, 103, 104):
            addon_factory(id=addon_id, guid='%s@mozilla' % addon_id)
        self.recommendation_guids = [
            '%s@mozilla' % addon_id for addon_id in (101, 102, 103, 104)]
        self.recommendation_server_mock.return_value = (
            self.recommendation_guids)

    def _assert_server_called(self):
        # The server must have been hit with the guid and empty params.
        self.recommendation_server_mock.assert_called_with(
            settings.TAAR_LITE_RECOMMENDATION_ENGINE_URL, 'a@b', {})

    def test_recommended(self):
        recommendations, outcome, reason = get_addon_recommendations(
            'a@b', True)
        assert recommendations == self.recommendation_guids
        assert outcome == TAAR_LITE_OUTCOME_REAL_SUCCESS
        assert reason is None
        self._assert_server_called()

    def test_recommended_no_results(self):
        # An empty server response falls back to the curated list.
        self.recommendation_server_mock.return_value = []
        recommendations, outcome, reason = get_addon_recommendations(
            'a@b', True)
        assert recommendations == TAAR_LITE_FALLBACKS
        assert outcome == TAAR_LITE_OUTCOME_REAL_FAIL
        assert reason is TAAR_LITE_FALLBACK_REASON_EMPTY
        self._assert_server_called()

    def test_recommended_timeout(self):
        # A None response signals a timeout; also falls back.
        self.recommendation_server_mock.return_value = None
        recommendations, outcome, reason = get_addon_recommendations(
            'a@b', True)
        assert recommendations == TAAR_LITE_FALLBACKS
        assert outcome == TAAR_LITE_OUTCOME_REAL_FAIL
        assert reason is TAAR_LITE_FALLBACK_REASON_TIMEOUT
        self._assert_server_called()

    def test_not_recommended(self):
        # recommended=False must not contact the server at all.
        recommendations, outcome, reason = get_addon_recommendations(
            'a@b', False)
        assert not self.recommendation_server_mock.called
        assert recommendations == TAAR_LITE_FALLBACKS
        assert outcome == TAAR_LITE_OUTCOME_CURATED
        assert reason is None

    def test_invalid_fallback(self):
        recommendations, outcome, reason = get_addon_recommendations_invalid()
        assert not self.recommendation_server_mock.called
        assert recommendations == TAAR_LITE_FALLBACKS
        assert outcome == TAAR_LITE_OUTCOME_REAL_FAIL
        assert reason == TAAR_LITE_FALLBACK_REASON_INVALID

    def test_is_outcome_recommended(self):
        assert is_outcome_recommended(TAAR_LITE_OUTCOME_REAL_SUCCESS)
        assert not is_outcome_recommended(TAAR_LITE_OUTCOME_REAL_FAIL)
        assert not is_outcome_recommended(TAAR_LITE_OUTCOME_CURATED)
        assert not self.recommendation_server_mock.called
class TestBuildStaticThemeXpiFromLwt(TestCase):
    """Tests for build_static_theme_xpi_from_lwt, which converts a
    lightweight theme (persona) into a static theme xpi.

    Fix: the original compared against ``open(...).read()`` without
    closing the handle, leaking a file descriptor per assertion
    (ResourceWarning under pytest); reads now use a context manager.
    """

    def setUp(self):
        self.background_png = os.path.join(
            settings.ROOT, 'src/olympia/versions/tests/static_themes/weta.png')

    def _background_bytes(self):
        # Read the expected header image, closing the handle promptly.
        with open(self.background_png, 'rb') as background:
            return background.read()

    def test_lwt(self):
        # Create our persona.
        lwt = addon_factory(
            type=amo.ADDON_PERSONA, persona_id=0, name=u'Amáze',
            description=u'It does all d£ things')
        lwt.persona.accentcolor, lwt.persona.textcolor = '123', '456789'
        # Give it a background header file.
        lwt.persona.header = 'weta.png'
        lwt.persona.header_path = self.background_png  # It's a cached_property
        static_xpi = tempfile.NamedTemporaryFile(suffix='.xpi').name
        build_static_theme_xpi_from_lwt(lwt, static_xpi)
        with zipfile.ZipFile(static_xpi, 'r', zipfile.ZIP_DEFLATED) as xpi:
            manifest = force_text(xpi.read('manifest.json'))
            manifest_json = json.loads(manifest)
            assert manifest_json['name'] == u'Amáze'
            assert manifest_json['description'] == u'It does all d£ things'
            assert manifest_json['theme']['images']['theme_frame'] == (
                u'weta.png')
            # accentcolor/textcolor are carried over as css hex colors.
            assert manifest_json['theme']['colors']['frame'] == (
                u'#123')
            assert manifest_json['theme']['colors']['tab_background_text'] == (
                u'#456789')
            assert xpi.read('weta.png') == self._background_bytes()

    def test_lwt_missing_info(self):
        # Create our persona without a name/description/colors.
        lwt = addon_factory(
            type=amo.ADDON_PERSONA, persona_id=0)
        lwt.update(name='')
        # Give it a background header file with multiple dots.
        lwt.persona.header = 'weta......png'
        lwt.persona.header_path = self.background_png  # It's a cached_property
        static_xpi = tempfile.NamedTemporaryFile(suffix='.xpi').name
        build_static_theme_xpi_from_lwt(lwt, static_xpi)
        with zipfile.ZipFile(static_xpi, 'r', zipfile.ZIP_DEFLATED) as xpi:
            manifest = force_text(xpi.read('manifest.json'))
            manifest_json = json.loads(manifest)
            # The slug substitutes for the missing name; the description
            # key is dropped entirely, and colors fall back to defaults.
            assert manifest_json['name'] == lwt.slug
            assert 'description' not in manifest_json.keys()
            # The extra dots in the header name are collapsed.
            assert manifest_json['theme']['images']['theme_frame'] == (
                u'weta.png')
            assert manifest_json['theme']['colors']['frame'] == (
                amo.THEME_FRAME_COLOR_DEFAULT)
            assert manifest_json['theme']['colors']['tab_background_text'] == (
                u'#000')
            assert xpi.read('weta.png') == self._background_bytes()
class TestBuildWebextDictionaryFromLegacy(AMOPaths, TestCase):
    """Tests for build_webext_dictionary_from_legacy, which repackages
    a legacy dictionary add-on as a webextension xpi."""
    def setUp(self):
        # A legacy (non-webextension) Arabic dictionary add-on at 1.0,
        # backed by the dictionary-test.xpi fixture file.
        self.addon = addon_factory(
            target_locale='ar', type=amo.ADDON_DICT,
            version_kw={'version': '1.0'},
            file_kw={'is_webextension': False})
        self.xpi_copy_over(
            self.addon.current_version.all_files[0], 'dictionary-test.xpi')
    def check_xpi_file_contents(self, xpi_file_path, expected_version):
        """Assert the built xpi has a well-formed manifest (guid,
        version, dictionaries mapping) and only the expected files."""
        with zipfile.ZipFile(xpi_file_path, 'r', zipfile.ZIP_DEFLATED) as xpi:
            # Check that manifest is present, contains proper version and
            # dictionaries properties.
            manifest = force_text(xpi.read('manifest.json'))
            manifest_json = json.loads(manifest)
            assert (
                manifest_json['browser_specific_settings']['gecko']['id'] ==
                self.addon.guid)
            assert manifest_json['version'] == expected_version
            expected_dict_obj = {'ar': 'dictionaries/ar.dic'}
            assert manifest_json['dictionaries'] == expected_dict_obj
            # Check that we haven't included any useless files.
            expected_files = sorted([
                'dictionaries/',
                'dictionaries/ar.aff',
                'dictionaries/ar.dic',
                'dictionaries/license.txt',
                'manifest.json'
            ])
            assert sorted([x.filename for x in xpi.filelist]) == expected_files
    def test_basic(self):
        # Version '1.0' gets a '.1webext' suffix appended.
        with tempfile.NamedTemporaryFile(suffix='.xpi') as destination:
            build_webext_dictionary_from_legacy(self.addon, destination)
            self.check_xpi_file_contents(destination, '1.0.1webext')
    def test_current_not_valid_raises(self):
        # An xpi that fails zip validation must raise, not build.
        mod = 'olympia.files.utils.SafeZip.initialize_and_validate'
        with mock.patch(mod) as is_valid:
            is_valid.return_value = False
            with tempfile.NamedTemporaryFile(suffix='.xpi') as destination:
                with self.assertRaises(ValidationError):
                    build_webext_dictionary_from_legacy(
                        self.addon, destination)
    def test_addon_has_no_target_locale(self):
        # NOTE(review): build still succeeds without a target_locale and
        # produces the same 'ar' manifest — locale presumably recovered
        # from the xpi contents; confirm against the implementation.
        self.addon.update(target_locale=None)
        with tempfile.NamedTemporaryFile(suffix='.xpi') as destination:
            build_webext_dictionary_from_legacy(self.addon, destination)
            self.check_xpi_file_contents(destination, '1.0.1webext')
        self.addon.reload()
    def test_invalid_dictionary_path_raises(self):
        # A regular extension xpi (no dictionary files) must be rejected.
        self.xpi_copy_over(
            self.addon.current_version.all_files[0], 'extension.xpi')
        with tempfile.NamedTemporaryFile(suffix='.xpi') as destination:
            with self.assertRaises(ValidationError):
                build_webext_dictionary_from_legacy(self.addon, destination)
    def test_version_number_typefix(self):
        # A '1.1-typefix' version becomes '1.2webext', not
        # '1.1-typefix.1webext'.
        self.addon.current_version.update(version='1.1-typefix')
        with tempfile.NamedTemporaryFile(suffix='.xpi') as destination:
            build_webext_dictionary_from_legacy(self.addon, destination)
            self.check_xpi_file_contents(destination, '1.2webext')
|
{
"content_hash": "1743a98a1ba141893ff5f72b902f7ecd",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 79,
"avg_line_length": 44.031007751937985,
"alnum_prop": 0.6369131455399061,
"repo_name": "kumar303/olympia",
"id": "479ac43e4b3ca164afb58d892cc74a3f7f9eae22",
"size": "17068",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/olympia/addons/tests/test_utils_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "663668"
},
{
"name": "HTML",
"bytes": "1600995"
},
{
"name": "JavaScript",
"bytes": "1314186"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "3999917"
},
{
"name": "Shell",
"bytes": "9101"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
}
|
from chatterbot import ChatBot
# Example: a bot that replies with a fixed default response whenever
# the best match falls below a confidence threshold.
# Create a new instance of a ChatBot
bot = ChatBot(
    'Default Response Example Bot',
    storage_adapter='chatterbot.storage.JsonFileStorageAdapter',
    logic_adapters=[
        {
            'import_path': 'chatterbot.logic.BestMatch'
        },
        {
            # Below this confidence, answer with default_response.
            'import_path': 'chatterbot.logic.LowConfidenceAdapter',
            'threshold': 0.65,
            'default_response': 'I am sorry, but I do not understand.'
        }
    ],
    trainer='chatterbot.trainers.ListTrainer'
)
# Train the chat bot with a few responses
bot.train([
    'How can I help you?',
    'I want to create a chat bot',
    'Have you read the documentation?',
    'No, I have not',
    'This should help get you started: http://chatterbot.rtfd.org/en/latest/quickstart.html'
])
# Get a response for some unexpected input (not in the training data,
# so the low-confidence default response is expected).
response = bot.get_response('How do I make an omelette?')
print(response)
|
{
"content_hash": "8ce8a07be2663c0417aac55e8885ac12",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 92,
"avg_line_length": 28.8125,
"alnum_prop": 0.6464208242950108,
"repo_name": "davizucon/ChatterBot",
"id": "18bdd5bd6e297e28ca12d35b874661200a182763",
"size": "946",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/default_response_example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "336866"
}
],
"symlink_target": ""
}
|
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._application_package_operations import (
build_activate_request,
build_create_request,
build_delete_request,
build_get_request,
build_list_request,
)
# ``Literal`` is only in ``typing`` from Python 3.8; older interpreters
# fall back to the typing_extensions backport.
if sys.version_info >= (3, 8):
    from typing import Literal  # pylint: disable=no-name-in-module, ungrouped-imports
else:
    from typing_extensions import Literal  # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
# Signature of the optional ``cls`` response-transform callback accepted
# by every operation: (pipeline_response, deserialized, response_headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApplicationPackageOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.batch.aio.BatchManagementClient`'s
:attr:`application_package` attribute.
"""
models = _models
    def __init__(self, *args, **kwargs) -> None:
        # The generated client injects (client, config, serializer,
        # deserializer) positionally; keyword fallbacks allow manual wiring.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    # Overload: ``parameters`` given as a typed model, serialized to JSON.
    @overload
    async def activate(
        self,
        resource_group_name: str,
        account_name: str,
        application_name: str,
        version_name: str,
        parameters: _models.ActivateApplicationPackageParameters,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.ApplicationPackage:
        """Activates the specified application package. This should be done after the
        ``ApplicationPackage`` was created and uploaded. This needs to be done before an
        ``ApplicationPackage`` can be used on Pools or Tasks.
        :param resource_group_name: The name of the resource group that contains the Batch account.
         Required.
        :type resource_group_name: str
        :param account_name: The name of the Batch account. Required.
        :type account_name: str
        :param application_name: The name of the application. This must be unique within the account.
         Required.
        :type application_name: str
        :param version_name: The version of the application. Required.
        :type version_name: str
        :param parameters: The parameters for the request. Required.
        :type parameters: ~azure.mgmt.batch.models.ActivateApplicationPackageParameters
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationPackage or the result of cls(response)
        :rtype: ~azure.mgmt.batch.models.ApplicationPackage
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Overload: ``parameters`` given as a raw binary stream, sent as-is.
    @overload
    async def activate(
        self,
        resource_group_name: str,
        account_name: str,
        application_name: str,
        version_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.ApplicationPackage:
        """Activates the specified application package. This should be done after the
        ``ApplicationPackage`` was created and uploaded. This needs to be done before an
        ``ApplicationPackage`` can be used on Pools or Tasks.
        :param resource_group_name: The name of the resource group that contains the Batch account.
         Required.
        :type resource_group_name: str
        :param account_name: The name of the Batch account. Required.
        :type account_name: str
        :param application_name: The name of the application. This must be unique within the account.
         Required.
        :type application_name: str
        :param version_name: The version of the application. Required.
        :type version_name: str
        :param parameters: The parameters for the request. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationPackage or the result of cls(response)
        :rtype: ~azure.mgmt.batch.models.ApplicationPackage
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace_async
    async def activate(
        self,
        resource_group_name: str,
        account_name: str,
        application_name: str,
        version_name: str,
        parameters: Union[_models.ActivateApplicationPackageParameters, IO],
        **kwargs: Any
    ) -> _models.ApplicationPackage:
        """Activates the specified application package. This should be done after the
        ``ApplicationPackage`` was created and uploaded. This needs to be done before an
        ``ApplicationPackage`` can be used on Pools or Tasks.
        :param resource_group_name: The name of the resource group that contains the Batch account.
         Required.
        :type resource_group_name: str
        :param account_name: The name of the Batch account. Required.
        :type account_name: str
        :param application_name: The name of the application. This must be unique within the account.
         Required.
        :type application_name: str
        :param version_name: The version of the application. Required.
        :type version_name: str
        :param parameters: The parameters for the request. Is either a model type or a IO type.
         Required.
        :type parameters: ~azure.mgmt.batch.models.ActivateApplicationPackageParameters or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationPackage or the result of cls(response)
        :rtype: ~azure.mgmt.batch.models.ApplicationPackage
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Default mapping from HTTP status codes to azure-core exceptions;
        # callers may extend or override it via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )  # type: Literal["2022-10-01"]
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.ApplicationPackage]
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw streams/bytes are sent as-is; models are serialized to JSON.
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "ActivateApplicationPackageParameters")
        request = build_activate_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            application_name=application_name,
            version_name=version_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.activate.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # Only 200 is a documented success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("ApplicationPackage", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    activate.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}/versions/{versionName}/activate"}  # type: ignore
    # Overload: ``parameters`` given as a typed model, serialized to JSON.
    @overload
    async def create(
        self,
        resource_group_name: str,
        account_name: str,
        application_name: str,
        version_name: str,
        parameters: Optional[_models.ApplicationPackage] = None,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.ApplicationPackage:
        """Creates an application package record. The record contains a storageUrl where the package
        should be uploaded to. Once it is uploaded the ``ApplicationPackage`` needs to be activated
        using ``ApplicationPackageActive`` before it can be used. If the auto storage account was
        configured to use storage keys, the URL returned will contain a SAS.
        :param resource_group_name: The name of the resource group that contains the Batch account.
         Required.
        :type resource_group_name: str
        :param account_name: The name of the Batch account. Required.
        :type account_name: str
        :param application_name: The name of the application. This must be unique within the account.
         Required.
        :type application_name: str
        :param version_name: The version of the application. Required.
        :type version_name: str
        :param parameters: The parameters for the request. Default value is None.
        :type parameters: ~azure.mgmt.batch.models.ApplicationPackage
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationPackage or the result of cls(response)
        :rtype: ~azure.mgmt.batch.models.ApplicationPackage
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Overload: ``parameters`` given as a raw binary stream, sent as-is.
    @overload
    async def create(
        self,
        resource_group_name: str,
        account_name: str,
        application_name: str,
        version_name: str,
        parameters: Optional[IO] = None,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.ApplicationPackage:
        """Creates an application package record. The record contains a storageUrl where the package
        should be uploaded to. Once it is uploaded the ``ApplicationPackage`` needs to be activated
        using ``ApplicationPackageActive`` before it can be used. If the auto storage account was
        configured to use storage keys, the URL returned will contain a SAS.
        :param resource_group_name: The name of the resource group that contains the Batch account.
         Required.
        :type resource_group_name: str
        :param account_name: The name of the Batch account. Required.
        :type account_name: str
        :param application_name: The name of the application. This must be unique within the account.
         Required.
        :type application_name: str
        :param version_name: The version of the application. Required.
        :type version_name: str
        :param parameters: The parameters for the request. Default value is None.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationPackage or the result of cls(response)
        :rtype: ~azure.mgmt.batch.models.ApplicationPackage
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace_async
    async def create(
        self,
        resource_group_name: str,
        account_name: str,
        application_name: str,
        version_name: str,
        parameters: Optional[Union[_models.ApplicationPackage, IO]] = None,
        **kwargs: Any
    ) -> _models.ApplicationPackage:
        """Creates an application package record. The record contains a storageUrl where the package
        should be uploaded to. Once it is uploaded the ``ApplicationPackage`` needs to be activated
        using ``ApplicationPackageActive`` before it can be used. If the auto storage account was
        configured to use storage keys, the URL returned will contain a SAS.
        :param resource_group_name: The name of the resource group that contains the Batch account.
         Required.
        :type resource_group_name: str
        :param account_name: The name of the Batch account. Required.
        :type account_name: str
        :param application_name: The name of the application. This must be unique within the account.
         Required.
        :type application_name: str
        :param version_name: The version of the application. Required.
        :type version_name: str
        :param parameters: The parameters for the request. Is either a model type or a IO type. Default
         value is None.
        :type parameters: ~azure.mgmt.batch.models.ApplicationPackage or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationPackage or the result of cls(response)
        :rtype: ~azure.mgmt.batch.models.ApplicationPackage
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Default mapping from HTTP status codes to azure-core exceptions;
        # callers may extend or override it via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )  # type: Literal["2022-10-01"]
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.ApplicationPackage]
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw streams/bytes are sent as-is; a model (when supplied) is
        # serialized to JSON, and an omitted body stays empty.
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            if parameters is not None:
                _json = self._serialize.body(parameters, "ApplicationPackage")
            else:
                _json = None
        request = build_create_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            application_name=application_name,
            version_name=version_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.create.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # Only 200 is a documented success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("ApplicationPackage", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}/versions/{versionName}"}  # type: ignore
@distributed_trace_async
async def delete(  # pylint: disable=inconsistent-return-statements
    self, resource_group_name: str, account_name: str, application_name: str, version_name: str, **kwargs: Any
) -> None:
    """Deletes an application package record and its associated binary file.

    :param resource_group_name: The name of the resource group that contains the Batch account.
     Required.
    :type resource_group_name: str
    :param account_name: The name of the Batch account. Required.
    :type account_name: str
    :param application_name: The name of the application. This must be unique within the account.
     Required.
    :type application_name: str
    :param version_name: The version of the application. Required.
    :type version_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    # HTTP header names are case-insensitive; wrap caller-supplied headers in
    # case_insensitive_dict so they merge correctly with headers set by the
    # request builder (consistent with the create() operation above).
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop(
        "api_version", _params.pop("api-version", self._config.api_version)
    )  # type: Literal["2022-10-01"]
    cls = kwargs.pop("cls", None)  # type: ClsType[None]

    request = build_delete_request(
        resource_group_name=resource_group_name,
        account_name=account_name,
        application_name=application_name,
        version_name=version_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self.delete.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # Both 200 and 204 are success codes for this delete operation; anything
    # else is mapped to the matching azure-core exception and raised.
    if response.status_code not in [200, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}/versions/{versionName}"}  # type: ignore
@distributed_trace_async
async def get(
    self, resource_group_name: str, account_name: str, application_name: str, version_name: str, **kwargs: Any
) -> _models.ApplicationPackage:
    """Gets information about the specified application package.

    :param resource_group_name: The name of the resource group that contains the Batch account.
     Required.
    :type resource_group_name: str
    :param account_name: The name of the Batch account. Required.
    :type account_name: str
    :param application_name: The name of the application. This must be unique within the account.
     Required.
    :type application_name: str
    :param version_name: The version of the application. Required.
    :type version_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ApplicationPackage or the result of cls(response)
    :rtype: ~azure.mgmt.batch.models.ApplicationPackage
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    # HTTP header names are case-insensitive; wrap caller-supplied headers in
    # case_insensitive_dict for consistent merging (matches create() above).
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop(
        "api_version", _params.pop("api-version", self._config.api_version)
    )  # type: Literal["2022-10-01"]
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.ApplicationPackage]

    request = build_get_request(
        resource_group_name=resource_group_name,
        account_name=account_name,
        application_name=application_name,
        version_name=version_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self.get.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize("ApplicationPackage", pipeline_response)

    # A caller-supplied `cls` hook receives the raw pipeline response too.
    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}/versions/{versionName}"}  # type: ignore
@distributed_trace
def list(
    self,
    resource_group_name: str,
    account_name: str,
    application_name: str,
    maxresults: Optional[int] = None,
    **kwargs: Any
) -> AsyncIterable["_models.ApplicationPackage"]:
    """Lists all of the application packages in the specified application.

    :param resource_group_name: The name of the resource group that contains the Batch account.
     Required.
    :type resource_group_name: str
    :param account_name: The name of the Batch account. Required.
    :type account_name: str
    :param application_name: The name of the application. This must be unique within the account.
     Required.
    :type application_name: str
    :param maxresults: The maximum number of items to return in the response. Default value is
     None.
    :type maxresults: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ApplicationPackage or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.batch.models.ApplicationPackage]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # HTTP header names are case-insensitive; wrap caller-supplied headers in
    # case_insensitive_dict for consistent merging (matches create() above).
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop(
        "api_version", _params.pop("api-version", self._config.api_version)
    )  # type: Literal["2022-10-01"]
    cls = kwargs.pop("cls", None)  # type: ClsType[_models.ListApplicationPackagesResult]

    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        # First page: build the request from the operation parameters.
        if not next_link:
            request = build_list_request(
                resource_group_name=resource_group_name,
                account_name=account_name,
                application_name=application_name,
                subscription_id=self._config.subscription_id,
                maxresults=maxresults,
                api_version=api_version,
                template_url=self.list.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore

        else:
            # Subsequent pages: reuse the service-provided next_link, but
            # re-quote its query params and pin the client's api-version.
            _parsed_next_link = urllib.parse.urlparse(next_link)
            _next_request_params = case_insensitive_dict(
                {
                    key: [urllib.parse.quote(v) for v in value]
                    for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                }
            )
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest(
                "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and hand back (continuation token, items).
        deserialized = self._deserialize("ListApplicationPackagesResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)

list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}/versions"}  # type: ignore
|
{
"content_hash": "63fb670a3b1475223be6e144d1d8b8d2",
"timestamp": "",
"source": "github",
"line_count": 642,
"max_line_length": 234,
"avg_line_length": 44.94392523364486,
"alnum_prop": 0.6451098634504748,
"repo_name": "Azure/azure-sdk-for-python",
"id": "0c3708764c607e3752bf3d1b514cd9007d331787",
"size": "29354",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/batch/azure-mgmt-batch/azure/mgmt/batch/aio/operations/_application_package_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""ImageNet Norm-Free Residual Networks as defined in (Brock et al., 2021).
Reference:
A. Brock, S. De, and S. L. Smith.
Characterizing signal propagation to close the performance gap
in unnormalized resnets.
International Conference on Learning Representations, 2021.
"""
from typing import Any, Optional
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from jax_privacy.src.training.image_classification.models import common
class NFResNet(hk.Module):
  """Norm-Free preactivation ResNet.

  Variants differ only in the number of residual blocks per stage (see
  `variant_dict`); stage widths are `width` multiples of [64, 128, 256, 512].
  """

  # Number of residual blocks per stage, for each supported depth variant.
  variant_dict = {
      'ResNet50': {
          'depth': [3, 4, 6, 3]
      },
      'ResNet101': {
          'depth': [3, 4, 23, 3]
      },
      'ResNet152': {
          'depth': [3, 8, 36, 3]
      },
      'ResNet200': {
          'depth': [3, 24, 36, 3]
      },
      'ResNet288': {
          'depth': [24, 24, 24, 24]
      },
      'ResNet600': {
          'depth': [50, 50, 50, 50]
      },
  }

  def __init__(
      self,
      num_classes: int,
      *,
      variant: str = 'ResNet50',
      width: int = 4,
      alpha: float = 0.2,
      stochdepth_rate: float = 0.1,
      drop_rate: Optional[float] = None,
      activation: str = 'scaled_relu',
      fc_init: Any = None,
      skipinit_gain: hk.initializers.Initializer = jnp.zeros,
      use_se: bool = False,
      se_ratio: float = 0.25,
      name: str = 'NF_ResNet',
  ):
    """Constructor.

    Args:
      num_classes: Number of output units of the final linear layer.
      variant: Key into `variant_dict` selecting the per-stage depths.
      width: Channel multiplier applied to the base widths [64, 128, 256, 512].
      alpha: Scalar applied to each residual branch; also drives the
        `expected_std` bookkeeping below.
      stochdepth_rate: Maximum stochastic-depth rate; each block gets this
        value scaled linearly by its overall index.
      drop_rate: Dropout rate before the classifier head. If None, falls back
        to the variant's 'drop_rate' entry (0.0 when absent).
      activation: Key into `common.activations_dict`.
      fc_init: Initializer for the final linear layer; defaults to N(0, 0.01).
      skipinit_gain: Initializer for each block's skip-gain parameter.
      use_se: Whether blocks apply Squeeze-and-Excite.
      se_ratio: Squeeze-and-Excite ratio forwarded to each block.
      name: Haiku module name.
    """
    super().__init__(name=name)
    self.num_classes = num_classes
    self.variant = variant
    self.width = width
    block_params = self.variant_dict[self.variant]
    self.width_pattern = [item * self.width for item in [64, 128, 256, 512]]
    self.depth_pattern = block_params['depth']
    self.activation = common.activations_dict[activation]
    if drop_rate is None:
      self.drop_rate = block_params.get('drop_rate', 0.0)
    else:
      self.drop_rate = drop_rate

    # Define the stem of the model.
    ch = int(16 * self.width)
    self.initial_conv = common.WSConv2D(
        ch,
        kernel_shape=7,
        stride=2,
        padding='SAME',
        with_bias=False,
        name='initial_conv')

    # Define the body of the model: four stages with strides [1, 2, 2, 2].
    self.blocks = []
    expected_std = 1.0
    num_blocks = sum(self.depth_pattern)
    index = 0  # Overall block index
    block_args = (self.width_pattern, self.depth_pattern, [1, 2, 2, 2])
    for block_width, stage_depth, stride in zip(*block_args):
      for block_index in range(stage_depth):
        # Scalar pre-multiplier so each block sees an N(0,1) input at init.
        beta = 1. / expected_std
        # Stochastic depth grows linearly with overall block index.
        block_stochdepth_rate = stochdepth_rate * index / num_blocks
        self.blocks += [
            NFResBlock(
                ch,
                block_width,
                stride=stride if block_index == 0 else 1,
                beta=beta,
                alpha=alpha,
                activation=self.activation,
                stochdepth_rate=block_stochdepth_rate,
                skipinit_gain=skipinit_gain,
                use_se=use_se,
                se_ratio=se_ratio,
            )
        ]
        ch = block_width
        index += 1

        # Reset expected std but still give it 1 block of growth.
        if block_index == 0:
          expected_std = 1.0
        # Residual branch adds alpha^2 variance per block.
        expected_std = (expected_std**2 + alpha**2)**0.5

    # Define the head: by default, initialize with N(0, 0.01).
    if fc_init is None:
      fc_init = hk.initializers.RandomNormal(0.01, 0)
    self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True)

  def __call__(self, x: chex.Array, is_training: bool = True) -> chex.Array:
    """Return the output of the final layer without any [log-]softmax."""
    # Forward through the stem (7x7 stride-2 conv, then 3x3 stride-2 pool).
    out = self.initial_conv(x)
    out = hk.max_pool(
        out, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME')

    # Forward through the blocks; each block also returns its residual
    # variance, which is discarded here.
    for block in self.blocks:
      out, unused_res_avg_var = block(out, is_training=is_training)

    # Final-conv->activation, pool, dropout, classify
    pool = jnp.mean(self.activation(out), [1, 2])

    # Optionally apply dropout.
    if self.drop_rate > 0.0 and is_training:
      pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool)
    logits = self.fc(pool)
    return logits
class NFResBlock(hk.Module):
  """Normalizer-Free pre-activation ResNet Block.

  The block returns a pair: the block output and the mean variance of the
  residual branch (used for reporting metrics by the caller).
  """

  def __init__(
      self,
      in_ch: int,
      out_ch: int,
      *,
      bottleneck_ratio: float = 0.25,
      kernel_size: int = 3,
      stride: int = 1,
      beta: float = 1.0,
      alpha: float = 0.2,
      activation: common.Activation = jax.nn.relu,
      skipinit_gain: hk.initializers.Initializer = jnp.zeros,
      stochdepth_rate: Optional[float] = None,
      use_se: bool = False,
      se_ratio: float = 0.25,
      name: Optional[str] = None,
  ):
    """Constructor.

    Args:
      in_ch: Number of input channels.
      out_ch: Number of output channels.
      bottleneck_ratio: Fraction of `out_ch` used for the bottleneck width.
      kernel_size: Spatial size of the middle convolution.
      stride: Stride of the middle convolution (and of the shortcut conv,
        when a projection is used).
      beta: Pre-multiplier on the activated input (1 / expected input std).
      alpha: Multiplier on the residual branch before the skip addition.
      activation: Activation function applied before each convolution.
      skipinit_gain: Initializer for the learned 'skip_gain' scalar.
      stochdepth_rate: Stochastic-depth rate; only active when strictly
        between 0 and 1.
      use_se: Whether to apply Squeeze-and-Excite to the residual branch.
      se_ratio: Squeeze-and-Excite ratio.
      name: Haiku module name.
    """
    super().__init__(name=name)
    self.in_ch, self.out_ch = in_ch, out_ch
    self.kernel_size = kernel_size
    self.activation = activation
    self.beta, self.alpha = beta, alpha
    self.skipinit_gain = skipinit_gain
    self.use_se, self.se_ratio = use_se, se_ratio
    # Bottleneck width.
    self.width = int(self.out_ch * bottleneck_ratio)
    self.stride = stride
    # Conv 0 (typically expansion conv).
    self.conv0 = common.WSConv2D(
        self.width, kernel_shape=1, padding='SAME', name='conv0')
    # Grouped NxN conv.
    self.conv1 = common.WSConv2D(
        self.width,
        kernel_shape=kernel_size,
        stride=stride,
        padding='SAME',
        name='conv1',
    )
    # Conv 2, typically projection conv.
    self.conv2 = common.WSConv2D(
        self.out_ch, kernel_shape=1, padding='SAME', name='conv2')
    # Use shortcut conv on channel change or downsample.
    self.use_projection = stride > 1 or self.in_ch != self.out_ch
    if self.use_projection:
      self.conv_shortcut = common.WSConv2D(
          self.out_ch,
          kernel_shape=1,
          stride=stride,
          padding='SAME',
          name='conv_shortcut')
    # Are we using stochastic depth?
    self._has_stochdepth = (
        stochdepth_rate is not None and 0. < stochdepth_rate < 1.0)
    if self._has_stochdepth:
      self.stoch_depth = common.StochDepth(stochdepth_rate)
    if self.use_se:
      self.se = common.SqueezeExcite(self.out_ch, self.out_ch, self.se_ratio)

  # NOTE: annotation corrected — the body returns (output, res_avg_var),
  # not a single array (see the final return statement).
  def __call__(self, x: chex.Array, is_training: bool) -> tuple[chex.Array, chex.Array]:
    out = self.activation(x) * self.beta
    shortcut = x
    if self.use_projection:  # Downsample with conv1x1.
      shortcut = self.conv_shortcut(out)
    out = self.conv0(out)
    out = self.conv1(self.activation(out))
    out = self.conv2(self.activation(out))
    if self.use_se:
      out = 2 * self.se(out) * out

    # Get average residual standard deviation for reporting metrics.
    res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))

    # Apply stochdepth if applicable.
    if self._has_stochdepth:
      out = self.stoch_depth(out, is_training)

    # Apply the SkipInit gain (learned scalar, initialized by skipinit_gain).
    out = out * hk.get_parameter(
        'skip_gain', (), out.dtype, init=self.skipinit_gain)
    return out * self.alpha + shortcut, res_avg_var
|
{
"content_hash": "71fb7125d61caf56a47840a996172dfb",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 77,
"avg_line_length": 32.67741935483871,
"alnum_prop": 0.5942744323790721,
"repo_name": "deepmind/jax_privacy",
"id": "4c6f1823342cbba508fe7badc8dec847838bca6a",
"size": "7701",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jax_privacy/src/training/image_classification/models/imagenet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "210177"
},
{
"name": "Shell",
"bytes": "2187"
},
{
"name": "TeX",
"bytes": "279"
}
],
"symlink_target": ""
}
|
from inspect import isclass
from django.conf import settings
from django.core.files.storage import get_storage_class
from celery.datastructures import AttributeDict
from tower import ugettext_lazy as _
__all__ = ('LOG', 'LOG_BY_ID', 'LOG_KEEP',)
class _LOG(object):
    """Base class for activity-log action types.

    Subclasses declare an integer ``id``, a translatable ``format`` string,
    and optional marker attributes (``keep``, ``review_queue``,
    ``admin_event``, ``editor_event``, ``review_email_user``, ...) that the
    LOG_* lists at the bottom of this module detect via ``hasattr``.
    """
    # CSS-ish action class used by log types that set one; None means no
    # specific action styling.
    action_class = None
class CREATE_ADDON(_LOG):
id = 1
action_class = 'add'
format = _(u'{addon} was created.')
keep = True
class EDIT_PROPERTIES(_LOG):
""" Expects: addon """
id = 2
action_class = 'edit'
format = _(u'{addon} properties edited.')
class EDIT_DESCRIPTIONS(_LOG):
id = 3
action_class = 'edit'
format = _(u'{addon} description edited.')
class EDIT_CATEGORIES(_LOG):
id = 4
action_class = 'edit'
format = _(u'Categories edited for {addon}.')
class ADD_USER_WITH_ROLE(_LOG):
id = 5
action_class = 'add'
format = _(u'{0.name} ({1}) added to {addon}.')
keep = True
class REMOVE_USER_WITH_ROLE(_LOG):
id = 6
action_class = 'delete'
# L10n: {0} is the user being removed, {1} is their role.
format = _(u'{0.name} ({1}) removed from {addon}.')
keep = True
class EDIT_CONTRIBUTIONS(_LOG):
id = 7
action_class = 'edit'
format = _(u'Contributions for {addon}.')
class USER_DISABLE(_LOG):
id = 8
format = _(u'{addon} disabled.')
keep = True
class USER_ENABLE(_LOG):
id = 9
format = _(u'{addon} enabled.')
keep = True
# TODO(davedash): Log these types when pages are present
class SET_PUBLIC_STATS(_LOG):
id = 10
format = _(u'Stats set public for {addon}.')
keep = True
# TODO(davedash): Log these types when pages are present
class UNSET_PUBLIC_STATS(_LOG):
id = 11
format = _(u'{addon} stats set to private.')
keep = True
class CHANGE_STATUS(_LOG):
id = 12
# L10n: {0} is the status
format = _(u'{addon} status changed to {0}.')
keep = True
class ADD_PREVIEW(_LOG):
id = 13
action_class = 'add'
format = _(u'Preview added to {addon}.')
class EDIT_PREVIEW(_LOG):
id = 14
action_class = 'edit'
format = _(u'Preview edited for {addon}.')
class DELETE_PREVIEW(_LOG):
id = 15
action_class = 'delete'
format = _(u'Preview deleted from {addon}.')
class ADD_VERSION(_LOG):
id = 16
action_class = 'add'
format = _(u'{version} added to {addon}.')
keep = True
class EDIT_VERSION(_LOG):
id = 17
action_class = 'edit'
format = _(u'{version} edited for {addon}.')
class DELETE_VERSION(_LOG):
id = 18
action_class = 'delete'
# Note, {0} is a string not a version since the version is deleted.
# L10n: {0} is the version number
format = _(u'Version {0} deleted from {addon}.')
keep = True
class ADD_FILE_TO_VERSION(_LOG):
id = 19
action_class = 'add'
format = _(u'File {0.name} added to {version} of {addon}.')
class DELETE_FILE_FROM_VERSION(_LOG):
"""
Expecting: addon, filename, version
Because the file is being deleted, filename and version
should be strings and not the object.
"""
id = 20
action_class = 'delete'
format = _(u'File {0} deleted from {version} of {addon}.')
class APPROVE_VERSION(_LOG):
id = 21
action_class = 'approve'
format = _(u'{addon} {version} approved.')
short = _(u'Approved')
keep = True
review_email_user = True
review_queue = True
class PRELIMINARY_VERSION(_LOG):
id = 42
action_class = 'approve'
format = _(u'{addon} {version} given preliminary review.')
short = _(u'Preliminarily approved')
keep = True
review_email_user = True
review_queue = True
class REJECT_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 43
action_class = 'reject'
format = _(u'{addon} {version} rejected.')
short = _(u'Rejected')
keep = True
review_email_user = True
review_queue = True
class RETAIN_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 22
format = _(u'{addon} {version} retained.')
short = _(u'Retained')
keep = True
review_email_user = True
review_queue = True
class ESCALATE_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 23
format = _(u'{addon} {version} escalated.')
short = _(u'Escalated')
keep = True
review_email_user = True
review_queue = True
class REQUEST_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 24
format = _(u'{addon} {version} review requested.')
short = _(u'Review requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_INFORMATION(_LOG):
id = 44
format = _(u'{addon} {version} more information requested.')
short = _(u'More information requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_SUPER_REVIEW(_LOG):
id = 45
format = _(u'{addon} {version} super review requested.')
short = _(u'Super review requested')
keep = True
review_queue = True
class COMMENT_VERSION(_LOG):
id = 49
format = _(u'Comment on {addon} {version}.')
short = _(u'Comment')
keep = True
review_queue = True
hide_developer = True
class ADD_TAG(_LOG):
id = 25
action_class = 'tag'
format = _(u'{tag} added to {addon}.')
class REMOVE_TAG(_LOG):
id = 26
action_class = 'tag'
format = _(u'{tag} removed from {addon}.')
class ADD_TO_COLLECTION(_LOG):
id = 27
action_class = 'collection'
format = _(u'{addon} added to {collection}.')
class REMOVE_FROM_COLLECTION(_LOG):
id = 28
action_class = 'collection'
format = _(u'{addon} removed from {collection}.')
class ADD_REVIEW(_LOG):
id = 29
action_class = 'review'
format = _(u'{review} for {addon} written.')
# TODO(davedash): Add these when we do the admin site
class ADD_RECOMMENDED_CATEGORY(_LOG):
id = 31
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} featured in {0}.')
class REMOVE_RECOMMENDED_CATEGORY(_LOG):
id = 32
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} no longer featured in {0}.')
class ADD_RECOMMENDED(_LOG):
id = 33
format = _(u'{addon} is now featured.')
keep = True
class REMOVE_RECOMMENDED(_LOG):
id = 34
format = _(u'{addon} is no longer featured.')
keep = True
class ADD_APPVERSION(_LOG):
id = 35
action_class = 'add'
# L10n: {0} is the application, {1} is the version of the app
format = _(u'{0} {1} added.')
class CHANGE_USER_WITH_ROLE(_LOG):
""" Expects: author.user, role, addon """
id = 36
# L10n: {0} is a user, {1} is their role
format = _(u'{0.name} role changed to {1} for {addon}.')
keep = True
class CHANGE_POLICY(_LOG):
id = 38
action_class = 'edit'
format = _(u'{addon} policy changed.')
class CHANGE_ICON(_LOG):
id = 39
action_class = 'edit'
format = _(u'{addon} icon changed.')
class APPROVE_REVIEW(_LOG):
id = 40
action_class = 'approve'
format = _(u'{review} for {addon} approved.')
editor_format = _(u'{user} approved {review} for {addon}.')
keep = True
editor_event = True
class DELETE_REVIEW(_LOG):
"""Requires review.id and add-on objects."""
id = 41
action_class = 'review'
format = _(u'Review {review} for {addon} deleted.')
editor_format = _(u'{user} deleted {review} for {addon}.')
keep = True
editor_event = True
class MAX_APPVERSION_UPDATED(_LOG):
id = 46
format = _(u'Application max version for {version} updated.')
class BULK_VALIDATION_EMAILED(_LOG):
id = 47
format = _(u'Authors emailed about compatibility of {version}.')
class BULK_VALIDATION_USER_EMAILED(_LOG):
    # NOTE(review): id 130 is also declared by FAIL_ADDITIONAL_REVIEW later
    # in this module, so LOG_BY_ID keeps only one of the two classes —
    # confirm which id is canonical before relying on the lookup.
    id = 130
    format = _(u'Email sent to Author about add-on compatibility.')
class CHANGE_PASSWORD(_LOG):
id = 48
format = _(u'Password changed.')
class MAKE_PREMIUM(_LOG):
id = 50
format = _(u'{addon} changed to premium.')
class MANIFEST_UPDATED(_LOG):
id = 52
format = _(u'{addon} manifest updated.')
class APPROVE_VERSION_PRIVATE(_LOG):
id = 53
action_class = 'approve'
format = _(u'{addon} {version} approved but private.')
short = _(u'Approved but private')
keep = True
review_email_user = True
review_queue = True
class PURCHASE_ADDON(_LOG):
id = 54
format = _(u'{addon} purchased.')
class INSTALL_ADDON(_LOG):
id = 55
format = _(u'{addon} installed.')
class REFUND_REQUESTED(_LOG):
id = 56
format = _(u'Refund requested for {addon}')
class REFUND_DECLINED(_LOG):
id = 57
format = _(u'Refund declined for {addon} for {0}.')
class REFUND_GRANTED(_LOG):
id = 58
format = _(u'Refund granted for {addon} for {0}.')
class REFUND_INSTANT(_LOG):
id = 59
format = _(u'Instant refund granted for {addon}.')
class USER_EDITED(_LOG):
id = 60
format = _(u'Account updated.')
class RECEIPT_CHECKED(_LOG):
id = 65
format = _(u'Valid receipt was checked for {addon}.')
class ESCALATION_CLEARED(_LOG):
id = 66
format = _(u'Escalation cleared for {addon}.')
short = _(u'Escalation cleared')
keep = True
review_queue = True
class APP_DISABLED(_LOG):
id = 67
format = _(u'{addon} banned.')
short = _(u'App banned')
keep = True
review_queue = True
class ESCALATED_HIGH_ABUSE(_LOG):
id = 68
format = _(u'{addon} escalated because of high number of abuse reports.')
short = _(u'High Abuse Reports')
keep = True
review_queue = True
class ESCALATED_HIGH_REFUNDS(_LOG):
id = 69
format = _(u'{addon} escalated because of high number of refund requests.')
short = _(u'High Refund Requests')
keep = True
review_queue = True
class REREVIEW_MANIFEST_CHANGE(_LOG):
id = 70
format = _(u'{addon} re-reviewed because of manifest change.')
short = _(u'Manifest Change')
keep = True
review_queue = True
class REREVIEW_PREMIUM_TYPE_UPGRADE(_LOG):
id = 71
format = _(u'{addon} re-reviewed because app upgraded premium type.')
short = _(u'Premium Type Upgrade')
keep = True
review_queue = True
class REREVIEW_CLEARED(_LOG):
id = 72
format = _(u'Re-review cleared for {addon}.')
short = _(u'Re-review cleared')
keep = True
review_queue = True
class ESCALATE_MANUAL(_LOG):
id = 73
format = _(u'{addon} escalated by reviewer.')
short = _(u'Reviewer escalation')
keep = True
review_queue = True
# TODO(robhudson): Escalation log for editor escalation..
class VIDEO_ERROR(_LOG):
id = 74
format = _(u'Video removed from {addon} because of a problem with '
u'the video. ')
short = _(u'Video removed')
class REREVIEW_DEVICES_ADDED(_LOG):
id = 75
format = _(u'{addon} re-review because of new device(s) added.')
short = _(u'Device(s) Added')
keep = True
review_queue = True
class REVIEW_DEVICE_OVERRIDE(_LOG):
id = 76
format = _(u'{addon} device support manually changed by reviewer.')
short = _(u'Device(s) Changed by Reviewer')
keep = True
review_queue = True
class WEBAPP_RESUBMIT(_LOG):
id = 77
format = _(u'{addon} resubmitted for review.')
short = _(u'App Resubmission')
keep = True
review_queue = True
class ESCALATION_VIP_APP(_LOG):
id = 78
format = _(u'{addon} auto-escalated because its a VIP app.')
short = _(u'VIP auto-escalation')
keep = True
review_queue = True
class REREVIEW_MANIFEST_URL_CHANGE(_LOG):
id = 79
format = _(u'{addon} re-reviewed because of manifest URL change.')
short = _(u'Manifest URL Change')
keep = True
review_queue = True
class ESCALATION_PRERELEASE_APP(_LOG):
id = 80
format = _(u'{addon} auto-escalated because its a prerelease app.')
short = _(u'Prerelease auto-escalation')
keep = True
review_queue = True
class REREVIEW_ABUSE_APP(_LOG):
id = 81
format = _(
u'{addon} re-reviewed because abuse reports need investigation.')
short = _(u'Abuse reports investigation')
keep = True
review_queue = True
class REREVIEW_MANUAL(_LOG):
id = 82
format = _(u'{addon} manually flagged for re-review.')
short = _(u'Manual re-review')
keep = True
review_queue = True
class CUSTOM_TEXT(_LOG):
id = 98
format = '{0}'
class CUSTOM_HTML(_LOG):
id = 99
format = '{0}'
class OBJECT_ADDED(_LOG):
id = 100
format = _(u'Created: {0}.')
admin_event = True
class OBJECT_EDITED(_LOG):
id = 101
format = _(u'Edited field: {2} set to: {0}.')
admin_event = True
class OBJECT_DELETED(_LOG):
id = 102
format = _(u'Deleted: {1}.')
admin_event = True
class ADMIN_USER_EDITED(_LOG):
id = 103
format = _(u'User {user} edited, reason: {1}')
admin_event = True
class ADMIN_USER_ANONYMIZED(_LOG):
id = 104
format = _(u'User {user} anonymized.')
admin_event = True
class ADMIN_USER_RESTRICTED(_LOG):
id = 105
format = _(u'User {user} restricted.')
admin_event = True
class ADMIN_VIEWED_LOG(_LOG):
id = 106
format = _(u'Admin {0} viewed activity log for {user}.')
admin_event = True
class EDIT_REVIEW(_LOG):
id = 107
action_class = 'review'
format = _(u'{review} for {addon} updated.')
class THEME_REVIEW(_LOG):
id = 108
action_class = 'review'
format = _(u'{addon} reviewed.')
class GROUP_USER_ADDED(_LOG):
id = 120
action_class = 'access'
format = _(u'User {0.name} added to {group}.')
keep = True
admin_event = True
class GROUP_USER_REMOVED(_LOG):
id = 121
action_class = 'access'
format = _(u'User {0.name} removed from {group}.')
keep = True
admin_event = True
class REVIEW_FEATURES_OVERRIDE(_LOG):
id = 122
format = _(u'{addon} minimum requirements manually changed by reviewer.')
short = _(u'Requirements Changed by Reviewer')
keep = True
review_queue = True
class REREVIEW_FEATURES_CHANGED(_LOG):
id = 123
format = _(u'{addon} minimum requirements manually changed.')
short = _(u'Requirements Changed')
keep = True
review_queue = True
class CHANGE_VERSION_STATUS(_LOG):
id = 124
# L10n: {0} is the status
format = _(u'{version} status changed to {0}.')
keep = True
class DELETE_USER_LOOKUP(_LOG):
id = 125
# L10n: {0} is the status
format = _(u'User {0.name} {0.id} deleted via lookup tool.')
keep = True
class CONTENT_RATING_TO_ADULT(_LOG):
id = 126
format = _('{addon} content rating changed to Adult.')
review_queue = True
class CONTENT_RATING_CHANGED(_LOG):
id = 127
format = _('{addon} content rating changed.')
class PRIORITY_REVIEW_REQUESTED(_LOG):
id = 128
format = _(u'Priority review requested for {addon}.')
short = _(u'Priority Review')
keep = True
review_queue = True
class PASS_ADDITIONAL_REVIEW(_LOG):
id = 129
action_class = 'review'
format = _(u'{addon} {version} passed the {queue} review.')
review_queue = True
class FAIL_ADDITIONAL_REVIEW(_LOG):
    # NOTE(review): id 130 collides with BULK_VALIDATION_USER_EMAILED above;
    # LOG_BY_ID will map 130 to whichever class vars() yields last —
    # confirm which id is canonical.
    id = 130
    action_class = 'review'
    format = _(u'{addon} {version} failed the {queue} review.')
    review_queue = True
class APP_ABUSE_MARKREAD(_LOG):
"""Requires report.id and add-on objects."""
id = 131
format = _(u'Abuse report {report} for {addon} read.')
editor_format = _(u'{user} marked read {report} for {addon}.')
keep = True
editor_event = True
class WEBSITE_ABUSE_MARKREAD(_LOG):
"""Requires report.id and website objects."""
id = 132
format = _(u'Abuse report {report} for {website} read.')
editor_format = _(u'{user} marked read {report} for {website}.')
keep = True
editor_event = True
# Adding a log type? If it's a review_queue log type, you have to add a
# note_type to constants/comm.py.
# Collect every _LOG subclass defined above into the public lookup tables.
LOGS = [x for x in vars().values()
        if isclass(x) and issubclass(x, _LOG) and x != _LOG]

# Map id -> log class. NOTE(review): ids are assumed unique, but 130 is
# declared by both BULK_VALIDATION_USER_EMAILED and FAIL_ADDITIONAL_REVIEW,
# so one of them is silently clobbered here — confirm intent.
LOG_BY_ID = dict((l.id, l) for l in LOGS)
# Attribute-style access by class name, e.g. LOG.CREATE_ADDON.
LOG = AttributeDict((l.__name__, l) for l in LOGS)
# Marker attributes are detected by presence (hasattr), not truthiness.
LOG_ADMINS = [l.id for l in LOGS if hasattr(l, 'admin_event')]
LOG_KEEP = [l.id for l in LOGS if hasattr(l, 'keep')]
LOG_EDITORS = [l.id for l in LOGS if hasattr(l, 'editor_event')]
LOG_REVIEW_QUEUE = [l.id for l in LOGS if hasattr(l, 'review_queue')]

# Is the user emailed the message?
LOG_REVIEW_EMAIL_USER = [l.id for l in LOGS if hasattr(l, 'review_email_user')]

# Logs *not* to show to the developer.
LOG_HIDE_DEVELOPER = [l.id for l in LOGS
                      if (getattr(l, 'hide_developer', False) or
                          l.id in LOG_ADMINS)]
def log(action, *args, **kw):
    """Create and save an ActivityLog row for ``action`` and index it.

    e.g. mkt.log(mkt.LOG.CREATE_ADDON, []),
         mkt.log(mkt.LOG.ADD_FILE_TO_VERSION, file, version)

    :param action: one of the _LOG classes above; its ``id`` is stored.
    :param args: objects to associate with the entry. Instances of
        Webapp/Version/UserProfile/Group get dedicated index rows; a
        ``(model_class, pk)`` tuple indexes by primary key instead.
    :param kw: recognized keys: ``user`` (defaults to the current user),
        ``details`` (dict; a ``comments`` key also creates a CommentLog),
        ``created`` (override timestamp), ``attachments`` (a formset).
    :return: the saved ActivityLog, or None when no user is available.
    """
    # Function-level imports — presumably to avoid circular imports with
    # the model modules (TODO confirm).
    from mkt import get_user
    from mkt.developers.models import (ActivityLog, ActivityLogAttachment,
                                       AppLog, CommentLog, GroupLog, UserLog,
                                       VersionLog)
    from mkt.access.models import Group
    from mkt.site.utils import log as logger_log
    from mkt.webapps.models import Webapp
    from mkt.users.models import UserProfile
    from mkt.versions.models import Version
    user = kw.get('user', get_user())
    if not user:
        # Without a user there is nothing to attribute the action to.
        logger_log.warning('Activity log called with no user: %s' % action.id)
        return
    al = ActivityLog(user=user, action=action.id)
    al.arguments = args
    if 'details' in kw:
        al.details = kw['details']
    al.save()
    if 'details' in kw and 'comments' in al.details:
        CommentLog(comments=al.details['comments'], activity_log=al).save()
    # TODO(davedash): post-remora this may not be necessary.
    if 'created' in kw:
        al.created = kw['created']
        # Double save necessary since django resets the created date on save.
        al.save()
    if 'attachments' in kw:
        formset = kw['attachments']
        # NOTE(review): get_storage_class and settings are module-level
        # names not visible in this chunk — assumed imported at file top.
        storage = get_storage_class()()
        for form in formset:
            data = form.cleaned_data
            if 'attachment' in data:
                attachment = data['attachment']
                storage.save('%s/%s' % (settings.REVIEWER_ATTACHMENTS_PATH,
                                        attachment.name), attachment)
                ActivityLogAttachment(activity_log=al,
                                      description=data['description'],
                                      mimetype=attachment.content_type,
                                      filepath=attachment.name).save()
    # Create the per-model index rows for each argument. Tuples index by
    # raw primary key; instances index by object.
    for arg in args:
        if isinstance(arg, tuple):
            if arg[0] == Webapp:
                AppLog(addon_id=arg[1], activity_log=al).save()
            elif arg[0] == Version:
                VersionLog(version_id=arg[1], activity_log=al).save()
            elif arg[0] == UserProfile:
                UserLog(user_id=arg[1], activity_log=al).save()
            elif arg[0] == Group:
                GroupLog(group_id=arg[1], activity_log=al).save()
        if isinstance(arg, Webapp):
            AppLog(addon=arg, activity_log=al).save()
        elif isinstance(arg, Version):
            VersionLog(version=arg, activity_log=al).save()
        elif isinstance(arg, UserProfile):
            # Index by any user who is mentioned as an argument.
            UserLog(activity_log=al, user=arg).save()
        elif isinstance(arg, Group):
            GroupLog(group=arg, activity_log=al).save()
    # Index by every user
    UserLog(activity_log=al, user=user).save()
    return al
|
{
"content_hash": "2875705a0d45c02720a3cca760d87cdd",
"timestamp": "",
"source": "github",
"line_count": 816,
"max_line_length": 79,
"avg_line_length": 24.026960784313726,
"alnum_prop": 0.6023666224625115,
"repo_name": "luckylavish/zamboni",
"id": "fbde0d5a5e98d10aeef90fcdab69a3b328969785",
"size": "19606",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "mkt/site/log.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "357271"
},
{
"name": "HTML",
"bytes": "2278036"
},
{
"name": "JavaScript",
"bytes": "533454"
},
{
"name": "Makefile",
"bytes": "4281"
},
{
"name": "Python",
"bytes": "4353206"
},
{
"name": "Shell",
"bytes": "11156"
},
{
"name": "Smarty",
"bytes": "1159"
}
],
"symlink_target": ""
}
|
import logging
import sys

import lirc
from soco import SoCo
from soco.exceptions import SoCoException

import config
def is_playing(transport_info):
    """Return True if the transport state indicates active playback.

    :param transport_info: dict as returned by
        SoCo.get_current_transport_info(); only the
        'current_transport_state' key is read.
    :return: True for 'PLAYING'; False for every other state
        ('PAUSED_PLAYBACK', 'STOPPED', 'TRANSITIONING', ...).
    :rtype: bool
    """
    # A direct comparison covers all states, so callers always get a real
    # bool instead of an implicit None for unlisted states.
    return transport_info['current_transport_state'] == 'PLAYING'
def run():
    """Poll the LIRC socket forever and map remote buttons to Sonos actions.

    Connects to the Sonos player configured in config.IP_ADDRESS, then
    loops on lirc.nextcode(), translating button names into SoCo calls.
    """
    sonos = SoCo(config.IP_ADDRESS)
    logging.info(u"Starting: {0}".format(sonos.player_name))
    # Initialize LIRC once; re-initializing on every loop iteration opens
    # a fresh socket each time.
    lirc.init("sore")
    while True:
        val = lirc.nextcode()
        if not val:
            continue
        try:
            button = val[0]
            logging.info("hello: {0}".format(button))
            if button == 'play':
                # Toggle: pause when playing, resume otherwise.
                if not is_playing(sonos.get_current_transport_info()):
                    sonos.play()
                else:
                    sonos.pause()
            elif button == 'plus':
                sonos.volume += 2
            elif button == 'minus':
                sonos.volume -= 2
            elif button == 'next':
                sonos.next()
            elif button == 'previous':
                sonos.previous()
            elif button == 'menu':
                # Play a hard-coded favorite radio station, taken from
                # sonos.get_favorite_radio_stations():
                # {u'uri': 'x-sonosapi-stream:s44255?sid=254&flags=8224&sn=0', u'title': 'ORF - Radio Wien'}
                sonos.play_uri(uri='x-sonosapi-stream:s44255?sid=254&flags=8224&sn=0', title='ORF - Radio Wien', start=True)
        except SoCoException as err:
            logging.error("SoCo Error: {0}".format(err))
        except Exception:
            # Catch-all so one bad keypress can't kill the loop; narrowed
            # from a bare except so KeyboardInterrupt still stops the
            # program. 'sys' is imported at the top of the file.
            logging.error("Error: {0}".format(sys.exc_info()[1]))
# Script entry point: start the remote-control polling loop.
if __name__ == "__main__":
    # TODO: Logging
    # logging.basicConfig(filename="/home/pi/SonosRemote/sore.log", level=logging.INFO)
    run()
|
{
"content_hash": "bfcf1eb40a6fefab5cbc393bef6a4888",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 128,
"avg_line_length": 29.636363636363637,
"alnum_prop": 0.5,
"repo_name": "prebm/SonosRemote",
"id": "1746867d6849f74024a5fa2e5a20bf905eaffe6e",
"size": "2032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sore.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2546"
},
{
"name": "Shell",
"bytes": "5312"
}
],
"symlink_target": ""
}
|
"""
pinq.queryable
~~~~~~~~~~~~~~
This module implements the Queryable class for querying iterables using the LINQ api.
:copyright: (c) 2016 by David Shriver.
:license: MIT, see LICENSE for more details.
"""
from __future__ import division
from .compat import *
from .predicates import true
from .transforms import identity, select_i
class Queryable(object):
"""A wrapper for iterable objects to allow querying of the underlying data.
"""
    def __init__(self, iterator):
        """Wrap *iterator* so its elements can be queried.

        :param iterator: The iterable backing this queryable sequence.
        """
        self.iterator = iterator
    def __iter__(self):
        """Yield the elements of the underlying iterable.

        The stored iterator is tee'd on every call so the sequence stays
        re-iterable even when it is backed by a one-shot iterator.
        """
        self.iterator, iterator = tee(self.iterator)
        for element in iterator:
            yield element
def aggregate(self, accumulator, seed=None, result_transform=identity):
"""Applies an accumulator function over a sequence.
:param accumulator: The accumulator function to apply.
:type accumulator: function
:param seed: (optional) The initial accumulator value.
:param result_transform: (optional) A transform function to apply to the result.
:type result_transform: function
:return: The accumulated value.
:raise TypeError: if 'accumulator' is not callable
:raise TypeError: if 'result_transform' is not callable
"""
if not callable(accumulator):
raise TypeError(
"Value for 'accumulator' is not callable.")
if not callable(result_transform):
raise TypeError(
"Value for 'result_transform' is not callable.")
if seed is not None:
return result_transform(reduce(accumulator, self, seed))
return result_transform(reduce(accumulator, self))
def all(self, predicate):
"""Determines whether all elements of the sequence satisfy a condition.
:param predicate: A function to test each element for a condition.
:type predicate: function
:return: True if every element satisfies the condition, or the sequence is empty.
:rtype: bool
:raise TypeError: if 'predicate' is not callable
"""
if not callable(predicate):
raise TypeError("Value for 'predicate' is not callable.")
for element in self:
if not predicate(element):
return False
return True
def any(self, predicate=true):
"""Determines whether any element of the sequence satisfies a condition.
:param predicate: (optional) A function to test each element for a condition.
:type predicate: function
:return: True if any element satisfies the condition.
:rtype: bool
:raise TypeError: if 'predicate' is not callable
"""
if not callable(predicate):
raise TypeError("Value for 'predicate' is not callable.")
for element in self:
if predicate(element):
return True
return False
def average(self, transform=identity):
"""Computes the average of the elements in the sequence.
:param transform: (optional) A transform function to invoke on each element of the sequence.
:type transform: function:
:return: The average value of the elements in the sequence.
:rtype: float
:raise TypeError: if 'transform' is not callable
"""
if not callable(transform):
raise TypeError("Value for 'transform' is not callable.")
count = 0
value_sum = 0
for element in self:
count += 1
value_sum += transform(element)
return value_sum / count
    def cast(self, to_type):
        """Casts the elements of the sequence to the specified type.

        :param to_type: The type to cast elements to.
        :type to_type: type
        :return: The elements of the sequence cast to the specified type.
        :rtype: :class:`Queryable`
        :raise TypeError: if 'to_type' is not a type
        """
        if not isinstance(to_type, type):
            raise TypeError("Value for 'to_type' is not a type.")
        return Queryable((to_type(element) for element in self))
def concat(self, other):
"""Concatenates two sequences.
:param other: The sequence to concatenate to this sequence.
:type other: Iterable
:return: A Queryable containing the concatenated elements of the two input sequences.
:rtype: :class:`Queryable`
:raise TypeError: if 'other' is not Iterable
"""
if not isinstance(other, Iterable):
raise TypeError("Value for 'other' is not an Iterable.")
return Queryable(chain(self, other))
def contains(self, value, equality_comparer=eq):
"""Determines whether the sequence contains the specified value.
:param value: The value to find in the sequence.
:param equality_comparer: (optional) An equality comparer to compare values.
:type equality_comparer: function
:return: True if the sequence contains the specified value.
:rtype: bool
:raise TypeError: if 'equality_comparer' is not callable
"""
if not callable(equality_comparer):
raise TypeError("Value for 'equality_comparer' is not callable.")
for element in self:
if equality_comparer(value, element):
return True
return False
def count(self, predicate=true):
"""Returns the number of elements in the sequence.
:param predicate: (optional) A function to test each element for a condition:
:type predicate: function
:return: The number of elements that satisfy the specified condition.
:rtype: int
:raise TypeError: if 'predicate' is not callable
"""
if not callable(predicate):
raise TypeError("Value for 'predicate' is not callable.")
count = 0
for element in self:
if predicate(element):
count += 1
return count
def default_if_empty(self, default_value=None):
"""Returns the sequence or a sequence with a single default value if the sequence is empty.
:param default_value: (optional) The default value to return.
:return:This sequence, or a sequence containing 'default_value' if it is empty.
:rtype: :class:`Queryable`
"""
for _ in self:
return self
return Queryable([default_value])
    def difference(self, other, key_selector=identity):
        """Returns the set difference of the two sequences.

        Alias for :meth:`except_values`.

        :param other: An iterable of elements to be removed from this sequence.
        :type other: Iterable
        :param key_selector: (optional) A function to select a key for comparing values.
        :type key_selector: function
        :return: The set difference of this sequence and the provided sequence.
        :rtype: :class:`Queryable`
        :raise TypeError: if 'other' is not an Iterable
        :raise TypeError: if 'key_selector' is not callable
        """
        return self.except_values(other, key_selector)
def distinct(self, key_selector=identity):
"""Returns distinct elements fromt the sequence.
:param key_selector: (optional) An function to select a key for comparing values.
:type key_selector: function
:return: A sequence of distinct elements from this sequence.
:rtype: :class:`Queryable`
:raise TypeError: if 'key_selector' is not callable
"""
if not callable(key_selector):
raise TypeError("Value for 'key_selector' is not callable.")
seen = {}
def _distinct(value):
key = key_selector(value)
if key in seen:
return False
seen[key] = 1
return True
return Queryable((element for element in self if _distinct(element)))
def element_at(self, index):
"""Returns the element at the specified location in the sequence.
:param index: The zero-based index of the element to retrieve.
:type index: int
:return: The element at the specified location in the sequence.
:raise TypeError: if 'index' is not an int
:raise IndexError: if 'index' is less than zero or larger than the number of elements
"""
if not isinstance(index, int):
raise TypeError("Value for 'index' is not an integer.")
elif index < 0:
raise IndexError("The provided index is out of range.")
count = 0
for element in self:
if count == index:
return element
count += 1
raise IndexError("The provided index is out of range.")
    def element_at_or_default(self, index, default_value=None):
        """Returns the element at 'index', or a default if out of range.

        :param index: The zero-based index of the element to retrieve.
        :type index: int
        :param default_value: (optional) The value to return when 'index'
            is out of range (including negative indices).
        :return: The element at the specified location in the sequence.
        :raise TypeError: if 'index' is not an int
        """
        # Delegate to element_at so the type check on 'index' is shared;
        # only out-of-range errors become the default value.
        try:
            return self.element_at(index)
        except IndexError:
            return default_value
def except_values(self, other, key_selector=identity):
"""Returns the set difference of the two sequences.
:param other: An iterable of elements to be removed from this sequence.
:type other: Iterable
:param key_selector: (optional) An function to select a key for comparing values.
:type key_selector: function
:return: The set difference of this sequence and the provided sequence.
:rtype: :class:`Queryable`
:raise TypeError: if 'other' is not an Interable
:raise TypeError: if 'key_selector' is not callable
"""
if not isinstance(other, Iterable):
raise TypeError("Value for 'other' is not an Iterable.")
if not callable(key_selector):
raise TypeError("Value for 'key_selector' is not callable.")
@lru_cache(1, False)
def _seen():
return dict([(key_selector(element), 1) for element in other])
def _removed(value):
seen = _seen()
key = key_selector(value)
if key in seen:
return True
return False
return Queryable((element for element in self if not _removed(element)))
def first(self, predicate=true):
"""Returns the first element in the sequence.
:param predicate: (optional) A function to test each element for a condition.
:type predicate: function
:return: The first element of the sequence satisfying the condition.
:raise TypeError: if 'predicate' is not callable
:raise ValueError: if the sequence is empty
:raise ValueError: if no element satisfies the condition
"""
if not callable(predicate):
raise TypeError("Value for 'predicate' is not callable.")
count = 0
for element in self:
count += 1
if predicate(element):
return element
if count == 0:
raise ValueError("The source sequence is empty.")
raise ValueError("No element satisfies the predicate.")
def first_or_default(self, predicate=true, default_value=None):
"""Returns the first element in the sequence or a default value if empty.
:param predicate: (optional) A function to test each element for a condition.
:type predicate: function
:param default_value: (optional) The default value to return if empty.
:return: The first element of the sequence satisfying the condition.
:raise TypeError: if 'predicate' is not callable
"""
if not callable(predicate):
raise TypeError("Value for 'predicate' is not callable.")
count = 0
for element in self:
count += 1
if predicate(element):
return element
return default_value
    def group_by(self, key_selector, value_transform=identity, result_transform=identity):
        """Groups the elements of the sequence according to the specified key selector function.

        :param key_selector: A function to extract the key for each element.
        :type key_selector: function
        :param value_transform: A transform function to be applied to each element.
        :type value_transform: function
        :param result_transform: A transform function applied to each group;
            may accept either one argument (a ``(key, values)`` pair) or two
            arguments (``key, values``) — dispatched on its arity below.
        :type result_transform: function
        :return: A sequence where each element represents the transformation of a group and its key.
        :rtype: :class:`Queryable`
        :raise TypeError: if 'key_selector' is not callable
        :raise TypeError: if 'value_transform' is not callable
        :raise TypeError: if 'result_transform' is not callable
        """
        if not callable(key_selector):
            raise TypeError("Value for 'key_selector' is not callable.")
        if not callable(value_transform):
            raise TypeError("Value for 'value_transform' is not callable.")
        if not callable(result_transform):
            raise TypeError("Value for 'result_transform' is not callable.")
        # NOTE(review): __code__.co_argcount assumes a plain Python
        # function; builtins and functools.partial objects have no
        # __code__ attribute.
        # groupby only merges *adjacent* equal keys, hence the sort by the
        # same key first.
        if result_transform.__code__.co_argcount == 1:
            return Queryable((result_transform(
                (key, [value_transform(element) for element in group])) for key, group in groupby(
                    sorted(self, key=key_selector), key=key_selector)))
        else:
            return Queryable(
                (result_transform(key, [value_transform(element) for element in group])
                 for key, group in groupby(sorted(self, key=key_selector), key=key_selector)))
    def group_join(self, other, key_selector, other_key_selector, result_transform):
        """Correlates the elements of the two sequences and groups the results.

        :param other: The sequence to join this sequence.
        :type other: Iterable
        :param key_selector: A function to extract a key from each element of this sequence.
        :type key_selector: function
        :param other_key_selector: A function to extract a key from each element of 'other'.
        :type other_key_selector: function
        :param result_transform: A function to create a result from an item
            and its matching group; may accept one argument (an
            ``(element, group)`` pair) or two arguments.
        :type result_transform: function
        :return: The elements of the two sequences after performing a grouped join.
        :rtype: :class:`Queryable`
        :raise TypeError: if 'other' is not an Iterable
        :raise TypeError: if 'key_selector' is not callable
        :raise TypeError: if 'other_key_selector' is not callable
        :raise TypeError: if 'result_transform' is not callable
        """
        if not isinstance(other, Iterable):
            raise TypeError("Value for 'other' is not an Iterable.")
        if not callable(key_selector):
            raise TypeError("Value for 'key_selector' is not callable.")
        if not callable(other_key_selector):
            raise TypeError("Value for 'other_key_selector' is not callable.")
        if not callable(result_transform):
            raise TypeError("Value for 'result_transform' is not callable.")
        # Bucket 'other' by key once; lru_cache(1) memoizes the dict so it
        # is built lazily on first use and reused for every element.
        @lru_cache(1, False)
        def _other_groups():
            groups = defaultdict(list)
            for element in other:
                key = other_key_selector(element)
                groups[key].append(element)
            return groups
        # Dispatch on the arity of result_transform (see group_by).
        if result_transform.__code__.co_argcount == 1:
            return Queryable((result_transform((
                element, _other_groups()[key_selector(element)])) for element in self))
        else:
            return Queryable((result_transform(
                element, _other_groups()[key_selector(element)]) for element in self))
def intersect(self, other, key_selector=identity):
"""Returns the set intersection of the two sequences.
:param other: A sequence to compute the intersection with.
:type other: Iterable
:param key_selector: (optional) A function to extract a key for each element for comparison.
:type key_selector: function
:return: A sequence of distinct elements that are in both of the provided seequences.
:rtype: :class:`Queryable`
:raise TypeError: if 'other' is not an Iterable
:raise TypeError: if 'key_selector' is not callable
"""
if not isinstance(other, Iterable):
raise TypeError("Value for 'other' is not an Iterable.")
if not callable(key_selector):
raise TypeError("Value for 'key_selector' is not callable.")
@lru_cache(1, False)
def _other_set():
return dict([(key_selector(element), 1) for element in other])
seen = {}
def _intersects(element):
key = key_selector(element)
if key in _other_set() and not key in seen:
seen[key] = 1
return True
return False
return Queryable((element for element in self if _intersects(element)))
    def join(self, other, key_selector, other_key_selector, result_transform):
        """Correlates the elements of the two sequences (inner join).

        :param other: The sequence to join this sequence.
        :type other: Iterable
        :param key_selector: A function to extract a key from each element of this sequence.
        :type key_selector: function
        :param other_key_selector: A function to extract a key from each element of 'other'.
        :type other_key_selector: function
        :param result_transform: A function to create a result from an item
            and its match; may accept one argument (an ``(element,
            other_element)`` pair) or two arguments.
        :type result_transform: function
        :return: The elements of the two sequences after performing an inner join.
        :rtype: :class:`Queryable`
        :raise TypeError: if 'other' is not an Iterable
        :raise TypeError: if 'key_selector' is not callable
        :raise TypeError: if 'other_key_selector' is not callable
        :raise TypeError: if 'result_transform' is not callable
        """
        if not isinstance(other, Iterable):
            raise TypeError("Value for 'other' is not an Iterable.")
        if not callable(key_selector):
            raise TypeError("Value for 'key_selector' is not callable.")
        if not callable(other_key_selector):
            raise TypeError("Value for 'other_key_selector' is not callable.")
        if not callable(result_transform):
            raise TypeError("Value for 'result_transform' is not callable.")
        # Bucket 'other' by key once, memoized via lru_cache(1) so the
        # dict is built lazily on first use (see group_join).
        @lru_cache(1, False)
        def _other_groups():
            groups = defaultdict(list)
            for element in other:
                key = other_key_selector(element)
                groups[key].append(element)
            return groups
        # Each element is paired with every matching element of 'other';
        # unmatched elements produce no pairs (inner-join semantics).
        if result_transform.__code__.co_argcount == 1:
            return Queryable((result_transform(element) for element in chain.from_iterable(
                ([(element, other_element) for other_element in _other_groups()[
                    key_selector(element)]] for element in self))))
        else:
            return Queryable((result_transform(*element) for element in chain.from_iterable(
                ([(element, other_element) for other_element in _other_groups()[
                    key_selector(element)]] for element in self))))
def last(self, predicate=true):
"""Returns the last item of the sequence.
:param predicate: (optional) A function to test each element for a condition.
:type predicate: function
:return: The last item in the sequence that satisfies the condition.
:raise TypeError: if 'predicate' is not callable
:raise ValueError: if the source iterable is empty
:raise ValueError: if no element satisfies condition
"""
if not callable(predicate):
raise TypeError("Value for 'predicate' is not callable.")
last_element = None
found_element = False
count = 0
for element in self:
count += 1
if predicate(element):
found_element = True
last_element = element
if found_element:
return last_element
if count == 0:
raise ValueError("The source sequence is empty.")
raise ValueError("No element satisfies the predicate.")
def last_or_default(self, predicate=true, default_value=None):
"""Returns the last item of the sequence.
:param predicate: (optional) A function to test each element for a condition.
:type predicate: function
:param default_value: (optional) The default value to return if empty.
:return: The last item in the sequence that satisfies the condition.
:raise TypeError: if 'predicate' is not callable
:raise ValueError: if the source iterable is empty
:raise ValueError: if no element satisfies condition
"""
if not callable(predicate):
raise TypeError("Value for 'predicate' is not callable.")
last_element = default_value
count = 0
for element in self:
count += 1
if predicate(element):
last_element = element
return last_element
def long_count(self, predicate=true):
"""Returns the number of elements in the sequence.
:param predicate: (optional) A function to test each element for a condition:
:type predicate: function
:return: The number of elements that satisfy the specified condition.
:rtype: int
:raise TypeError: if 'predicate' is not callable
"""
if not callable(predicate):
raise TypeError("Value for 'predicate' is not callable.")
count = 0
for element in self:
if predicate(element):
count += 1
return count
def max(self, transform=identity):
"""Returns the maximum element in the sequence.
:param transform: (optional) A transformation function to apply to each element.
:type transform: function
:return: The maximum element in the sequence.
:rtype: int
:raise TypeError: if 'transform' is not callable
"""
if not callable(transform):
raise TypeError("Value for 'transform' is not callable.")
return max((transform(element) for element in self))
def min(self, transform=identity):
"""Returns the minimum element in the sequence.
:param transform: (optional) A transformation function to apply to each element.
:type transform: function
:return: The minimum element in the sequence.
:rtype: int
:raise TypeError: if 'transform' is not callable
"""
if not callable(transform):
raise TypeError("Value for 'transform' is not callable.")
return min((transform(element) for element in self))
def of_type(self, of_type):
"""Filters the elements based on the specified type.
:param of_type: The type to keep.
:type of_type: type
:return: The elements of the sequence with the specified type.
:rtype: :class:`Queryable`
:raise TypeError: if 'of_type' is not a type
"""
if not isinstance(of_type, type):
raise TypeError("Value for 'of_type' is not a type.")
return Queryable((element for element in self if isinstance(element, of_type)))
    def order_by(self, key_selector):
        """Sorts the elements of the sequence in ascending order according to a key.

        :param key_selector: A function to extract a key from an element.
        :type key_selector: function
        :return: The elements of the sequence sorted in ascending order.
        :rtype: :class:`OrderedQueryable`
        :raise TypeError: if 'key_selector' is not callable
        """
        if not callable(key_selector):
            raise TypeError("Value for 'key_selector' is not callable.")
        # OrderedQueryable takes (key_selector, descending) pairs; False
        # selects ascending order (contrast order_by_descending).
        return OrderedQueryable(self, [(key_selector, False)])
    def order_by_descending(self, key_selector):
        """Sorts the elements of the sequence in descending order according to a key.

        :param key_selector: A function to extract a key from an element.
        :type key_selector: function
        :return: The elements of the sequence sorted in descending order.
        :rtype: :class:`OrderedQueryable`
        :raise TypeError: if 'key_selector' is not callable
        """
        if not callable(key_selector):
            raise TypeError("Value for 'key_selector' is not callable.")
        # OrderedQueryable takes (key_selector, descending) pairs; True
        # selects descending order (contrast order_by).
        return OrderedQueryable(self, [(key_selector, True)])
def reverse(self):
"""Reverses the order of the elements in the sequence.
:return: The elements of the sequence in reverse order.
:rtype: :class:`Queryable`
"""
def _reverse(iterator):
elements = []
for element in iterator:
elements.append(element)
while len(elements) > 0:
yield elements.pop()
return Queryable(_reverse(self))
def select(self, selector):
"""Returns the elements of the sequence after applying a transform function to each element.
:param selector: A transform function to apply to each element.
:type selector: function
:return: The elements of the sequence after applying the transform function.
:rtype: :class:`Queryable`
:raise TypeError: if 'selector' is not callable
"""
if not callable(selector):
raise TypeError("Value for 'selector' is not callable.")
if selector.__code__.co_argcount == 1:
return Queryable((selector(element) for element in self))
else:
return Queryable((selector(element, index) for index, element in enumerate(self)))
    def select_many(self, selector, result_transform=select_i(1)):
        """Projects each element to a sequence and flattens the resulting sequences.

        :param selector: A function to transform each element into a
            sequence; may take the element alone or (element, index).
        :type selector: function
        :param result_transform: (optional) A transform receiving
            ``(element, sub_element)`` for each flattened item. The default
            ``select_i(1)`` presumably selects the sub-element — TODO
            confirm against transforms.select_i.
        :type result_transform: function
        :return: A flattened sequence of transformed elements.
        :rtype: :class:`Queryable`
        :raise TypeError: if 'selector' is not callable
        :raise TypeError: if 'result_transform' is not callable
        """
        if not callable(selector):
            raise TypeError("Value for 'selector' is not callable.")
        if not callable(result_transform):
            raise TypeError("Value for 'result_transform' is not callable.")
        # Dispatch on the selector's arity (see select); NOTE(review):
        # __code__.co_argcount requires a plain Python function.
        if selector.__code__.co_argcount == 1:
            return Queryable(chain.from_iterable(([result_transform(
                element, sub_element) for sub_element in selector(element)] for element in self)))
        else:
            return Queryable(chain.from_iterable(([result_transform(
                element, sub_element) for sub_element in selector(
                    element, index)] for index, element in enumerate(self))))
def sequence_equal(self, other, equality_comparer=eq):
"""Determines whether two sequences are equal.
:param other: The sequence to compare elements to.
:type other: Iterable
:param equality_comparer: (optional) The equality comparison function to use.
:type equality_comparer: function
:return: True if the sequences are equal, false otherwise.
:rtype: bool
:raise TypeError: if 'other' is not an Iterable
:raise TypeError: if 'equality_comparer' is not callable
"""
if not isinstance(other, Iterable):
raise TypeError("Value for 'other' is not an Iterable.")
if not callable(equality_comparer):
raise TypeError("Value for 'equality_comparer' is not callable.")
class _IterableEnd:
pass
for element1, element2 in zip_longest(self, other, fillvalue=_IterableEnd()):
if isinstance(element1, _IterableEnd) or isinstance(element2, _IterableEnd):
return False
elif not equality_comparer(element1, element2):
return False
return True
def single(self, predicate=true):
"""Returns the only element of the sequence.
:param predicate: (optional) A function to test an element for a condition.
:type predicate: function
:return: The single element of the sequence satisfying the condition.
:raise TypeError: if 'predicate' is not callable
:raise ValueError: if the sequence is empty
:raise ValueError: if no element satisfies the condition
:raise ValueError: if more than one element satisfies the condition
"""
if not callable(predicate):
raise TypeError("Value for 'predicate' is not callable.")
class _NoItem:
pass
single_item = _NoItem()
count = 0
for element in self:
count += 1
if predicate(element):
if not isinstance(single_item, _NoItem):
raise ValueError(
"More than one element satisfies 'predicate'.")
single_item = element
if not isinstance(single_item, _NoItem):
return single_item
if count == 0:
raise ValueError("The source sequence is empty.")
raise ValueError("More than one element satisfies 'predicate'.")
def single_or_default(self, predicate=true, default_value=None):
"""Returns the only element of the sequence.
:param predicate: (optional) A function to test an element for a condition.
:type predicate: function
:param default_value: (optional) The default value to return if empty.
:return: The single element of the sequence satisfying the condition.
:raise TypeError: if 'predicate' is not callable
:raise ValueError: if more than one element satisfies the condition
"""
if not callable(predicate):
raise TypeError("Value for 'predicate' is not callable.")
class _NoItem:
pass
single_item = _NoItem()
count = 0
for element in self:
count += 1
if predicate(element):
if not isinstance(single_item, _NoItem):
raise ValueError(
"More than one element satisfies 'predicate'.")
single_item = element
if not isinstance(single_item, _NoItem):
return single_item
return default_value
def skip(self, num):
"""Skips a specified number of elements in the sequence and returns the remaining elements.
:param num: The number of elements to skip.
:type num: int
:return: A sequence containing the elements after position 'num'.
:rtype: :class:`Queryable`
:raise TypeError: if 'num' is not an int
"""
if not isinstance(num, int):
raise TypeError("Value for 'num' is not an integer.")
return Queryable(islice(self, num, None))
    def skip_while(self, predicate):
        """Skip elements of the sequence while the specified condition is true.

        :param predicate: The condition to check for.
        :type predicate: function
        :return: Elements of the sequence starting at the first item that
            fails the condition (all later items are kept regardless).
        :rtype: :class:`Queryable`
        :raise TypeError: if 'predicate' is not callable
        """
        if not callable(predicate):
            raise TypeError("Value for 'predicate' is not callable.")
        return Queryable(dropwhile(predicate, self))
def sum(self, transform=identity):
"""Computes the sum of the sequence by invoking a transform on each element.
:param transform: (optional) A transform function to apply to each element.
:type transform: function
:return: The sum of the elements of the sequence.
:rtype: int
:raise TypeError: if 'transform' is not callable
"""
if not callable(transform):
raise TypeError("Value for 'transform' is not callable.")
return sum((transform(element) for element in self))
def take(self, num):
"""Takes the specified number of elements from the start of the sequence.
:param num: The number of elements to take.
:type num: int
:return: The specified number of elements from the start of the sequence.
:rtype: :class:`Queryable`
:raise TypeError: if 'num' is not an int
"""
if not isinstance(num, int):
raise TypeError("Value for 'num' is not an integer.")
return Queryable(islice(self, num))
def take_while(self, predicate):
"""Takes elements from the start of the sequence while the specified condition holds.
:param predicate: The condition to check for.
:type predicate: function
:return: The elements from the start of the sequence that satisfy the condition.
:rtype: :class:`Queryable`
:raise TypeError: if 'predicate' is not callable
"""
if not callable(predicate):
raise TypeError("Value for 'predicate' is not callable.")
return Queryable(takewhile(predicate, self))
def to_dict(self, key_selector, value_selector=identity):
"""Creates a dictionary object according to the specified key selector function.
:param key_selector: A function to extract the key for the dictionary entry.
:type key_selector: function
:param value_selector: (optional) A function to extract the value for the dictionary entry.
:type value_selector: function
:return: A dictionary of the elements in the sequence.
:rtype: dict
:raise TypeError: if 'key_selector' is not callable
:raise TypeError: if 'value_selector' is not callable
"""
return self.to_dictionary(key_selector, value_selector)
def to_dictionary(self, key_selector, value_selector=identity):
"""Creates a dictionary object according to the specified key selector function.
:param key_selector: A function to extract the key for the dictionary entry.
:type key_selector: function
:param value_selector: (optional) A function to extract the value for the dictionary entry.
:type value_selector: function
:return: A dictionary of the elements in the sequence.
:rtype: dict
:raise TypeError: if 'key_selector' is not callable
:raise TypeError: if 'value_selector' is not callable
"""
if not callable(key_selector):
raise TypeError(
"Value for 'key_selector' is not callable.")
if not callable(value_selector):
raise TypeError(
"Value for 'value_selector' is not callable.")
return dict(((key_selector(element), value_selector(element)) for element in self))
def to_list(self):
"""Creates a list object from the sequence.
:return: A list of the elements in the sequence.
:rtype: list
"""
return list(self)
def union(self, other, key_selector=identity):
"""Returns the set union of two sequences.
:param other: The second sequence to produce the union with.
:type other: Iterable
:param key_selector: (optional) A function to extract a key for comparison.
:type key_selector: function
:return: The set union of the two sequences.
:rtype: :class:`Queryable`
:raise TypeError: if 'other' is not an Iterable
:raise TypeError: if 'key_selector' is not callable
"""
if not isinstance(other, Iterable):
raise TypeError("Value for 'other' is not an Iterable.")
if not callable(key_selector):
raise TypeError("Value for 'key_selector' is not callable.")
seen = {}
def _seen(value):
key = key_selector(value)
if key in seen:
return True
seen[key] = 1
return False
return Queryable((element for element in chain(self, other) if not _seen(element)))
def where(self, predicate):
"""Filters the sequence of values based on the specified condition.
:param predicate: A function to check an element for a condition.
:type condition: function
:return: The elements of the sequence that satisfy the condition.
:rtype: :class:`Queryable`
:raise TypeError: if 'predicate' is not callable
"""
if not callable(predicate):
raise TypeError("Value for 'predicate' is not callable.")
if predicate.__code__.co_argcount == 1:
return Queryable((element for element in self if predicate(element)))
else:
return Queryable(
(element for index, element in enumerate(self) if predicate(element, index)))
def zip(self, other, result_transform):
"""Applies a function to the corresponding elements of the two sequences.
:param other: The other input sequence.
:type other: Iterable
:param result_transform: A function that combines the corresponding elements.
:type result_transform: function
:return: A sequence of elements of the two sequences combined using 'result_transform'.
:rtype: :class:`Queryable`
:raise TypeError: if 'other' is not an Iterable
:raise TypeError: if 'result_transform' is not callable
"""
if not isinstance(other, Iterable):
raise TypeError("Value for 'other' is not an Iterable.")
if not callable(result_transform):
raise TypeError("Value for 'result_transform' is not callable.")
return Queryable((result_transform(*element) for element in zip(self, other)))
class OrderedQueryable(Queryable):
    """A Queryable that applies an ordered list of sort keys on iteration.

    Each entry of the key list is a (selector, descending) pair. The sorts
    are applied in list order; because :func:`sorted` is stable, a key later
    in the list takes precedence over earlier ones.
    """
    def __init__(self, iterator, keys):
        super(OrderedQueryable, self).__init__(iterator)
        self._keys = keys
    def __iter__(self):
        ordered = self.iterator
        for selector, descending in self._keys:
            ordered = sorted(ordered, key=selector, reverse=descending)
        for item in ordered:
            yield item
    def _prepend_key(self, key_selector, descending):
        # Subsequent orderings must be less significant than the existing
        # ones, so each new key is inserted at the front of the list where
        # __iter__ applies it first.
        if not callable(key_selector):
            raise TypeError("Value for 'key_selector' is not callable.")
        self._keys.insert(0, (key_selector, descending))
        return self
    def then_by(self, key_selector):
        """Perform a subsequent ascending ordering on an ordered sequence.
        :param key_selector: A function to extract a key to use for comparisons.
        :type key_selector: function
        :return: The elements of the sequence in ascending order according to the key
        :rtype: :class:`Queryable`
        :raise TypeError: if 'key_selector' is not callable
        """
        return self._prepend_key(key_selector, False)
    def then_by_descending(self, key_selector):
        """Perform a subsequent descending ordering on an ordered sequence.
        :param key_selector: A function to extract a key to use for comparisons.
        :type key_selector: function
        :return: The elements of the sequence in descending order according to the key
        :rtype: :class:`Queryable`
        :raise TypeError: if 'key_selector' is not callable
        """
        return self._prepend_key(key_selector, True)
|
{
"content_hash": "a4246e492b4be4729a70571d68d54c85",
"timestamp": "",
"source": "github",
"line_count": 933,
"max_line_length": 100,
"avg_line_length": 42.91211146838157,
"alnum_prop": 0.6265454454629468,
"repo_name": "dlshriver/pinq",
"id": "463cb93624e1b11e79505c38eff422e8fdfaea90",
"size": "40037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pinq/queryable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99468"
}
],
"symlink_target": ""
}
|
from dataclasses import dataclass, field
import torch
from omegaconf import II
from fairseq import metrics, utils
from fairseq.dataclass import ChoiceEnum
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationConfig, TranslationTask
from .logsumexp_moe import LogSumExpMoE
from .mean_pool_gating_network import MeanPoolGatingNetwork
METHOD_CHOICES = ChoiceEnum(["sMoElp", "sMoEup", "hMoElp", "hMoEup"])
@dataclass
class TranslationMoEConfig(TranslationConfig):
    """Configuration for the translation_moe task."""
    # MoE variant name: 's'/'h' prefix selects soft vs. hard expert
    # selection, 'lp'/'up' suffix selects learned vs. uniform prior
    # (decoded in TranslationMoETask.__init__).
    method: METHOD_CHOICES = field(
        default="hMoEup",
        metadata={"help": "MoE method"},
    )
    # Number of expert decoders; one <expert_i> token per expert is added
    # to both dictionaries.
    num_experts: int = field(
        default=3,
        metadata={"help": "number of experts"},
    )
    # When True and the model has no gating network of its own, a
    # MeanPoolGatingNetwork is attached in build_model().
    mean_pool_gating_network: bool = field(
        default=False,
        metadata={"help": "use a simple mean-pooling gating network"},
    )
    # 0 means "fall back to the model's own dropout value".
    mean_pool_gating_network_dropout: float = field(
        default=0,
        metadata={"help": "dropout for mean-pooling gating network"},
    )
    # 0 means "fall back to the model's encoder_embed_dim".
    mean_pool_gating_network_encoder_dim: int = field(
        default=0,
        metadata={"help": "encoder output dim for mean-pooling gating network"},
    )
    gen_expert: int = field(
        default=0,
        metadata={"help": "which expert to use for generation"},
    )
    # Interpolated from the optimization section of the global config
    # (II is an OmegaConf interpolation).
    sentence_avg: bool = II("optimization.sentence_avg")
@register_task("translation_moe", dataclass=TranslationMoEConfig)
class TranslationMoETask(TranslationTask):
    """
    Translation task for Mixture of Experts (MoE) models.
    See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"
    (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_.
    Args:
        src_dict (~fairseq.data.Dictionary): dictionary for the source language
        tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
    .. note::
        The translation task is compatible with :mod:`fairseq-train`,
        :mod:`fairseq-generate` and :mod:`fairseq-interactive`.
        The translation task provides the following additional command-line
        arguments:
    .. argparse::
        :ref: fairseq.tasks.translation_parser
        :prog:
    """
    cfg: TranslationMoEConfig
    def __init__(self, cfg: TranslationMoEConfig, src_dict, tgt_dict):
        # Decode the method string into two orthogonal flags.
        if cfg.method == "sMoElp":
            # soft MoE with learned prior
            self.uniform_prior = False
            self.hard_selection = False
        elif cfg.method == "sMoEup":
            # soft MoE with uniform prior
            self.uniform_prior = True
            self.hard_selection = False
        elif cfg.method == "hMoElp":
            # hard MoE with learned prior
            self.uniform_prior = False
            self.hard_selection = True
        elif cfg.method == "hMoEup":
            # hard MoE with uniform prior
            self.uniform_prior = True
            self.hard_selection = True
        else:
            # Previously an unknown method silently left uniform_prior /
            # hard_selection unset, surfacing later as an AttributeError;
            # fail fast with a clear message instead.
            raise ValueError("Unknown MoE method: {}".format(cfg.method))
        # add indicator tokens for each expert
        for i in range(cfg.num_experts):
            # add to both dictionaries in case we're sharing embeddings
            src_dict.add_symbol("<expert_{}>".format(i))
            tgt_dict.add_symbol("<expert_{}>".format(i))
        super().__init__(cfg, src_dict, tgt_dict)
    def build_model(self, cfg, from_checkpoint=False):
        """Build the model and, if needed, attach a mean-pooling gating network."""
        from fairseq import models
        model = models.build_model(cfg, self)
        if not self.uniform_prior and not hasattr(model, "gating_network"):
            if self.cfg.mean_pool_gating_network:
                # Resolve encoder_dim: explicit task option wins, then the
                # model's encoder_embed_dim.
                if self.cfg.mean_pool_gating_network_encoder_dim > 0:
                    encoder_dim = self.cfg.mean_pool_gating_network_encoder_dim
                elif getattr(cfg, "encoder_embed_dim", None):
                    # assume that encoder_embed_dim is the encoder's output dimension
                    encoder_dim = cfg.encoder_embed_dim
                else:
                    raise ValueError(
                        "Must specify --mean-pool-gating-network-encoder-dim"
                    )
                # Same fallback scheme for dropout.
                if self.cfg.mean_pool_gating_network_dropout > 0:
                    dropout = self.cfg.mean_pool_gating_network_dropout
                elif getattr(cfg, "dropout", None):
                    dropout = cfg.dropout
                else:
                    raise ValueError("Must specify task.mean_pool_gating_network_dropout")
                model.gating_network = MeanPoolGatingNetwork(
                    encoder_dim,
                    self.cfg.num_experts,
                    dropout,
                )
            else:
                raise ValueError(
                    "translation_moe task with learned prior requires the model to "
                    "have a gating network; try using --mean-pool-gating-network"
                )
        return model
    def expert_index(self, i):
        """Map expert number i to the dictionary index of its <expert_i> token."""
        return i + self.tgt_dict.index("<expert_0>")
    def _get_loss(self, sample, model, criterion):
        """Compute the MoE loss for one batch.

        Responsibilities p(z|x,y) are estimated with dropout disabled and no
        autograd; the loss is then computed with dropout enabled, either for
        the single winning expert (hard selection) or as a log-sum-exp over
        all experts weighted by the responsibilities (soft selection).
        """
        assert hasattr(
            criterion, "compute_loss"
        ), "translation_moe task requires the criterion to implement the compute_loss() method"
        k = self.cfg.num_experts
        bsz = sample["target"].size(0)
        def get_lprob_y(encoder_out, prev_output_tokens_k):
            # Log-probability of the target given one expert's prefix token.
            net_output = model.decoder(
                prev_output_tokens=prev_output_tokens_k,
                encoder_out=encoder_out,
            )
            loss, _ = criterion.compute_loss(model, net_output, sample, reduce=False)
            loss = loss.view(bsz, -1)
            return -loss.sum(dim=1, keepdim=True)  # -> B x 1
        def get_lprob_yz(winners=None):
            # Joint log-probability over experts; restricted to `winners`
            # when hard selection has already been made.
            encoder_out = model.encoder(
                src_tokens=sample["net_input"]["src_tokens"],
                src_lengths=sample["net_input"]["src_lengths"],
            )
            if winners is None:
                lprob_y = []
                for i in range(k):
                    prev_output_tokens_k = sample["net_input"][
                        "prev_output_tokens"
                    ].clone()
                    assert not prev_output_tokens_k.requires_grad
                    # Replace BOS with the expert's indicator token.
                    prev_output_tokens_k[:, 0] = self.expert_index(i)
                    lprob_y.append(get_lprob_y(encoder_out, prev_output_tokens_k))
                lprob_y = torch.cat(lprob_y, dim=1)  # -> B x K
            else:
                prev_output_tokens_k = sample["net_input"]["prev_output_tokens"].clone()
                prev_output_tokens_k[:, 0] = self.expert_index(winners)
                lprob_y = get_lprob_y(encoder_out, prev_output_tokens_k)  # -> B
            if self.uniform_prior:
                lprob_yz = lprob_y
            else:
                lprob_z = model.gating_network(encoder_out)  # B x K
                if winners is not None:
                    lprob_z = lprob_z.gather(dim=1, index=winners.unsqueeze(-1))
                lprob_yz = lprob_y + lprob_z.type_as(lprob_y)  # B x K
            return lprob_yz
        # compute responsibilities without dropout
        with utils.model_eval(model):  # disable dropout
            with torch.no_grad():  # disable autograd
                lprob_yz = get_lprob_yz()  # B x K
                prob_z_xy = torch.nn.functional.softmax(lprob_yz, dim=1)
                assert not prob_z_xy.requires_grad
        # compute loss with dropout
        if self.hard_selection:
            winners = prob_z_xy.max(dim=1)[1]
            loss = -get_lprob_yz(winners)
        else:
            lprob_yz = get_lprob_yz()  # B x K
            loss = -LogSumExpMoE.apply(lprob_yz, prob_z_xy, 1)
        loss = loss.sum()
        sample_size = (
            sample["target"].size(0) if self.cfg.sentence_avg else sample["ntokens"]
        )
        logging_output = {
            "loss": utils.item(loss.data),
            "ntokens": sample["ntokens"],
            "nsentences": bsz,
            "sample_size": sample_size,
            "posterior": prob_z_xy.float().sum(dim=0).cpu(),
        }
        return loss, sample_size, logging_output
    def train_step(
        self, sample, model, criterion, optimizer, update_num, ignore_grad=False
    ):
        """One optimization step; zeroes the loss when ignore_grad is set."""
        model.train()
        loss, sample_size, logging_output = self._get_loss(sample, model, criterion)
        if ignore_grad:
            loss *= 0
        optimizer.backward(loss)
        return loss, sample_size, logging_output
    def valid_step(self, sample, model, criterion):
        """Evaluate one batch with gradients disabled."""
        model.eval()
        with torch.no_grad():
            loss, sample_size, logging_output = self._get_loss(sample, model, criterion)
        return loss, sample_size, logging_output
    def inference_step(
        self,
        generator,
        models,
        sample,
        prefix_tokens=None,
        expert=None,
        constraints=None,
    ):
        """Generate with the given expert (defaults to cfg.gen_expert)."""
        # Bug fix: `expert or self.cfg.gen_expert` ignored an explicit
        # expert=0 because 0 is falsy; only fall back when no expert was
        # requested at all.
        if expert is None:
            expert = self.cfg.gen_expert
        with torch.no_grad():
            return generator.generate(
                models,
                sample,
                prefix_tokens=prefix_tokens,
                constraints=constraints,
                bos_token=self.expert_index(expert),
            )
    def reduce_metrics(self, logging_outputs, criterion):
        """Aggregate MoE metrics, adding the summed expert posterior."""
        super().reduce_metrics(logging_outputs, criterion)
        metrics.log_scalar(
            "posterior",
            sum(log["posterior"] for log in logging_outputs if "posterior" in log),
        )
|
{
"content_hash": "758cbcfd14ad4b2ae83dd2b0cb74a9fb",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 95,
"avg_line_length": 36.873517786561266,
"alnum_prop": 0.5694072247829349,
"repo_name": "pytorch/fairseq",
"id": "1ee9d1b727006841382bd6389e97e4cb2771a112",
"size": "9507",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/translation_moe/translation_moe_src/translation_moe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "21106"
},
{
"name": "Cuda",
"bytes": "38166"
},
{
"name": "Cython",
"bytes": "13294"
},
{
"name": "Lua",
"bytes": "4210"
},
{
"name": "Python",
"bytes": "3699357"
},
{
"name": "Shell",
"bytes": "2182"
}
],
"symlink_target": ""
}
|
import os

# Report whether the current process may read / write / execute the file.
# os.access() returns False when the path does not exist.
# Fixed: the original used Python-2-only `print` statements (and stray
# semicolons); the function-call form below works on both Python 2 and 3.
print(os.access('myOS_file-path.txt', os.R_OK))
print(os.access('myOS_file-path.txt', os.W_OK))
print(os.access('myOS_file-path.txt', os.X_OK))
|
{
"content_hash": "91f3b9328e1db33a853956602d65c9e5",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 47,
"avg_line_length": 19.875,
"alnum_prop": 0.6729559748427673,
"repo_name": "xiaoyong0312/Python-dev",
"id": "e1e405ab358d293e823577c56d66356ee974cdac",
"size": "297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python2.x/Python2.x-0-basic/019_OS_file-path.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34671"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from rest_framework.test import APITestCase
from rest_framework import status
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from pdc.apps.bindings import models as binding_models
from pdc.apps.common.test_utils import TestCaseWithChangeSetMixin
from pdc.apps.component import models as component_models
from pdc.apps.release import models as release_models
from . import models
class RPMSortKeyTestCase(TestCase):
    """Checks that RPM.sort_key orders packages by epoch, version, release."""
    def _assert_sorts_before(self, lower, higher, msg=None):
        # Build two unsaved RPM instances from (epoch, version, release)
        # triples and compare their sort keys.
        first = models.RPM(epoch=lower[0], version=lower[1], release=lower[2])
        second = models.RPM(epoch=higher[0], version=higher[1], release=higher[2])
        self.assertTrue(first.sort_key < second.sort_key, msg=msg)
    def test_sort_key_precedence(self):
        for lower, higher in [((0, "10", "10"), (1, "1", "1")),
                              ((0, "1", "10"), (0, "10", "1")),
                              ((0, "1", "1"), (0, "1", "10"))]:
            self._assert_sorts_before(lower, higher)
    def test_complex_version_sort(self):
        for lower, higher in [((0, "1.0.1", "10"), (1, "1.0.2", "1")),
                              ((0, "1.11.1", "10"), (0, "1.100.1", "1")),
                              ((0, "1", "1.0.1"), (0, "1", "1.1")),
                              ((0, "1", "11"), (0, "1", "101"))]:
            self._assert_sorts_before(lower, higher, msg="%s < %s" % (lower, higher))
    def test_handle_non_numbers(self):
        for lower, higher in [((0, "svn24104.0.92", "1"), (1, "svn24104.0.93", "1")),
                              ((0, "3.2.5d", "1"), (0, "3.2.5e", "1")),
                              ((0, "3.2.5d", "1"), (0, "3.2.6a", "1")),
                              ((0, "2.1a15", "1"), (0, "2.1a20", "1")),
                              ((0, "2.1a15", "1"), (0, "2.2", "1")),
                              ((0, "2.1a15", "1"), (0, "2.1", "1"))]:
            self._assert_sorts_before(lower, higher, msg="%s < %s" % (lower, higher))
class RPMSaveValidationTestCase(TestCase):
    """Checks validation tying a non-empty srpm_nevra to non-src arches."""
    @staticmethod
    def _create_kernel_rpm(**overrides):
        # Shared kernel-3.19.3-100 fixture; callers supply arch, filename
        # and (optionally) srpm_nevra.
        attrs = dict(name='kernel', epoch=0, version='3.19.3', release='100',
                     srpm_name='kernel')
        attrs.update(overrides)
        return models.RPM.objects.create(**attrs)
    def test_empty_srpm_nevra_with_arch_is_src(self):
        rpm = self._create_kernel_rpm(arch='src',
                                      filename='kernel-3.19.3-100.src.rpm')
        self.assertIsNotNone(rpm)
        self.assertEqual(1, models.RPM.objects.count())
    def test_non_empty_srpm_nevra_with_arch_is_not_src(self):
        rpm = self._create_kernel_rpm(arch='x86_64',
                                      filename='kernel-3.19.3-100.x86_64.rpm',
                                      srpm_nevra='kernel-0:3.19.3-100.x86_64')
        self.assertIsNotNone(rpm)
        self.assertEqual(1, models.RPM.objects.count())
    def test_non_empty_srpm_nevra_with_arch_is_src(self):
        with self.assertRaises(ValidationError):
            self._create_kernel_rpm(arch='src',
                                    filename='kernel-3.19.3-100.src.rpm',
                                    srpm_nevra='kernel-0:3.19.3-100.src')
        self.assertEqual(0, models.RPM.objects.count())
    def test_empty_srpm_nevra_with_arch_is_not_src(self):
        with self.assertRaises(ValidationError):
            self._create_kernel_rpm(arch='x86_64',
                                    filename='kernel-3.19.3-100.x86_64.rpm')
        self.assertEqual(0, models.RPM.objects.count())
class RPMDepsFilterAPITestCase(APITestCase):
    """Filtering RPMs by dependency fields with version constraints.

    Every dependency type (requires/suggests/obsoletes/recommends/provides/
    conflicts) is exercised against every comparison operator; the expected
    matches are derived from the fixture table in setUpTestData.
    """
    @classmethod
    def setUpTestData(cls):
        """
        15 packages are created. They all have name test-X, where X is a
        number. Each packages has a dependency of each type with the same
        constraint. They are summarized in the table below.
        0 (=1.0)    1 (<1.0)    2 (>1.0)    3 (<=1.0)   4 (>=1.0)
        5 (=2.0)    6 (<2.0)    7 (>2.0)    8 (<=2.0)   9 (>=2.0)
        10 (=3.0)   11 (<3.0)   12 (>3.0)   13 (<=3.0)  14 (>=3.0)
        """
        counter = 0
        for version in ['1.0', '2.0', '3.0']:
            for op in '= < > <= >='.split():
                name = 'test-{counter}'.format(counter=counter)
                counter += 1
                rpm = models.RPM.objects.create(name=name, epoch=0, version='1.0',
                                                release='1', arch='x86_64', srpm_name='test-pkg',
                                                srpm_nevra='test-pkg-1.0.1.x86_64',
                                                filename='dummy')
                # `dep_type` instead of `type` to avoid shadowing the builtin.
                for dep_type in [t[0] for t in models.Dependency.DEPENDENCY_TYPE_CHOICES]:
                    rpm.dependency_set.create(name='pkg', version=version,
                                              type=dep_type, comparison=op)
    def _get_filtered(self, field, value):
        """GET rpms-list filtered on one dependency field; assert HTTP 200."""
        response = self.client.get(reverse('rpms-list'), {field: value})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        return response
    def _check_count(self, field, value, expected):
        """Filter and assert the number of matched packages."""
        self.assertEqual(self._get_filtered(field, value).data.get('count'), expected)
    def _check_names(self, field, value, indices):
        """Filter and assert that exactly the test-<i> packages match."""
        response = self._get_filtered(field, value)
        # assertItemsEqual is the Python 2 spelling of assertCountEqual.
        self.assertItemsEqual([pkg['name'] for pkg in response.data['results']],
                              ['test-{}'.format(i) for i in indices])
    #
    # No constraint tests
    #
    def test_filter_without_version_requires(self):
        self._check_count('requires', 'pkg', 15)
    def test_filter_without_version_suggests(self):
        self._check_count('suggests', 'pkg', 15)
    def test_filter_without_version_obsoletes(self):
        self._check_count('obsoletes', 'pkg', 15)
    def test_filter_without_version_recommends(self):
        self._check_count('recommends', 'pkg', 15)
    def test_filter_without_version_provides(self):
        self._check_count('provides', 'pkg', 15)
    def test_filter_without_version_conflicts(self):
        self._check_count('conflicts', 'pkg', 15)
    #
    # Equality constraint tests
    #
    def test_filter_with_version_equality_requires(self):
        self._check_names('requires', 'pkg=2.0', [2, 4, 5, 8, 9, 11, 13])
    def test_filter_with_version_equality_suggests(self):
        self._check_names('suggests', 'pkg=2.0', [2, 4, 5, 8, 9, 11, 13])
    def test_filter_with_version_equality_obsoletes(self):
        self._check_names('obsoletes', 'pkg=2.0', [2, 4, 5, 8, 9, 11, 13])
    def test_filter_with_version_equality_recommends(self):
        self._check_names('recommends', 'pkg=2.0', [2, 4, 5, 8, 9, 11, 13])
    def test_filter_with_version_equality_provides(self):
        self._check_names('provides', 'pkg=2.0', [2, 4, 5, 8, 9, 11, 13])
    def test_filter_with_version_equality_conflicts(self):
        self._check_names('conflicts', 'pkg=2.0', [2, 4, 5, 8, 9, 11, 13])
    #
    # Greater than constraint tests
    #
    def test_filter_with_greater_version_requires(self):
        self._check_names('requires', 'pkg>2.0', [2, 4, 7, 9, 10, 11, 12, 13, 14])
    def test_filter_with_greater_version_suggests(self):
        self._check_names('suggests', 'pkg>2.0', [2, 4, 7, 9, 10, 11, 12, 13, 14])
    def test_filter_with_greater_version_obsoletes(self):
        self._check_names('obsoletes', 'pkg>2.0', [2, 4, 7, 9, 10, 11, 12, 13, 14])
    def test_filter_with_greater_version_recommends(self):
        self._check_names('recommends', 'pkg>2.0', [2, 4, 7, 9, 10, 11, 12, 13, 14])
    def test_filter_with_greater_version_provides(self):
        self._check_names('provides', 'pkg>2.0', [2, 4, 7, 9, 10, 11, 12, 13, 14])
    def test_filter_with_greater_version_conflicts(self):
        self._check_names('conflicts', 'pkg>2.0', [2, 4, 7, 9, 10, 11, 12, 13, 14])
    #
    # Lesser than constraint tests
    #
    def test_filter_with_lesser_version_requires(self):
        self._check_names('requires', 'pkg<2.0', [0, 1, 2, 3, 4, 6, 8, 11, 13])
    def test_filter_with_lesser_version_suggests(self):
        self._check_names('suggests', 'pkg<2.0', [0, 1, 2, 3, 4, 6, 8, 11, 13])
    def test_filter_with_lesser_version_obsoletes(self):
        self._check_names('obsoletes', 'pkg<2.0', [0, 1, 2, 3, 4, 6, 8, 11, 13])
    def test_filter_with_lesser_version_recommends(self):
        self._check_names('recommends', 'pkg<2.0', [0, 1, 2, 3, 4, 6, 8, 11, 13])
    def test_filter_with_lesser_version_provides(self):
        self._check_names('provides', 'pkg<2.0', [0, 1, 2, 3, 4, 6, 8, 11, 13])
    def test_filter_with_lesser_version_conflicts(self):
        self._check_names('conflicts', 'pkg<2.0', [0, 1, 2, 3, 4, 6, 8, 11, 13])
    #
    # Greater than or equal constraint tests
    #
    def test_filter_with_greater_or_equal_version_requires(self):
        self._check_names('requires', 'pkg>=2.0', [2, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14])
    def test_filter_with_greater_or_equal_version_suggests(self):
        self._check_names('suggests', 'pkg>=2.0', [2, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14])
    def test_filter_with_greater_or_equal_version_recommends(self):
        self._check_names('recommends', 'pkg>=2.0', [2, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14])
    def test_filter_with_greater_or_equal_version_obsoletes(self):
        self._check_names('obsoletes', 'pkg>=2.0', [2, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14])
    def test_filter_with_greater_or_equal_version_provides(self):
        self._check_names('provides', 'pkg>=2.0', [2, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14])
    def test_filter_with_greater_or_equal_version_conflicts(self):
        self._check_names('conflicts', 'pkg>=2.0', [2, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14])
    #
    # Lesser than or equal constraint tests
    #
    def test_filter_with_lesser_or_equal_version_requires(self):
        self._check_names('requires', 'pkg<=2.0', [0, 1, 2, 3, 4, 5, 6, 8, 9, 11, 13])
    def test_filter_with_lesser_or_equal_version_suggests(self):
        self._check_names('suggests', 'pkg<=2.0', [0, 1, 2, 3, 4, 5, 6, 8, 9, 11, 13])
    def test_filter_with_lesser_or_equal_version_recommends(self):
        self._check_names('recommends', 'pkg<=2.0', [0, 1, 2, 3, 4, 5, 6, 8, 9, 11, 13])
    def test_filter_with_lesser_or_equal_version_provides(self):
        self._check_names('provides', 'pkg<=2.0', [0, 1, 2, 3, 4, 5, 6, 8, 9, 11, 13])
    def test_filter_with_lesser_or_equal_version_conflicts(self):
        self._check_names('conflicts', 'pkg<=2.0', [0, 1, 2, 3, 4, 5, 6, 8, 9, 11, 13])
    def test_filter_with_lesser_or_equal_version_obsoletes(self):
        self._check_names('obsoletes', 'pkg<=2.0', [0, 1, 2, 3, 4, 5, 6, 8, 9, 11, 13])
class RPMDepsFilterWithReleaseTestCase(APITestCase):
    """Filtering by a 'requires' dependency whose version carries a release.

    The single fixture package requires exactly pkg = 3.0-1.fc22.
    """
    @classmethod
    def setUpTestData(cls):
        cls.rpm = models.RPM.objects.create(name='test-pkg', epoch=0, version='1.0',
                                            release='1', arch='x86_64', srpm_name='test-pkg',
                                            srpm_nevra='test-pkg-1.0.1.x86_64',
                                            filename='dummy')
        cls.rpm.dependency_set.create(name='pkg', version='3.0-1.fc22',
                                      type=models.Dependency.REQUIRES, comparison='=')
    def _check_count(self, query, expected):
        """Filter the rpms-list 'requires' field and assert the match count."""
        response = self.client.get(reverse('rpms-list'), {'requires': query})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), expected)
    def test_filter_with_same_release_equal(self):
        self._check_count('pkg=3.0-1.fc22', 1)
    def test_filter_with_same_release_lesser(self):
        self._check_count('pkg<3.0-1.fc22', 0)
    def test_filter_with_same_release_greater(self):
        self._check_count('pkg>3.0-1.fc22', 0)
    def test_filter_with_same_release_lesser_equal(self):
        self._check_count('pkg<=3.0-1.fc22', 1)
    def test_filter_with_same_release_greater_equal(self):
        self._check_count('pkg>=3.0-1.fc22', 1)
    def test_filter_with_different_release_equal(self):
        self._check_count('pkg=3.0-2.fc22', 0)
    def test_filter_with_different_release_lesser(self):
        self._check_count('pkg<3.0-2.fc22', 1)
    def test_filter_with_different_release_greater(self):
        self._check_count('pkg>3.0-2.fc22', 0)
    def test_filter_with_different_release_lesser_equal(self):
        self._check_count('pkg<=3.0-2.fc22', 1)
    def test_filter_with_different_release_greater_equal(self):
        self._check_count('pkg>=3.0-2.fc22', 0)
class RPMDepsFilterWithEpochTestCase(APITestCase):
    """Filtering RPMs by a dependency expression that carries an epoch.

    A single RPM is stored with the one dependency ``pkg = 3.0`` (no
    explicit epoch, i.e. epoch 0).  Each test issues a ``requires`` filter
    with an ``epoch:version`` expression and checks whether the RPM matches.
    """

    @classmethod
    def setUpTestData(cls):
        cls.rpm = models.RPM.objects.create(name='test-pkg', epoch=0, version='1.0',
                                            release='1', arch='x86_64', srpm_name='test-pkg',
                                            srpm_nevra='test-pkg-1.0.1.x86_64',
                                            filename='dummy')
        cls.rpm.dependency_set.create(name='pkg', version='3.0',
                                      type=models.Dependency.REQUIRES, comparison='=')

    def _requires_count(self, expression):
        """Return how many RPMs match the given ``requires`` expression."""
        response = self.client.get(reverse('rpms-list'), {'requires': expression})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        return response.data.get('count')

    def test_filter_with_same_epoch_equal(self):
        self.assertEqual(self._requires_count('pkg=0:3.0'), 1)

    def test_filter_with_same_epoch_lesser(self):
        self.assertEqual(self._requires_count('pkg<0:4.0'), 1)

    def test_filter_with_same_epoch_greater(self):
        self.assertEqual(self._requires_count('pkg>0:2.0'), 1)

    def test_filter_with_same_epoch_lesser_equal(self):
        self.assertEqual(self._requires_count('pkg<=0:3.0'), 1)

    def test_filter_with_same_epoch_greater_equal(self):
        self.assertEqual(self._requires_count('pkg>=0:3.0'), 1)

    def test_filter_with_different_epoch_equal(self):
        self.assertEqual(self._requires_count('pkg=1:3.0'), 0)

    def test_filter_with_different_epoch_lesser(self):
        self.assertEqual(self._requires_count('pkg<1:3.0'), 1)

    def test_filter_with_different_epoch_greater(self):
        self.assertEqual(self._requires_count('pkg>1:2.0'), 0)

    def test_filter_with_different_epoch_lesser_equal(self):
        self.assertEqual(self._requires_count('pkg<=1:3.0'), 1)

    def test_filter_with_different_epoch_greater_equal(self):
        self.assertEqual(self._requires_count('pkg>=1:3.0'), 0)
class RPMDepsFilterRangeAPITestCase(APITestCase):
    """Filtering by a version that falls inside or outside a dependency range.

    For every dependency type the stored RPM declares the constraint pair
    ``pkg >= 1.0`` and ``pkg < 3.0``.  Querying with a version inside the
    range (2.0) must match the RPM; a version outside it (4.0) must not.
    """

    @classmethod
    def setUpTestData(cls):
        rpm = models.RPM.objects.create(name='test-pkg', epoch=0, version='1.0',
                                        release='1', arch='x86_64', srpm_name='test-pkg',
                                        srpm_nevra='test-pkg-1.0.1.x86_64',
                                        filename='dummy')
        # Create the same >=1.0 / <3.0 range for every dependency type.
        for dep_type in [choice[0] for choice in models.Dependency.DEPENDENCY_TYPE_CHOICES]:
            rpm.dependency_set.create(name='pkg', version='1.0',
                                      type=dep_type, comparison='>=')
            rpm.dependency_set.create(name='pkg', version='3.0',
                                      type=dep_type, comparison='<')

    def _filter_count(self, filter_name, expression):
        """Return the number of RPMs matched by ``filter_name=expression``."""
        response = self.client.get(reverse('rpms-list'), {filter_name: expression})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        return response.data['count']

    def test_filter_with_range_match_requires(self):
        self.assertEqual(self._filter_count('requires', 'pkg=2.0'), 1)

    def test_filter_with_range_no_match_requires(self):
        self.assertEqual(self._filter_count('requires', 'pkg=4.0'), 0)

    def test_filter_with_range_match_obsoletes(self):
        self.assertEqual(self._filter_count('obsoletes', 'pkg=2.0'), 1)

    def test_filter_with_range_no_match_obsoletes(self):
        self.assertEqual(self._filter_count('obsoletes', 'pkg=4.0'), 0)

    def test_filter_with_range_match_provides(self):
        self.assertEqual(self._filter_count('provides', 'pkg=2.0'), 1)

    def test_filter_with_range_no_match_provides(self):
        self.assertEqual(self._filter_count('provides', 'pkg=4.0'), 0)

    def test_filter_with_range_match_suggests(self):
        self.assertEqual(self._filter_count('suggests', 'pkg=2.0'), 1)

    def test_filter_with_range_no_match_suggests(self):
        self.assertEqual(self._filter_count('suggests', 'pkg=4.0'), 0)

    def test_filter_with_range_match_recommends(self):
        self.assertEqual(self._filter_count('recommends', 'pkg=2.0'), 1)

    def test_filter_with_range_no_match_recommends(self):
        self.assertEqual(self._filter_count('recommends', 'pkg=4.0'), 0)

    def test_filter_with_range_match_conflicts(self):
        self.assertEqual(self._filter_count('conflicts', 'pkg=2.0'), 1)

    def test_filter_with_range_no_match_conflicts(self):
        self.assertEqual(self._filter_count('conflicts', 'pkg=4.0'), 0)
class RPMDepsAPITestCase(TestCaseWithChangeSetMixin, APITestCase):
    """Create and update RPMs together with their ``dependencies`` mapping.

    Covers serialization of dependencies on POST, overwriting/removing them
    via PUT and PATCH, validation of malformed dependency payloads, and the
    ``has_no_deps`` filter.

    Fix: ``test_put_to_remove`` previously issued a PATCH request, which made
    it a duplicate of ``test_patch_to_remove`` and left the PUT removal path
    untested; it now uses PUT as its name states.
    """
    fixtures = [
        'pdc/apps/common/fixtures/test/sigkey.json',
        'pdc/apps/release/fixtures/tests/release.json',
        'pdc/apps/package/fixtures/test/rpm.json',
        'pdc/apps/compose/fixtures/tests/compose.json',
        'pdc/apps/compose/fixtures/tests/compose_composerpm.json',
        'pdc/apps/compose/fixtures/tests/variant_arch.json',
        'pdc/apps/compose/fixtures/tests/variant.json'
    ]

    def setUp(self):
        # The compared dicts are large; show full diffs on failure.
        self.maxDiff = None

    def _create_deps(self):
        """Attach two dependencies to RPM 1 directly in the database."""
        models.Dependency.objects.create(type=models.Dependency.SUGGESTS,
                                         name='suggested', rpm_id=1)
        models.Dependency.objects.create(type=models.Dependency.CONFLICTS,
                                         name='conflicting', rpm_id=1)

    def test_create_rpm_with_deps(self):
        """POST with a full dependency mapping stores all dependencies."""
        data = {'name': 'fake_bash', 'version': '1.2.3', 'epoch': 0,
                'release': '4.b1', 'arch': 'x86_64', 'srpm_name': 'bash',
                'filename': 'bash-1.2.3-4.b1.x86_64.rpm',
                'linked_releases': [], 'srpm_nevra': 'fake_bash-0:1.2.3-4.b1.src',
                'dependencies': {'requires': ['required-package'],
                                 'obsoletes': ['obsolete-package'],
                                 'suggests': ['suggested-package >= 1.0.0'],
                                 'recommends': ['recommended = 0.1.0'],
                                 'provides': ['/bin/bash', '/usr/bin/whatever'],
                                 'conflicts': ['nothing']}}
        response = self.client.post(reverse('rpms-list'), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        response.data.pop('id')
        data.update({'linked_composes': []})
        self.assertDictEqual(dict(response.data), data)
        self.assertEqual(7, models.Dependency.objects.count())
        # Versioned dependencies must have operator and version split out.
        with_version = models.Dependency.objects.get(name='recommended')
        self.assertEqual(with_version.comparison, '=')
        self.assertEqual(with_version.version, '0.1.0')
        self.assertNumChanges([1])

    def test_create_rpm_with_duplicate_deps(self):
        """Duplicate entries in the dependency lists are accepted."""
        data = {'name': 'fake_bash', 'version': '1.2.3', 'epoch': 0,
                'release': '4.b1', 'arch': 'x86_64', 'srpm_name': 'bash',
                'filename': 'bash-1.2.3-4.b1.x86_64.rpm',
                'linked_releases': [], 'srpm_nevra': 'fake_bash-0:1.2.3-4.b1.src',
                'dependencies': {'requires': ['required-package', 'required-package'],
                                 'obsoletes': ['obsolete-package'],
                                 'suggests': ['suggested-package >= 1.0.0', 'suggested-package >= 1.0.0'],
                                 'recommends': ['recommended = 0.1.0'],
                                 'provides': ['/bin/bash', '/usr/bin/whatever'],
                                 'conflicts': ['nothing']}}
        response = self.client.post(reverse('rpms-list'), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertNumChanges([1])

    def test_put_to_rpm_with_none(self):
        """PUT with an unversioned requirement stores it without comparison/version."""
        data = {
            'name': 'bash',
            'epoch': 0,
            'version': '1.2.3',
            'release': '4.b1',
            'arch': 'x86_64',
            'srpm_name': 'bash',
            'srpm_nevra': 'bash-0:1.2.3-4.b1.src',
            'filename': 'bash-1.2.3-4.b1.x86_64.rpm',
            'dependencies': {
                'requires': ['required-package']
            }
        }
        response = self.client.put(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(1, models.Dependency.objects.count())
        dep = models.Dependency.objects.first()
        self.assertIsNone(dep.comparison)
        self.assertIsNone(dep.version)
        self.assertEqual(dep.rpm.pk, 1)
        self.assertNumChanges([1])

    def test_put_to_overwrite_existing(self):
        """PUT replaces any pre-existing dependencies with the new mapping."""
        models.Dependency.objects.create(type=models.Dependency.SUGGESTS,
                                         name='suggested', rpm_id=1)
        models.Dependency.objects.create(type=models.Dependency.CONFLICTS,
                                         name='conflicting', rpm_id=1)
        data = {'name': 'bash',
                'epoch': 0,
                'version': '1.2.3',
                'release': '4.b1',
                'arch': 'x86_64',
                'srpm_name': 'bash',
                'srpm_nevra': 'bash-0:1.2.3-4.b1.src',
                'filename': 'bash-1.2.3-4.b1.x86_64.rpm',
                'dependencies': {'requires': ['required-package']}}
        response = self.client.put(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(1, models.Dependency.objects.count())
        dep = models.Dependency.objects.first()
        self.assertIsNone(dep.comparison)
        self.assertIsNone(dep.version)
        self.assertEqual(dep.rpm.pk, 1)
        self.assertEqual(dep.name, 'required-package')
        self.assertEqual(dep.type, models.Dependency.REQUIRES)
        self.assertNumChanges([1])

    def test_patch_to_rpm_with_none(self):
        """PATCH with an unversioned requirement stores it without comparison/version."""
        data = {'dependencies': {'requires': ['required-package']}}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(1, models.Dependency.objects.count())
        dep = models.Dependency.objects.first()
        self.assertIsNone(dep.comparison)
        self.assertIsNone(dep.version)
        self.assertEqual(dep.rpm.pk, 1)
        self.assertEqual(dep.name, 'required-package')
        self.assertEqual(dep.type, models.Dependency.REQUIRES)
        self.assertNumChanges([1])

    def test_patch_to_overwrite_existing(self):
        """PATCH replaces any pre-existing dependencies with the new mapping."""
        self._create_deps()
        data = {'dependencies': {'requires': ['required-package']}}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(1, models.Dependency.objects.count())
        dep = models.Dependency.objects.first()
        self.assertIsNone(dep.comparison)
        self.assertIsNone(dep.version)
        self.assertEqual(dep.rpm.pk, 1)
        self.assertEqual(dep.name, 'required-package')
        self.assertEqual(dep.type, models.Dependency.REQUIRES)
        self.assertNumChanges([1])

    def test_put_to_remove(self):
        """PUT with an empty ``dependencies`` mapping deletes existing deps."""
        self._create_deps()
        data = {'name': 'bash',
                'epoch': 0,
                'version': '1.2.3',
                'release': '4.b1',
                'arch': 'x86_64',
                'srpm_name': 'bash',
                'srpm_nevra': 'bash-0:1.2.3-4.b1.src',
                'filename': 'bash-1.2.3-4.b1.x86_64.rpm',
                'dependencies': {}}
        # Use PUT here (the test name says so); PATCH removal is covered by
        # test_patch_to_remove below.
        response = self.client.put(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])
        self.assertEqual(0, models.Dependency.objects.count())

    def test_patch_to_remove(self):
        """PATCH with an empty ``dependencies`` mapping deletes existing deps."""
        self._create_deps()
        data = {'dependencies': {}}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])
        self.assertEqual(0, models.Dependency.objects.count())

    def test_bad_dependency_format(self):
        """A dependency string that is not NAME [OP VERSION] is rejected."""
        data = {'dependencies': {'recommends': ['foo bar']}}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])

    def test_bad_dependency_type(self):
        """An unknown dependency type key is rejected."""
        data = {'dependencies': {'wants': ['icecream']}}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])

    def test_deps_are_not_list(self):
        """Each dependency type must map to a list, not a scalar."""
        data = {'dependencies': {'suggests': 'pony'}}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])

    def test_deps_with_too_many_lists(self):
        """Nested lists inside a dependency type are rejected."""
        data = {'dependencies': {'suggests': [['pony']]}}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])

    def test_patch_without_deps_does_not_delete_existing(self):
        """PATCH that omits ``dependencies`` leaves existing deps untouched."""
        self._create_deps()
        data = {'name': 'new_name'}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])
        self.assertEqual(2, models.Dependency.objects.count())

    def test_put_without_deps_deletes_existing(self):
        """PUT that omits ``dependencies`` resets them to empty."""
        self._create_deps()
        data = {'name': 'new-name',
                'epoch': 0,
                'version': '1.2.3',
                'release': '4.b1',
                'arch': 'x86_64',
                'srpm_name': 'bash',
                'srpm_nevra': 'bash-0:1.2.3-4.b1.src',
                'filename': 'bash-1.2.3-4.b1.x86_64.rpm'}
        response = self.client.put(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])
        self.assertEqual(0, models.Dependency.objects.count())

    def test_has_no_deps_filter(self):
        """``has_no_deps`` splits RPMs by whether they have any dependency."""
        self._create_deps()
        response = self.client.get(reverse('rpms-list'), {'has_no_deps': 'true'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
        response = self.client.get(reverse('rpms-list'), {'has_no_deps': 'false'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
class RPMAPIRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """CRUD and query-parameter tests for the /rpms/ REST endpoint.

    Uses fixture data with three RPMs (ids 1-3) linked to compose-1; the
    change-set assertions come from TestCaseWithChangeSetMixin.
    """
    fixtures = [
        'pdc/apps/common/fixtures/test/sigkey.json',
        'pdc/apps/release/fixtures/tests/release.json',
        'pdc/apps/package/fixtures/test/rpm.json',
        'pdc/apps/compose/fixtures/tests/compose.json',
        'pdc/apps/compose/fixtures/tests/compose_composerpm.json',
        'pdc/apps/compose/fixtures/tests/variant_arch.json',
        'pdc/apps/compose/fixtures/tests/variant.json'
    ]
    def setUp(self):
        # The serialized form every RPM without dependencies is expected
        # to carry; reused in several expected-response dicts below.
        self.empty_deps = {'conflicts': [], 'obsoletes': [], 'provides': [],
                           'recommends': [], 'requires': [], 'suggests': []}
    def test_query_all_rpms(self):
        """Listing with no filters returns all three fixture RPMs."""
        url = reverse('rpms-list')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 3)
    def test_query_with_params(self):
        """Each individual filter parameter narrows the result set."""
        url = reverse('rpms-list')
        response = self.client.get(url + '?name=bash', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        response = self.client.get(url + '?epoch=0', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 3)
        response = self.client.get(url + '?version=1.2.3', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 3)
        response = self.client.get(url + '?release=4.b1', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        response = self.client.get(url + '?arch=x86_64', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
        response = self.client.get(url + '?srpm_name=bash', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 3)
        response = self.client.get(url + '?srpm_nevra=bash-0:1.2.3-4.b1.src', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        # The literal string "null" selects RPMs whose srpm_nevra is unset.
        response = self.client.get(url + '?srpm_nevra=null', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        response = self.client.get(url + '?compose=compose-1', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
        results = response.data.get('results', [])
        ids = []
        for result in results:
            ids.append(result['id'])
        self.assertTrue(1 in ids)
    def test_query_with_multi_value_against_same_key(self):
        """Repeating a filter key ORs its values together."""
        url = reverse('rpms-list')
        response = self.client.get(url + '?name=bash&name=bash-doc', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
        # Only one of the two srpm_nevra values exists in the fixtures.
        response = self.client.get(url + '?srpm_nevra=bash-0:1.2.3-4.b1.src&srpm_nevra=bash-0:1.2.3-4.b2.src',
                                   format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_query_with_different_key(self):
        """Different filter keys are ANDed together."""
        url = reverse('rpms-list')
        response = self.client.get(url + '?name=bash&version=1.2.3', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_query_with_wrong_params(self):
        """An unknown URL path under the list endpoint is a 404."""
        url = reverse('rpms-list')
        response = self.client.get(url + 'wrong_param/', format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    def test_query_with_bad_epoch(self):
        """A non-integer epoch filter value is rejected with a 400."""
        url = reverse('rpms-list')
        response = self.client.get(url, {'epoch': 'foo'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('epoch', response.data['detail'][0])
    def test_query_with_only_key(self):
        """A filter key with no value matches nothing rather than erroring."""
        url = reverse('rpms-list')
        response = self.client.get(url + '?name', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 0)
        response = self.client.get(url + '?name=', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 0)
        response = self.client.get(url + '?epoch', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 0)
        response = self.client.get(url + '?epoch=', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 0)
    def test_retrieve_rpm(self):
        """Detail view serializes all RPM fields, composes and dependencies."""
        url = reverse('rpms-detail', args=[1])
        response = self.client.get(url, format='json')
        expect_data = {"id": 1, "name": "bash", "version": "1.2.3", "epoch": 0, "release": "4.b1",
                       "arch": "x86_64",
                       "srpm_name": "bash", "srpm_nevra": "bash-0:1.2.3-4.b1.src",
                       "filename": "bash-1.2.3-4.b1.x86_64.rpm", "linked_releases": [],
                       "linked_composes": ["compose-1"], "dependencies": self.empty_deps}
        self.assertEqual(response.data, expect_data)
    def test_retrieve_rpm_should_not_have_duplicated_composes(self):
        """linked_composes lists each compose once even if linked repeatedly."""
        url = reverse('rpms-detail', args=[2])
        response = self.client.get(url, format='json')
        self.assertEqual(response.data.get("linked_composes"), ['compose-1'])
    def test_create_rpm(self):
        """POST creates an RPM and echoes it back with defaults filled in."""
        url = reverse('rpms-list')
        data = {"name": "fake_bash", "version": "1.2.3", "epoch": 0, "release": "4.b1", "arch": "x86_64",
                "srpm_name": "bash", "filename": "bash-1.2.3-4.b1.x86_64.rpm", "linked_releases": ['release-1.0'],
                "srpm_nevra": "fake_bash-0:1.2.3-4.b1.src"}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        expected_response_data = {"id": 4, 'linked_composes': [],
                                  "name": "fake_bash", "version": "1.2.3", "epoch": 0, "release": "4.b1",
                                  "arch": "x86_64", "srpm_name": "bash", "filename": "bash-1.2.3-4.b1.x86_64.rpm",
                                  "linked_releases": ['release-1.0'], "srpm_nevra": "fake_bash-0:1.2.3-4.b1.src",
                                  "dependencies": self.empty_deps}
        self.assertEqual(response.data, expected_response_data)
        self.assertNumChanges([1])
    def test_create_rpm_with_wrong_release(self):
        """POST referencing a non-existent release is rejected."""
        url = reverse('rpms-list')
        data = {"name": "fake_bash", "version": "1.2.3", "epoch": 0, "release": "4.b1", "arch": "x86_64",
                "srpm_name": "bash", "filename": "bash-1.2.3-4.b1.x86_64.rpm", "linked_releases": ['release-1.0-wrong'],
                "srpm_nevra": "fake_bash-0:1.2.3-4.b1.src"}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_partial_update_rpm_with_assign_release(self):
        """PATCH can link an existing release to an RPM."""
        url = reverse('rpms-detail', args=[1])
        data = {"linked_releases": ['release-1.0']}
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('linked_releases'), ['release-1.0'])
        self.assertNumChanges([1])
    def test_partial_update_does_not_break_filename(self):
        """PATCH of unrelated fields must not reset the stored filename."""
        url = reverse('rpms-detail', args=[1])
        data = {'linked_releases': ['release-1.0']}
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.data.get('filename'), 'bash-1.2.3-4.b1.x86_64.rpm')
    def test_full_update_uses_default_filename(self):
        """PUT without a filename derives it from the NVRA fields."""
        url = reverse('rpms-detail', args=[1])
        data = {'name': 'fake_bash', 'version': '1.2.3', 'epoch': 0, 'release': '4.b1', 'arch': 'x86_64',
                'srpm_name': 'bash', 'linked_releases': ['release-1.0'],
                'srpm_nevra': 'fake_bash-0:1.2.3-4.b1.src'}
        response = self.client.put(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('filename'), 'fake_bash-1.2.3-4.b1.x86_64.rpm')
        self.assertNumChanges([1])
    def test_full_update_with_missing_fields_does_not_crash_on_default_filename(self):
        """PUT missing mandatory fields fails validation, not filename code."""
        url = reverse('rpms-detail', args=[1])
        data = {'epoch': 0,
                'srpm_name': 'bash', 'linked_releases': ['release-1.0'],
                'srpm_nevra': 'fake_bash-0:1.2.3-4.b1.src'}
        response = self.client.put(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])
    def test_partial_update_rpm_with_assign_wrong_release(self):
        """PATCH referencing a non-existent release is rejected."""
        url = reverse('rpms-detail', args=[1])
        data = {"linked_releases": ['release-1.0-fake']}
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_update_rpm(self):
        """PUT replaces all writable fields and returns the full record."""
        data = {"name": "fake_bash", "version": "1.2.3", "epoch": 0, "release": "4.b1", "arch": "x86_64",
                "srpm_name": "bash", "filename": "bash-1.2.3-4.b1.x86_64.rpm", "linked_releases": ['release-1.0'],
                "srpm_nevra": "fake_bash-0:1.2.3-4.b1.src"}
        url = reverse('rpms-detail', args=[1])
        response = self.client.put(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data.update({'id': 1, 'linked_composes': [u'compose-1'], 'dependencies': self.empty_deps})
        self.assertDictEqual(dict(response.data), data)
        self.assertNumChanges([1])
    def test_update_rpm_with_linked_compose_should_read_only(self):
        """linked_composes is read-only; writing it is a 400."""
        url = reverse('rpms-detail', args=[3])
        data = {'linked_composes': [u'compose-1']}
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])
    def test_bulk_update_patch(self):
        """Bulk PATCH (id -> changes mapping on the list URL) is applied."""
        self.client.patch(reverse('rpms-list'),
                          {1: {"linked_releases": ['release-1.0']}}, format='json')
        url = reverse('rpms-detail', args=[1])
        response = self.client.get(url, format='json')
        self.assertEqual(response.data.get("linked_releases"), ['release-1.0'])
        self.assertNumChanges([1])
    def test_delete_rpm_should_not_be_allowed(self):
        """DELETE on a single RPM is not an allowed method."""
        url = reverse('rpms-detail', args=[1])
        response = self.client.delete(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
    def test_bulk_delete_rpms_should_not_be_allowed(self):
        """Bulk DELETE on the list endpoint is not an allowed method."""
        url = reverse('rpms-list')
        response = self.client.delete(url, [1, 2], format='json')
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class ImageRESTTestCase(APITestCase):
    """Read-only query tests for the image list endpoint.

    The fixtures provide three images linked to compose-1; each test checks
    that a filter parameter (alone or with multiple values) yields the
    expected number of matches, or that bad values are rejected.
    """
    fixtures = [
        'pdc/apps/release/fixtures/tests/release.json',
        'pdc/apps/compose/fixtures/tests/compose.json',
        'pdc/apps/compose/fixtures/tests/variant_arch.json',
        'pdc/apps/compose/fixtures/tests/variant.json',
        'pdc/apps/package/fixtures/test/image.json',
        'pdc/apps/compose/fixtures/tests/compose_composeimage.json',
    ]

    def _count(self, **params):
        """GET the image list with the given filters; return the hit count."""
        response = self.client.get(reverse('image-list'), params)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        return response.data.get('count')

    def _assert_rejects_non_integer(self, key):
        """A non-numeric value for an integer filter must produce HTTP 400."""
        value = 'wrongvalue'
        response = self.client.get(reverse('image-list'), {key: value})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {"detail": [u'Value [%s] of %s is not an integer' % (value, key)]})

    def test_list_all(self):
        self.assertEqual(self._count(), 3)

    def test_query_file_name(self):
        self.assertEqual(self._count(file_name='image-1'), 1)
        self.assertEqual(self._count(file_name=['image-1', 'image-2']), 2)

    def test_query_image_format(self):
        self.assertEqual(self._count(image_format='iso'), 1)
        self.assertEqual(self._count(image_format=['iso', 'qcow']), 2)

    def test_query_image_type(self):
        self.assertEqual(self._count(image_type='dvd'), 1)
        self.assertEqual(self._count(image_type=['dvd', 'boot']), 2)

    def test_query_disc_number(self):
        self.assertEqual(self._count(disc_number=1), 1)
        self.assertEqual(self._count(disc_number=[1, 2]), 2)

    def test_query_disc_count(self):
        self.assertEqual(self._count(disc_count=1), 1)
        self.assertEqual(self._count(disc_count=[1, 2]), 2)

    def test_query_arch(self):
        self.assertEqual(self._count(arch='src'), 1)
        self.assertEqual(self._count(arch=['src', 'x86_64']), 2)

    def test_query_mtime(self):
        self.assertEqual(self._count(mtime=111111111), 1)
        self.assertEqual(self._count(mtime=[111111111, 222222222]), 2)

    def test_query_size(self):
        self.assertEqual(self._count(size=444444444), 1)
        self.assertEqual(self._count(size=[444444444, 555555555]), 2)

    def test_query_bootable(self):
        self.assertEqual(self._count(bootable=True), 1)

    def test_negative_bootable(self):
        self.assertEqual(self._count(bootable='false'), 2)

    def test_active_bootable(self):
        self.assertEqual(self._count(bootable='true'), 1)

    def test_query_implant_md5(self):
        self.assertEqual(self._count(implant_md5='a' * 32), 1)
        self.assertEqual(self._count(implant_md5=['a' * 32, 'b' * 32]), 2)

    def test_query_volume_id(self):
        self.assertEqual(self._count(volume_id='image-1-volume_id'), 1)
        self.assertEqual(self._count(volume_id=['image-1-volume_id', 'image-2-volume_id']), 2)

    def test_query_md5(self):
        self.assertEqual(self._count(md5='1' * 32), 1)
        self.assertEqual(self._count(md5=['1' * 32, '2' * 32]), 2)

    def test_query_sha1(self):
        self.assertEqual(self._count(sha1='1' * 40), 1)
        self.assertEqual(self._count(sha1=['1' * 40, '2' * 40]), 2)

    def test_query_sha256(self):
        self.assertEqual(self._count(sha256='1' * 64), 1)
        self.assertEqual(self._count(sha256=['1' * 64, '2' * 64]), 2)

    def test_query_compose(self):
        self.assertEqual(self._count(compose='foo'), 0)
        self.assertEqual(self._count(compose=['compose-1', 'foo']), 3)

    def test_query_disc_number_with_wrong_value(self):
        self._assert_rejects_non_integer('disc_number')

    def test_query_disc_count_with_wrong_value(self):
        self._assert_rejects_non_integer('disc_count')

    def test_query_mtime_with_wrong_value(self):
        self._assert_rejects_non_integer('mtime')

    def test_query_size_with_wrong_value(self):
        self._assert_rejects_non_integer('size')
class BuildImageRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """CRUD and filtering tests for the build-image REST endpoint.

    Covers creation with new/existing RPMs and archives, srpm_nevra
    validation, release linkage, query filters, PATCH/PUT updates and
    deletion, and verifies changeset counts via assertNumChanges.
    """
    # Fixtures provide the RPMs, archives, releases and two build images
    # that the counts and lookups below rely on.
    fixtures = [
        'pdc/apps/package/fixtures/test/rpm.json',
        'pdc/apps/package/fixtures/test/archive.json',
        'pdc/apps/package/fixtures/test/release.json',
        'pdc/apps/package/fixtures/test/build_image.json',
    ]
    def test_create_with_new_rpms(self):
        # Creating an image may create the referenced RPM on the fly.
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{'name': 'new_rpm', 'epoch': 0, 'version': '1.0.0',
                          'release': '1', 'arch': 'src', 'srpm_name': 'new_srpm'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    def test_create_with_new_incorrect_rpms_1(self):
        url = reverse('buildimage-list')
        # rpm's arch is not src but srpm_nevra is empty
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{'name': 'new_rpm', 'epoch': 0, 'version': '1.0.0',
                          'release': '1', 'arch': 'x86-64', 'srpm_name': 'new_srpm'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data.get('rpms'), ["RPM's srpm_nevra should be empty if and only if arch is src"])
    def test_create_with_new_incorrect_rpms_2(self):
        url = reverse('buildimage-list')
        # rpm's arch is src but srpm_nevra is not empty
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{'name': 'new_rpm', 'epoch': 0, 'version': '1.0.0',
                          'release': '1', 'arch': 'src', 'srpm_name': 'new_srpm', 'srpm_nevra': 'fake_srpm_nevra'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data.get('rpms'), ["RPM's srpm_nevra should be empty if and only if arch is src"])
    def test_create_with_exist_rpms(self):
        # Reusing a fixture RPM records only one change (the image itself).
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{
                    "name": "bash-doc",
                    "epoch": 0,
                    "version": "1.2.3",
                    "release": "4.b2",
                    "arch": "x86_64",
                    "srpm_name": "bash",
                    "srpm_nevra": "bash-0:1.2.3-4.b2.src"}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertNumChanges([1])
        self.assertIn('bash-doc', response.content)
    def test_create_with_exist_rpm_nevra(self):
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{
                    "name": "bash-doc",
                    "epoch": 0,
                    "version": "1.2.3",
                    "release": "4.b2",
                    "arch": "x86_64",
                    "srpm_name": "bash",
                    "srpm_nevra": "new_bash-0:1.2.3-4.b2.src"}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertNumChanges([1])
        self.assertIn('bash-doc', response.content)
    def test_create_with_new_archives(self):
        # A brand-new archive adds a second change alongside the image.
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [],
                'archives': [{'build_nvr': 'new_build', 'name': 'new_name',
                              'size': 123, 'md5': '1111222233334444aaaabbbbccccdddd'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertNumChanges([2])
    def test_create_with_exist_release_id(self):
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'releases': ["release-1.0", "release-2.0"]}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    def test_create_with_non_exist_release_id(self):
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'releases': ["release-1.0-fake-name"]}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_create_with_exist_archives(self):
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{
                    "name": "bash-doc",
                    "epoch": 0,
                    "version": "1.2.3",
                    "release": "4.b2",
                    "arch": "x86_64",
                    "srpm_name": "bash",
                    "srpm_nevra": "bash-0:1.2.3-4.b2.src"}],
                'archives': [{'build_nvr': 'my-server-docker-1.0-27', 'name': 'tdl-x86_64.xml',
                              'size': 641, 'md5': '22222222222222222222222222222222'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertNumChanges([1])
        self.assertIn('bash-doc', response.content)
    def test_create_with_wrong_field(self):
        # 'build_name' is not a valid archive field; error must name 'build_nvr'.
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [],
                'archives': [{'build_name': 'new_build', 'name': 'new_name',
                              'size': 123, 'md5': '1111222233334444aaaabbbbccccdddd'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('archives', response.content)
        self.assertIn('build_nvr', response.content)
    def test_create_with_exist_rpms_missing_fields(self):
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{'name': 'bash-doc'}],
                'archives': []}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('rpms', response.content)
        self.assertIn('epoch', response.content)
        self.assertIn('version', response.content)
        self.assertIn('release', response.content)
        self.assertIn('arch', response.content)
        self.assertIn('srpm_name', response.content)
    def test_create_with_new_rpms_missing_fields(self):
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{'name': 'new_rpm'}],
                'archives': []}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('rpms', response.content)
        self.assertIn('epoch', response.content)
        self.assertIn('version', response.content)
        self.assertIn('release', response.content)
        self.assertIn('arch', response.content)
        self.assertIn('srpm_name', response.content)
    def test_create_with_exist_archives_missing_fields(self):
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [],
                'archives': [{'build_nvr': 'my-server-docker-1.0-27'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('archives', response.content)
        self.assertIn('name', response.content)
        self.assertIn('size', response.content)
        self.assertIn('md5', response.content)
    def test_create_with_new_archives_missing_fields(self):
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [],
                'archives': [{'build_nvr': 'new_build'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('archives', response.content)
        self.assertIn('name', response.content)
        self.assertIn('size', response.content)
        self.assertIn('md5', response.content)
    def test_get(self):
        url = reverse('buildimage-detail', args=[1])
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_list(self):
        url = reverse('buildimage-list')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
    def test_query_with_component_name(self):
        url = reverse('buildimage-list')
        response = self.client.get(url + '?component_name=bash', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_query_component_name_with_srpm_name_mapping(self):
        # A srpm-name mapping lets a component name find images via a
        # differently-named SRPM (bash -> kernel here).
        rpm = models.RPM.objects.create(
            name='kernel', epoch=0, version='3.19.3', release='100',
            arch='src', srpm_name='kernel', filename='kernel-3.19.3-100.src.rpm')
        build_image = models.BuildImage.objects.first()
        build_image.rpms.add(rpm)
        global_component = component_models.GlobalComponent.objects.create(name='bash')
        release = release_models.Release.objects.create(
            release_type=release_models.ReleaseType.objects.get(short='ga'),
            short='release',
            version='1.1',
            name='Awesome Release')
        release_component = component_models.ReleaseComponent.objects.create(
            global_component=global_component,
            release=release,
            name='bash')
        binding_models.ReleaseComponentSRPMNameMapping.objects.create(
            srpm_name='kernel',
            release_component=release_component)
        url = reverse('buildimage-list')
        response = self.client.get(url + '?component_name=bash', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        self.assertIn('kernel', response.content)
    def test_query_component_name_without_srpm_name_mapping(self):
        # Without a mapping the component name falls back to the srpm_name.
        rpm = models.RPM.objects.create(
            name='kernel', epoch=0, version='3.19.3', release='100',
            arch='src', srpm_name='kernel', filename='kernel-3.19.3-100.src.rpm')
        build_image = models.BuildImage.objects.first()
        build_image.rpms.add(rpm)
        global_component = component_models.GlobalComponent.objects.create(name='kernel')
        release = release_models.Release.objects.create(
            release_type=release_models.ReleaseType.objects.get(short='ga'),
            short='release',
            version='7.1',
            name='Awesome Release')
        component_models.ReleaseComponent.objects.create(
            global_component=global_component,
            release=release,
            name='kernel')
        url = reverse('buildimage-list')
        response = self.client.get(url + '?component_name=kernel', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        self.assertIn('kernel', response.content)
    def test_query_with_rpm_version(self):
        url = reverse('buildimage-list')
        response = self.client.get(url + '?rpm_version=1.2.3', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_query_with_rpm_release(self):
        url = reverse('buildimage-list')
        response = self.client.get(url + '?rpm_release=4.b1', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_query_with_image_id(self):
        url = reverse('buildimage-list')
        response = self.client.get(url + '?image_id=my-server-docker-1.0-27', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        self.assertEqual(response.data.get('results')[0].get('image_id'), 'my-server-docker-1.0-27')
    def test_query_with_archive_build_nvr(self):
        url = reverse('buildimage-list')
        response = self.client.get(url + '?archive_build_nvr=my-server-docker-1.0-27', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
    def test_query_with_image_format(self):
        url = reverse('buildimage-list')
        response = self.client.get(url + '?image_format=docker', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
    def test_query_with_md5(self):
        url = reverse('buildimage-list')
        response = self.client.get(url + '?md5=0123456789abcdef0123456789abcdef',
                                   format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_query_with_archive_name(self):
        url = reverse('buildimage-list')
        response = self.client.get(url + '?archive_name=archive_1', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_query_with_archive_size(self):
        url = reverse('buildimage-list')
        response = self.client.get(url + '?archive_size=666', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_query_with_archive_md5(self):
        url = reverse('buildimage-list')
        response = self.client.get(url + '?archive_md5=22222222222222222222222222222222', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
    def test_query_with_release_id(self):
        url = reverse('buildimage-list')
        response = self.client.get(url + '?release_id=release-1.0', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
        response = self.client.get(url + '?release_id=release-2.0', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_update_image_with_release_id(self):
        url = reverse('buildimage-detail', args=[1])
        data = {"releases": ["release-1.0", "release-2.0"]}
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('releases'), ["release-1.0", "release-2.0"])
        self.assertNumChanges([1])
    def test_patch_update(self):
        url = reverse('buildimage-detail', args=[1])
        data = {'image_id': 'new_build'}
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('image_id'), 'new_build')
    def test_partial_update_empty(self):
        # An empty PATCH body is rejected rather than treated as a no-op.
        response = self.client.patch(reverse('buildimage-detail', args=[1]), {}, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_patch_update_failed(self):
        url = reverse('buildimage-detail', args=[1])
        data = {'image_format': 'new_format'}
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data.get('image_format'), ["Object with name=new_format does not exist."])
    def test_put_update(self):
        url = reverse('buildimage-detail', args=[1])
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{
                    "name": "new_rpm",
                    "epoch": 0,
                    "version": "0.1.0",
                    "release": "1",
                    "arch": "x86_64",
                    "srpm_name": "new_srpm",
                    "srpm_nevra": "new_srpm_nevra"}],
                'archives': [{'build_nvr': 'new_build', 'name': 'new_name',
                              'size': 123, 'md5': '1111222233334444aaaabbbbccccdddd'}]
                }
        response = self.client.put(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([3])
        self.assertIn('new_rpm', response.content)
    def test_delete(self):
        url = reverse('buildimage-detail', args=[1])
        response = self.client.delete(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertNumChanges([1])
    def test_create_same_image_id_with_different_format(self):
        # (image_id, image_format) is the unique pair, not image_id alone.
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{'name': 'new_rpm', 'epoch': 0, 'version': '1.0.0',
                          'release': '1', 'arch': 'src', 'srpm_name': 'new_srpm'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'iso',
                'md5': "0123456789abcdef0123456789abcabc",
                'rpms': [{'name': 'new_rpm', 'epoch': 0, 'version': '1.0.0',
                          'release': '1', 'arch': 'src', 'srpm_name': 'new_srpm'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
|
{
"content_hash": "3067e5b255628940ef290b754114a9a2",
"timestamp": "",
"source": "github",
"line_count": 1564,
"max_line_length": 120,
"avg_line_length": 51.381713554987215,
"alnum_prop": 0.5930115354462986,
"repo_name": "lao605/product-definition-center",
"id": "ed50e38678259973b375b22ff36286cbcd1f960a",
"size": "80470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdc/apps/package/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1767"
},
{
"name": "HTML",
"bytes": "49433"
},
{
"name": "JavaScript",
"bytes": "6629"
},
{
"name": "Makefile",
"bytes": "2828"
},
{
"name": "Python",
"bytes": "1162974"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
}
|
"""
test_discrete.py: Test suite for the Gaussian estimator class :class:DiscreteEst`
"""
from __future__ import print_function
import unittest
import numpy as np
# Add the path to the vampyre package and import it
import env
env.add_vp_path()
import vampyre as vp
def discrete_test(zshape=(1000,10), verbose=False, nvals=10,\
    tol_init=1e-3, tol_est=0.15,is_complex=False):
    """
    Unit test for the :class:`DiscreteEst` class

    The test works by creating a synthetic discrete distribution, drawing an
    i.i.d. matrix :math:`z` with components from that distribution, and then
    taking Gaussian measurements

    :math:`r = z + w, \\quad w \\sim {\\mathcal N}(0, \\tau_r)`

    Then, the estimation methods are called to see if
    the measured error variance matches the expected value.

    :param zshape: shape of :math:`z`
    :param Boolean verbose: prints results.
    :param tol_init: tolerance on initial estimate for test to be considered
       a pass.  This tolerance should be very low (the prior mean/variance
       are computed exactly, so only floating-point error is expected).
    :param tol_est: Error tolerance on the estimation error.  This should
       be much higher since the Monte Carlo simulations take a large number
       of samples to converge.
    :param nvals: number of values in the discrete distribution
    :param is_complex: Flag indicating to use complex data
    :raises vp.common.TestException: if any measured statistic is outside
       its tolerance.
    """
    # Generate a random discrete distribution
    if is_complex:
        zval = np.random.randn(nvals) + 1j*np.random.randn(nvals)
    else:
        zval = np.random.randn(nvals)
    pz = np.random.rand(nvals)
    pz = pz/sum(pz)

    # Noise variance
    #rvar = np.power(10,np.random.uniform(-2,1,1))[0]
    if is_complex:
        rvar = 0.6
    else:
        rvar = 0.1

    # Generate random data: z drawn from the discrete prior, w Gaussian.
    # In the complex case the variance is split evenly over real/imag parts.
    z = np.random.choice(zval,zshape,p=pz)
    if is_complex:
        w = np.random.normal(0,np.sqrt(rvar/2),zshape) +\
            1j*np.random.normal(0,np.sqrt(rvar/2),zshape)
    else:
        w = np.random.normal(0,np.sqrt(rvar),zshape)
    r = z + w

    # Create estimator
    est = vp.estim.DiscreteEst(zval, pz, zshape, var_axes='all',\
        is_complex=is_complex)

    # Run the initial estimate
    zmean, zvar, cost = est.est_init(return_cost=True)

    # Compute the true expected mean (prior mean of the discrete pmf)
    zmean0 = pz.dot(zval)
    if np.abs(zmean0 -np.mean(zmean)) > tol_init:
        raise vp.common.TestException("Initial mean does not match expected value")

    # Compute the true expected variance (prior variance of the pmf)
    zvar0 = pz.dot(np.abs(zval-zmean0)**2)
    if np.abs(zvar0 -np.mean(zvar)) > tol_init:
        raise vp.common.TestException(\
            "Initial variance does not match expected value")

    # Get posterior estimate
    zhat, zhatvar, cost = est.est(r,rvar,return_cost=True)

    # Measure error: the empirical MSE should match the predicted
    # posterior variance to within the Monte Carlo tolerance.
    zerr = np.mean(np.abs(zhat-z)**2)
    fail = (np.abs(zerr-zhatvar) > tol_est*np.abs(zerr))
    if verbose or fail:
        print("err: true: {0:12.4e} est: {1:12.4e}".format(zerr,zhatvar) )
    if fail:
        raise vp.common.TestException("Posterior estimate discrete error "+
           "does not match predicted value")
class TestCases(unittest.TestCase):
    """Runs the DiscreteEst unit test on both real and complex data."""

    def test_discrete_real(self):
        discrete_test(is_complex=False, verbose=False)

    def test_discrete_complex(self):
        discrete_test(is_complex=True, verbose=False)
# Allow running this module directly: ``python test_discrete.py``.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "19639ed24e9c7c374a2e785b55855dd0",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 83,
"avg_line_length": 33.24761904761905,
"alnum_prop": 0.6304783729590375,
"repo_name": "GAMPTeam/vampyre",
"id": "87f31980f9d6ca68fa469c3e3a46fdd10a88512a",
"size": "3491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_estim/test_discrete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "269151"
}
],
"symlink_target": ""
}
|
import logging
import urlparse
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from keystoneclient import service_catalog
from keystoneclient.v2_0 import client as keystone_client
from keystoneclient.v2_0 import tokens
from horizon.api import base
from horizon import exceptions
LOG = logging.getLogger(__name__)
DEFAULT_ROLE = None
class Service(base.APIDictWrapper):
    """ Wrapper for a dict based on the service data from keystone. """
    _attrs = ['id', 'type', 'name']

    def __init__(self, service, *args, **kwargs):
        """Wrap *service* and pull out the first endpoint's URL/host/region."""
        super(Service, self).__init__(service, *args, **kwargs)
        # Only the first endpoint is considered; multi-endpoint services
        # expose just endpoints[0] through this wrapper.
        self.url = service['endpoints'][0]['internalURL']
        self.host = urlparse.urlparse(self.url).hostname
        self.region = service['endpoints'][0]['region']
        self.disabled = None

    def __unicode__(self):
        # The identity service additionally shows which backend serves it.
        if(self.type == "identity"):
            return _("%(type)s (%(backend)s backend)") \
                     % {"type": self.type,
                        "backend": keystone_backend_name()}
        else:
            return self.type

    def __repr__(self):
        return "<Service: %s>" % unicode(self)
def _get_endpoint_url(request, endpoint_type, catalog=None):
    """Return the identity endpoint URL for this request.

    Prefers the user's cached service catalog; otherwise falls back to the
    session's ``region_endpoint`` or ``settings.OPENSTACK_KEYSTONE_URL``.

    NOTE(review): the ``catalog`` parameter is accepted but never used in
    this function -- callers still pass it, so confirm before removing.
    """
    if getattr(request.user, "service_catalog", None):
        return base.url_for(request,
                            service_type='identity',
                            endpoint_type=endpoint_type)
    return request.session.get('region_endpoint',
                               getattr(settings, 'OPENSTACK_KEYSTONE_URL'))
def keystoneclient(request, username=None, password=None, tenant_id=None,
                   token_id=None, endpoint=None, endpoint_type=None,
                   admin=False):
    """Returns a client connected to the Keystone backend.

    Several forms of authentication are supported:

        * Username + password -> Unscoped authentication
        * Username + password + tenant id -> Scoped authentication
        * Unscoped token -> Unscoped authentication
        * Unscoped token + tenant id -> Scoped authentication
        * Scoped token -> Scoped authentication

    Available services and data from the backend will vary depending on
    whether the authentication was scoped or unscoped.

    Lazy authentication if an ``endpoint`` parameter is provided.

    Calls requiring the admin endpoint should have ``admin=True`` passed in
    as a keyword argument.

    The client is cached so that subsequent API calls during the same
    request/response cycle don't have to be re-authenticated.

    :raises exceptions.NotAuthorized: if ``admin=True`` and the current
        user is not an admin.
    """
    user = request.user
    if admin:
        # Admin calls must go to the adminURL and require an admin user.
        if not user.is_admin():
            raise exceptions.NotAuthorized
        endpoint_type = 'adminURL'
    else:
        endpoint_type = endpoint_type or getattr(settings,
                                                 'OPENSTACK_ENDPOINT_TYPE',
                                                 'internalURL')

    # Take care of client connection caching/fetching a new client.
    # Admin vs. non-admin clients are cached separately for token matching.
    cache_attr = "_keystone_admin" if admin else "_keystone"
    # Reuse the cached client unless an explicit token_id was given that
    # differs from the cached client's token.
    if hasattr(request, cache_attr) and (not token_id
            or getattr(request, cache_attr).auth_token == token_id):
        LOG.debug("Using cached client for token: %s" % user.token)
        conn = getattr(request, cache_attr)
    else:
        endpoint_lookup = _get_endpoint_url(request, endpoint_type)
        auth_url = endpoint or endpoint_lookup
        LOG.debug("Creating a new keystoneclient connection to %s." % auth_url)
        conn = keystone_client.Client(username=username or user.username,
                                      password=password,
                                      tenant_id=tenant_id or user.tenant_id,
                                      token=token_id or user.token,
                                      auth_url=auth_url,
                                      endpoint=endpoint)
        setattr(request, cache_attr, conn)

    # Fetch the correct endpoint if we've re-scoped the token.
    catalog = getattr(conn, 'service_catalog', None)
    if catalog and "serviceCatalog" in catalog.catalog.keys():
        catalog = catalog.catalog['serviceCatalog']
    endpoint = _get_endpoint_url(request, endpoint_type, catalog)
    conn.management_url = endpoint
    return conn
def tenant_name(request, tenant_id):
    """Look up the tenant with *tenant_id* and return its display name."""
    tenant = keystoneclient(request).tenants.get(tenant_id)
    return tenant.name
def tenant_create(request, tenant_name, description, enabled):
    """Create a new tenant via the admin endpoint and return it."""
    manager = keystoneclient(request, admin=True).tenants
    return manager.create(tenant_name, description, enabled)
def tenant_get(request, tenant_id, admin=False):
    """Fetch a single tenant by id (optionally via the admin endpoint)."""
    client = keystoneclient(request, admin=admin)
    return client.tenants.get(tenant_id)
def tenant_delete(request, tenant_id):
    """Delete the tenant with *tenant_id* via the admin endpoint."""
    client = keystoneclient(request, admin=True)
    client.tenants.delete(tenant_id)
def tenant_list(request, admin=False):
    """List tenants visible through the chosen (admin or member) endpoint."""
    client = keystoneclient(request, admin=admin)
    return client.tenants.list()
def tenant_update(request, tenant_id, tenant_name, description, enabled):
    """Update a tenant's name, description and enabled flag (admin only)."""
    manager = keystoneclient(request, admin=True).tenants
    return manager.update(tenant_id, tenant_name, description, enabled)
def tenant_list_for_token(request, token, endpoint_type=None):
    """Return the tenants accessible with the given *token*."""
    # Fall back to the configured endpoint type when none was supplied.
    endpoint_type = endpoint_type or getattr(settings,
                                             'OPENSTACK_ENDPOINT_TYPE',
                                             'internalURL')
    client = keystoneclient(
        request,
        token_id=token,
        endpoint=_get_endpoint_url(request, endpoint_type),
        endpoint_type=endpoint_type)
    return client.tenants.list()
def token_create(request, tenant, username, password):
    '''
    Creates a token using the username and password provided. If tenant
    is provided it will retrieve a scoped token and the service catalog for
    the given tenant. Otherwise it will return an unscoped token and without
    a service catalog.

    :returns: the token object returned by ``tokens.authenticate``.
    '''
    # Build a client authenticated against the internal identity endpoint,
    # then authenticate explicitly to obtain the token object itself.
    c = keystoneclient(request,
                       username=username,
                       password=password,
                       tenant_id=tenant,
                       endpoint=_get_endpoint_url(request, 'internalURL'))
    token = c.tokens.authenticate(username=username,
                                  password=password,
                                  tenant_id=tenant)
    return token
def token_create_scoped(request, tenant, token):
    '''
    Creates a scoped token using the tenant id and unscoped token; retrieves
    the service catalog for the given tenant.

    :returns: a ``tokens.Token`` built from the raw authenticate response.
    '''
    # Drop any cached non-admin client: it holds the unscoped token and
    # must not be reused after re-scoping.
    if hasattr(request, '_keystone'):
        del request._keystone
    c = keystoneclient(request,
                       tenant_id=tenant,
                       token_id=token,
                       endpoint=_get_endpoint_url(request, 'internalURL'))
    # return_raw=True gives the raw dict so we can build both the service
    # catalog and the Token wrapper from the same response.
    raw_token = c.tokens.authenticate(tenant_id=tenant,
                                      token=token,
                                      return_raw=True)
    c.service_catalog = service_catalog.ServiceCatalog(raw_token)
    # Point the client at the endpoint appropriate for this user's role.
    if request.user.is_admin():
        c.management_url = c.service_catalog.url_for(service_type='identity',
                                                     endpoint_type='adminURL')
    else:
        endpoint_type = getattr(settings,
                                'OPENSTACK_ENDPOINT_TYPE',
                                'internalURL')
        c.management_url = c.service_catalog.url_for(
                service_type='identity', endpoint_type=endpoint_type)
    scoped_token = tokens.Token(tokens.TokenManager, raw_token)
    return scoped_token
def user_list(request, tenant_id=None):
    """List users, optionally restricted to one tenant (admin endpoint)."""
    client = keystoneclient(request, admin=True)
    return client.users.list(tenant_id=tenant_id)
def user_create(request, user_id, email, password, tenant_id, enabled):
    """Create a user via the admin endpoint.

    NOTE(review): ``user_id`` is passed as the first positional argument of
    ``users.create`` (the name slot) -- confirm against keystoneclient docs.
    """
    manager = keystoneclient(request, admin=True).users
    return manager.create(user_id, password, email, tenant_id, enabled)
def user_delete(request, user_id):
    """Delete the user with *user_id* via the admin endpoint."""
    client = keystoneclient(request, admin=True)
    client.users.delete(user_id)
def user_get(request, user_id, admin=True):
    """Fetch a single user by id (admin endpoint by default)."""
    client = keystoneclient(request, admin=admin)
    return client.users.get(user_id)
def user_update(request, user, **data):
    """Update arbitrary user attributes via the admin endpoint."""
    client = keystoneclient(request, admin=True)
    return client.users.update(user, **data)
def user_update_enabled(request, user_id, enabled):
    """Enable or disable a user via the admin endpoint."""
    manager = keystoneclient(request, admin=True).users
    return manager.update_enabled(user_id, enabled)
def user_update_password(request, user_id, password, admin=True):
    """Set a user's password (admin endpoint by default)."""
    manager = keystoneclient(request, admin=admin).users
    return manager.update_password(user_id, password)
def user_update_tenant(request, user_id, tenant_id, admin=True):
    """Move a user to another tenant (admin endpoint by default)."""
    manager = keystoneclient(request, admin=admin).users
    return manager.update_tenant(user_id, tenant_id)
def role_list(request):
    """ Returns a global list of available roles. """
    client = keystoneclient(request, admin=True)
    return client.roles.list()
def add_tenant_user_role(request, tenant_id, user_id, role_id):
    """ Adds a role for a user on a tenant. """
    manager = keystoneclient(request, admin=True).roles
    return manager.add_user_role(user_id, role_id, tenant_id)
def remove_tenant_user(request, tenant_id, user_id):
    """ Removes all roles from a user on a tenant, removing them from it. """
    client = keystoneclient(request, admin=True)
    for role in client.roles.roles_for_user(user_id, tenant_id):
        client.roles.remove_user_role(user_id, role.id, tenant_id)
def get_default_role(request):
    """
    Gets the default role object from Keystone and saves it as a global
    since this is configured in settings and should not change from request
    to request. Supports lookup by name or id.

    :returns: the cached role object, or ``None`` when no default role is
        configured or it could not be found.
    """
    global DEFAULT_ROLE
    default = getattr(settings, "OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
    if default and DEFAULT_ROLE is None:
        # Bug fix: initialize ``roles`` so a failed list() no longer leaves
        # it unbound (the old code hit a NameError in the loop below), and
        # narrow the old bare ``except:`` to Exception.
        roles = []
        try:
            roles = keystoneclient(request, admin=True).roles.list()
        except Exception:
            exceptions.handle(request)
        for role in roles:
            # The setting may hold either the role's id or its name.
            if role.id == default or role.name == default:
                DEFAULT_ROLE = role
                break
    return DEFAULT_ROLE
def list_ec2_credentials(request, user_id):
    """List the EC2 credential pairs owned by *user_id*."""
    client = keystoneclient(request)
    return client.ec2.list(user_id)
def create_ec2_credentials(request, user_id, tenant_id):
    """Create a new EC2 credential pair for *user_id* on *tenant_id*."""
    client = keystoneclient(request)
    return client.ec2.create(user_id, tenant_id)
def get_user_ec2_credentials(request, user_id, access_token):
    """Fetch one EC2 credential pair of *user_id* by its access token."""
    client = keystoneclient(request)
    return client.ec2.get(user_id, access_token)
def keystone_can_edit_user():
    """Whether the configured keystone backend allows editing users."""
    if not hasattr(settings, "OPENSTACK_KEYSTONE_BACKEND"):
        return False
    return settings.OPENSTACK_KEYSTONE_BACKEND['can_edit_user']
def keystone_backend_name():
    """Name of the configured keystone backend, or 'unknown'."""
    if not hasattr(settings, "OPENSTACK_KEYSTONE_BACKEND"):
        return 'unknown'
    return settings.OPENSTACK_KEYSTONE_BACKEND['name']
|
{
"content_hash": "5a8560554c09882f36911e6a4961f4e8",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 79,
"avg_line_length": 38.50993377483444,
"alnum_prop": 0.5893379191745486,
"repo_name": "gyang/horizon",
"id": "333afc1f658d2f4f2f64123c09019f3f94c57b51",
"size": "12471",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "horizon/api/keystone.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from flask import render_template, redirect, url_for, flash, request
from flask_login import login_user, logout_user, login_required, current_user
from . import auth
from .. import db
from ..email import send_email
from ..models.users import User
from .forms import LoginForm, RegistrationForm, ChangePasswordForm, PasswordResetRequestForm, \
PasswordResetForm, ChangeEmailForm
@auth.before_app_request
def before_request():
    """Redirect logged-in but unconfirmed users to the 'unconfirmed' page,
    except for auth endpoints and static files."""
    if not current_user.is_authenticated:
        return
    if current_user.confirmed:
        return
    endpoint = request.endpoint
    if not endpoint or endpoint.startswith('auth.') or endpoint == 'static':
        return
    return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
    """Show the 'please confirm your account' page to unconfirmed users;
    everyone else is sent back to the main page."""
    needs_page = not (current_user.is_anonymous or current_user.confirmed)
    if needs_page:
        return render_template('auth/unconfirmed.html')
    return redirect(url_for('main.index'))
@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form and authenticate submitted credentials."""
    form = LoginForm()
    if not form.validate_on_submit():
        return render_template('auth/login.html', form=form)
    user = User.query.filter_by(email=form.email.data).first()
    if user is None or not user.verify_password(form.password.data):
        flash('Invalid username or password')
        return render_template('auth/login.html', form=form)
    login_user(user, form.remember_me)
    # Honor the ?next= parameter when present, otherwise go home.
    return redirect(request.args.get('next') or url_for('main.index'))
@auth.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the main page."""
    logout_user()
    flash('You have been logged out.')
    return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account and email a confirmation token."""
    form = RegistrationForm()
    if not form.validate_on_submit():
        return render_template('auth/register.html', form=form)
    user = User(email=form.email.data,
                username=form.username.data,
                password=form.password.data)
    db.session.add(user)
    # Commit now so the confirmation token encodes a real user id.
    db.session.commit()
    token = user.generate_confirmation_token()
    send_email(user.email, 'Confirm Your Account', 'auth/email/confirm',
               user=user, token=token)
    flash('A confirmation email has been sent to you by email.')
    return redirect(url_for('main.index'))
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
    """Validate an account-confirmation token for the logged-in user.

    Already-confirmed users are redirected silently; otherwise the outcome
    of the token check is flashed.
    """
    if not current_user.confirmed:
        if current_user.confirm(token):
            flash('You have confirmed your account. Thanks!')
        else:
            flash('The confirmation link is invalid or has expired!')
    return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
    """Send a fresh confirmation email to the current user.

    Fix: the route previously lacked ``@login_required``; an anonymous
    visitor hitting /confirm would raise an AttributeError when
    ``generate_confirmation_token`` was looked up on the anonymous user
    object instead of being redirected to the login page.
    """
    token = current_user.generate_confirmation_token()
    send_email(current_user.email, 'Confirm Your Account',
               'auth/email/confirm', user=current_user, token=token)
    flash('A new confirmation email has been sent to you by email.')
    return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
    """Let a logged-in user change their password after re-authenticating.

    Fix: the original only called ``db.session.add`` without committing, so
    the new password hash was never persisted unless something else committed
    the session later; the sibling ``register`` view in this module commits
    explicitly, so the app does not appear to rely on implicit commits.
    """
    form = ChangePasswordForm()
    if form.validate_on_submit():
        if current_user.verify_password(form.old_password.data):
            current_user.password = form.password.data
            db.session.add(current_user)
            db.session.commit()
            flash('Your password has been updated.')
            return redirect(url_for('main.index'))
        flash('Invalid Password.')
    return render_template('auth/change_password.html', form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
    """Email a password-reset link to the address supplied in the form.

    The success message is flashed whether or not the address matches an
    account, so the form cannot be used to probe for registered emails.
    """
    if not current_user.is_anonymous:
        return redirect(url_for('main.index'))
    form = PasswordResetRequestForm()
    if not form.validate_on_submit():
        return render_template('auth/reset_password.html', form=form)
    account = User.query.filter_by(email=form.email.data).first()
    if account:
        reset_token = account.generate_reset_token()
        send_email(account.email, 'Reset Your Password',
                   'auth/email/reset_password',
                   user=account, token=reset_token,
                   next=request.args.get('next'))
    flash('An email with instructions to reset your password has been sent to you.')
    return redirect(url_for('auth.login'))
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
    """Set a new password, given a valid reset token from the email link."""
    if not current_user.is_anonymous:
        return redirect(url_for('main.index'))
    form = PasswordResetForm()
    if form.validate_on_submit():
        account = User.query.filter_by(email=form.email.data).first()
        if account is None:
            return redirect(url_for('main.index'))
        if account.reset_password(token, form.password.data):
            flash('Your password has been updated.')
            return redirect(url_for('auth.login'))
        flash('Could not Reset Password')
        return redirect(url_for('main.index'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
    """Start an email change: verify the password, then mail a confirmation
    token to the requested new address."""
    form = ChangeEmailForm()
    if form.validate_on_submit():
        if not current_user.verify_password(form.password.data):
            flash('Invalid email or password.')
        else:
            requested_email = form.email.data
            change_token = current_user.generate_email_change_token(requested_email)
            send_email(requested_email, 'Confirm your email address',
                       'auth/email/change_email',
                       user=current_user, token=change_token)
            flash('An email with instructions to confirm your new email address has been sent to you.')
            return redirect(url_for('main.index'))
    return render_template('auth/change_email.html', form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
    """Finish an email change by validating the token from the email link."""
    if current_user.change_email(token):
        outcome = 'Your email address has been updated'
    else:
        outcome = 'Invalid request'
    flash(outcome)
    return redirect(url_for('main.index'))
|
{
"content_hash": "c896c1040920de497235c06ad7f7fde8",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 112,
"avg_line_length": 37.62893081761006,
"alnum_prop": 0.6480026742436904,
"repo_name": "KevDi/Flashcards",
"id": "0a2830052cded23ebaef55168aace0e1593430ad",
"size": "5983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/auth/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2122"
},
{
"name": "HTML",
"bytes": "16946"
},
{
"name": "JavaScript",
"bytes": "685"
},
{
"name": "Python",
"bytes": "36324"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: code left byte-for-byte intact
    # (comments only), since Django's migration state is derived from this
    # exact content.
    # Changes `device.hidden` to default True and widens the
    # `source_device_type` choice list on both `device` and `loggedpoint`
    # to include 'mp70' and 'fleetcare'.
    dependencies = [
        ('tracking', '0005_device_hidden'),
    ]
    operations = [
        migrations.AlterField(
            model_name='device',
            name='hidden',
            field=models.BooleanField(default=True, verbose_name='Hidden/private use'),
        ),
        migrations.AlterField(
            model_name='device',
            name='source_device_type',
            # NOTE(review): this choices list is duplicated below for
            # `loggedpoint`; intentional in a generated migration.
            field=models.CharField(choices=[('tracplus', 'TracPlus'), ('iriditrak', 'Iriditrak'), ('dplus', 'DPlus'), ('spot', 'Spot'), ('dfes', 'DFES'), ('mp70', 'MP70'), ('fleetcare', 'fleetcare'), ('other', 'Other')], default='other', max_length=32),
        ),
        migrations.AlterField(
            model_name='loggedpoint',
            name='source_device_type',
            field=models.CharField(choices=[('tracplus', 'TracPlus'), ('iriditrak', 'Iriditrak'), ('dplus', 'DPlus'), ('spot', 'Spot'), ('dfes', 'DFES'), ('mp70', 'MP70'), ('fleetcare', 'fleetcare'), ('other', 'Other')], default='other', max_length=32),
        ),
    ]
|
{
"content_hash": "b12cfe0ad23ee78f931b9462781a6b9c",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 253,
"avg_line_length": 42.73076923076923,
"alnum_prop": 0.5634563456345635,
"repo_name": "ropable/resource_tracking",
"id": "a2e7e65cb59d4c2436f2b26fe77e23d5bc309273",
"size": "1161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tracking/migrations/0006_auto_20200213_1716.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9984"
},
{
"name": "Dockerfile",
"bytes": "1018"
},
{
"name": "HTML",
"bytes": "8052"
},
{
"name": "JavaScript",
"bytes": "63420"
},
{
"name": "Python",
"bytes": "158781"
}
],
"symlink_target": ""
}
|
import _julia
import matplotlib.pyplot as plt
class Julia:
    """Compute and plot a Julia-set escape-time grid.

    The heavy lifting is done by the ``_julia`` extension module, which
    returns a mapping from complex sample points to escape scores.
    NOTE(review): the code below treats a score of 101 as "never escaped /
    in the set" — inferred from the magic value's use here; confirm against
    the extension's documentation.

    Fixes relative to the original:
    * ``julia`` cached with a truthiness test (``if not self._julia``), so an
      empty result list was recomputed on every access; it now tests
      ``is None``.
    * ``render`` tested membership against a list (O(n) per point, O(n^2)
      overall); it now builds a set once for O(1) lookups.
    """

    # Score the extension assigns to points that did not escape.
    _IN_SET_SCORE = 101

    def __init__(self, min_r, max_r, min_i, max_i, z, resolution):
        self.scores = _julia.Julia(min_r, max_r, min_i, max_i, z, resolution)
        self._julia = None  # lazily computed cache of in-set points

    @property
    def julia(self):
        """Return the list of sample points belonging to the set (cached)."""
        if self._julia is None:
            self._julia = [c for c in self.scores
                           if self.scores[c] == self._IN_SET_SCORE]
        return self._julia

    def render_julia(self):
        """Scatter-plot only the in-set points."""
        real = [c.real for c in self.julia]
        imag = [c.imag for c in self.julia]
        plt.scatter(real, imag, s=.0001)
        plt.show()

    def render(self):
        """Scatter-plot the escaped points, coloured by escape speed."""
        in_set = set(self.julia)  # O(1) membership instead of list scans
        elements = [c for c in self.scores if c not in in_set]
        real = [c.real for c in elements]
        imag = [c.imag for c in elements]
        scores = [(.5,
                   1 - self.scores[c] / self._IN_SET_SCORE,
                   self.scores[c] / self._IN_SET_SCORE)
                  for c in elements]
        plt.scatter(real, imag, c=scores, s=.01)
        plt.show()
|
{
"content_hash": "422945b24d5f67efb6f22f6cfaf16605",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 85,
"avg_line_length": 33.77777777777778,
"alnum_prop": 0.569078947368421,
"repo_name": "jfamestad/julia",
"id": "d723e21dae49724bc5accce97377f8a9491a3a81",
"size": "912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "julia.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8149"
},
{
"name": "Makefile",
"bytes": "58"
},
{
"name": "Python",
"bytes": "1092"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.