__all__ = ('EVENT_SCHEDULER_START', 'EVENT_SCHEDULER_SHUTDOWN', 'EVENT_JOBSTORE_ADDED', 'EVENT_JOBSTORE_REMOVED',
'EVENT_JOBSTORE_JOB_ADDED', 'EVENT_JOBSTORE_JOB_REMOVED', 'EVENT_JOB_EXECUTED', 'EVENT_JOB_ERROR',
'EVENT_JOB_MISSED', 'EVENT_ALL', 'SchedulerEvent', 'JobStoreEvent', 'JobEvent')
EVENT_SCHEDULER_START = 1 # The scheduler was started
EVENT_SCHEDULER_SHUTDOWN = 2 # The scheduler was shut down
EVENT_JOBSTORE_ADDED = 4 # A job store was added to the scheduler
EVENT_JOBSTORE_REMOVED = 8 # A job store was removed from the scheduler
EVENT_JOBSTORE_JOB_ADDED = 16 # A job was added to a job store
EVENT_JOBSTORE_JOB_REMOVED = 32 # A job was removed from a job store
EVENT_JOB_EXECUTED = 64 # A job was executed successfully
EVENT_JOB_ERROR = 128 # A job raised an exception during execution
EVENT_JOB_MISSED = 256 # A job's execution was missed
EVENT_ALL = (EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN | EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED |
EVENT_JOBSTORE_JOB_ADDED | EVENT_JOBSTORE_JOB_REMOVED | EVENT_JOB_EXECUTED | EVENT_JOB_ERROR |
EVENT_JOB_MISSED)
class SchedulerEvent(object):
"""
An event that concerns the scheduler itself.
:var code: the type code of this event
"""
def __init__(self, code):
self.code = code
class JobStoreEvent(SchedulerEvent):
"""
An event that concerns job stores.
:var alias: the alias of the job store involved
:var job: the new job if a job was added
"""
def __init__(self, code, alias, job=None):
SchedulerEvent.__init__(self, code)
self.alias = alias
if job:
self.job = job
class JobEvent(SchedulerEvent):
"""
An event that concerns the execution of individual jobs.
:var job: the job instance in question
:var scheduled_run_time: the time when the job was scheduled to be run
:var retval: the return value of the successfully executed job
:var exception: the exception raised by the job
:var traceback: the traceback object associated with the exception
"""
def __init__(self, code, job, scheduled_run_time, retval=None,
exception=None, traceback=None):
SchedulerEvent.__init__(self, code)
self.job = job
self.scheduled_run_time = scheduled_run_time
self.retval = retval
self.exception = exception
self.traceback = traceback
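
# --- Hedged illustrative sketch, not part of the original module ---
# The EVENT_* codes above are single-bit flags: a listener's interest mask
# is the bitwise OR of the codes it cares about, and a dispatcher tests
# membership with a bitwise AND.
def _demo_event_mask():
    mask = EVENT_JOB_EXECUTED | EVENT_JOB_ERROR
    event = JobEvent(EVENT_JOB_ERROR, job=None, scheduled_run_time=None)
    assert event.code & mask  # this listener would receive the event
    assert not (SchedulerEvent(EVENT_SCHEDULER_START).code & mask)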
|
{
"content_hash": "95d85fdb819d5edb35fca4f137c21323",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 113,
"avg_line_length": 40.885245901639344,
"alnum_prop": 0.6603849238171612,
"repo_name": "yonglehou/apscheduler",
"id": "2c262d412675e651acb5b449861332457ba3fa6a",
"size": "2494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apscheduler/events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "117725"
}
],
"symlink_target": ""
}
|
import re
import pep8
import six
# Guidelines for writing new hacking checks
#
# - Use only for Neutron specific tests. OpenStack general tests
# should be submitted to the common 'hacking' module.
# - Pick numbers in the range N3xx. Find the current test with
# the highest allocated number and then pick the next value.
# - Keep the test method code in the source file ordered based
# on the N3xx value.
# - List the new rule in the top level HACKING.rst file
# - Add test cases for each new rule to
# neutron/tests/unit/hacking/test_checks.py
_all_log_levels = {
    'reserved': '_',  # this should never be used with a log unless
                      # it is a variable used for a log message and
                      # an exception
'error': '_LE',
'info': '_LI',
'warn': '_LW',
'warning': '_LW',
'critical': '_LC',
'exception': '_LE',
}
_all_hints = set(_all_log_levels.values())
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
def _regex_for_level(level, hint):
return r".*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % {
'level': level,
'wrong_hints': '|'.join(_all_hints - set([hint])),
}
log_translation_hint = re.compile(
'|'.join('(?:%s)' % _regex_for_level(level, hint)
for level, hint in six.iteritems(_all_log_levels)))
oslo_namespace_imports_dot = re.compile(r"import[\s]+oslo[.][^\s]+")
oslo_namespace_imports_from_dot = re.compile(r"from[\s]+oslo[.]")
oslo_namespace_imports_from_root = re.compile(r"from[\s]+oslo[\s]+import[\s]+")
contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(")
def validate_log_translations(logical_line, physical_line, filename):
# Translations are not required in the test directory
if "neutron/tests" in filename:
return
if pep8.noqa(physical_line):
return
msg = "N320: Log messages require translation hints!"
if log_translation_hint.match(logical_line):
yield (0, msg)
def use_jsonutils(logical_line, filename):
msg = "N321: jsonutils.%(fun)s must be used instead of json.%(fun)s"
# Some files in the tree are not meant to be run from inside Neutron
# itself, so we should not complain about them not using jsonutils
json_check_skipped_patterns = [
"neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/"
"plugins/netwrap",
]
for pattern in json_check_skipped_patterns:
if pattern in filename:
return
if "json." in logical_line:
json_funcs = ['dumps(', 'dump(', 'loads(', 'load(']
for f in json_funcs:
pos = logical_line.find('json.%s' % f)
if pos != -1:
yield (pos, msg % {'fun': f[:-1]})
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_(' and 'LOG.debug(_Lx('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
N319
"""
for hint in _all_hints:
if logical_line.startswith("LOG.debug(%s(" % hint):
yield(0, "N319 Don't translate debug level logs")
def check_assert_called_once_with(logical_line, filename):
# Try to detect unintended calls of nonexistent mock methods like:
# assert_called_once
# assertCalledOnceWith
# assert_has_called
# called_once_with
if 'neutron/tests/' in filename:
if '.assert_called_once_with(' in logical_line:
return
uncased_line = logical_line.lower().replace('_', '')
check_calls = ['.assertcalledonce', '.calledoncewith']
if any(x for x in check_calls if x in uncased_line):
msg = ("N322: Possible use of no-op mock method. "
"please use assert_called_once_with.")
yield (0, msg)
if '.asserthascalled' in uncased_line:
msg = ("N322: Possible use of no-op mock method. "
"please use assert_has_calls.")
yield (0, msg)
def check_oslo_namespace_imports(logical_line):
if re.match(oslo_namespace_imports_from_dot, logical_line):
msg = ("N323: '%s' must be used instead of '%s'.") % (
logical_line.replace('oslo.', 'oslo_'),
logical_line)
yield(0, msg)
elif re.match(oslo_namespace_imports_from_root, logical_line):
msg = ("N323: '%s' must be used instead of '%s'.") % (
logical_line.replace('from oslo import ', 'import oslo_'),
logical_line)
yield(0, msg)
elif re.match(oslo_namespace_imports_dot, logical_line):
msg = ("N323: '%s' must be used instead of '%s'.") % (
logical_line.replace('import', 'from').replace('.', ' import '),
logical_line)
yield(0, msg)
def check_no_contextlib_nested(logical_line, filename):
msg = ("N324: contextlib.nested is deprecated. With Python 2.7 and later "
"the with-statement supports multiple nested objects. See https://"
"docs.python.org/2/library/contextlib.html#contextlib.nested for "
"more information.")
if contextlib_nested.match(logical_line):
yield(0, msg)
def check_python3_xrange(logical_line):
if re.search(r"\bxrange\s*\(", logical_line):
yield(0, "N325: Do not use xrange. Use range, or six.moves.range for "
"large loops.")
def check_no_basestring(logical_line):
if re.search(r"\bbasestring\b", logical_line):
msg = ("N326: basestring is not Python3-compatible, use "
"six.string_types instead.")
yield(0, msg)
def check_python3_no_iteritems(logical_line):
if re.search(r".*\.iteritems\(\)", logical_line):
msg = ("N327: Use six.iteritems() instead of dict.iteritems().")
yield(0, msg)
def check_asserttrue(logical_line, filename):
if 'neutron/tests/' in filename:
if re.search(r"assertEqual\(\s*True,[^,]*(,[^,]*)?\)", logical_line):
msg = ("N328: Use assertTrue(observed) instead of "
"assertEqual(True, observed)")
yield (0, msg)
if re.search(r"assertEqual\([^,]*,\s*True(,[^,]*)?\)", logical_line):
msg = ("N328: Use assertTrue(observed) instead of "
"assertEqual(True, observed)")
yield (0, msg)
def no_mutable_default_args(logical_line):
msg = "N329: Method's default argument shouldn't be mutable!"
if mutable_default_args.match(logical_line):
yield (0, msg)
def check_assertfalse(logical_line, filename):
if 'neutron/tests/' in filename:
if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?\)", logical_line):
msg = ("N328: Use assertFalse(observed) instead of "
"assertEqual(False, observed)")
yield (0, msg)
if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?\)", logical_line):
msg = ("N328: Use assertFalse(observed) instead of "
"assertEqual(False, observed)")
yield (0, msg)
def check_assertempty(logical_line, filename):
if 'neutron/tests/' in filename:
msg = ("N330: Use assertEqual(*empty*, observed) instead of "
"assertEqual(observed, *empty*). *empty* contains "
"{}, [], (), set(), '', \"\"")
empties = r"(\[\s*\]|\{\s*\}|\(\s*\)|set\(\s*\)|'\s*'|\"\s*\")"
reg = r"assertEqual\(([^,]*,\s*)+?%s\)\s*$" % empties
if re.search(reg, logical_line):
yield (0, msg)
def check_assertisinstance(logical_line, filename):
if 'neutron/tests/' in filename:
if re.search(r"assertTrue\(\s*isinstance\(\s*[^,]*,\s*[^,]*\)\)",
logical_line):
msg = ("N331: Use assertIsInstance(observed, type) instead "
"of assertTrue(isinstance(observed, type))")
yield (0, msg)
def check_assertequal_for_httpcode(logical_line, filename):
msg = ("N332: Use assertEqual(expected_http_code, observed_http_code) "
"instead of assertEqual(observed_http_code, expected_http_code)")
if 'neutron/tests/' in filename:
if re.search(r"assertEqual\(\s*[^,]*,[^,]*HTTP[^\.]*\.code\s*\)",
logical_line):
yield (0, msg)
def factory(register):
register(validate_log_translations)
register(use_jsonutils)
register(check_assert_called_once_with)
register(no_translate_debug_logs)
register(check_oslo_namespace_imports)
register(check_no_contextlib_nested)
register(check_python3_xrange)
register(check_no_basestring)
register(check_python3_no_iteritems)
register(check_asserttrue)
register(no_mutable_default_args)
register(check_assertfalse)
register(check_assertempty)
register(check_assertisinstance)
register(check_assertequal_for_httpcode)
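
# --- Hedged usage sketch, not part of the original module ---
# Each check is a generator that yields (offset, message) tuples for
# offending logical lines, so individual rules can be exercised directly
# without running flake8:
def _demo_checks():
    assert list(check_python3_xrange("for i in xrange(10):"))
    assert not list(check_python3_xrange("for i in range(10):"))
    assert list(no_mutable_default_args("def f(arg={}):"))
    assert list(check_oslo_namespace_imports("import oslo.config"))
    assert not list(check_oslo_namespace_imports("import oslo_config"))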
|
{
"content_hash": "19b19ae41401393f749cfbc702b39ff2",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 79,
"avg_line_length": 36.851851851851855,
"alnum_prop": 0.5989949748743718,
"repo_name": "dims/neutron",
"id": "af72744f5ab27122628cc7453a9c973daa16d4c1",
"size": "9546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/hacking/checks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "8048836"
},
{
"name": "Shell",
"bytes": "14802"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
__author__ = 'Abraham Othman'
__copyright__ = 'Copyright 2018, Abraham Othman'
__version__ = '1.6.0'
__maintainer__ = 'Abraham Othman'
__email__ = 'abrahamo@wharton.upenn.edu'
with open('README.md') as f:
    long_description = f.read()
install_requires = [
'numpy',
'cvxpy>=1.0.0',
'cvxopt'
]
setup(
    name='hiscore',
    version=__version__,
    author=__maintainer__,
    author_email=__email__,
    packages=find_packages(),
    description='A simple and powerful engine for creating scores',
    long_description=long_description,
    install_requires=install_requires,
    url='https://github.com/aothman/hiscore',  # use the URL to the github repo
    keywords=[],
    classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'
]
)
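
# Hedged usage note, not part of the original file: with this setup.py the
# package is typically installed from the repository root with
# `pip install .`, which also pulls in numpy, cvxpy>=1.0.0 and cvxopt as
# declared in install_requires above.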
|
{
"content_hash": "a54134606e43cf4222012533de256bff",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 26.583333333333332,
"alnum_prop": 0.658307210031348,
"repo_name": "aothman/hiscore",
"id": "46fde4235475e624217b5d347caa9aab59afe84b",
"size": "957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14850"
}
],
"symlink_target": ""
}
|
import argparse
import ipaddress
import json
import logging
import os
import random
import socket
import subprocess
import sys
from contextlib import contextmanager
from typing import Dict, Generator, List, Optional, Union
from kazoo.client import KazooClient
from kazoo.exceptions import ConnectionLoss, LockTimeout, SessionExpiredError
from kazoo.retry import KazooRetry
from kazoo.security import make_digest_acl
log = logging.getLogger(__name__)
JsonTypeMembers = List[Dict[str, Union[str, int, List[str]]]]
# The path of the ZNode used for locking.
ZK_LOCK_PATH = "/etcd/locking"
# The path of the ZNode containing the list of cluster members.
ZK_NODES_PATH = "/etcd/nodes"
# The id to use when contending for the ZK lock.
LOCK_CONTENDER_ID = "{}:{}".format(socket.gethostname(), os.getpid())
# The time in seconds to wait when attempting to acquire a lock. Lock
# acquisition between 5 ZooKeeper nodes is an operation on the order of
# milliseconds.
#
# Furthermore, the operations performed while the lock is held are performed
# once and never again. This means a process will only contend for the lock
# once. As such, if lock acquisition fails due to some other process holding it,
# the current process will crash and be restarted with one less contender for
# the same lock. This means that the locking behaviour does converge and no
# timeout-sensitive livelock can occur.
#
# We set the lock timeout to a couple of seconds instead of milliseconds to
# account for variation in network latency between nodes in the cluster. The
# current value has so far shown to be sufficient.
ZK_LOCK_TIMEOUT = 5
# Location of the IP address detection script
DETECT_IP_SCRIPT = '/opt/mesosphere/bin/detect_ip'
# In theory we could use the dcos_internal_utils.utils library, but in practice
# the amount of deps it pulls in is huge compared to just a simple `execute
# script and verify the output` function that we want
def detect_ip() -> str:
machine_ip = subprocess.check_output(
[DETECT_IP_SCRIPT],
stderr=subprocess.STDOUT).decode('ascii').strip() # type: str
# Validate IP address
ipaddress.ip_address(machine_ip)
log.info("private IP is `%s`", machine_ip)
return machine_ip
def zk_connect(zk_addr: str,
zk_user: Optional[str] = None,
zk_secret: Optional[str] = None) -> KazooClient:
"""Connect to ZooKeeper.
On connection failure, the function attempts to reconnect indefinitely with
exponential backoff up to 3 seconds. If a command fails, that command is
retried every 300ms for 3 attempts before failing.
These values are chosen to suit a human-interactive time.
Args:
zk_addr: The address to connect to
zk_user: The username to use when connecting to ZooKeeper or `None`
if no authentication is necessary.
zk_secret: The secret to use when connecting to ZooKeeper or `None`
if no authentication is necessary.
Returns:
A ZooKeeper client connection in the form of a `kazoo.client.KazooClient`.
"""
# Try to reconnect indefinitely, with time between updates going
# exponentially to ~3s. Then every retry occurs every ~3 seconds.
conn_retry_policy = KazooRetry(
max_tries=-1,
delay=0.3,
backoff=1.3,
max_delay=3,
ignore_expire=True,
)
# Retry commands every 0.3 seconds, for a total of <1s (usually 0.9)
cmd_retry_policy = KazooRetry(
max_tries=3,
delay=0.3,
backoff=1,
max_delay=1,
ignore_expire=False,
)
default_acl = None
auth_data = None
if zk_user and zk_secret:
default_acl = [make_digest_acl(zk_user, zk_secret, all=True)]
scheme = 'digest'
credential = "{}:{}".format(zk_user, zk_secret)
auth_data = [(scheme, credential)]
zk = KazooClient(
hosts=zk_addr,
timeout=30,
connection_retry=conn_retry_policy,
command_retry=cmd_retry_policy,
default_acl=default_acl,
auth_data=auth_data,
)
zk.start()
return zk
@contextmanager
def zk_lock(zk: KazooClient, lock_path: str, contender_id: str,
timeout: int) -> Generator:
"""
This contextmanager takes a ZooKeeper lock, yields, then releases the lock.
This lock behaves like an interprocess mutex lock.
ZooKeeper allows one to read values without holding a lock, but there is no
guarantee that you will read the latest value. To read the latest value,
you must call `sync()` on a ZNode before calling `get()`.
Args:
zk:
The client to use to communicate with ZooKeeper.
lock_path:
The ZNode path to use as prefix for the locking recipe.
contender_id:
The contender id to identify the current client
in the locking recipe.
timeout:
Time in seconds to wait for the lock to be acquired.
If this time elapses before the lock is acquired, a
`kazoo.exceptions.LockTimeout` exception is raised.
Raises:
kazoo.exceptions.LockTimeout:
If the `timeout` is exceeded without the lock being acquired.
"""
lock = zk.Lock(lock_path, contender_id)
try:
log.info("Acquiring ZooKeeper lock.")
lock.acquire(blocking=True, timeout=timeout, ephemeral=True)
except (ConnectionLoss, SessionExpiredError) as e:
msg_fmt = "Failed to acquire lock: {}"
msg = msg_fmt.format(e.__class__.__name__)
log.exception(msg)
raise e
except LockTimeout as e:
msg_fmt = "Failed to acquire lock in `{}` seconds"
msg = msg_fmt.format(timeout)
log.exception(msg)
raise e
else:
log.info("ZooKeeper lock acquired.")
try:
yield
finally:
log.info("Releasing ZooKeeper lock")
lock.release()
log.info("ZooKeeper lock released.")
def get_registered_nodes(zk: KazooClient, zk_path: str) -> List[str]:
"""
Return the IPs of nodes that have registered in ZooKeeper.
The ZNode `zk_path` is expected to exist, having been
created during cluster bootstrap.
Args:
zk:
The client to use to communicate with ZooKeeper.
zk_path:
The path of the ZNode to use for node registration.
Returns:
A list of internal IP addresses of nodes that have
previously joined the etcd cluster.
"""
# We call `sync()` before reading the value in order to read the latest
# data written to ZooKeeper.
# See https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html#ch_zkGuarantees
log.info("Calling sync() on ZNode `%s`", zk_path)
zk.sync(zk_path)
log.info("Loading data from ZNode `%s`", zk_path)
data, _ = zk.get(zk_path)
if data:
nodes = json.loads(data.decode('ascii'))['nodes'] # type: List[str]
log.info("Found registered nodes: %s", nodes)
return nodes
log.info("Found no registered nodes.")
return []
def register_cluster_membership(zk: KazooClient, zk_path: str,
ip: str) -> List[str]:
"""
Add `ip` to the list of cluster members registered in ZooKeeper.
The ZK lock must be held around the call to this function.
Args:
zk:
The client to use to communicate with ZooKeeper.
zk_path:
The path of the ZNode to use for node registration.
ip:
The ip to add to the list of cluster member IPs in ZooKeeper.
"""
log.info("Registering cluster membership for `%s`", ip)
# Get the latest list of cluster members.
nodes = get_registered_nodes(zk=zk, zk_path=zk_path)
if ip in nodes:
# We're already registered with ZK.
log.info(
"Cluster member `%s` already registered in ZooKeeper. Skipping.",
ip)
return nodes
log.info("Adding `%s` to list of nodes `%s`", ip, nodes)
nodes.append(ip)
zk.set(zk_path, json.dumps({"nodes": nodes}).encode("ascii"))
zk.sync(zk_path)
log.info("Successfully registered cluster membership for `%s`", ip)
return nodes
def remove_cluster_membership(zk: KazooClient, zk_path: str,
ip: str) -> List[str]:
"""
Remove `ip` from the list of cluster members registered in ZooKeeper.
The ZK lock must be held around the call to this function.
Args:
zk:
The client to use to communicate with ZooKeeper.
zk_path:
The path of the ZNode to use for node registration.
ip:
            The ip to remove from the list of cluster member IPs in ZooKeeper.
"""
log.info("Removing cluster membership for `%s`", ip)
# Get the latest list of cluster members.
nodes = get_registered_nodes(zk=zk, zk_path=zk_path)
if ip not in nodes:
        # The member has already been removed from ZK.
log.info(
"Cluster member `%s` already removed from Zookeeper. Skipping.",
ip)
return nodes
log.info("Removing `%s` to list of nodes `%s`", ip, nodes)
nodes.remove(ip)
zk.set(zk_path, json.dumps({"nodes": nodes}).encode("ascii"))
zk.sync(zk_path)
log.info("Successfully removed %s from the cluster", ip)
return nodes
def dump_nodes_to_file(nodes: List[str], file_path: str) -> None:
log.info("Writing nodes %s to file %s", ','.join(nodes), file_path)
with open(file_path, 'w') as f:
nodes_str = ','.join(
["etcd-{ip}=https://{ip}:2380".format(ip=ip) for ip in nodes])
f.write(nodes_str)
def dump_state_to_file(state: str, file_path: str) -> None:
log.info("Writing initial cluster state: `%s`to file `%s`", state,
file_path)
with open(file_path, 'w') as f:
f.write(state)
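
def _demo_dump_nodes() -> None:
    # Hedged example, not part of the original file: for two nodes the file
    # written by dump_nodes_to_file() holds the comma-separated value etcd
    # expects for --initial-cluster.
    import tempfile
    path = tempfile.NamedTemporaryFile(delete=False).name
    dump_nodes_to_file(["10.0.0.1", "10.0.0.2"], path)
    with open(path) as f:
        assert f.read() == ("etcd-10.0.0.1=https://10.0.0.1:2380,"
                            "etcd-10.0.0.2=https://10.0.0.2:2380")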
def parse_cmdline() -> argparse.Namespace:
parser = argparse.ArgumentParser(description='DC/OS etcd node discovery')
parser.add_argument(
'--secure',
action='store_true',
        help='enable secure connections for etcd peers and clients.')
parser.add_argument('--zk-addr',
action='store',
default='127.0.0.1:2181',
help='address of the ZK instance to connect to')
parser.add_argument('--etcd-client-tls-cert',
action='store',
default='',
                        help='certificate used for connecting to etcd via etcdctl')
parser.add_argument(
'--etcd-client-tls-key',
action='store',
default='',
        help='key used for connecting to etcd via etcdctl')
parser.add_argument('--etcdctl-path',
action='store',
default='/opt/mesosphere/active/etcd/bin/etcdctl',
help='path to etcdctl binary')
parser.add_argument('--ca-cert',
action='store',
default='',
help='path to the CA certificate')
subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
parser_joincluster = subparsers.add_parser('join-cluster')
parser_joincluster.set_defaults(func=join_cluster)
parser_joincluster.add_argument(
'--cluster-nodes-file',
action='store',
default='/var/lib/dcos/etcd/initial-nodes',
help='file where initial cluster nodes should be saved')
parser_joincluster.add_argument(
'--cluster-state-file',
action='store',
default='/var/lib/dcos/etcd/initial-state',
help='file where initial cluster state should be saved')
parser_joincluster.add_argument('--etcd-data-dir',
action='store',
default='/var/lib/dcos/etcd/default.etcd/',
help="etcd's data directory location")
parser_leavecluster = subparsers.add_parser('leave-cluster')
parser_leavecluster.set_defaults(func=leave_cluster)
# We want to make it overridable
parser_leavecluster.add_argument(
'--node-ip',
action='store',
default=detect_ip(),
help="the IP address of the node to remove from the ensemble")
parser_ensure_perms = subparsers.add_parser('ensure-permissions')
parser_ensure_perms.set_defaults(func=ensure_permissions)
return parser.parse_args()
def ensure_permissions(args: argparse.Namespace) -> None:
log.info("ensure-permissions subcommand starts, args: `%s`", args)
etcdctl = EtcdctlHelper(
args.secure,
# NOTE(prozlach): we intentionally do not read back the nodes list from
# disk and connect to the local etcd instance. With more than one node
# in a quorum we would have sometimes intra-node and sometimes inter-node
# communication which may result in disruptions that happen only
# sometimes and are hard to reproduce.
[
"127.0.0.1",
],
args.etcdctl_path,
args.ca_cert,
args.etcd_client_tls_cert,
args.etcd_client_tls_key,
)
# See below for more context:
# https://github.com/etcd-io/etcd/blob/3898452b5432b4d69028ee79d796ddeab0acc42c/Documentation/op-guide/authentication.md
etcdctl.ensure_user("root")
etcdctl.grant_role("root", "root")
etcdctl.ensure_user("calico")
# See below for more details:
# https://docs.projectcalico.org/v3.11/reference/etcd-rbac/calico-etcdv3-paths#calicoctl-read-only-access
etcdctl.ensure_role("calico_prefix", "/calico/")
etcdctl.grant_role("calico", "calico_prefix")
etcdctl.ensure_user("adminrouter")
etcdctl.ensure_role("adminrouter_prefix", "/")
etcdctl.grant_role("adminrouter", "adminrouter_prefix")
etcdctl.ensure_user("telegraf")
etcdctl.ensure_role("telegraf_prefix", "/")
etcdctl.grant_role("telegraf", "telegraf_prefix")
etcdctl.enable_auth()
def join_cluster(args: argparse.Namespace) -> None:
log.info("join-cluster subcommand starts, args: `%s`", args)
    # Check if etcd is up and running already. If so, we can skip querying ZK,
# as etcd is able to get the list of peers directly from the shared
# storage.
if os.path.isdir(args.etcd_data_dir) and os.path.exists(
args.cluster_nodes_file) and os.path.exists(args.cluster_state_file):
log.info(
"directory `%s`, initial nodes file `%s` and state file `%s` already exists, etcd seems initialized",
args.etcd_data_dir, args.cluster_nodes_file, args.cluster_state_file)
return
# Determine our internal IP.
private_ip = detect_ip()
# Connect to ZooKeeper.
log.info("connecting to ZooKeeper")
zk_user = os.environ.get('DATASTORE_ZK_USER')
zk_secret = os.environ.get('DATASTORE_ZK_SECRET')
zk = zk_connect(zk_addr=args.zk_addr, zk_user=zk_user, zk_secret=zk_secret)
nodes = [] # type: List[str]
with zk_lock(
zk=zk,
lock_path=ZK_LOCK_PATH,
contender_id=LOCK_CONTENDER_ID,
timeout=ZK_LOCK_TIMEOUT,
):
nodes = get_registered_nodes(zk=zk, zk_path=ZK_NODES_PATH)
cluster_state = ""
# The order of nodes is important - the first node to register
# becomes the `designated node` that will initialize cluster, all
# the other nodes will join it.
# FIXME(prozlach): It's not 100% bulletproof, because if during the
# init something happens to the first node, the whole cluster will
# not be able to bootstrap itself. OTOH this simplifies this
# script, as there is no need for monitoring the process from
# within this script/making it a wrapper around etcd.
if len(nodes) == 0:
log.info("Cluster has not been initialized yet: %s", nodes)
cluster_state = "new"
else:
# There is already at least one etcd node which we should join
log.info("Cluster has members that already registered: %s", nodes)
cluster_state = "existing"
etcdctl = EtcdctlHelper(
args.secure,
nodes,
args.etcdctl_path,
args.ca_cert,
args.etcd_client_tls_cert,
args.etcd_client_tls_key,
)
if private_ip not in nodes:
# The problem here is that once we add given etcd to the
# quorum (e.g. the first node that started), and the said node
# gets restarted, then etcd will not be able to start at all,
# as we will be missing quorum. The solution is to first
# register the node in the quorum and only then update ZK while
# still holding the ZK lock (and thus holding back the other
# nodes). The result is that the list of nodes in ZK reflect
# the nodes that have been **actually** added to the quorum -
            # i.e. the `etcdctl` command was successful. This way, once a
            # node/nodes get restarted, the quorum-member nodes will start
            # and will not try to execute etcdctl (whose failure would
            # block starting and creating the quorum in the first place),
            # and the non-member nodes will wait for the quorum to form (as
            # their etcdctl commands will fail and thus prevent them from
            # registering in ZK node list). It is also important to note
            # that there is a small drawback to this approach - if etcdctl
            # succeeds but the subsequent ZK call fails then the given node
            # (and maybe even the cluster) will not be able to start. This
            # would require ZK failing within a narrow time window though
            # (i.e. between the `get_registered_nodes` call above and
            # `register_cluster_membership` below). I believe the risk is
# acceptable considering the simplicity of this script.
log.info("current node is not a member of the quorum, joining "
"the existing quorum")
etcdctl.ensure_member(private_ip)
        # NOTE(mainred): consider the case where the private IP is present
        # in ZK but the dcos-etcd service has never run on this node, e.g.
        # after a master node replacement. We should remove the member
        # before joining the cluster again, since keeping a member without
        # its original data triggers a raft exception like the following:
        #
        # tocommit(8) is out of range [lastIndex(0)]. Was the raft log corrupted, truncated, or lost? # NOQA
elif not os.path.exists(args.etcd_data_dir):
log.info("rejoining the quorum as a result of data loss")
etcdctl.remove_member(private_ip)
etcdctl.ensure_member(private_ip)
nodes = register_cluster_membership(zk=zk,
zk_path=ZK_NODES_PATH,
ip=private_ip)
# NOTE(icharala): Ensure that both node & state files are always present
# otherwise the `etcd.sh` script will fail to start.
dump_nodes_to_file(nodes, args.cluster_nodes_file)
dump_state_to_file(cluster_state, args.cluster_state_file)
log.info("registration complete")
class EtcdctlHelper:
def __init__(
self,
secure: bool,
nodes: List[str],
etcdctl_path: str,
ca_cert: str,
etcd_client_tls_cert: str,
etcd_client_tls_key: str,
):
self.scheme = "https" if secure else "http"
self._ca_cert = ca_cert
self._etcdctl_path = etcdctl_path
self._etcd_client_tls_cert = etcd_client_tls_cert
self._etcd_client_tls_key = etcd_client_tls_key
self._designated_node = None
self._nodes = nodes
def get_designated_node(self) -> str:
"""
Lazily finds out the designated node to use
"""
if self._designated_node is None:
# Choose one node from the list
healthy_nodes = list(filter(self._is_node_healthy, self._nodes))
            # In order not to always hit the same node, we randomize the choice:
if len(healthy_nodes) == 0:
raise Exception("there are no healthy nodes")
self._designated_node = random.choice(healthy_nodes)
return self._designated_node
def get_members(self) -> JsonTypeMembers:
""" gets etcd cluster members
an example of results of `member list -w json`
[
{
'ID': 2080818695399562020,
'name': 'etcd-10.0.4.116',
'peerURLs': [
'https://10.0.4.116:2380'
],
'clientURLs': [
'https://10.0.4.116:2379',
'https://localhost:2379'
]
}
]
"""
result = self._execute_etcdctl(
self.get_designated_node(),
["member", "list", "-w", "json"],
)
result.check_returncode()
output = json.loads(result.stdout)
members = output["members"] # type: JsonTypeMembers
return members
def get_node_id(self, node_ip: str) -> str:
""" Returns etcd member ID in Hex
"""
members_info = self.get_members()
for member in members_info:
# Uninitialized members do not have "name" entry
if "name" in member and member["name"] == "etcd-{}".format(
node_ip):
assert isinstance(member["ID"], int)
                # a valid member ID must be in hex, as etcdctl parses the
                # member ID string as hex when it is used
return hex(member["ID"]).replace("0x", "")
return ""
def ensure_member(self, node_ip: str) -> None:
members_info = self.get_members()
for member in members_info:
if "name" in member and member["name"] == "etcd-{}".format(
node_ip):
log.info("node %s is already member of the ensemble", node_ip)
return
self.add_member(node_ip)
def add_member(self, node_ip: str) -> None:
result = self._execute_etcdctl(
self.get_designated_node(),
[
"member",
"add",
"etcd-{}".format(node_ip),
"--peer-urls=https://{}:2380".format(node_ip),
],
)
result.check_returncode()
log.info("node %s was added to the ensemble", node_ip)
def ensure_role(self, role_name: str, prefix: str) -> None:
roles = self.list_roles()
if role_name not in roles:
self.add_role(role_name)
self.add_permission(role_name, prefix)
def list_roles(self) -> List[str]:
result = self._execute_etcdctl(
self.get_designated_node(),
[
"role",
"list",
],
)
result.check_returncode()
roles = result.stdout.decode('utf8').splitlines() # type: List[str]
log.info("roles currently defined: %s", roles)
return roles
def grant_role(self, user_name: str, role_name: str) -> None:
result = self._execute_etcdctl(
self.get_designated_node(),
[
"user",
"grant-role",
user_name,
role_name,
],
)
result.check_returncode()
log.info("role %s was granted to %s", role_name, user_name)
def enable_auth(self) -> None:
result = self._execute_etcdctl(
self.get_designated_node(),
[
"auth",
"enable",
],
)
result.check_returncode()
log.info("authentication was enabled")
def add_role(self, role_name: str) -> None:
result = self._execute_etcdctl(
self.get_designated_node(),
[
"role",
"add",
role_name,
],
)
result.check_returncode()
log.info("role %s was added", role_name)
def add_permission(self, role_name: str, prefix: str) -> None:
result = self._execute_etcdctl(
self.get_designated_node(),
[
"role",
"grant",
role_name,
"--prefix=true",
"readwrite",
prefix,
],
)
result.check_returncode()
log.info("prefix %s has been granted to the role %s", prefix,
role_name)
def ensure_user(self, user_name: str) -> None:
users = self.list_users()
if user_name not in users:
self.add_user(user_name)
def add_user(self, user_name: str) -> None:
result = self._execute_etcdctl(
self.get_designated_node(),
[
"user",
"add",
"--no-password",
user_name,
],
)
result.check_returncode()
log.info("user %s was added", user_name)
def list_users(self) -> List[str]:
result = self._execute_etcdctl(
self.get_designated_node(),
[
"user",
"list",
],
)
result.check_returncode()
users = result.stdout.decode('utf8').splitlines() # type: List[str]
log.info("users currently defined: %s", users)
return users
def remove_member(self, node_ip: str) -> None:
node_id = self.get_node_id(node_ip)
if node_id == "":
log.warning("node %s is not a member yet so it cannot be removed",
node_ip)
return
result = self._execute_etcdctl(
self.get_designated_node(),
["member", "remove", node_id],
)
result.check_returncode()
log.info("node %s was removed from the ensemble", node_ip)
def _is_node_healthy(self, node: str) -> bool:
result = self._execute_etcdctl(node, ["endpoint", "health"])
healthy = result.returncode == 0
return healthy
def _execute_etcdctl(self, endpoint_ip: str,
args: List[str]) -> subprocess.CompletedProcess:
cmd = [
self._etcdctl_path, "--endpoints",
"{}://{}:2379".format(self.scheme, endpoint_ip)
]
if self.scheme == "https":
cmd.extend([
"--cacert", self._ca_cert, "--cert",
self._etcd_client_tls_cert, "--key", self._etcd_client_tls_key
])
cmd.extend(args)
result = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
log.debug(
"executed `%s`, exit status: `%d`, stdout: `%s`, stderr: `%s`",
" ".join(cmd),
result.returncode,
result.stdout,
result.stderr,
)
return result
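
# Hedged illustration, not part of the original file: for a secure helper
# constructed with hypothetical paths, e.g.
#
#     EtcdctlHelper(True, ["10.0.0.1"], "/usr/bin/etcdctl",
#                   "/ca.pem", "/cert.pem", "/key.pem")
#
# a call such as get_members() executes roughly:
#
#     /usr/bin/etcdctl --endpoints https://10.0.0.1:2379 \
#         --cacert /ca.pem --cert /cert.pem --key /key.pem \
#         member list -w json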
def leave_cluster(args: argparse.Namespace) -> None:
log.info("leave-cluster subcommand starts, args: `%s`", args)
# Connect to ZooKeeper.
log.info("connecting to ZooKeeper")
zk_user = os.environ.get('DATASTORE_ZK_USER')
zk_secret = os.environ.get('DATASTORE_ZK_SECRET')
zk = zk_connect(zk_addr=args.zk_addr, zk_user=zk_user, zk_secret=zk_secret)
with zk_lock(
zk=zk,
lock_path=ZK_LOCK_PATH,
contender_id=LOCK_CONTENDER_ID,
timeout=ZK_LOCK_TIMEOUT,
):
remove_cluster_membership(zk=zk,
zk_path=ZK_NODES_PATH,
ip=args.node_ip)
nodes = get_registered_nodes(zk=zk, zk_path=ZK_NODES_PATH)
if nodes:
# There is already at least one etcd node which we should join
log.info("Cluster has members that already registered: %s", nodes)
etcdctl = EtcdctlHelper(
args.secure,
nodes,
args.etcdctl_path,
args.ca_cert,
args.etcd_client_tls_cert,
args.etcd_client_tls_key,
)
etcdctl.remove_member(args.node_ip)
log.info("removal complete")
def main() -> None:
logging.basicConfig(format='[%(levelname)s] %(message)s', level='INFO')
args = parse_cmdline()
try:
args.func(args)
except Exception as e: # pylint: disable=broad-except
log.exception("error occured: %s", e)
sys.exit(1)
if __name__ == '__main__':
main()
|
{
"content_hash": "2f8344c0735798d0fd9184d8187ca103",
"timestamp": "",
"source": "github",
"line_count": 799,
"max_line_length": 124,
"avg_line_length": 36.26157697121402,
"alnum_prop": 0.5852000138059573,
"repo_name": "dcos/dcos",
"id": "6bdb39c898a39247e571ab9883a05d3bf1190664",
"size": "28996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/etcd/extra/etcd_discovery/etcd_discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2529"
},
{
"name": "Dockerfile",
"bytes": "9395"
},
{
"name": "Go",
"bytes": "5104"
},
{
"name": "Groovy",
"bytes": "711"
},
{
"name": "HCL",
"bytes": "14047"
},
{
"name": "HTML",
"bytes": "91122"
},
{
"name": "Lua",
"bytes": "200521"
},
{
"name": "Makefile",
"bytes": "8767"
},
{
"name": "PowerShell",
"bytes": "230"
},
{
"name": "Python",
"bytes": "1625906"
},
{
"name": "Shell",
"bytes": "102887"
}
],
"symlink_target": ""
}
|
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.config import ConfigValidationError
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.utils import timestampStringFromTime, timestampStringFromTimeSeconds, trimStringToByteLength
from zope.interface import implementer
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
irc.RPL_TOPICWHOTIME = "333"
@implementer(IPlugin, IModuleData)
class TopicCommand(ModuleData):
name = "TopicCommand"
core = True
def actions(self) -> List[Tuple[str, int, Callable]]:
return [ ("topic", 1, self.onTopic),
("join", 2, self.sendChannelTopic),
("buildisupport", 1, self.buildISupport) ]
def userCommands(self) -> List[Tuple[str, int, Command]]:
return [ ("TOPIC", 1, UserTopic(self.ircd, self)) ]
def serverCommands(self) -> List[Tuple[str, int, Command]]:
return [ ("TOPIC", 1, ServerTopic(self.ircd)) ]
def verifyConfig(self, config: Dict[str, Any]) -> None:
if "topic_length" in config:
if not isinstance(config["topic_length"], int) or config["topic_length"] < 0:
raise ConfigValidationError("topic_length", "invalid number")
elif config["topic_length"] > 326:
config["topic_length"] = 326
self.ircd.logConfigValidationWarning("topic_length", "value is too large", 326)
def onTopic(self, channel: "IRCChannel", setter: str, setterName: str, oldTopic: str):
userSource = setter in self.ircd.users
if userSource:
sourceUser = self.ircd.users[setter]
conditionalTags = {}
self.ircd.runActionStandard("sendingusertags", sourceUser, conditionalTags)
for user in channel.users.keys():
if user.uuid[:3] == self.ircd.serverID:
tags = {}
if userSource:
tags = user.filterConditionalTags(conditionalTags)
user.sendMessage("TOPIC", channel.topic, to=channel.name, prefix=channel.topicSetter, tags=tags)
sourceServer = None
if userSource and setter[:3] == self.ircd.serverID:
if sourceUser not in channel.users:
tags = sourceUser.filterConditionalTags(conditionalTags)
sourceUser.sendMessage("TOPIC", channel.topic, to=channel.name, prefix=channel.topicSetter, tags=tags)
elif setter != self.ircd.serverID:
sourceServer = self.ircd.servers[setter[:3]]
while sourceServer.nextClosest != self.ircd.serverID:
sourceServer = self.ircd.servers[sourceServer.nextClosest]
self.ircd.broadcastToServers(sourceServer, "TOPIC", channel.name, timestampStringFromTime(channel.existedSince), timestampStringFromTime(channel.topicTime), channel.topic, prefix=setter)
def sendChannelTopic(self, channel: "IRCChannel", user: "IRCUser", fromServer: Optional["IRCServer"] = None) -> None:
if not channel.topic:
user.sendMessage(irc.RPL_NOTOPIC, channel.name, "No topic is set")
else:
user.sendMessage(irc.RPL_TOPIC, channel.name, channel.topic)
user.sendMessage(irc.RPL_TOPICWHOTIME, channel.name, channel.topicSetter, timestampStringFromTimeSeconds(channel.topicTime))
def buildISupport(self, data: Dict[str, Union[str, int]]) -> None:
data["TOPICLEN"] = self.ircd.config.get("topic_length", 326)
@implementer(ICommand)
class UserTopic(Command):
def __init__(self, ircd, module):
self.ircd = ircd
self.module = module
def parseParams(self, user: "IRCUser", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if not params:
user.sendSingleError("TopicCmd", irc.ERR_NEEDMOREPARAMS, "TOPIC", "Not enough parameters")
return None
if params[0] not in self.ircd.channels:
user.sendSingleError("TopicCmd", irc.ERR_NOSUCHCHANNEL, params[0], "No such channel")
return None
channel = self.ircd.channels[params[0]]
if len(params) == 1:
return {
"channel": channel
}
topic = trimStringToByteLength(params[1], self.ircd.config.get("topic_length", 326))
return {
"channel": channel,
"topic": topic
}
def affectedChannels(self, user: "IRCUser", data: Dict[Any, Any]) -> List["IRCChannel"]:
return [ data["channel"] ]
def execute(self, user: "IRCUser", data: Dict[Any, Any]) -> bool:
if "topic" in data:
data["channel"].setTopic(data["topic"], user.uuid)
else:
self.module.sendChannelTopic(data["channel"], user)
return True
@implementer(ICommand)
class ServerTopic(Command):
burstQueuePriority = 79
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, server: "IRCServer", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if len(params) != 4:
return None
if params[0] not in self.ircd.channels:
if params[0] in self.ircd.recentlyDestroyedChannels:
return {
"lostchannel": True
}
return None
try:
return {
"source": prefix,
"channel": self.ircd.channels[params[0]],
"chantime": datetime.utcfromtimestamp(float(params[1])),
"topictime": datetime.utcfromtimestamp(float(params[2])),
"topic": params[3]
}
except (TypeError, ValueError):
return None
def execute(self, server: "IRCServer", data: Dict[Any, Any]) -> bool:
if "lostchannel" in data:
return True
channel = data["channel"]
remoteChannelTime = data["chantime"]
if remoteChannelTime > channel.existedSince: # Don't set the topic when our channel overrides
return True # Assume handled by our ignoring of it
if remoteChannelTime < channel.existedSince:
channel.setCreationTime(remoteChannelTime)
if channel.topic and data["topictime"] <= channel.topicTime:
return True # Don't set the topic when our topic overrides
if channel.setTopic(data["topic"], data["source"]):
return True
return False
topicCommand = TopicCommand()
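
# Hedged note, not part of the original file: UserTopic.parseParams trims the
# supplied topic to the configured byte length before it is stored, so with
# the default topic_length of 326 a call like
#
#     trimStringToByteLength("x" * 400, 326)
#
# yields a string of at most 326 bytes.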
|
{
"content_hash": "be9d7db610c59a55cbf8a4df5e73fdf2",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 188,
"avg_line_length": 39.41379310344828,
"alnum_prop": 0.7184601924759405,
"repo_name": "Heufneutje/txircd",
"id": "75407afc74a3db181c1df1f6e0d7d6697236dd74",
"size": "5715",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev/next",
"path": "txircd/modules/rfc/cmd_topic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "547"
},
{
"name": "Python",
"bytes": "792279"
}
],
"symlink_target": ""
}
|
"""Tests for workflow specific imports."""
from datetime import date
from os.path import abspath
from os.path import dirname
from os.path import join
from integration.ggrc import TestCase
from ggrc import db
from ggrc.converters import errors
from ggrc_workflows.models.task_group import TaskGroup
from ggrc_workflows.models.task_group_object import TaskGroupObject
from ggrc_workflows.models.task_group_task import TaskGroupTask
from ggrc_workflows.models.workflow import Workflow
THIS_ABS_PATH = abspath(dirname(__file__))
class TestWorkflowObjectsImport(TestCase):
"""Test imports for basic workflow objects."""
CSV_DIR = join(THIS_ABS_PATH, "test_csvs/")
def setUp(self):
super(TestWorkflowObjectsImport, self).setUp()
self.client.get("/login")
def test_full_good_import(self):
"""Test full good import without any warnings."""
filename = "workflow_small_sheet.csv"
response = self.import_file(filename)
self._check_csv_response(response, {})
self.assertEqual(1, Workflow.query.count())
self.assertEqual(1, TaskGroup.query.count())
self.assertEqual(4, TaskGroupTask.query.count())
self.assertEqual(2, TaskGroupObject.query.count())
task2 = TaskGroupTask.query.filter_by(slug="t-2").first()
task3 = TaskGroupTask.query.filter_by(slug="t-3").first()
task4 = TaskGroupTask.query.filter_by(slug="t-4").first()
self.assertEqual(task2.start_date, date(2015, 7, 10))
self.assertEqual(task2.end_date, date(2016, 12, 30))
self.assertIn("ch2", task3.response_options)
self.assertIn("option 1", task4.response_options)
def test_bad_imports(self):
"""Test workflow import with errors and warnings"""
filename = "workflow_with_warnings_and_errors.csv"
response = self.import_file(filename)
expected_errors = {
"Workflow": {
"row_errors": {
errors.MISSING_VALUE_ERROR.format(
line=8, column_name="Manager")
},
}
}
self._check_csv_response(response, expected_errors)
def test_import_task_date_format(self):
"""Test import of tasks for workflows
This is a test for various imports of task dates for all types of
workflows. This test ignores all warnings returned by the file import,
since those are verified in a different test.
Raises:
AssertionError: if the start and end values on tasks don't match the
values in the imported csv files.
"""
filename = "workflow_big_sheet.csv"
self.import_file(filename)
# Assert that CSV import got imported correctly
getters = {
"one_time": lambda task: (task.start_date, task.end_date),
"weekly": lambda task: (task.relative_start_day,
task.relative_end_day),
"monthly": lambda task: (task.relative_start_day,
task.relative_end_day),
"quarterly": lambda task: ((task.relative_start_month,
task.relative_start_day),
(task.relative_end_month,
task.relative_end_day)),
"annually": lambda task: ((task.relative_start_month,
task.relative_start_day),
(task.relative_end_month,
task.relative_end_day))
}
tasks = [
["task-1", "one_time", (date(2015, 7, 1), date(2015, 7, 15))],
["task-2", "weekly", (2, 5)],
["task-3", "monthly", (1, 22)],
["task-4", "quarterly", ((1, 5), (2, 15))],
["task-10", "quarterly", ((3, 5), (1, 1))],
["task-11", "quarterly", ((3, 5), (1, 1))],
["task-5", "annually", ((5, 7), (7, 15))],
]
for slug, freq, result in tasks:
task = db.session.query(TaskGroupTask).filter(
TaskGroupTask.slug == slug).one()
getter = getters[freq]
self.assertEqual(task.task_group.workflow.frequency, freq)
self.assertEqual(
getter(task), result,
"Failed importing data for task with slug = '{}'".format(slug))
def test_import_task_types(self):
"""Test task import with warnings
    Check that the warnings for a bad task type field work and that the task
    type gets set to the default when an invalid value is found in the csv.
Raises:
AssertionError: When file import does not return correct errors for the
example csv, or if any of the tasks does not have the expected task
type.
"""
filename = "workflow_big_sheet.csv"
response = self.import_file(filename)
expected_errors = {
"Task Group Task": {
"row_warnings": {
errors.WRONG_REQUIRED_VALUE.format(
line=38, value="aaaa", column_name="Task Type"
),
errors.MISSING_VALUE_WARNING.format(
line=39, default_value="Rich Text", column_name="Task Type"
),
errors.MISSING_VALUE_WARNING.format(
line=40, default_value="Rich Text", column_name="Task Type"
),
}
},
}
self._check_csv_response(response, expected_errors)
task_types = {
"text": [
"task-1",
"task-2",
"task-4",
"task-7",
"task-9",
"task-10",
"task-11",
],
"menu": [
"task-5",
"task-8",
],
"checkbox": [
"task-3",
"task-6",
],
}
for task_type, slugs in task_types.items():
self._test_task_types(task_type, slugs)
def test_bad_task_dates(self):
"""Test import updates with invalid task dates.
    This import checks whether it's possible to update task dates with the
    start date being later than the end date.
"""
self.import_file("workflow_small_sheet.csv")
response = self.import_file("workflow_bad_task_dates.csv")
expected_errors = {
"Task Group Task": {
"row_errors": {
errors.INVALID_START_END_DATES.format(
line=4, start_date="Start date", end_date="End date"),
errors.INVALID_START_END_DATES.format(
line=5, start_date="Start date", end_date="End date"),
errors.INVALID_START_END_DATES.format(
line=6, start_date="Start date", end_date="End date"),
errors.INVALID_START_END_DATES.format(
line=7, start_date="Start date", end_date="End date"),
}
},
}
self._check_csv_response(response, expected_errors)
def test_malformed_task_dates(self):
"""Test import updates with malformed task dates.
    Check that warnings for task dates in MM/DD/YYYY format in an annual
    workflow show up and that the YYYY part of the date is ignored.
Raises:
AssertionError: When file import does not return correct warnings for the
example csv, or if any of the tasks does not have the expected
relative dates.
"""
response = self.import_file("workflow_malformed_task_dates.csv")
expected_errors = {
"Task Group Task": {
"row_warnings": {
errors.WRONG_DATE_FORMAT.format(line=15, column_name="Start"),
errors.WRONG_DATE_FORMAT.format(line=15, column_name="End"),
errors.WRONG_DATE_FORMAT.format(line=16, column_name="Start"),
errors.WRONG_DATE_FORMAT.format(line=17, column_name="End"),
},
},
}
self._check_csv_response(response, expected_errors)
task_slugs = ["t-1", "t-2", "t-3", "t-4"]
tasks = db.session.query(TaskGroupTask).filter(
TaskGroupTask.slug.in_(task_slugs)).all()
for task in tasks:
self.assertEqual(task.relative_start_month, 7)
self.assertEqual(task.relative_start_day, 10)
self.assertEqual(task.relative_end_month, 12)
self.assertEqual(task.relative_end_day, 30)
def _test_task_types(self, expected_type, task_slugs):
"""Test that all listed tasks have rich text type.
This is a part of the test_import_task_date_format
Args:
expected_type: Expected task type for all tasks specified by task_slugs.
task_slugs: list of slugs for the tasks that will be tested.
Raises:
      AssertionError: if any of the tasks does not exist or if its type does
        not match the expected type.
"""
tasks = db.session.query(TaskGroupTask).filter(
TaskGroupTask.slug.in_(task_slugs)).all()
for task in tasks:
self.assertEqual(
task.task_type,
expected_type,
"task '{}' has type '{}', expected '{}'".format(
task.slug,
task.task_type,
expected_type,
)
)
self.assertEqual(len(tasks), len(task_slugs))
|
{
"content_hash": "cc222138bacb4fae13652a6cbcf15e73",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 79,
"avg_line_length": 35.537848605577686,
"alnum_prop": 0.591255605381166,
"repo_name": "AleksNeStu/ggrc-core",
"id": "5123e434d8c0a220924ab759547fc4d0ccdcd9d7",
"size": "9034",
"binary": false,
"copies": "6",
"ref": "refs/heads/release/0.10-Raspberry",
"path": "test/integration/ggrc_workflows/converters/test_import_workflow_objects.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "221201"
},
{
"name": "HTML",
"bytes": "1055542"
},
{
"name": "JavaScript",
"bytes": "1872353"
},
{
"name": "Makefile",
"bytes": "7044"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2700938"
},
{
"name": "Shell",
"bytes": "31273"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, unicode_literals, absolute_import
from django import template
__author__ = 'peter'
register = template.Library()
from .jqm import *
from .bootstrap import *
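
# Hedged usage note, not part of the original file: once the app providing
# this module is listed in INSTALLED_APPS, the tags re-exported from .jqm
# and .bootstrap become available in templates via:
#
#     {% load model2html %}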
|
{
"content_hash": "0300bd634a4c815057f8107104f82128",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 72,
"avg_line_length": 20,
"alnum_prop": 0.74,
"repo_name": "PeterHo/mysite",
"id": "ff20b3e859ad3acf2a34f38bd30a1acac0fad0cd",
"size": "215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model2html/templatetags/model2html.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "53762"
},
{
"name": "HTML",
"bytes": "35270"
},
{
"name": "JavaScript",
"bytes": "411445"
},
{
"name": "Python",
"bytes": "138911"
}
],
"symlink_target": ""
}
|
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
Builder.load_string('''
<SeedOptionsDialog@Popup>
id: popup
opt_bip39: False
opt_ext: False
is_bip39: False
is_ext: False
title: _('Seed Options')
size_hint: 0.8, 0.8
pos_hint: {'top':0.9}
BoxLayout:
orientation: 'vertical'
Label:
id: description
text: _('You may extend your seed with custom words')
halign: 'left'
text_size: self.width, None
size: self.texture_size
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Label:
text: _('Extend Seed')
opacity: 1 if root.opt_ext else 0
CheckBox:
id:ext
disabled: not root.opt_ext
opacity: 1 if root.opt_ext else 0
active: root.is_ext
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Label:
text: _('BIP39')
id:bip39_label
opacity: 1 if root.opt_bip39 else 0
CheckBox:
id:bip39
disabled: not root.opt_bip39
opacity: 1 if root.opt_bip39 else 0
active: root.is_bip39
Widget:
size_hint: 1, 0.1
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Button:
text: 'Cancel'
size_hint: 0.5, None
height: '48dp'
on_release: popup.dismiss()
Button:
text: 'OK'
size_hint: 0.5, None
height: '48dp'
on_release:
root.callback(ext.active, bip39.active)
popup.dismiss()
''')
class SeedOptionsDialog(Factory.Popup):
def __init__(self, opt_ext, opt_bip39, is_ext, is_bip39, callback):
Factory.Popup.__init__(self)
self.opt_ext = opt_ext
self.opt_bip39 = opt_bip39
self.is_ext = is_ext
self.is_bip39 = is_bip39
self.callback = callback
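
# Hedged usage sketch, not part of the original file (requires a running
# Kivy App): the dialog is opened with the available options and a callback
# that receives the two checkbox states when OK is pressed.
#
#     def on_options(is_ext, is_bip39):
#         print("extend seed:", is_ext, "use BIP39:", is_bip39)
#
#     SeedOptionsDialog(True, True, False, False, on_options).open()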
|
{
"content_hash": "3a8e03fa86cc91fac379db3d32d88d8f",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 71,
"avg_line_length": 30.22972972972973,
"alnum_prop": 0.49530621367903443,
"repo_name": "spesmilo/electrum",
"id": "f244c6d8a94b57bcff264fb6a43fa9b9fa1a1ef4",
"size": "2237",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "electrum/gui/kivy/uix/dialogs/seed_options.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "13136"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "2929"
},
{
"name": "Makefile",
"bytes": "2185"
},
{
"name": "NSIS",
"bytes": "7681"
},
{
"name": "Python",
"bytes": "5400804"
},
{
"name": "QML",
"bytes": "355804"
},
{
"name": "Ruby",
"bytes": "16748"
},
{
"name": "Shell",
"bytes": "105118"
},
{
"name": "kvlang",
"bytes": "67438"
}
],
"symlink_target": ""
}
|
"""
hmmlearn
========
``hmmlearn`` is a set of algorithms for learning and inference of
Hidden Markov Models.
"""
try:
import setuptools_scm
__version__ = setuptools_scm.get_version( # xref setup.py
root="../..", relative_to=__file__,
version_scheme="post-release", local_scheme="node-and-date")
except (ImportError, LookupError):
try:
from ._version import version as __version__
except ImportError:
pass
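
# Hedged note, not part of the original file: in a source checkout the
# version is computed by setuptools_scm at import time, while an installed
# wheel falls back to the generated ``_version`` module; either way
#
#     import hmmlearn
#     print(hmmlearn.__version__)
#
# reports the package version when one of the two sources is available.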
|
{
"content_hash": "b4424a8de3c8948cb5701afb2b498c19",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 68,
"avg_line_length": 25.38888888888889,
"alnum_prop": 0.6323851203501094,
"repo_name": "hmmlearn/hmmlearn",
"id": "da9221d1055399bf0948bb7162c4759cced5c532",
"size": "457",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "lib/hmmlearn/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "11273"
},
{
"name": "Python",
"bytes": "180130"
}
],
"symlink_target": ""
}
|
"""An object that maps git ref names to bzr branch names. Note that it is not
used to map git ref names to bzr tag names."""
import re
class BranchMapper(object):
_GIT_TRUNK_RE = re.compile('(?:git-)*trunk')
def git_to_bzr(self, ref_name):
"""Map a git reference name to a Bazaar branch name.
"""
parts = ref_name.split('/')
if parts[0] == 'refs':
parts.pop(0)
category = parts.pop(0)
if category == 'heads':
git_name = '/'.join(parts)
bazaar_name = self._git_to_bzr_name(git_name)
else:
if category == 'remotes' and parts[0] == 'origin':
parts.pop(0)
git_name = '/'.join(parts)
if category.endswith('s'):
category = category[:-1]
name_no_ext = self._git_to_bzr_name(git_name)
bazaar_name = "%s.%s" % (name_no_ext, category)
return bazaar_name
def _git_to_bzr_name(self, git_name):
# Make a simple name more bzr-like, by mapping git 'master' to bzr 'trunk'.
# To avoid collision, map git 'trunk' to bzr 'git-trunk'. Likewise
# 'git-trunk' to 'git-git-trunk' and so on, such that the mapping is
# one-to-one in both directions.
if git_name == 'master':
bazaar_name = 'trunk'
elif self._GIT_TRUNK_RE.match(git_name):
bazaar_name = 'git-%s' % (git_name,)
else:
bazaar_name = git_name
return bazaar_name
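
def _demo_branch_mapper():
    # Hedged examples, not part of the original file, showing the mapping
    # rules implemented above.
    mapper = BranchMapper()
    assert mapper.git_to_bzr('refs/heads/master') == 'trunk'
    assert mapper.git_to_bzr('refs/heads/trunk') == 'git-trunk'
    assert mapper.git_to_bzr('refs/heads/git-trunk') == 'git-git-trunk'
    assert mapper.git_to_bzr('refs/remotes/origin/master') == 'trunk.remote'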
|
{
"content_hash": "4f33ec64b0959dd77dab5bdc6ba31204",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 83,
"avg_line_length": 36,
"alnum_prop": 0.542989417989418,
"repo_name": "levibostian/launchpad-2-github",
"id": "047fb3f8ee05656ca0a8abb307b1a1e1760f3e4d",
"size": "2184",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bzr-fastimport/build/lib.linux-x86_64-2.7/bzrlib/plugins/fastimport/branch_mapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "634960"
}
],
"symlink_target": ""
}
|
import configparser
from os.path import expanduser
from kubernetes import client
from kfserving import KFServingClient
from kfserving import constants
KFSERVING_TEST_NAMESPACE = "kfserving-ci-e2e-test"
gcp_testing_creds = '''ewogICAgImNsaWVudF9pZCI6ICI3NjA1MTg1MDY0MDgtNnFyNHA2Z3BpNmhuNTA2cH\
Q4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLAogICAgImNsaWVudF9zZWNyZXQiOiAi\
ZC1GTDk1UTE5cTdNUW1IRDBUeUZwZDdoIiwKICAgICJyZWZyZXNoX3Rva2VuIjogIjEvYnFZbWt4bkRieEVzdEcxMlh\
jbU9ack4wLWV5STNiZWFuSmJSZDRrcXM2ZyIsCiAgICAidHlwZSI6ICJhdXRob3JpemVkX3VzZXIiCn0K'''
def get_created_secret(secret_name):
return client.CoreV1Api().read_namespaced_secret(
name=secret_name,
namespace=KFSERVING_TEST_NAMESPACE
)
def get_created_sa(sa_name):
return client.CoreV1Api().read_namespaced_service_account(
name=sa_name,
namespace=KFSERVING_TEST_NAMESPACE
)
def delete_sa(sa_name):
return client.CoreV1Api().delete_namespaced_service_account( # pylint:disable=no-value-for-parameter
name=sa_name,
namespace=KFSERVING_TEST_NAMESPACE
)
def check_sa_exists(service_account):
    '''Check if the specified service account exists.'''
sa_list = client.CoreV1Api().list_namespaced_service_account(namespace=KFSERVING_TEST_NAMESPACE)
sa_name_list = []
    # Iterate over every service account; the previous range(0, len - 1)
    # loop skipped the last item in the list.
    for item in sa_list.items:
        sa_name_list.append(item.metadata.name)
if service_account in sa_name_list:
return True
return False
def test_set_credentials_s3():
"""Test S3 credentials creating."""
kfserving = KFServingClient()
credentials_file = './credentials/aws_credentials'
# Test creating service account case.
sa_name = constants.DEFAULT_SA_NAME
if check_sa_exists(sa_name):
delete_sa(sa_name)
kfserving.set_credentials(storage_type='s3',
namespace=KFSERVING_TEST_NAMESPACE,
credentials_file=credentials_file,
s3_profile='default',
s3_endpoint='s3.us-west-2.amazonaws.com',
s3_region='us-west-2',
s3_use_https='1',
s3_verify_ssl='0')
sa_body = get_created_sa(sa_name)
created_secret_name = sa_body.secrets[0].name
created_secret = get_created_secret(created_secret_name)
config = configparser.ConfigParser()
config.read([expanduser(credentials_file)])
s3_access_key_id = config.get('default', 'aws_access_key_id')
s3_secret_access_key = config.get(
'default', 'aws_secret_access_key')
assert created_secret.data[constants.S3_ACCESS_KEY_ID_DEFAULT_NAME] == s3_access_key_id
assert created_secret.data[constants.S3_SECRET_ACCESS_KEY_DEFAULT_NAME] == s3_secret_access_key
assert created_secret.metadata.annotations[constants.KFSERVING_GROUP +
'/s3-endpoint'] == 's3.us-west-2.amazonaws.com'
assert created_secret.metadata.annotations[constants.KFSERVING_GROUP +
'/s3-region'] == 'us-west-2'
assert created_secret.metadata.annotations[constants.KFSERVING_GROUP +
'/s3-usehttps'] == '1'
assert created_secret.metadata.annotations[constants.KFSERVING_GROUP +
'/s3-verifyssl'] == '0'
def test_set_credentials_gcp():
'''Test GCP credentials creating'''
KFServing = KFServingClient()
sa_name = constants.DEFAULT_SA_NAME
KFServing.set_credentials(storage_type='gcs',
namespace=KFSERVING_TEST_NAMESPACE,
credentials_file='./credentials/gcp_credentials.json',
sa_name=sa_name)
created_sa = get_created_sa(sa_name)
created_secret_name = created_sa.secrets[0].name
created_secret = get_created_secret(created_secret_name)
assert created_secret.data[constants.GCS_CREDS_FILE_DEFAULT_NAME] == gcp_testing_creds
def test_azure_credentials():
'''Test Azure credentials creating'''
KFServing = KFServingClient()
sa_name = constants.DEFAULT_SA_NAME
KFServing.set_credentials(storage_type='Azure',
namespace=KFSERVING_TEST_NAMESPACE,
credentials_file='./credentials/azure_credentials.json',
sa_name=sa_name)
created_sa = get_created_sa(sa_name)
created_secret_name = created_sa.secrets[0].name
created_secret = get_created_secret(created_secret_name)
assert created_secret.data['AZ_CLIENT_ID'] == 'dXNlcgo='
assert created_secret.data['AZ_CLIENT_SECRET'] == 'cGFzc3dvcmQ='
assert created_secret.data['AZ_SUBSCRIPTION_ID'] == 'MzMzMzMzMzMtMzMzMy0zMzMzLTMzMzMtMzMzMzMz'
assert created_secret.data['AZ_TENANT_ID'] == 'MTIzNAo='
|
{
"content_hash": "de7ca010c506851eb8ef00f905bfac18",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 105,
"avg_line_length": 42.706896551724135,
"alnum_prop": 0.6522002422285023,
"repo_name": "kubeflow/kfserving-lts",
"id": "d90096e878609fdd356f77db34fe0ff86fd0ba5b",
"size": "5531",
"binary": false,
"copies": "1",
"ref": "refs/heads/release-0.6",
"path": "test/e2e/credentials/test_set_creds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "224"
},
{
"name": "Dockerfile",
"bytes": "10549"
},
{
"name": "Go",
"bytes": "1251102"
},
{
"name": "HTML",
"bytes": "17922"
},
{
"name": "JavaScript",
"bytes": "1828"
},
{
"name": "Jsonnet",
"bytes": "2434415"
},
{
"name": "Makefile",
"bytes": "16071"
},
{
"name": "Python",
"bytes": "1860674"
},
{
"name": "SCSS",
"bytes": "1789"
},
{
"name": "Shell",
"bytes": "36788"
},
{
"name": "TypeScript",
"bytes": "78886"
}
],
"symlink_target": ""
}
|
"""Server trace events.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import enum # pylint: disable=wrong-import-order
import logging
from .. import _events
_LOGGER = logging.getLogger(__name__)
class ServerTraceEvent(_events.TraceEvent):
"""Parent class of all server trace events.
Contains the basic attributes of all events as well as the factory method
    `from_data` that instantiates an event object from its data representation.
All server event classes must derive from this class.
"""
__slots__ = (
'event_type',
'timestamp',
'source',
'servername',
'payload',
)
def __init__(self,
timestamp=None, source=None, servername=None, payload=None):
self.event_type = ServerTraceEventTypes(self.__class__).name
if timestamp is None:
self.timestamp = None
else:
self.timestamp = float(timestamp)
self.source = source
self.payload = payload
self.servername = servername
@property
@abc.abstractmethod
def event_data(self):
"""Return an event's event_data.
"""
@classmethod
def _class_from_type(cls, event_type):
"""Return the class for a given event_type.
"""
etype = getattr(ServerTraceEventTypes, event_type, None)
if etype is None:
_LOGGER.warning('Unknown event type %r', event_type)
return None
eclass = etype.value
return eclass
@classmethod
def from_data(cls, timestamp, source, servername, event_type, event_data,
payload=None):
"""Intantiate an event from given event data.
"""
eclass = cls._class_from_type(event_type)
if eclass is None:
return None
try:
event = eclass.from_data(
timestamp=timestamp,
source=source,
servername=servername,
event_type=event_type,
event_data=event_data,
payload=payload
)
except Exception: # pylint: disable=broad-except
_LOGGER.warning('Failed to parse event type %r:', event_type,
exc_info=True)
event = None
return event
def to_data(self):
"""Return a 6 tuple represtation of an event.
"""
event_data = self.event_data
if event_data is None:
event_data = ''
return (
self.timestamp,
self.source,
self.servername,
self.event_type,
event_data,
self.payload
)
@classmethod
def from_dict(cls, event_data):
"""Instantiate an event from a dict of its data.
"""
event_type = event_data.pop('event_type')
eclass = cls._class_from_type(event_type)
if eclass is None:
return None
try:
event = eclass(**event_data)
except Exception: # pylint: disable=broad-except
            _LOGGER.warning('Failed to instantiate event type %r:', event_type,
exc_info=True)
event = None
return event
def to_dict(self):
"""Return a dictionary representation of an event.
"""
return {
k: getattr(self, k)
for k in super(self.__class__, self).__slots__ + self.__slots__
}
class ServerStateTraceEvent(ServerTraceEvent):
"""Event emitted when server state changes.
"""
__slots__ = (
'state',
)
def __init__(self, state,
timestamp=None, source=None, servername=None, payload=None):
super(ServerStateTraceEvent, self).__init__(
timestamp=timestamp,
source=source,
servername=servername,
payload=payload
)
self.state = state
@classmethod
def from_data(cls, timestamp, source, servername, event_type, event_data,
payload=None):
assert cls == getattr(ServerTraceEventTypes, event_type).value
return cls(
timestamp=timestamp,
source=source,
servername=servername,
payload=payload,
state=event_data
)
@property
def event_data(self):
return self.state
class ServerBlackoutTraceEvent(ServerTraceEvent):
"""Event emitted when server is blackedout.
"""
__slots__ = (
)
@classmethod
def from_data(cls, timestamp, source, servername, event_type, event_data,
payload=None):
assert cls == getattr(ServerTraceEventTypes, event_type).value
return cls(
timestamp=timestamp,
source=source,
servername=servername,
payload=payload
)
@property
def event_data(self):
pass
class ServerBlackoutClearedTraceEvent(ServerTraceEvent):
"""Event emitted when server blackout is cleared.
"""
__slots__ = (
)
@classmethod
def from_data(cls, timestamp, source, servername, event_type, event_data,
payload=None):
assert cls == getattr(ServerTraceEventTypes, event_type).value
return cls(
timestamp=timestamp,
source=source,
servername=servername,
payload=payload
)
@property
def event_data(self):
pass
class ServerTraceEventTypes(enum.Enum):
"""Enumeration of all server event type names.
"""
server_state = ServerStateTraceEvent
server_blackout = ServerBlackoutTraceEvent
server_blackout_cleared = ServerBlackoutClearedTraceEvent
class ServerTraceEventHandler(_events.TraceEventHandler):
"""Base class for processing server trace events.
"""
DISPATCH = {
ServerStateTraceEvent:
lambda self, event: self.on_server_state(
when=event.timestamp,
servername=event.servername,
state=event.state
),
ServerBlackoutTraceEvent:
lambda self, event: self.on_server_blackout(
when=event.timestamp,
servername=event.servername
),
ServerBlackoutClearedTraceEvent:
lambda self, event: self.on_server_blackout_cleared(
when=event.timestamp,
servername=event.servername
),
}
def dispatch(self, event):
"""Dispatch event to one of the handler methods.
"""
return self.DISPATCH.get(type(event), None)
@abc.abstractmethod
def on_server_state(self, when, servername, state):
"""Invoked when server state changes.
"""
@abc.abstractmethod
def on_server_blackout(self, when, servername):
"""Invoked when server is blackedout.
"""
@abc.abstractmethod
def on_server_blackout_cleared(self, when, servername):
"""Invoked when server blackout is cleared.
"""
|
{
"content_hash": "851ffc0631c436161cdd9f0210ac7d78",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 79,
"avg_line_length": 27.427480916030536,
"alnum_prop": 0.5754244364041191,
"repo_name": "ceache/treadmill",
"id": "3c0f1dd308b4da1ed5e5e044b109325ddf1acd5e",
"size": "7186",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python/treadmill/trace/server/events.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3750"
},
{
"name": "Python",
"bytes": "3362298"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "51646"
}
],
"symlink_target": ""
}
|
"""This module contains a Google Cloud Video Intelligence Hook."""
from __future__ import annotations
from typing import Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.videointelligence_v1 import VideoIntelligenceServiceClient
from google.cloud.videointelligence_v1.types import VideoContext
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudVideoIntelligenceHook(GoogleBaseHook):
"""
Hook for Google Cloud Video Intelligence APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self._conn = None
def get_conn(self) -> VideoIntelligenceServiceClient:
"""
        Returns the GCP Video Intelligence Service client.
:rtype: google.cloud.videointelligence_v1.VideoIntelligenceServiceClient
"""
if not self._conn:
self._conn = VideoIntelligenceServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO
)
return self._conn
@GoogleBaseHook.quota_retry()
def annotate_video(
self,
input_uri: str | None = None,
input_content: bytes | None = None,
features: list[VideoIntelligenceServiceClient.enums.Feature] | None = None,
        video_context: dict | VideoContext | None = None,
output_uri: str | None = None,
location: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Performs video annotation.
:param input_uri: Input video location. Currently, only Google Cloud Storage URIs are supported,
which must be specified in the following format: ``gs://bucket-id/object-id``.
:param input_content: The video data bytes.
If unset, the input video(s) should be specified via ``input_uri``.
If set, ``input_uri`` should be unset.
:param features: Requested video annotation features.
:param output_uri: Optional, location where the output (in JSON format) should be stored. Currently,
only Google Cloud Storage URIs are supported, which must be specified in the following format:
``gs://bucket-id/object-id``.
:param video_context: Optional, Additional video context and/or feature-specific parameters.
:param location: Optional, cloud region where annotation should take place. Supported cloud regions:
us-east1, us-west1, europe-west1, asia-east1.
If no region is specified, a region will be determined based on video file location.
:param retry: Retry object used to determine when/if to retry requests.
If None is specified, requests will not be retried.
:param timeout: Optional, The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:param metadata: Optional, Additional metadata that is provided to the method.
"""
client = self.get_conn()
return client.annotate_video(
input_uri=input_uri,
input_content=input_content,
features=features,
video_context=video_context,
output_uri=output_uri,
location_id=location,
retry=retry,
timeout=timeout,
metadata=metadata,
)
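# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Requests label detection for a GCS-hosted video. The bucket/object and the
# Feature import path are assumptions (the latter matches recent
# google-cloud-videointelligence releases; older ones expose it via `enums`).
def _example_annotate_video() -> None:
    from google.cloud.videointelligence_v1 import Feature

    hook = CloudVideoIntelligenceHook(gcp_conn_id="google_cloud_default")
    operation = hook.annotate_video(
        input_uri="gs://example-bucket/example.mp4",  # hypothetical object
        features=[Feature.LABEL_DETECTION],
    )
    # annotate_video returns a long-running Operation; block for its result.
    operation.result(timeout=600)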
|
{
"content_hash": "22977d75a69abaa5707fa00d281df6a8",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 108,
"avg_line_length": 46.30555555555556,
"alnum_prop": 0.6716656668666267,
"repo_name": "cfei18/incubator-airflow",
"id": "9c7f8847f34405cce539e2bb4d90baf8f985219f",
"size": "5788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/providers/google/cloud/hooks/video_intelligence.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
}
|
"""
This is your project's main settings file that can be committed to your
repo. If you need to override a setting locally, use local.py
"""
import os
import logging
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
def get_env_setting(setting):
""" Get the environment setting or return exception """
try:
return os.environ[setting]
except KeyError:
error_msg = "Set the %s env variable" % setting
raise ImproperlyConfigured(error_msg)
# Your project root
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + "../../../")
SUPPORTED_NONLOCALES = ['media', 'admin', 'static']
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# Defines the views served for root URLs.
ROOT_URLCONF = '{{ project_name }}.urls'
# Application definition
INSTALLED_APPS = (
# Django contrib apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.markup',
'django.contrib.humanize',
'django.contrib.syndication',
'django.contrib.staticfiles',
# Third-party apps, patches, fixes
'djcelery',
'debug_toolbar',
'compressor',
#'debug_toolbar_user_panel',
# Database migrations
'south',
# Application base, containing global templates.
'base',
# Local apps, referenced via appname
)
# Place bcrypt first in the list, so it will be the default password hashing
# mechanism
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
# Sessions
#
# By default, be at least somewhat secure with our session cookies.
SESSION_COOKIE_HTTPONLY = True
# Set this to true if you are using https
SESSION_COOKIE_SECURE = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.example.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.example.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.example.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
# URL prefix for static files.
# Example: "http://media.example.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
TEMPLATE_CONTEXT_PROCESSORS = [
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.core.context_processors.i18n',
'django.core.context_processors.static',
'django.core.context_processors.csrf',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
]
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or
# "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
def custom_show_toolbar(request):
""" Only show the debug toolbar to users with the superuser flag. """
return request.user.is_superuser
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': custom_show_toolbar,
'HIDE_DJANGO_SQL': True,
'TAG': 'body',
'SHOW_TEMPLATE_CONTEXT': True,
'ENABLE_STACKTRACES': True,
}
DEBUG_TOOLBAR_PANELS = (
#'debug_toolbar_user_panel.panels.UserPanel',
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
)
# Specify a custom user model to use
#AUTH_USER_MODEL = 'accounts.MyUser'
FILE_UPLOAD_PERMISSIONS = 0664
# The WSGI Application to use for runserver
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Define your database connections
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
#'OPTIONS': {
# 'init_command': 'SET storage_engine=InnoDB',
# 'charset' : 'utf8',
# 'use_unicode' : True,
#},
#'TEST_CHARSET': 'utf8',
#'TEST_COLLATION': 'utf8_general_ci',
},
# 'slave': {
# ...
# },
}
# Uncomment this and set to all slave DBs in use on the site.
# SLAVE_DATABASES = ['slave']
# Recipients of traceback emails and other notifications.
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# SECURITY WARNING: don't run with debug turned on in production!
# Debugging displays nice error messages, but leaks memory. Set this to False
# on all server instances and True only for development.
DEBUG = TEMPLATE_DEBUG = False
# Is this a development instance? Set this to True on development/master
# instances and False on stage/prod.
DEV = False
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# SECURITY WARNING: keep the secret key used in production secret!
# Hardcoded values can leak through source control.
# This is an example method of getting the value from an environment setting.
# Uncomment to use, and then make sure you set the SECRET_KEY environment variable.
# This is good to use in production, and on services that support it such as Heroku.
#SECRET_KEY = get_env_setting('SECRET_KEY')
# Uncomment these to activate and customize Celery:
# CELERY_ALWAYS_EAGER = False # required to activate celeryd
# BROKER_HOST = 'localhost'
# BROKER_PORT = 5672
# BROKER_USER = 'django'
# BROKER_PASSWORD = 'django'
# BROKER_VHOST = 'django'
# CELERY_RESULT_BACKEND = 'amqp'
INTERNAL_IPS = ('127.0.0.1',)  # trailing comma makes this a tuple, not a string
# Enable these options for memcached
#CACHE_BACKEND= "memcached://127.0.0.1:11211/"
#CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True
# Set this to true if you use a proxy that sets X-Forwarded-Host
#USE_X_FORWARDED_HOST = False
SERVER_EMAIL = "webmaster@example.com"
DEFAULT_FROM_EMAIL = "webmaster@example.com"
SYSTEM_EMAIL_PREFIX = "[{{ project_name }}]"
## Log settings
LOG_LEVEL = logging.INFO
HAS_SYSLOG = True
SYSLOG_TAG = "http_app_{{ project_name }}" # Make this unique to your project.
# Remove this configuration variable to use your custom logging configuration
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'loggers': {
'{{ project_name }}': {
'level': "DEBUG"
}
}
}
# Common Event Format logging parameters
#CEF_PRODUCT = '{{ project_name }}'
#CEF_VENDOR = 'Your Company'
#CEF_VERSION = '0'
#CEF_DEVICE_VERSION = '0'
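# --- Hedged example (illustrative only) ---
# Per the header of this module, local overrides belong in local.py. A
# minimal development local.py might look like (values are placeholders):
#
#     from .base import *
#
#     DEBUG = TEMPLATE_DEBUG = True
#     DEV = True
#     ALLOWED_HOSTS = ['localhost', '127.0.0.1']
#     SECRET_KEY = 'dev-only-insecure-key'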
|
{
"content_hash": "ceb4cf5ccecbf8c79454792e026be16d",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 84,
"avg_line_length": 31.9375,
"alnum_prop": 0.7116077865897621,
"repo_name": "michaelBenin/django-base-template",
"id": "8be75a06a384eba8ddf1b6065ad6d4a237e78867",
"size": "9709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_name/settings/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import absolute_import
import unittest
from pysasl import SASLAuth
from pysasl.creds.client import ClientCredentials
from pysasl.creds.external import (ExternalVerificationRequired,
ExternalCredentials)
from pysasl.exception import InvalidResponse, UnexpectedChallenge
from pysasl.identity import ClearIdentity
from pysasl.mechanism import ServerChallenge, ChallengeResponse
from pysasl.mechanism.oauth import OAuth2Mechanism
class TestOAuth2Mechanism(unittest.TestCase):
def setUp(self) -> None:
self.mech = OAuth2Mechanism()
def test_availability(self) -> None:
sasl = SASLAuth.defaults()
self.assertIsNone(sasl.get_server(b'XOAUTH2'))
self.assertIsNone(sasl.get_client(b'XOAUTH2'))
sasl = SASLAuth.named([b'XOAUTH2'])
self.assertEqual(self.mech, sasl.get_server(b'XOAUTH2'))
self.assertEqual(self.mech, sasl.get_client(b'XOAUTH2'))
sasl = SASLAuth([self.mech])
self.assertEqual([self.mech], sasl.client_mechanisms)
self.assertEqual([self.mech], sasl.server_mechanisms)
def test_server_attempt_issues_challenge(self) -> None:
try:
self.mech.server_attempt([])
except ServerChallenge as exc:
self.assertEqual(b'', exc.data)
else:
self.fail('ServerChallenge not raised')
def test_server_attempt_bad_response(self) -> None:
self.assertRaises(InvalidResponse,
self.mech.server_attempt,
[ChallengeResponse(b'', b'abcdefghi')])
def test_server_attempt_successful(self) -> None:
result, final = self.mech.server_attempt([ChallengeResponse(
b'', b'user=testuser\x01auth=Bearer testtoken\x01\x01')])
self.assertIsNone(final)
self.assertIsInstance(result, ExternalCredentials)
self.assertEqual('', result.authcid)
self.assertEqual('testuser', result.authzid)
with self.assertRaises(ExternalVerificationRequired) as exc:
result.verify(ClearIdentity('testuser', 'secret'))
self.assertEqual('testtoken', exc.exception.token)
with self.assertRaises(ExternalVerificationRequired) as exc:
result.verify(None)
self.assertEqual('testtoken', exc.exception.token)
def test_client_attempt(self) -> None:
creds = ClientCredentials('testuser', 'testtoken')
resp1 = self.mech.client_attempt(creds, [])
self.assertEqual(b'user=testuser\x01auth=Bearer testtoken\x01\x01',
resp1.response)
resp2 = self.mech.client_attempt(creds, [ServerChallenge(b'')])
self.assertEqual(b'user=testuser\x01auth=Bearer testtoken\x01\x01',
resp2.response)
self.assertRaises(UnexpectedChallenge,
self.mech.client_attempt,
creds, [ServerChallenge(b'')]*2)
def test_client_attempt_error(self) -> None:
creds = ClientCredentials('testuser', 'testtoken')
resp1 = self.mech.client_attempt(creds, [])
self.assertEqual(b'user=testuser\x01auth=Bearer testtoken\x01\x01',
resp1.response)
resp2 = self.mech.client_attempt(creds, [
ServerChallenge(b'{"status":"401","schemes":"bearer mac",'
b'"scope":"https://mail.google.com/"}\n')])
self.assertEqual(b'', resp2.response)
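# --- Hedged sketch (illustrative only; not part of the original test suite) ---
# The byte strings asserted above follow the XOAUTH2 initial-response layout
# 'user=' <authzid> '\x01auth=Bearer ' <token> '\x01\x01'; a tiny helper that
# reproduces it (names are assumptions):
def _xoauth2_response(user: str, token: str) -> bytes:
    return b'user=%s\x01auth=Bearer %s\x01\x01' % (user.encode(), token.encode())

assert _xoauth2_response('testuser', 'testtoken') == \
    b'user=testuser\x01auth=Bearer testtoken\x01\x01'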
|
{
"content_hash": "c0b16d99451d4fae5c0e1fe05ccbd2c2",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 75,
"avg_line_length": 44.48717948717949,
"alnum_prop": 0.6446685878962536,
"repo_name": "icgood/pysasl",
"id": "3b7268a87a5aaf5b1a8b79b037340e585a8a5328",
"size": "3471",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/test_oauth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71258"
}
],
"symlink_target": ""
}
|
import requests
import defaults
class address_test():
def __init__(self, user_id, base_url):
self.request = None
self.user_id = user_id
self.base_url = base_url
def usps_address_verify(self, user_id, base_url, api='Verify'):
url = '%s?API=%s&XML=<AddressValidateRequest USERID="%s">\
<Address>\
<Address1></Address1>\
<Address2>6406 Ivy Lane</Address2>\
<City>Greenbelt</City>\
<State>MD</State> \
<Zip5></Zip5>\
<Zip4></Zip4>\
</Address>\
</AddressValidateRequest>' % (base_url, api, user_id)
self.request = requests.get(url)
def usps_zipcode_lookup(self, user_id, base_url, api='ZipCodeLookup'):
url = '%s?API=%s&XML=<ZipCodeLookupRequest USERID="%s">\
<Address>\
<Address1></Address1>\
<Address2>6406 Ivy Lane</Address2>\
<City>Greenbelt</City>\
<State>MD</State> \
</Address>\
</ZipCodeLookupRequest>' % (base_url, api, user_id)
self.request = requests.get(url)
def usps_city_state_lookup(self, user_id, base_url, api='CityStateLookup'):
url = '%s?API=%s&XML=<CityStateLookupRequest USERID="%s">\
<ZipCode ID= "0">\
<Zip5>90210</Zip5>\
</ZipCode>\
</CityStateLookupRequest>' % (base_url, api, user_id)
self.request = requests.get(url)
def run_tests(self):
self.usps_address_verify(self.user_id, self.base_url)
print 'ADDRESS_VERIFY : %s' % self.request.text
self.usps_zipcode_lookup(self.user_id, self.base_url)
print 'ZIPCODE_LOOKUP : %s' % self.request.text
self.usps_city_state_lookup(self.user_id, self.base_url)
print 'CITY_STATE_LOOKUP : %s' % self.request.text
class tracking_test():
def __init__(self, user_id, base_url):
self.request = None
self.user_id = user_id
self.base_url = base_url
def tracking_test(self, user_id, base_url, api='TrackV2'):
url = '%s?API=%s&XML=<TrackRequest USERID="%s">\
<TrackID ID="EJ958083578US"></TrackID>\
</TrackRequest>' % (base_url, api, user_id)
self.request = requests.get(url)
def run_tests(self):
self.tracking_test(self.user_id, self.base_url)
        print 'TRACKING_TEST : %s' % self.request.text
class shipping_test():
def __init__(self, user_id, base_url):
self.request = None
self.base_url = base_url
self.user_id = user_id
def domestic_shipping_test(self, user_id, base_url, api='RateV4'):
url = '%s?API=%s&XML=<RateV4Request USERID="%s" >\
<Revision/>\
<Package ID="1ST">\
<Service>PRIORITY</Service>\
<ZipOrigination>44106</ZipOrigination>\
<ZipDestination>20770</ZipDestination>\
<Pounds>1</Pounds>\
<Ounces>8</Ounces>\
<Container>NONRECTANGULAR</Container>\
<Size>LARGE</Size>\
<Width>15</Width>\
<Length>30</Length>\
<Height>15</Height>\
<Girth>55</Girth>\
</Package>\
</RateV4Request>' % (base_url, api, user_id)
self.request = requests.get(url)
    def international_shipping_test(self, user_id, base_url, api='IntlRateV2'):
url = '%s?API=%s&XML=<IntlRateV2Request USERID="%s">\
<Revision>2</Revision> \
<Package>\
<Pounds>69</Pounds>\
<Ounces>0</Ounces> \
<Machinable>True</Machinable>\
<MailType>all</MailType>\
<GXG>\
<POBoxFlag>N</POBoxFlag>\
<GiftFlag>N</GiftFlag>\
</GXG>\
<ValueOfContents>100.00</ValueOfContents>\
<Country>canada</Country>\
<Container>RECTANGULAR</Container>\
<Size>Regular</Size>\
<Width>10</Width>\
<Length>10</Length>\
<Height>11</Height>\
<Girth></Girth>\
<CommercialFlag>y</CommercialFlag>\
</Package>\
</IntlRateV2Request>' % (base_url, api, user_id)
self.request = requests.get(url)
def run_tests(self):
self.domestic_shipping_test(self.user_id, self.base_url)
print 'SHIPPING_TEST : %s' % self.request.text
        self.international_shipping_test(self.user_id, self.base_url)
print 'INTERNATIONAL_TEST : %s' % self.request.text
def run_all_tests(user_id):
address = address_test(user_id, defaults.USPS_TEST_URL)
address.run_tests()
tracking = tracking_test(user_id, defaults.USPS_TEST_URL)
tracking.run_tests()
shipping = shipping_test(user_id, defaults.USPS_TEST_URL)
shipping.run_tests()
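# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Runs every test group against the USPS test endpoint; the user id is a
# placeholder for the credential USPS issues per registered account.
if __name__ == '__main__':
    run_all_tests('YOUR_USPS_USER_ID')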
|
{
"content_hash": "06dae4461ddf2d959dd194100f918e63",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 76,
"avg_line_length": 27.89864864864865,
"alnum_prop": 0.6500363284088156,
"repo_name": "luxnovalabs/pyUspsLib",
"id": "4d938132262c553507acbdf34fbe2a0fcc34cc5a",
"size": "4129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyuspslib/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14568"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, absolute_import, unicode_literals, division
import sys as _coconut_sys, os.path as _coconut_os_path
_coconut_file_path = _coconut_os_path.dirname(_coconut_os_path.abspath(__file__))
_coconut_sys.path.insert(0, _coconut_file_path)
from __coconut__ import _coconut, _coconut_MatchError, _coconut_tail_call, _coconut_tco, _coconut_igetitem, _coconut_compose, _coconut_pipe, _coconut_starpipe, _coconut_backpipe, _coconut_backstarpipe, _coconut_bool_and, _coconut_bool_or, _coconut_minus, _coconut_map, _coconut_partial
from __coconut__ import *
_coconut_sys.path.remove(_coconut_file_path)
# Compiled Coconut: ------------------------------------------------------
from .base_class import Base
from tfinterface.decorators import return_self
import tensorflow as tf
from abc import abstractmethod
class PlaceholderDefaults(_coconut.collections.namedtuple("PlaceholderDefaults", "tensor predict fit"), _coconut.object):
__slots__ = ()
__ne__ = _coconut.object.__ne__
class NoValue(_coconut.collections.namedtuple("NoValue", ""), _coconut.object):
__slots__ = ()
__ne__ = _coconut.object.__ne__
class NoValueException(Exception):
pass
def fit_tuple(*_coconut_match_to_args, **_coconut_match_to_kwargs):
_coconut_match_check = False
if (_coconut.len(_coconut_match_to_args) == 1) and (_coconut.isinstance(_coconut_match_to_args[0], PlaceholderDefaults)) and (_coconut.len(_coconut_match_to_args[0]) == 3):
tensor = _coconut_match_to_args[0][0]
fit = _coconut_match_to_args[0][2]
if (not _coconut_match_to_kwargs):
_coconut_match_check = True
if not _coconut_match_check:
_coconut_match_err = _coconut_MatchError("pattern-matching failed for " "'def fit_tuple(PlaceholderDefaults(tensor, _, fit)):'" " in " + _coconut.repr(_coconut.repr(_coconut_match_to_args)))
_coconut_match_err.pattern = 'def fit_tuple(PlaceholderDefaults(tensor, _, fit)):'
_coconut_match_err.value = _coconut_match_to_args
raise _coconut_match_err
if isinstance(fit, NoValue):
raise NoValueException("No fit value given for {}".format(tensor))
return tensor, fit
def predict_tuple(*_coconut_match_to_args, **_coconut_match_to_kwargs):
_coconut_match_check = False
if (_coconut.len(_coconut_match_to_args) == 1) and (_coconut.isinstance(_coconut_match_to_args[0], PlaceholderDefaults)) and (_coconut.len(_coconut_match_to_args[0]) == 3):
tensor = _coconut_match_to_args[0][0]
predict = _coconut_match_to_args[0][1]
if (not _coconut_match_to_kwargs):
_coconut_match_check = True
if not _coconut_match_check:
_coconut_match_err = _coconut_MatchError("pattern-matching failed for " "'def predict_tuple(PlaceholderDefaults(tensor, predict, _)):'" " in " + _coconut.repr(_coconut.repr(_coconut_match_to_args)))
_coconut_match_err.pattern = 'def predict_tuple(PlaceholderDefaults(tensor, predict, _)):'
_coconut_match_err.value = _coconut_match_to_args
raise _coconut_match_err
if isinstance(predict, NoValue):
raise NoValueException("No predict value given for {}".format(tensor))
return tensor, predict
class Inputs(Base):
@abstractmethod
def fit_feed(self, *args, **kwargs):
pass
@abstractmethod
def predict_feed(self, *args, **kwargs):
pass
class GeneralInputs(Inputs):
"""docstring for GeneralInputs."""
def __init__(self, name, graph=None, sess=None, **input_specs):
super(GeneralInputs, self).__init__(name, graph=graph, sess=sess)
self._input_specs = input_specs
@return_self
def build_tensors(self, **input_overrides):
input_specs = self._input_specs.copy()
input_specs.update(input_overrides)
self._placeholder_defaults = {}
for name, spec in input_specs.items():
if type(spec) is not dict:
if type(spec) is tuple:
spec = dict(dtype=tf.float32, shape=spec)
elif hasattr(spec, "__call__"):
spec = dict(tensor_fn=spec)
else:
spec = dict(value=spec)
if "shape" in spec:
dtype = spec.get("dtype", tf.float32)
shape = spec.get("shape")
tensor = tf.placeholder(dtype=dtype, shape=shape, name=name)
self._placeholder_defaults[name] = PlaceholderDefaults(tensor, spec.get("predict", NoValue()), spec.get("fit", NoValue()))
elif "value" in spec:
value = spec.get("value")
dtype = spec.get("dtype", None)
tensor = tf.convert_to_tensor(value, dtype=dtype, name=name)
elif "tensor_fn" in spec:
tensor_fn = spec.get("tensor_fn")
tensor = tensor_fn()
setattr(self, name, tensor)
def get_feed(self, **kwargs):
return (dict(((getattr(self, key)), (value)) for key, value in kwargs.items()))
def _get_fit_defaults(self):
feed = {}
for name, placeholder_defaults in self._placeholder_defaults.items():
try:
tensor, value = fit_tuple(placeholder_defaults)
feed[tensor] = value
except NoValueException as e:
pass
return feed
def _get_predict_defaults(self):
feed = {}
for name, placeholder_defaults in self._placeholder_defaults.items():
try:
tensor, value = predict_tuple(placeholder_defaults)
feed[tensor] = value
except NoValueException as e:
pass
return feed
def fit_feed(self, *args, **kwargs):
feed = self._get_fit_defaults()
feed.update(self.get_feed(*args, **kwargs))
return feed
def predict_feed(self, *args, **kwargs):
feed = self._get_predict_defaults()
feed.update(self.get_feed(*args, **kwargs))
return feed
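# --- Hedged usage sketch (illustrative only; not part of the compiled module) ---
# Shows the three spec forms accepted by build_tensors: a shape tuple becomes
# a float32 placeholder (optionally with fit/predict defaults), a callable is
# used as a tensor factory, and a plain value becomes a constant. All names
# below are assumptions, and Base is expected to wire up graph/session.
def _example_inputs():
    inputs = GeneralInputs(
        'inputs',
        x=(None, 10),                                        # placeholder
        keep_prob=dict(shape=(), fit=0.5, predict=1.0),      # with defaults
        step=lambda: tf.train.get_or_create_global_step(),   # tensor_fn
        lr=0.001,                                            # constant
    ).build_tensors()
    # fit_feed merges the fit defaults with explicit overrides.
    return inputs.fit_feed(x=[[0.0] * 10])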
|
{
"content_hash": "c796f8f14d57d16365fe8c6ade4b784b",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 285,
"avg_line_length": 38.21518987341772,
"alnum_prop": 0.6142762504140444,
"repo_name": "cgarciae/tfinterface",
"id": "a565ece8eb46e4c84f638a5784ee7c394de603d8",
"size": "6252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tfinterface/base/inputs.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "190163"
},
{
"name": "Shell",
"bytes": "1393"
}
],
"symlink_target": ""
}
|
import twitter
import json
import requests
from pprint import pprint
import hackupc.settings as settings
from bestin.utils import convert_degs_to_decimal
from bestin.models import Activity
from bestin.process import content_analyzer
from instagram_scrabber import get_self_media
def analyze_feed(user):
if user.social_auth.get().provider == 'instagram':
process_instagram(user)
if user.social_auth.get().provider == 'twitter':
process_twitter(user)
def get_score(intput_text):
return content_analyzer().process_text(intput_text)
def process_instagram(user):
token = user.social_auth.get().extra_data["access_token"]
posts = get_self_media(token)
charity_twitts = []
geo_tagged = []
for post in posts:
score = get_score(post["text"])
print(score)
if score > 0:
geo = [post["lng"], post["lat"]]
            post_id = int(post["id"].split('_')[0]) % 1000000
if geo[0] is not None:
print(geo)
geo_str = str(geo)
try:
activity = Activity.objects.get(social_status_id=post_id)
except Activity.DoesNotExist:
charity_twitts.append(Activity.create(social_status_id=post_id, user_id=user, source=post["text"],
score=score, geodata=geo_str))
if geo is not None:
geo_tagged.append({"UserID": user.id, "text": post["text"].replace('#', ' '),
"score": score, "lat": geo[1], "lon": geo[0]})
Activity.objects.bulk_create(charity_twitts)
adds = []
print(geo_tagged)
for tagged in geo_tagged:
adds.append({"geometry": {"x": 1.1*tagged["lon"]*10**5, "y": 1.2225*tagged["lat"]*10**5}, "attributes": tagged})
print(adds)
print(requests.post('https://services7.arcgis.com/0MAMn0h8N3f8X276/arcgis/rest/services/Social_Activity/FeatureServer/applyEdits?f=pjson&edits='+json.dumps([{"id": 0, "adds": adds}]),
headers={'Content-type': 'application/json', 'Accept': 'text/plain'}).text)
def process_twitter(user):
tokens = user.social_auth.get().extra_data["access_token"]
oauth_key = tokens["oauth_token"]
oauth_secret = tokens["oauth_token_secret"]
api = twitter.Api(consumer_key=settings.SOCIAL_AUTH_TWITTER_KEY,
consumer_secret=settings.SOCIAL_AUTH_TWITTER_SECRET,
access_token_key=oauth_key,
access_token_secret=oauth_secret)
statuses = api.GetUserTimeline(include_rts=False, count=20)
charity_twitts = []
geo_tagged = []
for status in statuses:
score = get_score(status.text)
if score > 0:
geo = status.coordinates
place = status.place
if geo is not None:
geo = geo["coordinates"]
elif place is not None:
place = place["full_name"]
responce = requests.get('http://maps.google.com/maps/api/geocode/json?address='+place)
location = json.loads(responce.text)["results"][0]["geometry"]["location"]
geo = [location["lng"], location["lat"]]
geo_str = str(geo)
status_id = status.id % 1000000
try:
activity = Activity.objects.get(social_status_id=status_id)
except Activity.DoesNotExist:
charity_twitts.append(Activity.create(social_status_id=status_id, user_id=user, source=status.text,
score=score, geodata=geo_str))
if geo is not None:
geo_tagged.append({"UserID": user.id, "text": status.text.replace('#', ' '),
"score": score, "lat": geo[1], "lon": geo[0]})
Activity.objects.bulk_create(charity_twitts)
adds = []
print(geo_tagged)
for tagged in geo_tagged:
adds.append({"geometry": {"x": 1.1*tagged["lon"]*10**5, "y": 1.2225*tagged["lat"]*10**5}, "attributes": tagged})
print(adds)
print(requests.post('https://services7.arcgis.com/0MAMn0h8N3f8X276/arcgis/rest/services/Social_Activity/FeatureServer/applyEdits?f=pjson&edits='+json.dumps([{"id": 0, "adds": adds}]),
headers={'Content-type': 'application/json', 'Accept': 'text/plain'}).text)
#print(api.GetStatus(status_id='837968179766886401'))
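# --- Hedged usage note (illustrative only) ---
# analyze_feed dispatches on the user's python-social-auth provider; a Django
# view could trigger it like this (the view wiring is an assumption):
#
#     def refresh_activity(request):
#         analyze_feed(request.user)
#         return HttpResponse('ok')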
|
{
"content_hash": "7975985df78d7d02ec6e71d25acc59d7",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 187,
"avg_line_length": 46.82795698924731,
"alnum_prop": 0.5970149253731343,
"repo_name": "IKholopov/HackUPC2017",
"id": "bd1e2a1206dc804fec911b0cd061c6def251deb9",
"size": "4355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hackupc/bestin/services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "63043"
},
{
"name": "HTML",
"bytes": "40996"
},
{
"name": "JavaScript",
"bytes": "272171"
},
{
"name": "Python",
"bytes": "40280"
}
],
"symlink_target": ""
}
|
import json
import httpretty
import mock
import requests.exceptions
from testfixtures import log_capture
from analyticsclient.constants import data_format
from analyticsclient.client import Client
from analyticsclient.exceptions import ClientError, TimeoutError
from analyticsclient.tests import ClientTestCase
class ClientTests(ClientTestCase):
def setUp(self):
super(ClientTests, self).setUp()
httpretty.enable()
self.test_endpoint = 'test'
self.test_url = self.get_api_url(self.test_endpoint)
def tearDown(self):
httpretty.disable()
httpretty.reset()
def test_date_format(self):
self.assertEqual(Client.DATE_FORMAT, '%Y-%m-%d')
self.assertEqual(Client('').DATE_FORMAT, '%Y-%m-%d')
def test_has_resource(self):
httpretty.register_uri(httpretty.GET, self.test_url, body='')
self.assertEquals(self.client.has_resource(self.test_endpoint), True)
def test_missing_resource(self):
httpretty.register_uri(httpretty.GET, self.test_url, body='', status=404)
self.assertEquals(self.client.has_resource(self.test_endpoint), False)
def test_failed_authentication(self):
client = Client(base_url=self.api_url, auth_token='atoken')
httpretty.register_uri(httpretty.GET, self.test_url, body='', status=401)
self.assertEquals(client.has_resource(self.test_endpoint), False)
self.assertEquals(httpretty.last_request().headers['Authorization'], 'Token atoken')
def test_get(self):
data = {'foo': 'bar'}
httpretty.register_uri(httpretty.GET, self.test_url, body=json.dumps(data))
self.assertEquals(self.client.get(self.test_endpoint), data)
def test_get_invalid_response_body(self):
""" Verify that client raises a ClientError if the response body cannot be properly parsed. """
data = {'foo': 'bar'}
httpretty.register_uri(httpretty.GET, self.test_url, body=json.dumps(data)[:6])
with self.assertRaises(ClientError):
self.client.get(self.test_endpoint)
def test_strip_trailing_slash(self):
url = 'http://example.com'
client = Client(url)
self.assertEqual(client.base_url, url)
url_with_slash = 'http://example.com/'
client = Client(url_with_slash)
self.assertEqual(client.base_url, url)
# pylint: disable=protected-access
@mock.patch('requests.get', side_effect=requests.exceptions.Timeout)
@log_capture()
def test_request_timeout(self, mock_get, lc):
url = self.test_url
timeout = None
headers = {'Accept': 'application/json'}
self.assertRaises(TimeoutError, self.client._request, self.test_endpoint, timeout=timeout)
msg = 'Response from {0} exceeded timeout of {1}s.'.format(self.test_endpoint, self.client.timeout)
lc.check(('analyticsclient.client', 'ERROR', msg))
lc.clear()
mock_get.assert_called_once_with(url, headers=headers, timeout=self.client.timeout)
mock_get.reset_mock()
timeout = 10
self.assertRaises(TimeoutError, self.client._request, self.test_endpoint, timeout=timeout)
mock_get.assert_called_once_with(url, headers=headers, timeout=timeout)
msg = 'Response from {0} exceeded timeout of {1}s.'.format(self.test_endpoint, timeout)
lc.check(('analyticsclient.client', 'ERROR', msg))
def test_request_format(self):
httpretty.register_uri(httpretty.GET, self.test_url, body='{}')
response = self.client.get(self.test_endpoint)
self.assertEquals(httpretty.last_request().headers['Accept'], 'application/json')
self.assertDictEqual(response, {})
httpretty.register_uri(httpretty.GET, self.test_url, body='not-json')
response = self.client.get(self.test_endpoint, data_format=data_format.CSV)
self.assertEquals(httpretty.last_request().headers['Accept'], 'text/csv')
self.assertEqual(response, 'not-json')
httpretty.register_uri(httpretty.GET, self.test_url, body='{}')
response = self.client.get(self.test_endpoint, data_format=data_format.JSON)
self.assertEquals(httpretty.last_request().headers['Accept'], 'application/json')
self.assertDictEqual(response, {})
|
{
"content_hash": "bcceac578107a0b971259f12b1e40374",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 107,
"avg_line_length": 42.088235294117645,
"alnum_prop": 0.6771488469601677,
"repo_name": "nagyistoce/edx-analytics-data-api-client",
"id": "2a8d981f70929028e5d6ba45210f47074b31ea84",
"size": "4293",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "analyticsclient/tests/test_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "604"
},
{
"name": "Python",
"bytes": "34095"
},
{
"name": "Shell",
"bytes": "784"
}
],
"symlink_target": ""
}
|
"""
EasyBuild support for intel compiler toolchain (includes Intel compilers (icc, ifort), Parastation MPICH,
Intel Math Kernel Library (MKL), and Intel FFTW wrappers).
"""
from easybuild.toolchains.ipsmpi import Ipsmpi
from easybuild.toolchains.fft.intelfftw import IntelFFTW
from easybuild.toolchains.linalg.intelmkl import IntelMKL
class IntelPara(Ipsmpi, IntelMKL, IntelFFTW):
"""
Compiler toolchain with Intel compilers (icc/ifort), Parastation MPICH,
Intel Math Kernel Library (MKL) and Intel FFTW wrappers.
"""
NAME = 'intel-para'
|
{
"content_hash": "29d42885d3da984940618d0fb47b46a6",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 105,
"avg_line_length": 31.5,
"alnum_prop": 0.7548500881834215,
"repo_name": "ULHPC/modules",
"id": "31e12d638bfaab4add160793da3a44a56589bcf0",
"size": "1625",
"binary": false,
"copies": "5",
"ref": "refs/heads/devel",
"path": "easybuild/easybuild-framework/easybuild/toolchains/intel-para.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "36174"
},
{
"name": "Perl",
"bytes": "34780"
},
{
"name": "Python",
"bytes": "2711250"
},
{
"name": "Ruby",
"bytes": "932"
},
{
"name": "Shell",
"bytes": "51560"
}
],
"symlink_target": ""
}
|
"""Vocoder functions
"""
import numpy as np
from scipy.signal import butter, lfilter, filtfilt
import warnings
from .._utils import verbose_dec
def _freq_to_erbn(f):
"""Convert frequency to ERB number"""
return 21.4 * np.log10(0.00437 * f + 1)
def _erbn_to_freq(e):
"""Convert ERB number to frequency"""
return (10 ** (e / 21.4) - 1) / 0.00437
@verbose_dec
def get_band_freqs(fs, n_bands=16, freq_lims=(200., 8000.), scale='erb'):
"""Calculate frequency band edges.
Parameters
----------
fs : float
Sample rate.
n_bands : int
Number of bands to use.
freq_lims : tuple
2-element list of lower and upper frequency bounds (in Hz).
scale : str
Scale on which to equally space the bands. Possible values are "erb",
"log" (base-2), and "hz".
Returns
-------
edges : list of tuples
low- and high-cutoff frequencies for the bands.
"""
freq_lims = np.array(freq_lims, float)
fs = float(fs)
if np.any(freq_lims >= fs / 2.):
raise ValueError('frequency limits must not exceed Nyquist')
assert freq_lims.ndim == 1 and freq_lims.size == 2
if scale not in ('erb', 'log', 'hz'):
raise ValueError('Frequency scale must be "erb", "hz", or "log".')
if scale == 'erb':
freq_lims_erbn = _freq_to_erbn(freq_lims)
delta_erb = np.diff(freq_lims_erbn) / n_bands
cutoffs = _erbn_to_freq(freq_lims_erbn[0] +
delta_erb * np.arange(n_bands + 1))
assert np.allclose(cutoffs[[0, -1]], freq_lims) # should be
elif scale == 'log':
freq_lims_log = np.log2(freq_lims)
delta = np.diff(freq_lims_log) / n_bands
cutoffs = 2. ** (freq_lims_log[0] + delta * np.arange(n_bands + 1))
assert np.allclose(cutoffs[[0, -1]], freq_lims) # should be
else: # scale == 'hz'
delta = np.diff(freq_lims) / n_bands
cutoffs = freq_lims[0] + delta * np.arange(n_bands + 1)
    # Materialize the pairs: a bare zip iterator would be exhausted after the
    # first pass, and vocode() iterates these edges more than once.
    edges = list(zip(cutoffs[:-1], cutoffs[1:]))
    return edges
def get_bands(data, fs, edges, order=2, zero_phase=False, axis=-1):
"""Separate a signal into frequency bands
Parameters
----------
data : array-like
Data array.
fs : float
Sample rate.
edges : list
List of tuples of band cutoff frequencies.
order : int
Order of analysis and synthesis.
NOTE: Using too high an order can cause instability,
always check outputs for order > 2!
zero_phase : bool
Use zero-phase forward-backward filtering.
axis : int
Axis to operate over.
Returns
-------
bands, filts : list of tuples
List of tuples (ndarray of bandpassed signal,
(numerator, denominator coefficients of filter))
"""
data = np.atleast_1d(np.array(data, float)) # will make a copy
fs = float(fs)
bands = []
filts = []
for lf, hf in edges:
# band-pass
b, a = butter(order, [2 * lf / fs, 2 * hf / fs], 'bandpass')
filt = filtfilt if zero_phase else lfilter
band = filt(b, a, data, axis=axis)
bands.append(band)
filts.append((b, a))
    return bands, filts
def get_env(data, fs, lp_order=4, lp_cutoff=160., zero_phase=False, axis=-1):
"""Calculate a low-pass envelope of a signal
Parameters
----------
data : array-like
Data array.
fs : float
Sample rate.
lp_order : int
Order of the envelope low-pass.
lp_cutoff : float
Cutoff frequency of the envelope low-pass.
zero_phase : bool
Use zero-phase forward-backward filtering.
axis : int
Axis to operate over.
Returns
-------
env : numpy.ndarray
The rectified and low-pass filtered envelope of ``data``.
filt : tuple
The filter coefficients (numerator, denominator).
"""
if lp_cutoff >= fs / 2.:
raise ValueError('frequency limits must not exceed Nyquist')
cutoff = 2 * lp_cutoff / float(fs)
data[data < 0] = 0. # half-wave rectify
b, a = butter(lp_order, cutoff, 'lowpass')
filt = filtfilt if zero_phase else lfilter
env = filt(b, a, data, axis=axis)
    return env, (b, a)
def get_carriers(data, fs, edges, order=2, axis=-1, mode='tone', rate=None,
seed=None):
"""Generate carriers for frequency bands of a signal
Parameters
----------
data : array-like
Data array.
fs : float
Sample rate.
edges : list
List of tuples of band cutoff frequencies.
order : int
Order of analysis and synthesis.
NOTE: Using too high an order can cause instability,
always check outputs for order > 2!
axis : int
Axis to operate over.
mode : str
The type of signal used to excite each band. Options are "noise" for
band-limited noise, "tone" for sinewave-at-center-frequency, or
"poisson" for a poisson process of band-limited clicks at the rate
given by ``rate``.
rate : int
The mean rate of stimulation when ``mode=='poisson'`` (in clicks per
second). Ignored when ``mode != 'poisson'``.
seed : np.random.RandomState | int | None
Random seed to use. If ``None``, no seeding is done.
Returns
-------
carrs : list of numpy.ndarrays
List of numpy ndarrays of the carrier signals.
"""
# check args
if mode not in ('noise', 'tone', 'poisson'):
raise ValueError('mode must be "noise", "tone", or "poisson", not {0}'
''.format(mode))
if isinstance(seed, np.random.RandomState):
rng = seed
elif seed is None:
rng = np.random
elif isinstance(seed, int):
rng = np.random.RandomState(seed)
else:
raise TypeError('"seed" must be an int, an instance of '
'numpy.random.RandomState, or None.')
carrs = []
fs = float(fs)
n_samp = data.shape[axis]
for lf, hf in edges:
if mode == 'tone':
cf = (lf + hf) / 2.
carrier = np.sin(2 * np.pi * cf * np.arange(n_samp) / fs)
carrier *= np.sqrt(2) # rms of 1
shape = np.ones_like(data.shape)
shape[axis] = n_samp
carrier.shape = shape
else:
if mode == 'noise':
carrier = rng.rand(*data.shape)
else: # mode == 'poisson'
prob = rate / fs
with warnings.catch_warnings(record=True): # numpy silliness
carrier = rng.choice([0., 1.], n_samp, p=[1 - prob, prob])
b, a = butter(order, [2 * lf / fs, 2 * hf / fs], 'bandpass')
carrier = lfilter(b, a, carrier, axis=axis)
carrier /= np.sqrt(np.mean(carrier * carrier, axis=axis,
keepdims=True)) # rms of 1
carrs.append(carrier)
    return carrs
@verbose_dec
def vocode(data, fs, n_bands=16, freq_lims=(200., 8000.), scale='erb',
order=2, lp_cutoff=160., lp_order=4, mode='noise',
rate=200, seed=None, axis=-1, verbose=None):
"""Vocode stimuli using a variety of methods
Parameters
----------
data : array-like
Data array.
fs : float
Sample rate.
n_bands : int
Number of bands to use.
freq_lims : tuple
2-element list of lower and upper frequency bounds.
scale : str
Scale on which to equally space the bands. Possible values are "erb",
"log" (base-2), and "hz".
order : int
Order of analysis and synthesis.
NOTE: Using too high an order can cause instability,
always check outputs for order > 2!
lp_cutoff : float
Frequency of the envelope low-pass.
lp_order : int
Order of the envelope low-pass.
mode : str
The type of signal used to excite each band. Options are "noise" for
band-limited noise, "tone" for sinewave-at-center-frequency, or
"poisson" for a poisson process of band-limited clicks at the rate
        given by ``rate``.
rate : int
Average number of clicks per second for the poisson train used to
excite each band (when mode=="poisson").
seed : int | None
Random seed to use. If ``None``, no seeding is done.
axis : int
Axis to operate over.
Returns
-------
voc : array-like
Vocoded stimuli of the same shape as data.
Notes
-----
The default settings are adapted from a cochlear implant simulation
algorithm described by Zachary Smith (Cochlear Corp.).
"""
edges = get_band_freqs(fs, n_bands=n_bands, freq_lims=freq_lims,
scale=scale)
bands, filts = get_bands(data, fs, edges, order=order, axis=axis)
envs, env_filts = zip(*[get_env(x, fs, lp_order=lp_order,
lp_cutoff=lp_cutoff, axis=axis)
for x in bands])
carrs = get_carriers(data, fs, edges, order=order, axis=axis, mode=mode,
rate=rate, seed=seed)
# reconstruct
voc = np.zeros_like(data)
for carr, env in zip(carrs, envs):
voc += carr * env
return voc
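# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Vocodes one second of noise with the default 16-band ERB-spaced settings
# documented above; fs is chosen so the default 8 kHz upper band limit stays
# below Nyquist.
def _example_vocode():
    fs = 44100.
    rng = np.random.RandomState(0)
    signal = rng.randn(int(fs))  # 1 s stand-in stimulus
    voc = vocode(signal, fs, n_bands=16, mode='noise', seed=0)
    assert voc.shape == signal.shape
    return voc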
|
{
"content_hash": "5a2092074b068a3432459299a4bfacb2",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 78,
"avg_line_length": 33.50902527075812,
"alnum_prop": 0.5717517776341305,
"repo_name": "lkishline/expyfun",
"id": "d0837417305f1ba74e23a7e975f2973e62be1ea6",
"size": "9306",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "expyfun/stimuli/_vocoder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1018"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "390894"
}
],
"symlink_target": ""
}
|
import sys
import csv
import pandas as pd
import numpy as np
import codecs
class Transcript():
def __init__(self, inputFileName,outputFileName):
self.inputFileName = inputFileName
self.outputFileName = outputFileName
self.raw_messages = []
self.speakerlist = []
self.messagelist = []
self.paragraphList = []
self.datelist = []
self.timelist = []
def open_file(self):
arq = codecs.open(self.inputFileName, "r", "utf-8-sig")
content = arq.read()
arq.close()
lines = content.split("\n")
lines = [l for l in lines if len(l) > 4]
for l in lines:
self.raw_messages.append(l.encode("utf-8"))
def valid_date(self,date_str):
valid = True
separator="/"
try:
year, month, day = map(int, date_str.split(separator))
except ValueError:
valid = False
return valid
    def feed_lists(self):
        lineNo = 0
        seqNo = 0
        # Defaults in case the export starts with a continuation line
        prevSender = ''
        prevRawDate = ''
        prevTime = ''
        for l in self.raw_messages:
l = l.rstrip()
msg_date, sep, msg = l.decode().partition("- ")
#Date and time has a , separator
raw_date, sep, time = msg_date.partition(", ")
speaker, sep, message = msg.partition(": ")
#speaker = speaker.encode('utf-8')
lineNo += 1
# A proper whatsapp conversation with date, time, speaker, text
if message:
self.datelist.append(raw_date)
self.timelist.append(time)
self.speakerlist.append(speaker)
self.messagelist.append(message)
# store the previous speaker so that you can use it to print when there is only a line
prevSender = speaker
prevRawDate = raw_date
prevTime = time
seqNo +=1
# A message. date, time, message
            elif speaker != "" and self.valid_date(raw_date):
self.datelist.append(raw_date)
self.timelist.append(time)
self.speakerlist.append('MESSAGE')
self.messagelist.append(speaker)
# store the previous speaker so that you can use it to print when there is only a line
prevSender = 'MESSAGE'
prevRawDate = raw_date
prevTime = time
seqNo +=1
# A continuing conversation with no date time or name
else:
self.datelist.append(prevRawDate)
self.timelist.append(prevTime)
self.speakerlist.append(prevSender)
self.messagelist.append(l)
self.paragraphList.append(seqNo)
    def write_transcript(self, end=0):
        if end == 0:
            end = len(self.messagelist)
        with open(self.outputFileName, 'w') as out_file:
            writer = csv.writer(out_file)
            writer.writerow(["SentenceNo","SequenceNo","Date","Time","Speaker","Text"])
            for i in range(len(self.messagelist[:end])):
                writer.writerow([i,self.paragraphList[i],self.datelist[i], self.timelist[i],self.speakerlist[i], self.messagelist[i]])
    def get_speakers(self):
        return list(set(self.speakerlist))
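# --- Usage sketch (file names below are hypothetical) ---
# Converts an exported WhatsApp chat into a dated, per-speaker CSV:
if __name__ == '__main__':
    transcript = Transcript('chat_export.txt', 'transcript.csv')
    transcript.open_file()
    transcript.feed_lists()
    transcript.write_transcript()
    print(transcript.get_speakers())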
|
{
"content_hash": "d4164d6bee69ec4fa77483d45c435d65",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 121,
"avg_line_length": 31.13953488372093,
"alnum_prop": 0.6807318894697535,
"repo_name": "gtadiparthi/whatsapp-parser-lite",
"id": "745d03eca19f81db334b8ea6e0d20754973a0065",
"size": "2761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transcript.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3567"
}
],
"symlink_target": ""
}
|
"""Built-in WideNDeep model classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import layers as layer_module
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.experimental.WideDeepModel')
class WideDeepModel(keras_training.Model):
r"""Wide & Deep Model for regression and classification problems.
  This model jointly trains a linear and a dnn model.
Example:
```python
linear_model = LinearModel()
dnn_model = keras.Sequential([keras.layers.Dense(units=64),
keras.layers.Dense(units=1)])
combined_model = WideDeepModel(linear_model, dnn_model)
  combined_model.compile(optimizer=['sgd', 'adam'], loss='mse', metrics=['mse'])
# define dnn_inputs and linear_inputs as separate numpy arrays or
# a single numpy array if dnn_inputs is same as linear_inputs.
  combined_model.fit([linear_inputs, dnn_inputs], y, epochs=epochs)
# or define a single `tf.data.Dataset` that contains a single tensor or
# separate tensors for dnn_inputs and linear_inputs.
dataset = tf.data.Dataset.from_tensors(([linear_inputs, dnn_inputs], y))
  combined_model.fit(dataset, epochs=epochs)
```
Both linear and dnn model can be pre-compiled and trained separately
before jointly training:
Example:
```python
linear_model = LinearModel()
linear_model.compile('adagrad', 'mse')
  linear_model.fit(linear_inputs, y, epochs=epochs)
dnn_model = keras.Sequential([keras.layers.Dense(units=1)])
dnn_model.compile('rmsprop', 'mse')
  dnn_model.fit(dnn_inputs, y, epochs=epochs)
combined_model = WideDeepModel(linear_model, dnn_model)
  combined_model.compile(optimizer=['sgd', 'adam'], loss='mse', metrics=['mse'])
  combined_model.fit([linear_inputs, dnn_inputs], y, epochs=epochs)
```
"""
def __init__(self, linear_model, dnn_model, activation=None, **kwargs):
"""Create a Wide & Deep Model.
Args:
      linear_model: a premade LinearModel; its output must match the output
        of the dnn model.
      dnn_model: a `tf.keras.Model`; its output must match the output of the
        linear model.
activation: Activation function. Set it to None to maintain a linear
activation.
**kwargs: The keyword arguments that are passed on to BaseLayer.__init__.
Allowed keyword arguments include `name`.
"""
super(WideDeepModel, self).__init__(**kwargs)
base_layer.keras_premade_model_gauge.get_cell('WideDeep').set(True)
self.linear_model = linear_model
self.dnn_model = dnn_model
self.activation = activations.get(activation)
def call(self, inputs, training=None):
if not isinstance(inputs, (tuple, list)) or len(inputs) != 2:
linear_inputs = dnn_inputs = inputs
else:
linear_inputs, dnn_inputs = inputs
linear_output = self.linear_model(linear_inputs)
# pylint: disable=protected-access
if self.dnn_model._expects_training_arg:
if training is None:
training = K.learning_phase()
dnn_output = self.dnn_model(dnn_inputs, training=training)
else:
dnn_output = self.dnn_model(dnn_inputs)
output = nest.map_structure(lambda x, y: (x + y), linear_output, dnn_output)
if self.activation:
return nest.map_structure(self.activation, output)
return output
  # This does not support gradient scaling or LossScaleOptimizer.
def train_step(self, data):
x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
x, y, sample_weight = data_adapter.expand_1d((x, y, sample_weight))
with backprop.GradientTape() as tape:
y_pred = self(x, training=True)
loss = self.compiled_loss(
y, y_pred, sample_weight, regularization_losses=self.losses)
self.compiled_metrics.update_state(y, y_pred, sample_weight)
if isinstance(self.optimizer, (list, tuple)):
linear_vars = self.linear_model.trainable_variables
dnn_vars = self.dnn_model.trainable_variables
linear_grads, dnn_grads = tape.gradient(loss, (linear_vars, dnn_vars))
linear_optimizer = self.optimizer[0]
dnn_optimizer = self.optimizer[1]
linear_optimizer.apply_gradients(zip(linear_grads, linear_vars))
dnn_optimizer.apply_gradients(zip(dnn_grads, dnn_vars))
else:
trainable_variables = self.trainable_variables
grads = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(grads, trainable_variables))
return {m.name: m.result() for m in self.metrics}
def _make_train_function(self):
# Only needed for graph mode and model_to_estimator.
has_recompiled = self._recompile_weights_loss_and_weighted_metrics()
self._check_trainable_weights_consistency()
# If we have re-compiled the loss/weighted metric sub-graphs then create
# train function even if one exists already. This is because
# `_feed_sample_weights` list has been updated on re-compile.
if getattr(self, 'train_function', None) is None or has_recompiled:
# Restore the compiled trainable state.
current_trainable_state = self._get_trainable_state()
self._set_trainable_state(self._compiled_trainable_state)
inputs = (
self._feed_inputs + self._feed_targets + self._feed_sample_weights)
if not isinstance(K.symbolic_learning_phase(), int):
inputs += [K.symbolic_learning_phase()]
if isinstance(self.optimizer, (list, tuple)):
linear_optimizer = self.optimizer[0]
dnn_optimizer = self.optimizer[1]
else:
linear_optimizer = self.optimizer
dnn_optimizer = self.optimizer
with K.get_graph().as_default():
with K.name_scope('training'):
# Training updates
updates = []
linear_updates = linear_optimizer.get_updates(
params=self.linear_model.trainable_weights, # pylint: disable=protected-access
loss=self.total_loss)
updates += linear_updates
dnn_updates = dnn_optimizer.get_updates(
params=self.dnn_model.trainable_weights, # pylint: disable=protected-access
loss=self.total_loss)
updates += dnn_updates
# Unconditional updates
updates += self.get_updates_for(None)
# Conditional updates relevant to this model
updates += self.get_updates_for(self.inputs)
metrics = self._get_training_eval_metrics()
metrics_tensors = [
m._call_result for m in metrics if hasattr(m, '_call_result') # pylint: disable=protected-access
]
with K.name_scope('training'):
# Gets loss and metrics. Updates weights at each call.
fn = K.function(
inputs, [self.total_loss] + metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
setattr(self, 'train_function', fn)
# Restore the current trainable state
self._set_trainable_state(current_trainable_state)
def get_config(self):
linear_config = generic_utils.serialize_keras_object(self.linear_model)
dnn_config = generic_utils.serialize_keras_object(self.dnn_model)
config = {
'linear_model': linear_config,
'dnn_model': dnn_config,
'activation': activations.serialize(self.activation),
}
base_config = base_layer.Layer.get_config(self)
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
linear_config = config.pop('linear_model')
linear_model = layer_module.deserialize(linear_config, custom_objects)
dnn_config = config.pop('dnn_model')
dnn_model = layer_module.deserialize(dnn_config, custom_objects)
activation = activations.deserialize(
config.pop('activation', None), custom_objects=custom_objects)
return cls(
linear_model=linear_model,
dnn_model=dnn_model,
activation=activation,
**config)
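# --- Example: config round-trip (a sketch, not from the original docs) ---
# `get_config`/`from_config` above allow cloning the architecture; the
# `linear_model`/`dnn_model` names are assumed to be built as in the class
# docstring. Only the architecture is cloned, not the trained weights:
#
#   wide_deep = WideDeepModel(linear_model, dnn_model, activation='relu')
#   clone = WideDeepModel.from_config(wide_deep.get_config())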
|
{
"content_hash": "dac3ef64239b9efbf28c8c9ae3f007bf",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 109,
"avg_line_length": 40.926829268292686,
"alnum_prop": 0.6829558998808105,
"repo_name": "annarev/tensorflow",
"id": "1f70a38cc9306a846947a36f56b7587ef72ea396",
"size": "9079",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/premade/wide_deep.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "341894"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49343974"
},
{
"name": "CMake",
"bytes": "195286"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1253646"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "863222"
},
{
"name": "Jupyter Notebook",
"bytes": "2604741"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52734"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41289329"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "469612"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
from mapperpy.object_mapper import ObjectMapper
from mapperpy.one_way_mapper import OneWayMapper
from mapperpy.mapper_options import MapperOptions
from mapperpy.exceptions import ConfigurationException
|
{
"content_hash": "f3314d8627f25c559f7311797c20379e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 54,
"avg_line_length": 50.5,
"alnum_prop": 0.8811881188118812,
"repo_name": "lgrech/MapperPy",
"id": "28ce25e5ebe7b048d1df90ef1cd313e6a1448841",
"size": "202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mapperpy/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "112094"
}
],
"symlink_target": ""
}
|
"""
Default database bootstrap.
"""
from antioch.core import interface, bootstrap
from antioch.util import sql
for name in interface.default_permissions:
exchange.connection.runOperation(sql.build_insert('permission', name=name))
exchange.load_permissions()
system = exchange.instantiate('object', name='System Object')
set_default_permissions_verb = interface.Verb(system)
set_default_permissions_verb._method = True
set_default_permissions_verb._code = bootstrap.get_source('system_set_default_permissions.py')
exchange.save(set_default_permissions_verb)
set_default_permissions_verb.add_name('set_default_permissions')
set_default_permissions_verb(set_default_permissions_verb)
set_default_permissions_verb(system)
wizard = exchange.instantiate('object', name='Wizard', unique_name=True)
wizard.set_owner(wizard)
system.set_owner(wizard)
set_default_permissions_verb.set_owner(wizard)
player_defaults = exchange.instantiate('object', name='player defaults')
player_defaults.set_owner(wizard)
wizard.add_parent(player_defaults)
room = exchange.instantiate('object', name='The First Room', unique_name=True)
room.set_owner(wizard)
user = exchange.instantiate('object', name='User', unique_name=True)
user.set_owner(user)
user.add_parent(player_defaults)
wizard.set_location(room)
user.set_location(room)
wizard.set_player(True, is_wizard=True, passwd='wizard')
user.set_player(True, passwd='user')
wizard.add_verb('edit', **dict(
ability = True,
filename = 'wizard_class_edit.py',
repo = 'default',
ref = 'master'
))
wizard.add_verb('exec', **dict(
ability = True,
filename = 'wizard_class_exec.py',
repo = 'default',
ref = 'master'
)).allow('wizards', 'execute')
wizard.add_verb('eval', **dict(
ability = True,
filename = 'wizard_class_eval.py',
repo = 'default',
ref = 'master'
)).allow('wizards', 'execute')
player_defaults.add_verb('set', **dict(
ability = True,
filename = 'player_class_set.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
player_defaults.add_verb('look', **dict(
ability = True,
method = True,
filename = 'player_class_look.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
player_defaults.add_verb('passwd', **dict(
ability = True,
method = True,
filename = 'player_class_passwd.py',
repo = 'default',
ref = 'master'
)).allow('everyone', 'execute')
|
{
"content_hash": "b8fc956184d03d4bb62bd31990aa1261",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 94,
"avg_line_length": 29.0561797752809,
"alnum_prop": 0.6635730858468677,
"repo_name": "philchristensen/antioch",
"id": "f077651aac798a4274337209cc70aaee83003173",
"size": "2670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "antioch/core/bootstrap/minimal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12977"
},
{
"name": "Dockerfile",
"bytes": "850"
},
{
"name": "HTML",
"bytes": "21856"
},
{
"name": "JavaScript",
"bytes": "18844"
},
{
"name": "Python",
"bytes": "353482"
},
{
"name": "Shell",
"bytes": "1260"
}
],
"symlink_target": ""
}
|
from .flow_controller import FlowController
from .outstanding_count_flow_controller import OutstandingCountFlowController
from .rate_limiter_flow_controller import RateLimiterFlowController
|
{
"content_hash": "ba078c3fec30cbb0f3f3d12643e711ce",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 77,
"avg_line_length": 63.333333333333336,
"alnum_prop": 0.8842105263157894,
"repo_name": "GoogleCloudPlatform/pubsub",
"id": "4b7de1d1ace4a88894779c8eaa758196f86c6c51",
"size": "190",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "load-test-framework/python_src/clients/flow_control/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "24013"
},
{
"name": "Java",
"bytes": "419379"
},
{
"name": "JavaScript",
"bytes": "26647"
},
{
"name": "Python",
"bytes": "29321"
},
{
"name": "Shell",
"bytes": "9077"
}
],
"symlink_target": ""
}
|
import csv
from bs4 import BeautifulSoup
from collections import Counter
import re
csv_file = open('data_detikcom_740.csv')
csv_reader = csv.DictReader(csv_file)
words = []
# Compile the regex once to strip non-alphanumeric chars
nonalpha = re.compile(r'[\W_]+')
for row in csv_reader:
    title = row['title'].strip().lower()
    raw_content = row['raw_content']
    clean_content = BeautifulSoup(raw_content, 'lxml').text
for word in title.split(' '):
word = word.lower()
word = nonalpha.sub('', word)
if word != '':
words.append(word)
for word in clean_content.split(' '):
word = word.lower()
word = nonalpha.sub('', word)
if word != '':
words.append(word)
counter = Counter(words)
for word in counter.most_common(len(counter)):
print '{},{}'.format(word[0], word[1])
csv_file.close()
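# Usage sketch (assumed input layout): data_detikcom_740.csv must provide at
# least `title` and `raw_content` columns; redirect stdout to keep the counts:
#   python show_word_frequency.py > word_frequency.csv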
|
{
"content_hash": "ebdb9d6ee7e4e8d7c548abb02fc0049a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 59,
"avg_line_length": 26.424242424242426,
"alnum_prop": 0.6100917431192661,
"repo_name": "bobbypriambodo/rojak",
"id": "10df2fb6cb84e6a1c6e7fef8683e9e3748b9c0bd",
"size": "872",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "rojak-analyzer/show_word_frequency.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2303"
},
{
"name": "Elixir",
"bytes": "74193"
},
{
"name": "HTML",
"bytes": "889"
},
{
"name": "Java",
"bytes": "20877"
},
{
"name": "JavaScript",
"bytes": "31166"
},
{
"name": "Python",
"bytes": "204215"
},
{
"name": "Ruby",
"bytes": "927"
},
{
"name": "Shell",
"bytes": "2094"
},
{
"name": "Swift",
"bytes": "5899"
}
],
"symlink_target": ""
}
|
from communities.models import Community
from django.core.management.base import BaseCommand
from django.utils import timezone
from django.shortcuts import get_object_or_404
from issues.models import Issue, IssueStatus, IssueComment, \
IssueCommentRevision, Proposal, ProposalVote
from meetings.models import Meeting, AgendaItem, MeetingParticipant, \
MeetingExternalParticipant
from users.models import Membership, Invitation
import json
from datetime import datetime
class Command(BaseCommand):
help = "gather results after straw voting ends"
def handle(self, *args, **options):
cid = int(args[0])
community = get_object_or_404(Community, pk=cid)
community.sum_vote_results()
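# Example invocation (the community id is hypothetical):
#   python manage.py get_straw_vote_results 1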
|
{
"content_hash": "733f92e8c1c11fb071372d6dac5a17c0",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 70,
"avg_line_length": 33.22727272727273,
"alnum_prop": 0.7606019151846786,
"repo_name": "nonZero/OpenCommunity",
"id": "f205a6a724b6be73ebb7fc9f33738baad91b16d4",
"size": "731",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/communities/management/commands/get_straw_vote_results.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "610648"
},
{
"name": "HTML",
"bytes": "258907"
},
{
"name": "JavaScript",
"bytes": "2403446"
},
{
"name": "Python",
"bytes": "1385625"
},
{
"name": "Shell",
"bytes": "185"
}
],
"symlink_target": ""
}
|
"""Test to verify that Home Assistant core works."""
# pylint: disable=protected-access
import asyncio
import logging
import os
import unittest
from unittest.mock import patch, MagicMock, sentinel
from datetime import datetime, timedelta
from tempfile import TemporaryDirectory
import pytz
import pytest
import homeassistant.core as ha
from homeassistant.exceptions import (InvalidEntityFormatError,
InvalidStateError)
from homeassistant.util.async import run_coroutine_threadsafe
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import (METRIC_SYSTEM)
from homeassistant.const import (
__version__, EVENT_STATE_CHANGED, ATTR_FRIENDLY_NAME, CONF_UNIT_SYSTEM,
ATTR_NOW, EVENT_TIME_CHANGED, EVENT_HOMEASSISTANT_STOP,
EVENT_HOMEASSISTANT_CLOSE, EVENT_SERVICE_REGISTERED, EVENT_SERVICE_REMOVED)
from tests.common import get_test_home_assistant
PST = pytz.timezone('America/Los_Angeles')
def test_split_entity_id():
"""Test split_entity_id."""
assert ha.split_entity_id('domain.object_id') == ['domain', 'object_id']
def test_async_add_job_schedule_callback():
"""Test that we schedule coroutines and add jobs to the job pool."""
hass = MagicMock()
job = MagicMock()
ha.HomeAssistant.async_add_job(hass, ha.callback(job))
assert len(hass.loop.call_soon.mock_calls) == 1
assert len(hass.loop.create_task.mock_calls) == 0
assert len(hass.add_job.mock_calls) == 0
@patch('asyncio.iscoroutinefunction', return_value=True)
def test_async_add_job_schedule_coroutinefunction(mock_iscoro):
"""Test that we schedule coroutines and add jobs to the job pool."""
hass = MagicMock()
job = MagicMock()
ha.HomeAssistant.async_add_job(hass, job)
assert len(hass.loop.call_soon.mock_calls) == 0
assert len(hass.loop.create_task.mock_calls) == 1
assert len(hass.add_job.mock_calls) == 0
@patch('asyncio.iscoroutinefunction', return_value=False)
def test_async_add_job_add_threaded_job_to_pool(mock_iscoro):
"""Test that we schedule coroutines and add jobs to the job pool."""
hass = MagicMock()
job = MagicMock()
ha.HomeAssistant.async_add_job(hass, job)
assert len(hass.loop.call_soon.mock_calls) == 0
assert len(hass.loop.create_task.mock_calls) == 0
assert len(hass.loop.run_in_executor.mock_calls) == 1
def test_async_run_job_calls_callback():
"""Test that the callback annotation is respected."""
hass = MagicMock()
calls = []
def job():
calls.append(1)
ha.HomeAssistant.async_run_job(hass, ha.callback(job))
assert len(calls) == 1
assert len(hass.async_add_job.mock_calls) == 0
def test_async_run_job_delegates_non_async():
"""Test that the callback annotation is respected."""
hass = MagicMock()
calls = []
def job():
calls.append(1)
ha.HomeAssistant.async_run_job(hass, job)
assert len(calls) == 0
assert len(hass.async_add_job.mock_calls) == 1
def test_stage_shutdown():
"""Simulate a shutdown, test calling stuff."""
hass = get_test_home_assistant()
test_stop = []
test_close = []
test_all = []
hass.bus.listen(
EVENT_HOMEASSISTANT_STOP, lambda event: test_stop.append(event))
hass.bus.listen(
EVENT_HOMEASSISTANT_CLOSE, lambda event: test_close.append(event))
hass.bus.listen('*', lambda event: test_all.append(event))
hass.stop()
assert len(test_stop) == 1
assert len(test_close) == 1
assert len(test_all) == 1
class TestHomeAssistant(unittest.TestCase):
"""Test the Home Assistant core classes."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
    def test_pending_scheduler(self):
"""Add a coro to pending tasks."""
call_count = []
@asyncio.coroutine
def test_coro():
"""Test Coro."""
call_count.append('call')
for i in range(3):
self.hass.add_job(test_coro())
run_coroutine_threadsafe(
asyncio.wait(self.hass._pending_tasks, loop=self.hass.loop),
loop=self.hass.loop
).result()
assert len(self.hass._pending_tasks) == 3
assert len(call_count) == 3
def test_async_add_job_pending_tasks_coro(self):
"""Add a coro to pending tasks."""
call_count = []
@asyncio.coroutine
def test_coro():
"""Test Coro."""
call_count.append('call')
for i in range(2):
self.hass.add_job(test_coro())
@asyncio.coroutine
def wait_finish_callback():
"""Wait until all stuff is scheduled."""
yield from asyncio.sleep(0, loop=self.hass.loop)
yield from asyncio.sleep(0, loop=self.hass.loop)
run_coroutine_threadsafe(
wait_finish_callback(), self.hass.loop).result()
assert len(self.hass._pending_tasks) == 2
self.hass.block_till_done()
assert len(call_count) == 2
def test_async_add_job_pending_tasks_executor(self):
"""Run a executor in pending tasks."""
call_count = []
def test_executor():
"""Test executor."""
call_count.append('call')
@asyncio.coroutine
def wait_finish_callback():
"""Wait until all stuff is scheduled."""
yield from asyncio.sleep(0, loop=self.hass.loop)
yield from asyncio.sleep(0, loop=self.hass.loop)
for i in range(2):
self.hass.add_job(test_executor)
run_coroutine_threadsafe(
wait_finish_callback(), self.hass.loop).result()
assert len(self.hass._pending_tasks) == 2
self.hass.block_till_done()
assert len(call_count) == 2
def test_async_add_job_pending_tasks_callback(self):
"""Run a callback in pending tasks."""
call_count = []
@ha.callback
def test_callback():
"""Test callback."""
call_count.append('call')
@asyncio.coroutine
def wait_finish_callback():
"""Wait until all stuff is scheduled."""
yield from asyncio.sleep(0, loop=self.hass.loop)
yield from asyncio.sleep(0, loop=self.hass.loop)
for i in range(2):
self.hass.add_job(test_callback)
run_coroutine_threadsafe(
wait_finish_callback(), self.hass.loop).result()
self.hass.block_till_done()
assert len(self.hass._pending_tasks) == 0
assert len(call_count) == 2
def test_add_job_with_none(self):
"""Try to add a job with None as function."""
with pytest.raises(ValueError):
self.hass.add_job(None, 'test_arg')
class TestEvent(unittest.TestCase):
"""A Test Event class."""
def test_eq(self):
"""Test events."""
now = dt_util.utcnow()
data = {'some': 'attr'}
event1, event2 = [
ha.Event('some_type', data, time_fired=now)
for _ in range(2)
]
self.assertEqual(event1, event2)
def test_repr(self):
"""Test that repr method works."""
self.assertEqual(
"<Event TestEvent[L]>",
str(ha.Event("TestEvent")))
self.assertEqual(
"<Event TestEvent[R]: beer=nice>",
str(ha.Event("TestEvent",
{"beer": "nice"},
ha.EventOrigin.remote)))
def test_as_dict(self):
"""Test as dictionary."""
event_type = 'some_type'
now = dt_util.utcnow()
data = {'some': 'attr'}
event = ha.Event(event_type, data, ha.EventOrigin.local, now)
expected = {
'event_type': event_type,
'data': data,
'origin': 'LOCAL',
'time_fired': now,
}
self.assertEqual(expected, event.as_dict())
class TestEventBus(unittest.TestCase):
"""Test EventBus methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.bus = self.hass.bus
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_add_remove_listener(self):
"""Test remove_listener method."""
self.hass.allow_pool = False
old_count = len(self.bus.listeners)
def listener(_): pass
unsub = self.bus.listen('test', listener)
self.assertEqual(old_count + 1, len(self.bus.listeners))
# Remove listener
unsub()
self.assertEqual(old_count, len(self.bus.listeners))
# Should do nothing now
unsub()
def test_unsubscribe_listener(self):
"""Test unsubscribe listener from returned function."""
calls = []
@ha.callback
def listener(event):
"""Mock listener."""
calls.append(event)
unsub = self.bus.listen('test', listener)
self.bus.fire('test')
self.hass.block_till_done()
assert len(calls) == 1
unsub()
self.bus.fire('event')
self.hass.block_till_done()
assert len(calls) == 1
def test_listen_once_event_with_callback(self):
"""Test listen_once_event method."""
runs = []
@ha.callback
def event_handler(event):
runs.append(event)
self.bus.listen_once('test_event', event_handler)
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(runs))
def test_listen_once_event_with_coroutine(self):
"""Test listen_once_event method."""
runs = []
@asyncio.coroutine
def event_handler(event):
runs.append(event)
self.bus.listen_once('test_event', event_handler)
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(runs))
def test_listen_once_event_with_thread(self):
"""Test listen_once_event method."""
runs = []
def event_handler(event):
runs.append(event)
self.bus.listen_once('test_event', event_handler)
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(runs))
def test_thread_event_listener(self):
"""Test a event listener listeners."""
thread_calls = []
def thread_listener(event):
thread_calls.append(event)
self.bus.listen('test_thread', thread_listener)
self.bus.fire('test_thread')
self.hass.block_till_done()
assert len(thread_calls) == 1
def test_callback_event_listener(self):
"""Test a event listener listeners."""
callback_calls = []
@ha.callback
def callback_listener(event):
callback_calls.append(event)
self.bus.listen('test_callback', callback_listener)
self.bus.fire('test_callback')
self.hass.block_till_done()
assert len(callback_calls) == 1
def test_coroutine_event_listener(self):
"""Test a event listener listeners."""
coroutine_calls = []
@asyncio.coroutine
def coroutine_listener(event):
coroutine_calls.append(event)
self.bus.listen('test_coroutine', coroutine_listener)
self.bus.fire('test_coroutine')
self.hass.block_till_done()
assert len(coroutine_calls) == 1
class TestState(unittest.TestCase):
"""Test State methods."""
def test_init(self):
"""Test state.init."""
self.assertRaises(
InvalidEntityFormatError, ha.State,
'invalid_entity_format', 'test_state')
self.assertRaises(
InvalidStateError, ha.State,
'domain.long_state', 't' * 256)
def test_domain(self):
"""Test domain."""
state = ha.State('some_domain.hello', 'world')
self.assertEqual('some_domain', state.domain)
def test_object_id(self):
"""Test object ID."""
state = ha.State('domain.hello', 'world')
self.assertEqual('hello', state.object_id)
def test_name_if_no_friendly_name_attr(self):
"""Test if there is no friendly name."""
state = ha.State('domain.hello_world', 'world')
self.assertEqual('hello world', state.name)
def test_name_if_friendly_name_attr(self):
"""Test if there is a friendly name."""
name = 'Some Unique Name'
state = ha.State('domain.hello_world', 'world',
{ATTR_FRIENDLY_NAME: name})
self.assertEqual(name, state.name)
def test_dict_conversion(self):
"""Test conversion of dict."""
state = ha.State('domain.hello', 'world', {'some': 'attr'})
self.assertEqual(state, ha.State.from_dict(state.as_dict()))
def test_dict_conversion_with_wrong_data(self):
"""Test conversion with wrong data."""
self.assertIsNone(ha.State.from_dict(None))
self.assertIsNone(ha.State.from_dict({'state': 'yes'}))
self.assertIsNone(ha.State.from_dict({'entity_id': 'yes'}))
def test_repr(self):
"""Test state.repr."""
self.assertEqual("<state happy.happy=on @ 1984-12-08T12:00:00+00:00>",
str(ha.State(
"happy.happy", "on",
last_changed=datetime(1984, 12, 8, 12, 0, 0))))
self.assertEqual(
"<state happy.happy=on; brightness=144 @ "
"1984-12-08T12:00:00+00:00>",
str(ha.State("happy.happy", "on", {"brightness": 144},
datetime(1984, 12, 8, 12, 0, 0))))
class TestStateMachine(unittest.TestCase):
"""Test State machine methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.states = self.hass.states
self.states.set("light.Bowl", "on")
self.states.set("switch.AC", "off")
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_is_state(self):
"""Test is_state method."""
self.assertTrue(self.states.is_state('light.Bowl', 'on'))
self.assertFalse(self.states.is_state('light.Bowl', 'off'))
self.assertFalse(self.states.is_state('light.Non_existing', 'on'))
def test_entity_ids(self):
"""Test get_entity_ids method."""
ent_ids = self.states.entity_ids()
self.assertEqual(2, len(ent_ids))
self.assertTrue('light.bowl' in ent_ids)
self.assertTrue('switch.ac' in ent_ids)
ent_ids = self.states.entity_ids('light')
self.assertEqual(1, len(ent_ids))
self.assertTrue('light.bowl' in ent_ids)
def test_all(self):
"""Test everything."""
states = sorted(state.entity_id for state in self.states.all())
self.assertEqual(['light.bowl', 'switch.ac'], states)
def test_remove(self):
"""Test remove method."""
events = []
@ha.callback
def callback(event):
events.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
self.assertIn('light.bowl', self.states.entity_ids())
self.assertTrue(self.states.remove('light.bowl'))
self.hass.block_till_done()
self.assertNotIn('light.bowl', self.states.entity_ids())
self.assertEqual(1, len(events))
self.assertEqual('light.bowl', events[0].data.get('entity_id'))
self.assertIsNotNone(events[0].data.get('old_state'))
self.assertEqual('light.bowl', events[0].data['old_state'].entity_id)
self.assertIsNone(events[0].data.get('new_state'))
# If it does not exist, we should get False
self.assertFalse(self.states.remove('light.Bowl'))
self.hass.block_till_done()
self.assertEqual(1, len(events))
    def test_case_insensitivity(self):
        """Test case insensitivity."""
runs = []
@ha.callback
def callback(event):
runs.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
self.states.set('light.BOWL', 'off')
self.hass.block_till_done()
self.assertTrue(self.states.is_state('light.bowl', 'off'))
self.assertEqual(1, len(runs))
def test_last_changed_not_updated_on_same_state(self):
"""Test to not update the existing, same state."""
state = self.states.get('light.Bowl')
future = dt_util.utcnow() + timedelta(hours=10)
with patch('homeassistant.util.dt.utcnow', return_value=future):
self.states.set("light.Bowl", "on", {'attr': 'triggers_change'})
self.hass.block_till_done()
state2 = self.states.get('light.Bowl')
assert state2 is not None
assert state.last_changed == state2.last_changed
def test_force_update(self):
"""Test force update option."""
events = []
@ha.callback
def callback(event):
events.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
self.states.set('light.bowl', 'on')
self.hass.block_till_done()
self.assertEqual(0, len(events))
self.states.set('light.bowl', 'on', None, True)
self.hass.block_till_done()
self.assertEqual(1, len(events))
class TestServiceCall(unittest.TestCase):
"""Test ServiceCall class."""
def test_repr(self):
"""Test repr method."""
self.assertEqual(
"<ServiceCall homeassistant.start>",
str(ha.ServiceCall('homeassistant', 'start')))
self.assertEqual(
"<ServiceCall homeassistant.start: fast=yes>",
str(ha.ServiceCall('homeassistant', 'start', {"fast": "yes"})))
class TestServiceRegistry(unittest.TestCase):
"""Test ServicerRegistry methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.services = self.hass.services
@ha.callback
def mock_service(call):
pass
self.services.register("Test_Domain", "TEST_SERVICE", mock_service)
self.calls_register = []
@ha.callback
def mock_event_register(event):
"""Mock register event."""
self.calls_register.append(event)
self.hass.bus.listen(EVENT_SERVICE_REGISTERED, mock_event_register)
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_has_service(self):
"""Test has_service method."""
self.assertTrue(
self.services.has_service("tesT_domaiN", "tesT_servicE"))
self.assertFalse(
self.services.has_service("test_domain", "non_existing"))
self.assertFalse(
self.services.has_service("non_existing", "test_service"))
def test_services(self):
"""Test services."""
expected = {
'test_domain': {'test_service': {'description': '', 'fields': {}}}
}
self.assertEqual(expected, self.services.services)
def test_call_with_blocking_done_in_time(self):
"""Test call with blocking."""
calls = []
@ha.callback
def service_handler(call):
"""Service handler."""
calls.append(call)
self.services.register(
"test_domain", "register_calls", service_handler)
self.hass.block_till_done()
assert len(self.calls_register) == 1
assert self.calls_register[-1].data['domain'] == 'test_domain'
assert self.calls_register[-1].data['service'] == 'register_calls'
self.assertTrue(
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
self.assertEqual(1, len(calls))
def test_call_non_existing_with_blocking(self):
"""Test non-existing with blocking."""
prior = ha.SERVICE_CALL_LIMIT
try:
ha.SERVICE_CALL_LIMIT = 0.01
assert not self.services.call('test_domain', 'i_do_not_exist',
blocking=True)
finally:
ha.SERVICE_CALL_LIMIT = prior
def test_async_service(self):
"""Test registering and calling an async service."""
calls = []
@asyncio.coroutine
def service_handler(call):
"""Service handler coroutine."""
calls.append(call)
self.services.register(
'test_domain', 'register_calls', service_handler)
self.hass.block_till_done()
assert len(self.calls_register) == 1
assert self.calls_register[-1].data['domain'] == 'test_domain'
assert self.calls_register[-1].data['service'] == 'register_calls'
self.assertTrue(
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
self.hass.block_till_done()
self.assertEqual(1, len(calls))
def test_callback_service(self):
"""Test registering and calling an async service."""
calls = []
@ha.callback
def service_handler(call):
"""Service handler coroutine."""
calls.append(call)
self.services.register(
'test_domain', 'register_calls', service_handler)
self.hass.block_till_done()
assert len(self.calls_register) == 1
assert self.calls_register[-1].data['domain'] == 'test_domain'
assert self.calls_register[-1].data['service'] == 'register_calls'
self.assertTrue(
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
self.hass.block_till_done()
self.assertEqual(1, len(calls))
def test_remove_service(self):
"""Test remove service."""
calls_remove = []
@ha.callback
def mock_event_remove(event):
"""Mock register event."""
calls_remove.append(event)
self.hass.bus.listen(EVENT_SERVICE_REMOVED, mock_event_remove)
assert self.services.has_service('test_Domain', 'test_Service')
self.services.remove('test_Domain', 'test_Service')
self.hass.block_till_done()
assert not self.services.has_service('test_Domain', 'test_Service')
assert len(calls_remove) == 1
assert calls_remove[-1].data['domain'] == 'test_domain'
assert calls_remove[-1].data['service'] == 'test_service'
    def test_remove_service_that_does_not_exist(self):
        """Test removing a service that does not exist."""
calls_remove = []
@ha.callback
def mock_event_remove(event):
"""Mock register event."""
calls_remove.append(event)
self.hass.bus.listen(EVENT_SERVICE_REMOVED, mock_event_remove)
assert not self.services.has_service('test_xxx', 'test_yyy')
self.services.remove('test_xxx', 'test_yyy')
self.hass.block_till_done()
assert len(calls_remove) == 0
class TestConfig(unittest.TestCase):
"""Test configuration methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.config = ha.Config()
self.assertIsNone(self.config.config_dir)
def test_path_with_file(self):
"""Test get_config_path method."""
self.config.config_dir = '/tmp/ha-config'
self.assertEqual("/tmp/ha-config/test.conf",
self.config.path("test.conf"))
def test_path_with_dir_and_file(self):
"""Test get_config_path method."""
self.config.config_dir = '/tmp/ha-config'
self.assertEqual("/tmp/ha-config/dir/test.conf",
self.config.path("dir", "test.conf"))
def test_as_dict(self):
"""Test as dict."""
self.config.config_dir = '/tmp/ha-config'
expected = {
'latitude': None,
'longitude': None,
'elevation': None,
CONF_UNIT_SYSTEM: METRIC_SYSTEM.as_dict(),
'location_name': None,
'time_zone': 'UTC',
'components': set(),
'config_dir': '/tmp/ha-config',
'whitelist_external_dirs': set(),
'version': __version__,
}
self.assertEqual(expected, self.config.as_dict())
def test_is_allowed_path(self):
"""Test is_allowed_path method."""
with TemporaryDirectory() as tmp_dir:
self.config.whitelist_external_dirs = set((
tmp_dir,
))
test_file = os.path.join(tmp_dir, "test.jpg")
with open(test_file, "w") as tmp_file:
tmp_file.write("test")
valid = [
test_file,
]
for path in valid:
assert self.config.is_allowed_path(path)
self.config.whitelist_external_dirs = set(('/home', '/var'))
            invalid = [
"/hass/config/secure",
"/etc/passwd",
"/root/secure_file",
"/var/../etc/passwd",
test_file,
]
            for path in invalid:
assert not self.config.is_allowed_path(path)
with self.assertRaises(AssertionError):
self.config.is_allowed_path(None)
@patch('homeassistant.core.monotonic')
def test_create_timer(mock_monotonic, loop):
"""Test create timer."""
hass = MagicMock()
funcs = []
orig_callback = ha.callback
def mock_callback(func):
funcs.append(func)
return orig_callback(func)
mock_monotonic.side_effect = 10.2, 10.3
with patch.object(ha, 'callback', mock_callback), \
patch('homeassistant.core.dt_util.utcnow',
return_value=sentinel.mock_date):
ha._async_create_timer(hass)
assert len(funcs) == 2
fire_time_event, stop_timer = funcs
assert len(hass.bus.async_listen_once.mock_calls) == 1
assert len(hass.bus.async_fire.mock_calls) == 1
assert len(hass.loop.call_later.mock_calls) == 1
event_type, callback = hass.bus.async_listen_once.mock_calls[0][1]
assert event_type == EVENT_HOMEASSISTANT_STOP
assert callback is stop_timer
slp_seconds, callback, nxt = hass.loop.call_later.mock_calls[0][1]
assert abs(slp_seconds - 0.9) < 0.001
assert callback is fire_time_event
assert abs(nxt - 11.2) < 0.001
event_type, event_data = hass.bus.async_fire.mock_calls[0][1]
assert event_type == EVENT_TIME_CHANGED
assert event_data[ATTR_NOW] is sentinel.mock_date
@patch('homeassistant.core.monotonic')
def test_timer_out_of_sync(mock_monotonic, loop):
"""Test create timer."""
hass = MagicMock()
funcs = []
orig_callback = ha.callback
def mock_callback(func):
funcs.append(func)
return orig_callback(func)
mock_monotonic.side_effect = 10.2, 11.3, 11.3
with patch.object(ha, 'callback', mock_callback), \
patch('homeassistant.core.dt_util.utcnow',
return_value=sentinel.mock_date):
ha._async_create_timer(hass)
assert len(funcs) == 2
fire_time_event, stop_timer = funcs
assert len(hass.loop.call_later.mock_calls) == 1
slp_seconds, callback, nxt = hass.loop.call_later.mock_calls[0][1]
assert slp_seconds == 1
assert callback is fire_time_event
assert abs(nxt - 12.3) < 0.001
@asyncio.coroutine
def test_hass_start_starts_the_timer(loop):
"""Test when hass starts, it starts the timer."""
hass = ha.HomeAssistant(loop=loop)
try:
with patch('homeassistant.core._async_create_timer') as mock_timer:
yield from hass.async_start()
assert hass.state == ha.CoreState.running
assert not hass._track_task
assert len(mock_timer.mock_calls) == 1
assert mock_timer.mock_calls[0][1][0] is hass
finally:
yield from hass.async_stop()
assert hass.state == ha.CoreState.not_running
@asyncio.coroutine
def test_start_taking_too_long(loop, caplog):
"""Test when async_start takes too long."""
hass = ha.HomeAssistant(loop=loop)
caplog.set_level(logging.WARNING)
try:
with patch('homeassistant.core.timeout',
side_effect=asyncio.TimeoutError), \
patch('homeassistant.core._async_create_timer') as mock_timer:
yield from hass.async_start()
assert hass.state == ha.CoreState.running
assert len(mock_timer.mock_calls) == 1
assert mock_timer.mock_calls[0][1][0] is hass
assert 'Something is blocking Home Assistant' in caplog.text
finally:
yield from hass.async_stop()
assert hass.state == ha.CoreState.not_running
@asyncio.coroutine
def test_track_task_functions(loop):
"""Test function to start/stop track task and initial state."""
hass = ha.HomeAssistant(loop=loop)
try:
assert hass._track_task
hass.async_stop_track_tasks()
assert not hass._track_task
hass.async_track_tasks()
assert hass._track_task
finally:
yield from hass.async_stop()
|
{
"content_hash": "6e5c3f0947b049abbe49b012026e7d3b",
"timestamp": "",
"source": "github",
"line_count": 955,
"max_line_length": 79,
"avg_line_length": 31.352879581151832,
"alnum_prop": 0.5930799545788524,
"repo_name": "ewandor/home-assistant",
"id": "09ddf721628790999039a9722fa75731f977f808",
"size": "29942",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/test_core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8860790"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12639"
}
],
"symlink_target": ""
}
|
"""
#####################################################################################
#
# Copyright (c) 2013 M.R.Z <zgd1348833@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#====================================================================================
#
#
#####################################################################################
"""
from __future__ import division
import time;
import pydoc;
import sys;
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice, MonkeyImage
from com.android.monkeyrunner.easy import EasyMonkeyDevice, By
TAG = "AutoOpenApps"
DEBUG = 1
testCount = 1000;
NOT_FOUND = -1
PAGE = 3
ONE_X = 60
ONE_Y = 153
TWO_X = 180
TWO_Y = 313
THREE_X = 300
THREE_Y = 463
FOUR_X = 420
FOUR_Y = 633
END_X = 450
END_Y = 650
UP_X = 120
UP_Y = 160
def checkParams():
dType = sys.argv[1]
print "%s" %(dType)
print "dType.length=%d" %(len(dType))
    # Just show all characters received from the args
for i in range(0,len(dType)):
print "dType[%d]=%c" %(i, dType[i])
""" Modify for this just because sys.argv, which is get from shell,
which contain some special non-display character
"""
tmp = dType
if(NOT_FOUND != tmp.find("7060S")):
print "This is 7060S"
OPEN_CLOSE_X = 340
OPEN_CLOSE_Y = 260
elif(NOT_FOUND != tmp.find("7061")):
print "This is 7061"
OPEN_CLOSE_X = 335
OPEN_CLOSE_Y = 255
elif(NOT_FOUND != tmp.find("YourType")):
""" If you want add devices, just modify bellow
Add your device's position here.
"""
print "What you want to show"
else:
print "Why are you goto here"
checkParams()
imageA = MonkeyRunner.loadImageFromFile("./screen/screen.png")
device = MonkeyRunner.waitForConnection()
t = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()));
print "%s [DEBUG] This test is beginning..." %(t);
i=0;
for i in range(0,testCount):
j=0;
for j in range(0,PAGE):
y = ONE_Y;
b = 0;
s = 0;
for b in range(0,4):
x = ONE_X;
a = 1;
for a in range(1,5):
t = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()));
device.touch(x,y,MonkeyDevice.DOWN_AND_UP);
s = s+1;
print "x=%d y=%d" %(x,y);
print "%s Round %d Open the %d app in page %d" %(t,i+1,s,j+1);
MonkeyRunner.sleep(2);
imageB = device.takeSnapshot();
Files = "./test/time_" + t + "_count_" + str(i) + "_pages_"+ str(j) +"_app_"+ str(a+b)+".png"
imageB.writeToFile(Files,'png');
device.press('KEYCODE_HOME','DOWN_AND_UP');
MonkeyRunner.sleep(2);
device.press('KEYCODE_HOME','DOWN_AND_UP');
MonkeyRunner.sleep(2);
device.touch(238,813,MonkeyDevice.DOWN_AND_UP);
MonkeyRunner.sleep(2);
x = x+UP_X;
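                # note: the two `else:` clauses below belong to the `for`
                # loops above (for-else); each runs once its loop finishes
                # without a `break`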
else:
y = y+UP_Y;
else:
device.drag((430,330),(30,330),0.1,1);
s = 0;
MonkeyRunner.sleep(2);
|
{
"content_hash": "4e419fdaf10c15b769815d2caad28af9",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 109,
"avg_line_length": 33.368852459016395,
"alnum_prop": 0.5824121837386391,
"repo_name": "GdZ/AutoTest",
"id": "9d95a9de80eab214cc73504a1af3203c32a70da5",
"size": "4071",
"binary": false,
"copies": "1",
"ref": "refs/heads/release",
"path": "AutoOpenApps/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4949"
},
{
"name": "Python",
"bytes": "39472"
},
{
"name": "Shell",
"bytes": "41175"
}
],
"symlink_target": ""
}
|
class LoopType(object):
def __init__(self):
self.loop = None
def pretest(self):
return self.loop == 0
def posttest(self):
return self.loop == 1
def endless(self):
return self.loop == 2
def set_pretest(self):
self.loop = 0
def set_posttest(self):
self.loop = 1
def set_endless(self):
self.loop = 2
class Node(object):
def __init__(self, name):
self.name = name
self.num = 0
self.looptype = LoopType()
self.interval = None
self.startloop = False
self.endloop = False
self.type = -1
self.latch = None
self.if_follow = None
self.loop_follow = None
self.switch_follow = None
self.loop_nodes = []
def copy_from(self, node):
self.num = node.num
self.looptype = node.looptype
self.interval = node.interval
self.startloop = node.startloop
self.endloop = node.endloop
self.type = node.type
self.latch = node.latch
self.if_follow = node.if_follow
self.loop_follow = node.loop_follow
self.switch_follow = node.switch_follow
self.loop_nodes = node.loop_nodes
def update_attribute_with(self, n_map):
self.latch = n_map.get(self.latch, self.latch)
self.if_follow = n_map.get(self.if_follow, self.if_follow)
self.loop_follow = n_map.get(self.loop_follow, self.loop_follow)
self.switch_follow = n_map.get(self.switch_follow, self.switch_follow)
self.loop_nodes = list(set(n_map.get(n, n) for n in self.loop_nodes))
def is_cond(self):
return self.type == 0
def set_cond(self):
self.type = 0
def is_switch(self):
return self.type == 1
def set_switch(self):
self.type = 1
def is_stmt(self):
return self.type == 2
def set_stmt(self):
self.type = 2
def is_return(self):
return self.type == 3
def set_return(self):
self.type = 3
def is_throw(self):
return self.type == 4
def set_throw(self):
self.type = 4
def set_loop_pretest(self):
self.looptype.set_pretest()
def set_loop_posttest(self):
self.looptype.set_posttest()
def set_loop_endless(self):
self.looptype.set_endless()
def get_head(self):
return self
def get_end(self):
return self
def set_loop_nodes(self, nodes):
self.loop_nodes = nodes
def set_start_loop(self, b=True):
self.startloop = b
def set_end_loop(self, b=True):
self.endloop = b
def set_if_follow(self, node):
self.if_follow = node
def get_if_follow(self):
return self.if_follow
def set_loop_follow(self, node):
self.loop_follow = node
def get_loop_follow(self):
return self.loop_follow
def set_switch_follow(self, node):
self.switch_follow = node
def get_switch_follow(self):
return self.switch_follow
def set_latch_node(self, node):
self.latch = node
def is_start_loop(self):
return self.startloop
def is_end_loop(self):
return self.endloop
def __repr__(self):
return str(self)
class Interval(Node):
def __init__(self, head):
super(Interval, self).__init__(head.name)
self.name = 'Interval-%s' % head.name
self.content = set([head])
self.end = None
self.head = head
head.interval = self
def __contains__(self, item):
# If the interval contains nodes, check if the item is one of them
if item in self.content:
return True
# If the interval contains intervals, we need to check them
return any(item in node for node in self.content
if isinstance(node, Interval))
def add_node(self, node):
if node in self.content:
return False
self.content.add(node)
node.interval = self
return True
def compute_end(self, graph):
for node in self.content:
for suc in graph.sucs(node):
if suc not in self.content:
self.end = node
def get_end(self):
return self.end.get_end()
def set_next(self, nxt):
self.head.set_next(nxt.get_head())
def get_next(self):
return self.head.get_next()
def set_loop_type(self, _type):
self.looptype = _type
self.get_head().set_loop_type(_type)
    def set_startloop(self):
        self.get_head().set_start_loop()
def get_head(self):
return self.head.get_head()
def __iter__(self):
for item in self.content:
yield item
def __len__(self):
return len(self.content)
def __repr__(self):
return '%s(%s)' % (self.name, self.content)
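# --- Usage sketch (node names are made up) ---
# Builds a two-node interval; concrete subclasses normally define __str__,
# so bare Nodes are not printed here.
if __name__ == '__main__':
    head = Node('entry')
    body = Node('body')
    interval = Interval(head)
    interval.add_node(body)
    assert head in interval and body in interval
    assert interval.get_head() is head
    assert len(interval) == 2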
|
{
"content_hash": "47d08afa10f3202c62b2703d17acf3de",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 78,
"avg_line_length": 24.33830845771144,
"alnum_prop": 0.5676614881439084,
"repo_name": "mrmans0n/sublime-text-3-config",
"id": "c7f1c8562783852f574c544c94d7d163f31b4983",
"size": "5672",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Packages/Androguard/androguard/decompiler/dad/node.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "75293"
},
{
"name": "Python",
"bytes": "1613409"
},
{
"name": "Ruby",
"bytes": "1872"
},
{
"name": "Shell",
"bytes": "1570"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
class Clients(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
iban = models.CharField(max_length=34)
country = models.CharField(null=True, blank=True, max_length=10)
createdTime = models.DateTimeField(auto_now_add=True)
createdBy = models.ForeignKey(User, blank=True, null=True, on_delete=models.SET_NULL,
related_name='created_clients')
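# Usage sketch (Django shell; the field values are made up):
#
#   Clients.objects.create(
#       first_name='Ada', last_name='Lovelace',
#       iban='GB82WEST12345698765432', country='GB',
#   )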
|
{
"content_hash": "5bd8723d96a74acf8b56e22b6ae93e70",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 43.76923076923077,
"alnum_prop": 0.7065026362038664,
"repo_name": "rexhepberlajolli/RHChallenge",
"id": "ada690b71674bd393510afdae8fc12f43635ce2a",
"size": "593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/user_administration/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8650"
},
{
"name": "Python",
"bytes": "22125"
}
],
"symlink_target": ""
}
|
import jedi
def test_path_issues():
"""
See pull request #684 for details.
"""
source = '''from datetime import '''
assert jedi.Script(source).completions()
|
{
"content_hash": "80ce39862c54bee41d2b3f037246ecfb",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 44,
"avg_line_length": 22.25,
"alnum_prop": 0.6235955056179775,
"repo_name": "NcLang/vimrc",
"id": "e5d8cd0d4edc5d084dfe06b55d28efb2b29b11af",
"size": "178",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sources_non_forked/YouCompleteMe/third_party/ycmd/third_party/JediHTTP/vendor/jedi/test/test_windows.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "568"
},
{
"name": "CSS",
"bytes": "6320"
},
{
"name": "CoffeeScript",
"bytes": "1402"
},
{
"name": "Erlang",
"bytes": "3232"
},
{
"name": "GCC Machine Description",
"bytes": "525"
},
{
"name": "Go",
"bytes": "2239"
},
{
"name": "HTML",
"bytes": "134"
},
{
"name": "JavaScript",
"bytes": "1064"
},
{
"name": "Makefile",
"bytes": "8657"
},
{
"name": "Perl",
"bytes": "2705"
},
{
"name": "Python",
"bytes": "704814"
},
{
"name": "Ruby",
"bytes": "33390"
},
{
"name": "Shell",
"bytes": "9370"
},
{
"name": "TeX",
"bytes": "6193"
},
{
"name": "VimL",
"bytes": "3170590"
},
{
"name": "XSLT",
"bytes": "4217"
}
],
"symlink_target": ""
}
|
from django import template
from django.utils.safestring import mark_safe
from django.template.defaultfilters import stringfilter
import markdown
register = template.Library()
@register.filter(is_safe=True)
@stringfilter
def md(text):
html = markdown.markdown(text, safe_mode='replace', output_format='html5')
return mark_safe(html)
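# Template usage sketch (template and variable names are hypothetical):
#
#   {% load home_tags %}
#   {{ article.body|md }}
#
# renders Markdown to HTML5 and marks the result safe for output.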
|
{
"content_hash": "9f86aaa9c15e852685f0cf804f6df920",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 78,
"avg_line_length": 23.066666666666666,
"alnum_prop": 0.7774566473988439,
"repo_name": "jaseiler/researchcompendia",
"id": "4114ac87f57a2430a8c342f3ad2847526ac371b4",
"size": "346",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "companionpages/home/templatetags/home_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22922"
},
{
"name": "JavaScript",
"bytes": "8926"
},
{
"name": "Python",
"bytes": "193374"
},
{
"name": "Shell",
"bytes": "12214"
},
{
"name": "TeX",
"bytes": "2388"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function
import warnings
import numpy as np
from scipy.special import gammainccinv
from scipy.ndimage.filters import convolve
def _inv_nchi_cdf(N, K, alpha):
"""Inverse CDF for the noncentral chi distribution
See [1]_ p.3 section 2.3"""
return gammainccinv(N * K, 1 - alpha) / K
def piesno(data, N, alpha=0.01, l=100, itermax=100, eps=1e-5, return_mask=False):
"""
Probabilistic Identification and Estimation of Noise (PIESNO).
Parameters
-----------
data : ndarray
The magnitude signals to analyse. The last dimension must contain the
same realisation of the volume, such as dMRI or fMRI data.
N : int
        The number of phased-array coils of the MRI scanner.
If your scanner does a SENSE reconstruction, ALWAYS use N=1, as the noise
profile is always Rician.
If your scanner does a GRAPPA reconstruction, set N as the number
of phase array coils.
alpha : float
Probabilistic estimation threshold for the gamma function.
l : int
number of initial estimates for sigma to try.
itermax : int
Maximum number of iterations to execute if convergence
is not reached.
eps : float
Tolerance for the convergence criterion. Convergence is
reached if two subsequent estimates are smaller than eps.
return_mask : bool
        If True, return a mask identifying all the pure noise voxels
that were found.
Returns
--------
sigma : float
The estimated standard deviation of the gaussian noise.
mask : ndarray (optional)
A boolean mask indicating the voxels identified as pure noise.
    Notes
------
    This function assumes two things: 1. The data has a noisy, non-masked
background and 2. The data is a repetition of the same measurements
along the last axis, i.e. dMRI or fMRI data, not structural data like T1/T2.
This function processes the data slice by slice, as originally designed in
the paper. Use it to get a slice by slice estimation of the noise, as in
spinal cord imaging for example.
References
------------
.. [1] Koay CG, Ozarslan E and Pierpaoli C.
"Probabilistic Identification and Estimation of Noise (PIESNO):
A self-consistent approach and its applications in MRI."
Journal of Magnetic Resonance 2009; 199: 94-103.
.. [2] Koay CG, Ozarslan E and Basser PJ.
"A signal transformational framework for breaking the noise floor
and its applications in MRI."
Journal of Magnetic Resonance 2009; 197: 108-119.
"""
# This method works on a 2D array with repetitions as the third dimension,
# so process the dataset slice by slice.
if data.ndim < 3:
e_s = "This function only works on datasets of at least 3 dimensions."
raise ValueError(e_s)
if data.ndim == 4:
sigma = np.zeros(data.shape[-2], dtype=np.float32)
mask_noise = np.zeros(data.shape[:-1], dtype=np.bool)
for idx in range(data.shape[-2]):
sigma[idx], mask_noise[..., idx] = _piesno_3D(data[..., idx, :], N,
alpha=alpha,
l=l,
itermax=itermax,
eps=eps,
return_mask=True)
else:
sigma, mask_noise = _piesno_3D(data, N,
alpha=alpha,
l=l,
itermax=itermax,
eps=eps,
return_mask=True)
if return_mask:
return sigma, mask_noise
return sigma
def _piesno_3D(data, N, alpha=0.01, l=100, itermax=100, eps=1e-5,
return_mask=False):
"""
Probabilistic Identification and Estimation of Noise (PIESNO).
This is the slice by slice version for working on a 4D array.
Parameters
-----------
data : ndarray
The magnitude signals to analyse. The last dimension must contain the
same realisation of the volume, such as dMRI or fMRI data.
N : int
The number of phase array coils of the MRI scanner.
alpha : float (optional)
Probabilistic estimation threshold for the gamma function.
Default: 0.01.
l : int (optional)
number of initial estimates for sigma to try. Default: 100.
itermax : int (optional)
Maximum number of iterations to execute if convergence
is not reached. Default: 100.
eps : float (optional)
Tolerance for the convergence criterion. Convergence is
reached if two subsequent estimates are smaller than eps.
Default: 1e-5.
return_mask : bool (optional)
        If True, return a mask identifying all the pure noise voxels
that were found. Default: False.
Returns
--------
sigma : float
The estimated standard deviation of the gaussian noise.
mask : ndarray
A boolean mask indicating the voxels identified as pure noise.
Notes
------
    This function assumes two things: 1. The data has a noisy, non-masked
background and 2. The data is a repetition of the same measurements
along the last axis, i.e. dMRI or fMRI data, not structural data like T1/T2.
References
------------
.. [1] Koay CG, Ozarslan E and Pierpaoli C.
"Probabilistic Identification and Estimation of Noise (PIESNO):
A self-consistent approach and its applications in MRI."
Journal of Magnetic Resonance 2009; 199: 94-103.
.. [2] Koay CG, Ozarslan E and Basser PJ.
"A signal transformational framework for breaking the noise floor
and its applications in MRI."
Journal of Magnetic Resonance 2009; 197: 108-119.
"""
# Get optimal quantile for N if available, else use the median.
opt_quantile = {1: 0.79681213002002,
2: 0.7306303027491917,
4: 0.6721952960782169,
8: 0.6254030432343569,
16: 0.5900487123737876,
32: 0.5641772300866416,
64: 0.5455611840489607,
128: 0.5322811923303339}
if N in opt_quantile:
q = opt_quantile[N]
else:
q = 0.5
# Initial estimation of sigma
denom = np.sqrt(2 * _inv_nchi_cdf(N, 1, q))
m = np.percentile(data, q * 100) / denom
phi = np.arange(1, l + 1) * m / l
K = data.shape[-1]
sum_m2 = np.sum(data**2, axis=-1, dtype=np.float32)
sigma = np.zeros(phi.shape, dtype=phi.dtype)
mask = np.zeros(phi.shape + data.shape[:-1])
lambda_minus = _inv_nchi_cdf(N, K, alpha/2)
lambda_plus = _inv_nchi_cdf(N, K, 1 - alpha/2)
pos = 0
max_length_omega = 0
for num, sig in enumerate(phi):
sig_prev = 0
omega_size = 1
idx = np.zeros(sum_m2.shape, dtype=np.bool)
for n in range(itermax):
if np.abs(sig - sig_prev) < eps:
break
s = sum_m2 / (2 * K * sig**2)
idx = np.logical_and(lambda_minus <= s, s <= lambda_plus)
omega = data[idx, :]
# If no point meets the criterion, exit
if omega.size == 0:
omega_size = 0
break
sig_prev = sig
            # np.percentile expects percentiles in [0, 100], hence q * 100
sig = np.percentile(omega, q * 100) / denom
omega_size = omega.size / K
# Remember the biggest omega array as giving the optimal
# sigma amongst all initial estimates from phi
if omega_size > max_length_omega:
pos, max_length_omega = num, omega_size
sigma[num] = sig
mask[num] = idx
if return_mask:
return sigma[pos], mask[pos]
return sigma[pos]
def estimate_sigma(arr, disable_background_masking=False):
"""Standard deviation estimation from local patches
Parameters
----------
arr : 3D or 4D ndarray
The array to be estimated
disable_background_masking : bool, default False
If True, uses all voxels for the estimation, otherwise, only non-zeros
voxels are used. Useful if the background is masked by the scanner.
Returns
-------
sigma : ndarray
standard deviation of the noise, one estimation per volume.
"""
k = np.zeros((3, 3, 3), dtype=np.int8)
k[0, 1, 1] = 1
k[2, 1, 1] = 1
k[1, 0, 1] = 1
k[1, 2, 1] = 1
k[1, 1, 0] = 1
k[1, 1, 2] = 1
if arr.ndim == 3:
sigma = np.zeros(1, dtype=np.float32)
arr = arr[..., None]
elif arr.ndim == 4:
sigma = np.zeros(arr.shape[-1], dtype=np.float32)
else:
raise ValueError("Array shape is not supported!", arr.shape)
    if disable_background_masking:
        # use every voxel, as documented above
        mask = np.ones_like(arr[..., 0], dtype=np.bool)
    else:
        # keep only non-zero voxels
        mask = arr[..., 0].astype(np.bool)
conv_out = np.zeros(arr[..., 0].shape, dtype=np.float64)
for i in range(sigma.size):
convolve(arr[..., i], k, output=conv_out)
mean_block = np.sqrt(6/7) * (arr[..., i] - 1/6 * conv_out)
sigma[i] = np.sqrt(np.mean(mean_block[mask]**2))
return sigma
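if __name__ == '__main__':
    # A minimal usage sketch on synthetic data: the shapes and the random
    # volume below are hypothetical; piesno and estimate_sigma are the
    # functions defined above.
    rng = np.random.RandomState(0)
    demo = np.abs(rng.randn(32, 32, 10, 20)).astype(np.float32)
    print("PIESNO sigma per slice:", piesno(demo, N=1))
    print("sigma per volume:", estimate_sigma(demo))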
|
{
"content_hash": "75e6315e558143f0267e4881c7a12e49",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 81,
"avg_line_length": 31.737373737373737,
"alnum_prop": 0.5838107362614047,
"repo_name": "Messaoud-Boudjada/dipy",
"id": "17d80560c0f853e46697f4474cde918d3f873437",
"size": "9426",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dipy/denoise/noise_estimate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2694"
},
{
"name": "Makefile",
"bytes": "3639"
},
{
"name": "Python",
"bytes": "2138637"
}
],
"symlink_target": ""
}
|
"""
Quick and dirty script to generate test case inputs.
"""
from __future__ import print_function
from os.path import (
dirname,
join,
)
from pandas_datareader.data import DataReader
here = join(dirname(__file__))
def main():
symbols = ['AAPL', 'MSFT', 'BRK-A']
# Specifically chosen to include the AAPL split on June 9, 2014.
for symbol in symbols:
data = DataReader(
symbol,
'yahoo',
start='2014-03-01',
end='2014-09-01',
)
data.rename(
columns={
'Open': 'open',
'High': 'high',
'Low': 'low',
'Close': 'close',
'Volume': 'volume',
},
inplace=True,
)
del data['Adj Close']
dest = join(here, symbol + '.csv')
print("Writing %s -> %s" % (symbol, dest))
data.to_csv(dest, index_label='day')
if __name__ == '__main__':
main()
|
{
"content_hash": "0c4aa2dbfd8ae87041c710250957fe90",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 68,
"avg_line_length": 23.926829268292682,
"alnum_prop": 0.48216106014271154,
"repo_name": "magne-max/zipline-ja",
"id": "42021d40aca0b007d3b581ae55c66f0a293c8fa3",
"size": "981",
"binary": false,
"copies": "2",
"ref": "refs/heads/japan",
"path": "tests/resources/pipeline_inputs/generate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7251"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Jupyter Notebook",
"bytes": "166848"
},
{
"name": "PowerShell",
"bytes": "3260"
},
{
"name": "Python",
"bytes": "2949355"
},
{
"name": "Shell",
"bytes": "7508"
}
],
"symlink_target": ""
}
|
"""
Edge detection routines
"""
import warnings
import numpy as np
from scipy import ndimage, signal
try:
# Protect this import as it is compiled code
from nipy.algorithms.statistics import quantile
except ImportError, e:
warnings.warn('Could not import fast quantile function: %s' % e)
quantile = None
################################################################################
# Edge detection
def _fast_abs_percentile(map, percentile=80):
""" A fast version of the percentile of the absolute value.
"""
if hasattr(map, 'mask'):
map = np.asarray(map[np.logical_not(map.mask)])
map = np.abs(map)
map = map.ravel()
if quantile is not None:
return quantile(map, .01*percentile)
map.sort()
nb = map.size
    # the index must be an integer
    return map[int(.01 * percentile * nb)]
def _orientation_kernel(t):
""" structure elements for calculating the value of neighbors in several
directions
"""
sin = np.sin
pi = np.pi
t = pi*t
arr = np.array([[sin(t), sin(t+.5*pi), sin(t+pi) ],
[sin(t+1.5*pi), 0, sin(t+1.5*pi)],
[sin(t+pi), sin(t+.5*pi), sin(t) ]])
return np.round(.5*((1+arr))**2).astype(np.bool)
def _edge_detect(image, high_threshold=.75, low_threshold=.4):
""" Edge detection for 2D images based on Canny filtering.
Parameters
==========
image: 2D array
The image on which edge detection is applied
high_threshold: float, optional
        The quantile defining the upper threshold of the hysteresis
        thresholding: decrease this to keep more edges
    low_threshold: float, optional
        The quantile defining the lower threshold of the hysteresis
        thresholding: decrease this to extract wider edges
Returns
========
grad_mag: 2D array of floats
The magnitude of the gradient
edge_mask: 2D array of booleans
        A mask of where edges have been detected
Notes
======
    This function is based on a Canny filter; however, it has been
    tailored for visualization purposes on brain images: don't use it
in the general case.
It computes the norm of the gradient, extracts the ridge by
keeping only local maximum in each direction, and performs
hysteresis filtering to keep only edges with high gradient
magnitude.
"""
    # This code is loosely based on code by Stefan van der Walt
# Convert to floats to avoid overflows
np_err = np.seterr(all='ignore')
img = signal.wiener(image.astype(np.float))
np.seterr(**np_err)
# Where the noise variance is 0, Wiener can create nans
img[np.isnan(img)] = image[np.isnan(img)]
img /= img.max()
grad_x = ndimage.sobel(img, mode='constant', axis=0)
grad_y = ndimage.sobel(img, mode='constant', axis=1)
grad_mag = np.sqrt(grad_x**2 + grad_y**2)
grad_angle = np.arctan2(grad_y, grad_x)
# Scale the angles in the range [0, 2]
grad_angle = (grad_angle + np.pi) / np.pi
# Non-maximal suppression: an edge pixel is only good if its magnitude is
# greater than its neighbors normal to the edge direction.
thinner = np.zeros(grad_mag.shape, dtype=np.bool)
for angle in np.arange(0, 2, .25):
thinner = thinner | (
(grad_mag > .85*ndimage.maximum_filter(grad_mag,
footprint=_orientation_kernel(angle)))
& (((grad_angle - angle) % 2) < .75)
)
    # Remove the edges next to the sides of the image: they are not reliable
thinner[0] = 0
thinner[-1] = 0
thinner[:, 0] = 0
thinner[:, -1] = 0
thinned_grad = thinner * grad_mag
# Hysteresis thresholding: find seeds above a high threshold, then
# expand out until we go below the low threshold
grad_values = thinned_grad[thinner]
high = thinned_grad > _fast_abs_percentile(grad_values, 100*high_threshold)
low = thinned_grad > _fast_abs_percentile(grad_values, 100*low_threshold)
edge_mask = ndimage.binary_dilation(high, structure=np.ones((3, 3)),
iterations=-1, mask=low)
return grad_mag, edge_mask
def _edge_map(image):
""" Return a maps of edges suitable for visualization.
Parameters
==========
image: 2D array
The image that the edges are extracted from.
Returns
========
edge_mask: 2D masked array
A mask of the edge as a masked array with parts without
edges masked and the large extents detected with lower
coefficients.
"""
edge_mask = _edge_detect(image)[-1]
edge_mask = edge_mask.astype(np.float)
edge_mask = -np.sqrt(ndimage.distance_transform_cdt(edge_mask))
edge_mask[edge_mask != 0] -= -.05+edge_mask.min()
edge_mask = np.ma.masked_less(edge_mask, .01)
return edge_mask
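if __name__ == '__main__':
    # A minimal usage sketch on a synthetic image (hypothetical data; the
    # noisy step edge below should produce a roughly vertical edge line).
    image = np.zeros((64, 64))
    image[:, 32:] = 1.
    image += .05 * np.random.randn(64, 64)
    grad_mag, edge_mask = _edge_detect(image)
    print('%d edge pixels detected' % edge_mask.sum())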
|
{
"content_hash": "ba8952ecc167944a7c2a9d63ea4d02fe",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 80,
"avg_line_length": 35.687943262411345,
"alnum_prop": 0.5945945945945946,
"repo_name": "bthirion/nipy",
"id": "5530b2c22803bc5aa53c65f3f360ea49ef75f3e1",
"size": "5146",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nipy/labs/viz_tools/edge_detect.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6381240"
},
{
"name": "C++",
"bytes": "6189"
},
{
"name": "CSS",
"bytes": "8170"
},
{
"name": "M",
"bytes": "560"
},
{
"name": "Matlab",
"bytes": "4948"
},
{
"name": "Python",
"bytes": "3068962"
},
{
"name": "TeX",
"bytes": "238"
}
],
"symlink_target": ""
}
|
"""Tests for grr.lib.email_alerts."""
from grr.lib import config_lib
from grr.lib import email_alerts
from grr.lib import flags
from grr.lib import test_lib
class SendEmailTests(test_lib.GRRBaseTest):
def testSplitEmailsAndAppendEmailDomain(self):
self.assertEqual(email_alerts.SplitEmailsAndAppendEmailDomain(""), [])
def testSendEmail(self):
testdomain = "test.com"
config_lib.CONFIG.Set("Email.default_domain", testdomain)
smtp_conn = self.mock_smtp.return_value
# Single fully qualified address
to_address = "testto@example.com"
from_address = "me@example.com"
subject = "test"
message = ""
email_alerts.SendEmail(to_address, from_address, subject, message)
c_from, c_to, msg = smtp_conn.sendmail.call_args[0]
self.assertEqual(from_address, c_from)
self.assertEqual([to_address], c_to)
self.assertFalse("CC:" in msg)
# Multiple unqualified to addresses, one cc
to_address = "testto,abc,def"
to_address_expected = [
x + testdomain for x in ["testto@", "abc@", "def@"]]
cc_address = "testcc"
email_alerts.SendEmail(to_address, from_address, subject, message,
addresses=cc_address)
c_from, c_to, message = smtp_conn.sendmail.call_args[0]
self.assertEqual(from_address, c_from)
self.assertEqual(to_address_expected, c_to)
self.assertTrue("CC: testcc@%s" % testdomain in message)
# Multiple unqualified to addresses, two cc, message_id set
to_address_expected = [
x + testdomain for x in ["testto@", "abc@", "def@"]]
cc_address = "testcc,testcc2"
email_msg_id = "123123"
email_alerts.SendEmail(to_address, from_address, subject, message,
addresses=cc_address, message_id=email_msg_id)
c_from, c_to, message = smtp_conn.sendmail.call_args[0]
self.assertEqual(from_address, c_from)
self.assertEqual(to_address_expected, c_to)
self.assertTrue("CC: testcc@%s,testcc2@%s" % (testdomain, testdomain) in
message)
self.assertTrue("Message-ID: %s" % email_msg_id)
# Multiple unqualified to addresses, two cc, no default domain
to_address_expected = ["testto", "abc", "def"]
config_lib.CONFIG.Set("Email.default_domain", None)
email_alerts.SendEmail(to_address, from_address, subject, message,
cc_addresses=cc_address)
c_from, c_to, message = smtp_conn.sendmail.call_args[0]
self.assertEqual(from_address, c_from)
self.assertEqual(to_address_expected, c_to)
self.assertTrue("CC: testcc@%s,testcc2@%s" % (testdomain, testdomain) in
message)
def main(argv):
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
{
"content_hash": "ddf2b4bf75c95c5acb7099e4f8fed668",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 76,
"avg_line_length": 37.189189189189186,
"alnum_prop": 0.6613372093023255,
"repo_name": "defaultnamehere/grr",
"id": "bd53a472c35fbb5f3c09502c15cd21f34a165261",
"size": "2774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/email_alerts_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "36345"
},
{
"name": "JavaScript",
"bytes": "831633"
},
{
"name": "Makefile",
"bytes": "5939"
},
{
"name": "Python",
"bytes": "4541648"
},
{
"name": "Shell",
"bytes": "31077"
}
],
"symlink_target": ""
}
|
"""
Collect the elasticsearch stats for the local node
#### Dependencies
* urlib2
"""
import urllib2
import re
try:
import json
json # workaround for pyflakes issue #13
except ImportError:
import simplejson as json
import diamond.collector
RE_LOGSTASH_INDEX = re.compile('^(.*)-\d\d\d\d\.\d\d\.\d\d$')
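# A quick sketch of what logstash_mode relies on below: the regex strips the
# trailing YYYY.MM.DD from a (hypothetical) day-suffixed index name, e.g.
#     >>> RE_LOGSTASH_INDEX.match('logstash-adm-syslog-2014.01.03').group(1)
#     'logstash-adm-syslog'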
class ElasticSearchCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(ElasticSearchCollector,
self).get_default_config_help()
config_help.update({
'host': "",
'port': "",
'stats': "Available stats: \n"
+ " - jvm (JVM information) \n"
+ " - thread_pool (Thread pool information) \n"
+ " - indices (Individual index stats)\n",
'logstash_mode': "If 'indices' stats are gathered, remove "
+ "the YYYY.MM.DD suffix from the index name "
+ "(e.g. logstash-adm-syslog-2014.01.03) and use that "
+ "as a bucket for all 'day' index stats.",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(ElasticSearchCollector, self).get_default_config()
config.update({
'host': '127.0.0.1',
'port': 9200,
'path': 'elasticsearch',
'stats': ['jvm', 'thread_pool', 'indices'],
'logstash_mode': False,
})
return config
def _get(self, path):
url = 'http://%s:%i/%s' % (
self.config['host'], int(self.config['port']), path)
try:
response = urllib2.urlopen(url)
except Exception, err:
self.log.error("%s: %s", url, err)
return False
try:
return json.load(response)
except (TypeError, ValueError):
self.log.error("Unable to parse response from elasticsearch as a"
+ " json object")
return False
def _copy_one_level(self, metrics, prefix, data, filter=lambda key: True):
for key, value in data.iteritems():
if filter(key):
metric_path = '%s.%s' % (prefix, key)
self._set_or_sum_metric(metrics, metric_path, value)
def _copy_two_level(self, metrics, prefix, data, filter=lambda key: True):
for key1, d1 in data.iteritems():
self._copy_one_level(metrics, '%s.%s' % (prefix, key1), d1, filter)
def _index_metrics(self, metrics, prefix, index):
if self.config['logstash_mode']:
"""Remove the YYYY.MM.DD bit from logstash indices.
            This way we keep using the same metric naming and do not pollute
our metrics system (e.g. Graphite) with new metrics every day."""
m = RE_LOGSTASH_INDEX.match(prefix)
if m:
prefix = m.group(1)
            # keep a tally of the number of indexes
self._set_or_sum_metric(metrics,
'%s.indexes_in_group' % prefix, 1)
self._add_metric(metrics, '%s.docs.count' % prefix, index,
['docs', 'count'])
self._add_metric(metrics, '%s.docs.deleted' % prefix, index,
['docs', 'deleted'])
self._add_metric(metrics, '%s.datastore.size' % prefix, index,
['store', 'size_in_bytes'])
# publish all 'total' and 'time_in_millis' stats
self._copy_two_level(
metrics, prefix, index,
lambda key: key.endswith('total') or key.endswith('time_in_millis'))
def _add_metric(self, metrics, metric_path, data, data_path):
"""If the path specified by data_path (a list) exists in data,
add to metrics. Use when the data path may not be present"""
current_item = data
for path_element in data_path:
current_item = current_item.get(path_element)
if current_item is None:
return
self._set_or_sum_metric(metrics, metric_path, current_item)
def _set_or_sum_metric(self, metrics, metric_path, value):
"""If we already have a datapoint for this metric, lets add
the value. This is used when the logstash mode is enabled."""
if metric_path in metrics:
metrics[metric_path] += value
else:
metrics[metric_path] = value
def collect(self):
if json is None:
self.log.error('Unable to import json')
return {}
result = self._get('_nodes/_local/stats?all=true')
if not result:
return
metrics = {}
node = result['nodes'].keys()[0]
data = result['nodes'][node]
#
# http connections to ES
metrics['http.current'] = data['http']['current_open']
#
# indices
indices = data['indices']
metrics['indices.docs.count'] = indices['docs']['count']
metrics['indices.docs.deleted'] = indices['docs']['deleted']
metrics['indices.datastore.size'] = indices['store']['size_in_bytes']
transport = data['transport']
metrics['transport.rx.count'] = transport['rx_count']
metrics['transport.rx.size'] = transport['rx_size_in_bytes']
metrics['transport.tx.count'] = transport['tx_count']
metrics['transport.tx.size'] = transport['tx_size_in_bytes']
# elasticsearch < 0.90RC2
if 'cache' in indices:
cache = indices['cache']
self._add_metric(metrics, 'cache.bloom.size', cache,
['bloom_size_in_bytes'])
self._add_metric(metrics, 'cache.field.evictions', cache,
['field_evictions'])
self._add_metric(metrics, 'cache.field.size', cache,
['field_size_in_bytes'])
metrics['cache.filter.count'] = cache['filter_count']
metrics['cache.filter.evictions'] = cache['filter_evictions']
metrics['cache.filter.size'] = cache['filter_size_in_bytes']
self._add_metric(metrics, 'cache.id.size', cache,
['id_cache_size_in_bytes'])
# elasticsearch >= 0.90RC2
if 'filter_cache' in indices:
cache = indices['filter_cache']
metrics['cache.filter.evictions'] = cache['evictions']
metrics['cache.filter.size'] = cache['memory_size_in_bytes']
self._add_metric(metrics, 'cache.filter.count', cache, ['count'])
# elasticsearch >= 0.90RC2
if 'id_cache' in indices:
cache = indices['id_cache']
self._add_metric(metrics, 'cache.id.size', cache,
['memory_size_in_bytes'])
# elasticsearch >= 0.90
if 'fielddata' in indices:
fielddata = indices['fielddata']
self._add_metric(metrics, 'fielddata.size', fielddata,
['memory_size_in_bytes'])
self._add_metric(metrics, 'fielddata.evictions', fielddata,
['evictions'])
#
# process mem/cpu (may not be present, depending on access restrictions)
self._add_metric(metrics, 'process.cpu.percent', data,
['process', 'cpu', 'percent'])
self._add_metric(metrics, 'process.mem.resident', data,
['process', 'mem', 'resident_in_bytes'])
self._add_metric(metrics, 'process.mem.share', data,
['process', 'mem', 'share_in_bytes'])
self._add_metric(metrics, 'process.mem.virtual', data,
['process', 'mem', 'total_virtual_in_bytes'])
#
# filesystem (may not be present, depending on access restrictions)
if 'fs' in data and 'data' in data['fs'] and data['fs']['data']:
fs_data = data['fs']['data'][0]
self._add_metric(metrics, 'disk.reads.count', fs_data,
['disk_reads'])
self._add_metric(metrics, 'disk.reads.size', fs_data,
['disk_read_size_in_bytes'])
self._add_metric(metrics, 'disk.writes.count', fs_data,
['disk_writes'])
self._add_metric(metrics, 'disk.writes.size', fs_data,
['disk_write_size_in_bytes'])
#
# jvm
if 'jvm' in self.config['stats']:
jvm = data['jvm']
mem = jvm['mem']
for k in ('heap_used', 'heap_committed', 'non_heap_used',
'non_heap_committed'):
metrics['jvm.mem.%s' % k] = mem['%s_in_bytes' % k]
for pool, d in mem['pools'].iteritems():
pool = pool.replace(' ', '_')
metrics['jvm.mem.pools.%s.used' % pool] = d['used_in_bytes']
metrics['jvm.mem.pools.%s.max' % pool] = d['max_in_bytes']
metrics['jvm.threads.count'] = jvm['threads']['count']
gc = jvm['gc']
collection_count = 0
collection_time_in_millis = 0
for collector, d in gc['collectors'].iteritems():
metrics['jvm.gc.collection.%s.count' % collector] = d[
'collection_count']
collection_count += d['collection_count']
metrics['jvm.gc.collection.%s.time' % collector] = d[
'collection_time_in_millis']
collection_time_in_millis += d['collection_time_in_millis']
# calculate the totals, as they're absent in elasticsearch > 0.90.10
if 'collection_count' in gc:
metrics['jvm.gc.collection.count'] = gc['collection_count']
else:
metrics['jvm.gc.collection.count'] = collection_count
k = 'collection_time_in_millis'
if k in gc:
metrics['jvm.gc.collection.time'] = gc[k]
else:
metrics['jvm.gc.collection.time'] = collection_time_in_millis
#
# thread_pool
if 'thread_pool' in self.config['stats']:
self._copy_two_level(metrics, 'thread_pool', data['thread_pool'])
#
# network
self._copy_two_level(metrics, 'network', data['network'])
if 'indices' in self.config['stats']:
#
# individual index stats
result = self._get('_stats?clear=true&docs=true&store=true&'
+ 'indexing=true&get=true&search=true')
if not result:
return
_all = result['_all']
self._index_metrics(metrics, 'indices._all', _all['primaries'])
if 'indices' in _all:
indices = _all['indices']
elif 'indices' in result: # elasticsearch >= 0.90RC2
indices = result['indices']
else:
return
for name, index in indices.iteritems():
self._index_metrics(metrics, 'indices.%s' % name,
index['primaries'])
for key in metrics:
self.publish(key, metrics[key])
|
{
"content_hash": "bb8322ca9edde056155779ef84277b5f",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 80,
"avg_line_length": 38.855670103092784,
"alnum_prop": 0.52383479260635,
"repo_name": "metamx/Diamond",
"id": "536957faaf0a2ba874a94bf88f3c45807dc5a7d9",
"size": "11323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/collectors/elasticsearch/elasticsearch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "D",
"bytes": "1074"
},
{
"name": "Python",
"bytes": "1074097"
},
{
"name": "Ruby",
"bytes": "230"
},
{
"name": "Shell",
"bytes": "4650"
}
],
"symlink_target": ""
}
|
"""Tests for GCI logic for profiles.
"""
import unittest
from soc.modules.gci.logic import profile as profile_logic
from soc.modules.gci.models.organization import GCIOrganization
from soc.modules.gci.models.profile import GCIProfile
from soc.modules.seeder.logic.seeder import logic as seeder_logic
class ProfileTest(unittest.TestCase):
"""Tests the logic for GCI profiles.
"""
def setUp(self):
self.foo_org = seeder_logic.seed(GCIOrganization)
self.bar_org = seeder_logic.seed(GCIOrganization)
def testQueryAllMentorKeysForOrg(self):
"""Tests if a list of keys of all the mentors for an organization is
returned.
"""
    # Since there are no mentors assigned to foo_org or bar_org, an empty list
    # should be returned.
expected_keys = []
actual_keys = profile_logic.queryAllMentorsKeysForOrg(self.foo_org)
self.assertEqual(expected_keys, actual_keys)
actual_keys = profile_logic.queryAllMentorsKeysForOrg(self.bar_org)
    self.assertEqual(expected_keys, actual_keys)
mentor_properties = {'mentor_for': [self.foo_org.key()], 'is_mentor': True}
foo_mentors = seeder_logic.seedn(GCIProfile, 5, mentor_properties)
org_admin_properties = {'org_admin_for': [self.foo_org.key()],
'mentor_for': [self.foo_org.key()],
'is_mentor': True, 'is_org_admin': True}
foo_org_admin = seeder_logic.seed(GCIProfile, org_admin_properties)
mentor_properties['mentor_for'] = [self.bar_org.key()]
bar_mentors = seeder_logic.seedn(GCIProfile, 5, mentor_properties)
org_admin_properties['org_admin_for'] = [self.bar_org.key()]
org_admin_properties['mentor_for'] = [self.bar_org.key()]
bar_org_admin = seeder_logic.seed(GCIProfile, org_admin_properties)
expected = [mentor.key() for mentor in foo_mentors] + [foo_org_admin.key()]
actual = profile_logic.queryAllMentorsKeysForOrg(self.foo_org)
self.assertEqual(expected, actual)
expected = [mentor.key() for mentor in bar_mentors] + [bar_org_admin.key()]
actual = profile_logic.queryAllMentorsKeysForOrg(self.bar_org)
self.assertEqual(expected, actual)
|
{
"content_hash": "89d4cc98013b7d63a69060f2d7b94fc2",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 79,
"avg_line_length": 38.5,
"alnum_prop": 0.699443413729128,
"repo_name": "adviti/melange",
"id": "067f4e92f98dbb469b0f6f927eae52f8b05c4e79",
"size": "2766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/app/soc/modules/gci/logic/test_profile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import gettext_lazy
from uuid import uuid4
import json
from memoized import memoized
from corehq import toggles
class DialerSettings(models.Model):
domain = models.CharField(max_length=128, unique=True)
aws_instance_id = models.CharField(max_length=255)
is_enabled = models.BooleanField(default=False)
dialer_page_header = models.CharField(max_length=255)
dialer_page_subheader = models.CharField(max_length=255)
class GaenOtpServerSettings(models.Model):
SERVER_OPTIONS = (('NEARFORM', gettext_lazy('NearForm OTP Server')),
('APHL', gettext_lazy('APHL Exposure Notifications')),)
domain = models.CharField(max_length=128, unique=True)
is_enabled = models.BooleanField(default=False)
server_type = models.CharField(max_length=255, default='NEARFORM', choices=SERVER_OPTIONS)
server_url = models.CharField(max_length=255)
auth_token = models.CharField(max_length=255)
def get_property_map(self):
property_map = {
'test_date': 'testDate',
'test_type': 'testType',
}
if self.server_type == "NEARFORM":
property_map['phone_number'] = 'mobile'
property_map['onset_date'] = 'onsetDate'
elif self.server_type == "APHL":
property_map['phone_number'] = 'phone'
property_map['onset_date'] = 'symptomDate'
property_map['tz_offset'] = 'tzOffset'
return property_map
def get_post_params(self):
if self.server_type == "NEARFORM":
return {'jobId': str(uuid4()), }
return {}
def change_post_data_type(self, post_data):
if self.server_type == "APHL":
return json.dumps(post_data)
return post_data
def get_otp_request_headers(self):
headers = {}
if self.server_type == "NEARFORM":
headers = {"Authorization": "Bearer %s" % self.auth_token}
elif self.server_type == "APHL":
headers = {"x-api-key": "%s" % self.auth_token,
"content-type": "application/json",
"accept": "application/json"}
return headers
class HmacCalloutSettings(models.Model):
domain = models.CharField(max_length=128)
destination_url = models.CharField(max_length=255)
is_enabled = models.BooleanField(default=False)
api_key = models.CharField(max_length=255)
api_secret = models.CharField(max_length=255)
class Meta(object):
unique_together = [
# HACK work around unique=True implies db_index=True
# https://code.djangoproject.com/ticket/24082
# Avoid extra varchar_pattern_ops index
# since we do not do LIKE queries on these
# https://stackoverflow.com/a/50926644/10840
("domain",),
]
class SimprintsIntegration(models.Model):
domain = models.CharField(max_length=128, unique=True)
is_enabled = models.BooleanField(default=False)
project_id = models.CharField(max_length=255)
user_id = models.CharField(max_length=255)
module_id = models.CharField(max_length=255)
class ApplicationIntegrationMixin(object):
"""
Contain all integration options in one place for Application object.
assumes access to self.domain from Application
"""
@property
@memoized
def is_biometric_enabled(self):
existing, _ = SimprintsIntegration.objects.get_or_create(
domain=self.domain,
)
return (existing.is_enabled
and toggles.BIOMETRIC_INTEGRATION.enabled(self.domain))
@property
@memoized
def biometric_context(self):
config = SimprintsIntegration.objects.get(domain=self.domain)
return {
'projectId': config.project_id,
'userId': config.user_id,
'moduleId': config.module_id,
'packageName': 'org.commcare.dalvik',
}
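# A minimal usage sketch (hypothetical, unsaved settings instance; the token
# and URL are placeholders, not real credentials):
#
#     otp = GaenOtpServerSettings(server_type='APHL',
#                                 server_url='https://example.com/otp',
#                                 auth_token='not-a-real-token')
#     otp.get_property_map()['phone_number']       # -> 'phone'
#     otp.get_otp_request_headers()['x-api-key']   # -> 'not-a-real-token'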
|
{
"content_hash": "e6266c158a5ccda93be6651251994a4c",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 94,
"avg_line_length": 34.6695652173913,
"alnum_prop": 0.6328066215199398,
"repo_name": "dimagi/commcare-hq",
"id": "b3ab6957dcb0d1d74e4305ec820581264dcb4a74",
"size": "3987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/integration/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
"""
Allows utilizing telegram webhooks.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/telegram_bot.webhooks/
"""
import asyncio
import datetime as dt
from ipaddress import ip_network
import logging
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.util import get_real_ip
from homeassistant.components.telegram_bot import (
CONF_ALLOWED_CHAT_IDS, BaseTelegramBotEntity, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_API_KEY, EVENT_HOMEASSISTANT_STOP, HTTP_BAD_REQUEST,
HTTP_UNAUTHORIZED, CONF_URL)
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['http']
_LOGGER = logging.getLogger(__name__)
TELEGRAM_HANDLER_URL = '/api/telegram_webhooks'
REMOVE_HANDLER_URL = ''
CONF_TRUSTED_NETWORKS = 'trusted_networks'
DEFAULT_TRUSTED_NETWORKS = [
ip_network('149.154.167.197/32'),
ip_network('149.154.167.198/31'),
ip_network('149.154.167.200/29'),
ip_network('149.154.167.208/28'),
ip_network('149.154.167.224/29'),
ip_network('149.154.167.232/31')
]
# pylint: disable=no-value-for-parameter
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_URL): vol.Url(),
vol.Optional(CONF_TRUSTED_NETWORKS, default=DEFAULT_TRUSTED_NETWORKS):
vol.All(cv.ensure_list, [ip_network])
})
@asyncio.coroutine
def async_setup_platform(hass, config):
"""Set up the Telegram webhooks platform."""
import telegram
bot = telegram.Bot(config[CONF_API_KEY])
current_status = yield from hass.async_add_job(bot.getWebhookInfo)
base_url = config.get(CONF_URL, hass.config.api.base_url)
# Some logging of Bot current status:
last_error_date = getattr(current_status, 'last_error_date', None)
if (last_error_date is not None) and (isinstance(last_error_date, int)):
last_error_date = dt.datetime.fromtimestamp(last_error_date)
_LOGGER.info("telegram webhook last_error_date: %s. Status: %s",
last_error_date, current_status)
else:
_LOGGER.debug("telegram webhook Status: %s", current_status)
handler_url = "{0}{1}".format(base_url, TELEGRAM_HANDLER_URL)
if not handler_url.startswith('https'):
_LOGGER.error("Invalid telegram webhook %s must be https", handler_url)
return False
def _try_to_set_webhook():
retry_num = 0
while retry_num < 3:
try:
return bot.setWebhook(handler_url, timeout=5)
except telegram.error.TimedOut:
retry_num += 1
_LOGGER.warning("Timeout trying to set webhook (retry #%d)",
retry_num)
if current_status and current_status['url'] != handler_url:
result = yield from hass.async_add_job(_try_to_set_webhook)
if result:
_LOGGER.info("Set new telegram webhook %s", handler_url)
else:
_LOGGER.error("Set telegram webhook failed %s", handler_url)
return False
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP,
lambda event: bot.setWebhook(REMOVE_HANDLER_URL))
hass.http.register_view(BotPushReceiver(
hass, config[CONF_ALLOWED_CHAT_IDS], config[CONF_TRUSTED_NETWORKS]))
return True
class BotPushReceiver(HomeAssistantView, BaseTelegramBotEntity):
"""Handle pushes from Telegram."""
requires_auth = False
url = TELEGRAM_HANDLER_URL
name = 'telegram_webhooks'
def __init__(self, hass, allowed_chat_ids, trusted_networks):
"""Initialize the class."""
BaseTelegramBotEntity.__init__(self, hass, allowed_chat_ids)
self.trusted_networks = trusted_networks
@asyncio.coroutine
def post(self, request):
"""Accept the POST from telegram."""
real_ip = get_real_ip(request)
if not any(real_ip in net for net in self.trusted_networks):
_LOGGER.warning("Access denied from %s", real_ip)
return self.json_message('Access denied', HTTP_UNAUTHORIZED)
try:
data = yield from request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
if not self.process_message(data):
return self.json_message('Invalid message', HTTP_BAD_REQUEST)
return self.json({})
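# A configuration sketch for this platform (all values are placeholders; the
# 'url' and 'trusted_networks' keys come from PLATFORM_SCHEMA above, the rest
# from the shared telegram_bot schema):
#
#     telegram_bot:
#       - platform: webhooks
#         api_key: !secret telegram_api_key
#         allowed_chat_ids:
#           - 123456789
#         url: https://example.duckdns.org:8123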
|
{
"content_hash": "08f8cb7faea6627ab794e07e905b2be6",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 79,
"avg_line_length": 35.208,
"alnum_prop": 0.6659850034083163,
"repo_name": "stefan-jonasson/home-assistant",
"id": "055f68884a6f3eccbf6c3f7e6798c5b19bea5be9",
"size": "4401",
"binary": false,
"copies": "8",
"ref": "refs/heads/dev",
"path": "homeassistant/components/telegram_bot/webhooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4056"
},
{
"name": "Python",
"bytes": "8360711"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12658"
}
],
"symlink_target": ""
}
|
from lxml import html
import requests
def text_tostring(element):
text_string = html.tostring(element, method="text", encoding="UTF-8")
return text_string.decode('unicode_escape').strip()
class Page(object):
url = None
text = None
tree = None
def __init__(self, url, html_text=None):
self.url = url
if html_text:
self.populate(html_text)
def get(self):
self.populate(requests.get(self.url).text)
def populate(self, html_text=None):
if not html_text:
html_text = requests.get(self.url).text
self.text = html_text
self.tree = html.fromstring(html_text)
def text_by_selector(self, element_id):
return text_tostring(self.tree.cssselect(element_id)[0])
class Band(Page):
def get_name(self):
og_site_title = self.tree.cssselect("meta[property='og:site_name']")[0]
return og_site_title.get("content")
def get_location(self):
return self.text_by_selector("#band-name-location .location")
def get_album_urls(self):
anchor_elements = self.tree.cssselect(".leftMiddleColumns li a, #discography a")
return [a.get("href") for a in anchor_elements]
class Album(Page):
def get_title(self):
return self.text_by_selector("#name-section .trackTitle")
def get_art(self):
og_image = self.tree.cssselect("meta[property='og:image']")[0]
return og_image.get("content")
def get_release_date(self):
date_published = self.tree.cssselect("meta[itemprop='datePublished']")[0]
return date_published.get("content")
def get_license(self):
return self.text_by_selector("#license")
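if __name__ == '__main__':
    # A minimal usage sketch; the URL is a placeholder for a real Bandcamp
    # band page, and network access is required.
    band = Band('https://example.bandcamp.com')
    band.get()
    print(band.get_name())
    print(band.get_album_urls())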
|
{
"content_hash": "c06178edef0e71b84b26dafa75966161",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 88,
"avg_line_length": 27.868852459016395,
"alnum_prop": 0.6347058823529412,
"repo_name": "FreeMusicNinja/api.freemusic.ninja",
"id": "3fb70bc8a3a987a72a83246cd7438c1e2f760a82",
"size": "1700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bandcamp/pages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "135608"
}
],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 30, transform = "Anscombe", sigma = 0.0, exog_count = 20, ar_order = 12);
|
{
"content_hash": "7c0e573f25b039550d6b492eb483cbea",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 169,
"avg_line_length": 38.42857142857143,
"alnum_prop": 0.7100371747211895,
"repo_name": "antoinecarme/pyaf",
"id": "d6ed06edf632bfb14f945c9283dd79be5f2f252f",
"size": "269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Anscombe/trend_MovingAverage/cycle_30/ar_12/test_artificial_128_Anscombe_MovingAverage_30_12_20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class TextinfoValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="textinfo", parent_name="funnel", **kwargs):
super(TextinfoValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", False),
edit_type=kwargs.pop("edit_type", "plot"),
extras=kwargs.pop("extras", ["none"]),
flags=kwargs.pop(
"flags",
[
"label",
"text",
"percent initial",
"percent previous",
"percent total",
"value",
],
),
**kwargs,
)
|
{
"content_hash": "6b3e319bf457bc467a2e4d5faa62e906",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 33.875,
"alnum_prop": 0.46494464944649444,
"repo_name": "plotly/plotly.py",
"id": "7931d3e4bbccc7d897898a467fafa2d55c9ef28c",
"size": "813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/funnel/_textinfo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from django import template
from django.contrib.contenttypes.models import ContentType
from django.template import Context
from hcomments import models
from hcomments import settings
import hashlib
import urllib
register = template.Library()
def _get_comment_list(object):
ctype = ContentType.objects.get_for_model(object)
tree = models.HComment.tree.root_nodes().filter(
content_type=ctype,
object_pk=object.id,
is_public=True,
is_removed=False,
)
comments = []
for root in tree:
comments.extend(root.get_descendants(True))
return comments
@register.tag
def get_comment_list(parser, token):
"""
{% get_comment_list object as comments %}
"""
class Node(template.Node):
def __init__(self, object, var_name):
self.object = template.Variable(object)
self.var_name = var_name
def render(self, context):
context[self.var_name] = _get_comment_list(self.object.resolve(context))
return ''
contents = token.split_contents()
tag_name = contents.pop(0)
object = contents.pop(0)
if contents[-2] != 'as':
raise template.TemplateSyntaxError("%r tag had invalid arguments" % tag_name)
var_name = contents[-1]
return Node(object, var_name)
@register.inclusion_tag('hcomments/show_comment_list.html', takes_context=True)
def show_comment_list(context, object):
ctx = Context(context)
ctx.update({
'comments': _get_comment_list(object),
})
return ctx
@register.inclusion_tag('hcomments/show_single_comment.html', takes_context=True)
def show_single_comment(context, comment):
request = context['request']
comment_owner = comment.id in request.session.get('user-comments', [])
if not comment_owner:
comment_owner = settings.MODERATOR_REQUEST(request, comment)
return {
'c': comment,
'comment_owner': comment_owner,
}
@register.filter
def thread_owner(comment):
if not comment.user:
return False
owners = settings.THREAD_OWNERS(comment.content_object)
if owners:
return comment.user in owners
else:
return False
@register.inclusion_tag('hcomments/show_comment_form.html', takes_context=True)
def show_comment_form(context, object):
ctx = Context(context)
ctx.update({
'object': object,
})
return ctx
@register.inclusion_tag('hcomments/show_subscribe_form.html', takes_context=True)
def show_subscribe_form(context, object):
ctx = Context(context)
ctx.update({
'object': object,
})
return ctx
@register.filter
def subscribed(object, user):
return models.ThreadSubscription.objects.subscribed(object, user)
@register.filter
def gravatar(email, args=''):
if args:
args = dict(a.split('=') for a in args.split(','))
else:
args = {}
# Set your variables here
default = args.get('default', '404')
size = args.get('size', '80')
rating = args.get('rating', 'r')
# construct the url
gravatar_url = 'http://www.gravatar.com/avatar/%s?' % hashlib.md5(email.lower()).hexdigest()
gravatar_url += urllib.urlencode({
'default': default,
'size': str(size),
'rating': rating,
})
return gravatar_url
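# A template usage sketch for the tags and filters above ("object" and
# "comment" stand for hypothetical context variables):
#
#     {% load hcomments_tags %}
#     {% get_comment_list object as comments %}
#     {% show_comment_form object %}
#     <img src="{{ comment.user.email|gravatar:"size=40,rating=g" }}">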
|
{
"content_hash": "2b3551a57ee7ae03c26bcb1390a28ddc",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 96,
"avg_line_length": 26.261904761904763,
"alnum_prop": 0.6500453309156845,
"repo_name": "matrixise/epcon",
"id": "ad5dcf0979a9b7f865c825ab5cee93b2ba518d38",
"size": "3333",
"binary": false,
"copies": "4",
"ref": "refs/heads/ep2017",
"path": "hcomments/templatetags/hcomments_tags.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "1490"
},
{
"name": "CSS",
"bytes": "4791766"
},
{
"name": "HTML",
"bytes": "2176028"
},
{
"name": "JavaScript",
"bytes": "3470151"
},
{
"name": "Makefile",
"bytes": "3338"
},
{
"name": "PHP",
"bytes": "4506"
},
{
"name": "Python",
"bytes": "1269414"
},
{
"name": "Ruby",
"bytes": "1870"
},
{
"name": "Shell",
"bytes": "1679"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
}
|
import os
import logging
from multiprocessing import Process
from django.conf import settings
from django.core.cache import cache as django_cache
from django.core.management.base import BaseCommand
from django.db import connection as django_connection, connections
from kombu import Connection, Exchange, Queue
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.dispatch.control import Control
from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumer, TaskWorker
logger = logging.getLogger('awx.main.dispatch')
def construct_bcast_queue_name(common_name):
return common_name.encode('utf8') + '_' + settings.CLUSTER_HOST_ID
class Command(BaseCommand):
help = 'Launch the task dispatcher'
def add_arguments(self, parser):
parser.add_argument('--status', dest='status', action='store_true',
help='print the internal state of any running dispatchers')
parser.add_argument('--running', dest='running', action='store_true',
                            help='print the UUIDs of any tasks managed by this dispatcher')
parser.add_argument('--reload', dest='reload', action='store_true',
                            help=('cause the dispatcher to recycle all of its worker processes; '
'running jobs will run to completion first'))
def beat(self):
from celery import Celery
from celery.beat import PersistentScheduler
from celery.apps import beat
class AWXScheduler(PersistentScheduler):
def __init__(self, *args, **kwargs):
self.ppid = os.getppid()
super(AWXScheduler, self).__init__(*args, **kwargs)
def setup_schedule(self):
super(AWXScheduler, self).setup_schedule()
self.update_from_dict(settings.CELERYBEAT_SCHEDULE)
def tick(self, *args, **kwargs):
if os.getppid() != self.ppid:
# if the parent PID changes, this process has been orphaned
# via e.g., segfault or sigkill, we should exit too
raise SystemExit()
return super(AWXScheduler, self).tick(*args, **kwargs)
def apply_async(self, entry, producer=None, advance=True, **kwargs):
for conn in connections.all():
# If the database connection has a hiccup, re-establish a new
# connection
conn.close_if_unusable_or_obsolete()
task = TaskWorker.resolve_callable(entry.task)
result, queue = task.apply_async()
class TaskResult(object):
id = result['uuid']
return TaskResult()
app = Celery()
app.conf.BROKER_URL = settings.BROKER_URL
app.conf.CELERY_TASK_RESULT_EXPIRES = False
beat.Beat(
30,
app,
schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler
).run()
def handle(self, *arg, **options):
if options.get('status'):
print Control('dispatcher').status()
return
if options.get('running'):
print Control('dispatcher').running()
return
if options.get('reload'):
return Control('dispatcher').control({'control': 'reload'})
# It's important to close these because we're _about_ to fork, and we
# don't want the forked processes to inherit the open sockets
# for the DB and memcached connections (that way lies race conditions)
django_connection.close()
django_cache.close()
beat = Process(target=self.beat)
beat.daemon = True
beat.start()
reaper.reap()
consumer = None
with Connection(settings.BROKER_URL) as conn:
try:
bcast = 'tower_broadcast_all'
queues = [
Queue(q, Exchange(q), routing_key=q)
for q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()])
]
queues.append(
Queue(
construct_bcast_queue_name(bcast),
exchange=Exchange(bcast, type='fanout'),
routing_key=bcast,
reply=True
)
)
consumer = AWXConsumer(
'dispatcher',
conn,
TaskWorker(),
queues,
AutoscalePool(min_workers=4)
)
consumer.run()
except KeyboardInterrupt:
logger.debug('Terminating Task Dispatcher')
if consumer:
consumer.stop()
|
{
"content_hash": "500cbdaad25a5dc991e9316ba2bb18ad",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 96,
"avg_line_length": 38.698412698412696,
"alnum_prop": 0.5607054963084496,
"repo_name": "wwitzel3/awx",
"id": "312c146e205045ba4a8777a730a26a193d6641d9",
"size": "4934",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "awx/main/management/commands/run_dispatcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "303046"
},
{
"name": "Dockerfile",
"bytes": "5713"
},
{
"name": "HTML",
"bytes": "496559"
},
{
"name": "JavaScript",
"bytes": "3513112"
},
{
"name": "Makefile",
"bytes": "21133"
},
{
"name": "PowerShell",
"bytes": "10176"
},
{
"name": "Python",
"bytes": "3904288"
},
{
"name": "Shell",
"bytes": "13833"
}
],
"symlink_target": ""
}
|
"""
Mixcloud OAuth2 backend, docs at:
https://python-social-auth.readthedocs.io/en/latest/backends/mixcloud.html
"""
from .oauth import BaseOAuth2
class MixcloudOAuth2(BaseOAuth2):
name = 'mixcloud'
ID_KEY = 'username'
AUTHORIZATION_URL = 'https://www.mixcloud.com/oauth/authorize'
ACCESS_TOKEN_URL = 'https://www.mixcloud.com/oauth/access_token'
ACCESS_TOKEN_METHOD = 'POST'
def get_user_details(self, response):
fullname, first_name, last_name = self.get_user_names(response['name'])
return {'username': response['username'],
'email': None,
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
return self.get_json('https://api.mixcloud.com/me/',
params={'access_token': access_token,
'alt': 'json'})
|
{
"content_hash": "760481442a9b44a390ae356f198f8a49",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 37.07692307692308,
"alnum_prop": 0.5881742738589212,
"repo_name": "IKholopov/HackUPC2017",
"id": "8b1b77e531dd1015aaaad0304f1a51f75c02f0da",
"size": "964",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "hackupc/env/lib/python3.5/site-packages/social_core/backends/mixcloud.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "63043"
},
{
"name": "HTML",
"bytes": "40996"
},
{
"name": "JavaScript",
"bytes": "272171"
},
{
"name": "Python",
"bytes": "40280"
}
],
"symlink_target": ""
}
|
from django.forms import ModelForm
from bootstrap3_datetime.widgets import DateTimePicker
from django import forms
from django.db import models
from django.forms.fields import DateField, ChoiceField, MultipleChoiceField
from django.forms.widgets import RadioSelect, CheckboxSelectMultiple
import re
#from django.core import Validator
from django.shortcuts import get_object_or_404
from register.models import User, Login
from register.lists import LIST_OF_STATES
from register.helper import *
GENDER_CHOICES = (('M', 'Male'),('F','Female'))
INTEREST_CHOICES = (('Poetry', 'Poetry'), ('Seminar', 'Seminar'))
SKILLS_CHOICES = (('Writing', 'Writing'), ('Web Design', 'Web Design'))
#Custom Functions
def check_alpha(name):
    if re.match("^[A-Za-z ]*$", name):
return True
else:
return False
def user_exists(email_):
if Login.objects.all().filter(email=email_):
return True
else:
return False
class UserForm(ModelForm):
name=forms.CharField(
required=True,
label='Name',
widget=forms.TextInput(
attrs={'placeholder': 'Name'}
)
)
date_of_birth = forms.DateField(
required=True,
widget=DateTimePicker(
options={
"pickTime": False},
attrs={'placeholder': 'Date of Birth'}
)
)
gender=forms.ChoiceField(
required=True,
label='Gender',
choices=GENDER_CHOICES,
widget=forms.RadioSelect()
)
state=forms.CharField(
required=True,
label='State',
widget=forms.Select(
choices=LIST_OF_STATES,
attrs={'placeholder': 'State'},
)
)
city=forms.CharField(
required=True,
label='City',
widget=forms.TextInput(
attrs={'placeholder': 'City'}
)
)
contact_no = forms.IntegerField(
required=True,
label='Phone number',
widget=forms.TextInput(
attrs={'placeholder':'Phone Number'}
)
)
department=forms.CharField(
required=True,
label='Department Name',
widget=forms.TextInput(
attrs={'placeholder': 'Department Name'}
)
)
email=forms.EmailField(
required=True,
label='Email',
widget=forms.TextInput(
attrs={'placeholder': 'Email Address'}
)
)
interest=forms.MultipleChoiceField(
required=True,
label='Interests',
choices=INTEREST_CHOICES,
widget=forms.CheckboxSelectMultiple(
attrs={'placeholder': 'Interests'}
)
)
skills=forms.MultipleChoiceField(
required=True,
label='Skills',
choices=SKILLS_CHOICES,
widget=forms.CheckboxSelectMultiple(
attrs={'placeholder': 'Skills'}
)
)
class Meta:
model = User
widgets = {'user_id':forms.HiddenInput()}
        exclude = ('usermode',)  # one-element tuple; a bare string here is a bug
usermode = 'Member'
def clean_name(self):
name = self.cleaned_data['name']
if check_alpha(name):
return name
else:
raise forms.ValidationError("Please enter only alphabets")
class LoginForm(ModelForm):
email=forms.EmailField(
required=True,
label='Email',
widget=forms.TextInput(
attrs={'placeholder': 'Email Address'}
)
)
password=forms.CharField(
required=True,
max_length=100,
label='Password',
widget=forms.PasswordInput(
attrs={'placeholder': 'Password'}
)
)
repass = forms.CharField(
max_length=100,
required=True,
label='Re-enter password',
widget=forms.PasswordInput(
attrs={'placeholder': 'Re Enter Your Password'}
),
)
class Meta:
model = Login
widgets = {'password':forms.PasswordInput(),'email':forms.HiddenInput()}
def clean_repass(self):
password = self.cleaned_data['password']
re_password = self.cleaned_data['repass']
if password == re_password:
return password
else:
raise forms.ValidationError("Passwords don't match")
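# A minimal validation sketch (hypothetical form data; assumes a configured
# Django settings module and the Login model above):
#
#     form = LoginForm(data={'email': 'user@example.com',
#                            'password': 'secret', 'repass': 'secret'})
#     form.is_valid()  # True; mismatched passwords raise "Passwords don't match"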
|
{
"content_hash": "a310d592b5a2d1b56a2475e7e1ba4213",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 80,
"avg_line_length": 25.28994082840237,
"alnum_prop": 0.5807206364061769,
"repo_name": "seshagiriprabhu/ayudh-portal",
"id": "d8d8505b5d085fd9881079f526031fd28307accb",
"size": "4274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "register/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "196804"
},
{
"name": "JavaScript",
"bytes": "14135"
},
{
"name": "Python",
"bytes": "25259"
}
],
"symlink_target": ""
}
|
import logging
import numpy as np
import scipy.interpolate as intpl
from pySDC.core.Nodes import NodesGenerator
from pySDC.core.Errors import CollocationError
from pySDC.core.Lagrange import LagrangeApproximation
class CollBase(object):
"""
Generic collocation class, that contains everything to do integration over
intervals and between nodes.
    It can be used to produce many kinds of quadrature nodes from various
    distributions (awesome!).
It is based on the two main parameters that define the nodes :
- node_type : the node distribution used for the collocation method
    - quad_type : the type of quadrature used (inclusion or not of the boundary points)
Current implementation provides the following available parameter values
for node_type :
- EQUID : equidistant node distribution
- LEGENDRE : distribution from Legendre polynomials
- CHEBY-{1,2,3,4} : distribution from Chebyshev polynomials of a given kind
    The type of quadrature can be GAUSS (only inner nodes), RADAU-LEFT
(inclusion of the left boundary), RADAU-RIGHT (inclusion of the right
boundary) and LOBATTO (inclusion of left and right boundary).
    Furthermore, the ``useSpline`` option can be activated to use spline
    interpolation instead when computing the weights.
Here is the equivalency table with the original classes implemented in
pySDC :
+-------------------------+-----------+-------------+-----------+
| Original Class | node_type | quad_type | useSpline |
+=========================+===========+=============+===========+
| Equidistant | EQUID | LOBATTO | False |
+-------------------------+-----------+-------------+-----------+
| EquidistantInner | EQUID | GAUSS | False |
+-------------------------+-----------+-------------+-----------+
| EquidistantNoLeft | EQUID | RADAU-RIGHT | False |
+-------------------------+-----------+-------------+-----------+
| EquidistantSpline_Right | EQUID | RADAU-RIGHT | True |
+-------------------------+-----------+-------------+-----------+
| CollGaussLegendre | LEGENDRE | GAUSS | False |
+-------------------------+-----------+-------------+-----------+
| CollGaussLobatto | LEGENDRE | LOBATTO | False |
+-------------------------+-----------+-------------+-----------+
| CollGaussRadau_Left | LEGENDRE | RADAU-LEFT | False |
+-------------------------+-----------+-------------+-----------+
| CollGaussRadau_Right | LEGENDRE | RADAU-RIGHT | False |
+-------------------------+-----------+-------------+-----------+
Attributes:
num_nodes (int): number of collocation nodes
tleft (float): left interval point
tright (float): right interval point
nodes (numpy.ndarray): array of quadrature nodes
weights (numpy.ndarray): array of quadrature weights for the full interval
Qmat (numpy.ndarray): matrix containing the weights for tleft to node
Smat (numpy.ndarray): matrix containing the weights for node to node
delta_m (numpy.ndarray): array of distances between nodes
right_is_node (bool): flag to indicate whether right point is collocation node
left_is_node (bool): flag to indicate whether left point is collocation node
"""
def __init__(
self, num_nodes=None, tleft=0, tright=1, node_type='LEGENDRE', quad_type=None, useSpline=False, **kwargs
):
"""
Initialization routine for a collocation object
Args:
num_nodes (int): number of collocation nodes
tleft (float): left interval point
tright (float): right interval point
"""
if not num_nodes > 0:
raise CollocationError('At least one quadrature node required, got %s' % num_nodes)
if not tleft < tright:
raise CollocationError('Interval boundaries are corrupt, got %s and %s' % (tleft, tright))
self.logger = logging.getLogger('collocation')
# Set number of nodes, left and right interval boundaries
self.num_nodes = num_nodes
self.tleft = tleft
self.tright = tright
self.node_type = node_type
self.quad_type = quad_type
        # Instantiate attributes
self.nodeGenerator = NodesGenerator(self.node_type, self.quad_type)
if useSpline:
self._getWeights = self._getWeights_spline
# We need: 1<=order<=5 and order < num_nodes
self.order = min(num_nodes - 1, 3)
elif self.node_type == 'EQUID':
self.order = num_nodes
else:
if self.quad_type == 'GAUSS':
self.order = 2 * num_nodes
elif self.quad_type.startswith('RADAU'):
self.order = 2 * num_nodes - 1
elif self.quad_type == 'LOBATTO':
self.order = 2 * num_nodes - 2
self.left_is_node = self.quad_type in ['LOBATTO', 'RADAU-LEFT']
self.right_is_node = self.quad_type in ['LOBATTO', 'RADAU-RIGHT']
self.nodes = self._getNodes
self.weights = self._getWeights(tleft, tright)
self.Qmat = self._gen_Qmatrix_spline if useSpline else self._gen_Qmatrix
self.Smat = self._gen_Smatrix
self.delta_m = self._gen_deltas
@staticmethod
def evaluate(weights, data):
"""
Evaluates the quadrature over the full interval
Args:
weights (numpy.ndarray): array of quadrature weights for the full interval
data (numpy.ndarray): f(x) to be integrated
Returns:
numpy.ndarray: integral over f(x) between tleft and tright
"""
        if not np.size(weights) == np.size(data):
            raise CollocationError("Input size does not match the number of weights: got %s" % np.size(data))
return np.dot(weights, data)
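    # Example (sketch): integrating a vectorized function f over the full
    # interval with the precomputed weights of a CollBase instance `coll`:
    #
    #     integral = CollBase.evaluate(coll.weights, f(coll.nodes))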
def _getWeights(self, a, b):
"""
Computes weights using barycentric interpolation
Args:
a (float): left interval boundary
b (float): right interval boundary
Returns:
numpy.ndarray: weights of the collocation formula given by the nodes
"""
if self.nodes is None:
raise CollocationError(f"Need nodes before computing weights, got {self.nodes}")
# Instantiate the Lagrange interpolator object
approx = LagrangeApproximation(self.nodes, weightComputation='AUTO')
# Compute weights
tLeft = np.ravel(self.tleft)[0]
tRight = np.ravel(self.tright)[0]
weights = approx.getIntegrationMatrix([(tLeft, tRight)], numQuad='FEJER')
return np.ravel(weights)
@property
def _getNodes(self):
"""
Computes nodes using an internal NodesGenerator object
Returns:
            np.ndarray: array of collocation nodes
"""
# Generate normalized nodes in [-1, 1]
nodes = self.nodeGenerator.getNodes(self.num_nodes)
# Scale nodes to [tleft, tright]
a = self.tleft
b = self.tright
nodes += 1.0
nodes /= 2.0
nodes *= b - a
nodes += a
if self.left_is_node:
nodes[0] = self.tleft
if self.right_is_node:
nodes[-1] = self.tright
return nodes
@property
def _gen_Qmatrix(self):
"""
Compute tleft-to-node integration matrix for later use in collocation formulation
Returns:
numpy.ndarray: matrix containing the weights for tleft to node
"""
if self.nodes is None:
raise CollocationError(f"Need nodes before computing weights, got {self.nodes}")
M = self.num_nodes
Q = np.zeros([M + 1, M + 1])
# Instantiate the Lagrange interpolator object
approx = LagrangeApproximation(self.nodes, weightComputation='AUTO')
# Compute tleft-to-node integration matrix
tLeft = np.ravel(self.tleft)[0]
intervals = [(tLeft, tau) for tau in self.nodes]
intQ = approx.getIntegrationMatrix(intervals, numQuad='FEJER')
# Store into Q matrix
Q[1:, 1:] = intQ
return Q
@property
def _gen_Smatrix(self):
"""
Compute node-to-node integration matrix for later use in collocation formulation
Returns:
numpy.ndarray: matrix containing the weights for node to node
"""
M = self.num_nodes
Q = self.Qmat
S = np.zeros([M + 1, M + 1])
S[1, :] = Q[1, :]
for m in np.arange(2, M + 1):
S[m, :] = Q[m, :] - Q[m - 1, :]
return S
@property
def _gen_deltas(self):
"""
Compute distances between the nodes
Returns:
numpy.ndarray: distances between the nodes
"""
M = self.num_nodes
delta = np.zeros(M)
delta[0] = self.nodes[0] - self.tleft
for m in np.arange(1, M):
delta[m] = self.nodes[m] - self.nodes[m - 1]
return delta
def _getWeights_spline(self, a, b):
"""
Computes weights using spline interpolation instead of Gaussian
quadrature
Args:
a (float): left interval boundary
b (float): right interval boundary
Returns:
np.ndarray: weights of the collocation formula given by the nodes
"""
# get the defining tck's for each spline basis function
circ_one = np.zeros(self.num_nodes)
circ_one[0] = 1.0
tcks = []
for i in range(self.num_nodes):
tcks.append(
intpl.splrep(self.nodes, np.roll(circ_one, i), xb=self.tleft, xe=self.tright, k=self.order, s=0.0)
)
weights = np.zeros(self.num_nodes)
for i in range(self.num_nodes):
weights[i] = intpl.splint(a, b, tcks[i])
return weights
@property
def _gen_Qmatrix_spline(self):
"""
Compute tleft-to-node integration matrix for later use in collocation formulation
Returns:
numpy.ndarray: matrix containing the weights for tleft to node
"""
M = self.num_nodes
Q = np.zeros([M + 1, M + 1])
# for all nodes, get weights for the interval [tleft,node]
for m in np.arange(M):
Q[m + 1, 1:] = self._getWeights(self.tleft, self.nodes[m])
return Q
|
{
"content_hash": "33a6f64fd4881bfc3af6d3aebf89ca3a",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 114,
"avg_line_length": 35.79251700680272,
"alnum_prop": 0.5620070322151478,
"repo_name": "Parallel-in-Time/pySDC",
"id": "e86ea5d9c65c0fada40faec62232df955984f5c9",
"size": "10523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pySDC/core/Collocation.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4264000"
},
{
"name": "Python",
"bytes": "2450453"
},
{
"name": "Shell",
"bytes": "18105"
}
],
"symlink_target": ""
}
|
from snovault import COLLECTIONS
from snovault.calculated import calculate_properties
from snovault.validation import ValidationFailure
from snovault.validators import no_validate_item_content_post
from operator import itemgetter
from pyramid.authentication import CallbackAuthenticationPolicy
import requests
from pyramid.httpexceptions import (
HTTPForbidden,
HTTPFound,
)
from pyramid.security import (
NO_PERMISSION_REQUIRED,
remember,
forget,
)
from pyramid.settings import (
asbool,
aslist,
)
from pyramid.traversal import find_resource
from pyramid.view import (
view_config,
)
_marker = object()
def includeme(config):
config.scan(__name__)
config.add_route('login', 'login')
config.add_route('logout', 'logout')
config.add_route('session', 'session')
config.add_route('session-properties', 'session-properties')
config.add_route('impersonate-user', 'impersonate-user')
class LoginDenied(HTTPForbidden):
title = 'Login failure'
class Auth0AuthenticationPolicy(CallbackAuthenticationPolicy):
"""
Checks assertion during authentication so login can construct user session.
"""
login_path = '/login'
method = 'POST'
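    # Expected login payload (sketch): the policy only activates on
    # POST /login with a JSON body such as
    #
    #     {"accessToken": "<Auth0 access token>"}
    #
    # The token is then verified against the Auth0 /userinfo endpoint below.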
def unauthenticated_userid(self, request):
if request.method != self.method or request.path != self.login_path:
return None
cached = getattr(request, '_auth0_authenticated', _marker)
if cached is not _marker:
return cached
try:
access_token = request.json['accessToken']
except (ValueError, TypeError, KeyError):
if self.debug:
self._log(
'Missing assertion.',
'unauthenticated_userid',
request)
request._auth0_authenticated = None
return None
try:
user_url = "https://{domain}/userinfo?access_token={access_token}" \
.format(domain='t2depi.auth0.com', access_token=access_token)
user_info = requests.get(user_url).json()
except Exception as e:
if self.debug:
self._log(
                    'Invalid assertion: %s (%s)' % (e, type(e).__name__),
'unauthenticated_userid',
request)
request._auth0_authenticated = None
return None
        if user_info['email_verified']:
email = request._auth0_authenticated = user_info['email'].lower()
return email
else:
return None
def remember(self, request, principal, **kw):
return []
def forget(self, request):
return []
# Unfortunately, X-Requested-With is not sufficient.
# http://lists.webappsec.org/pipermail/websecurity_lists.webappsec.org/2011-February/007533.html
# Checking the CSRF token in middleware is easier
@view_config(route_name='login', request_method='POST',
permission=NO_PERMISSION_REQUIRED)
def login(request):
"""View to check the auth0 assertion and remember the user"""
login = request.authenticated_userid
if login is None:
namespace = userid = None
else:
namespace, userid = login.split('.', 1)
if namespace != 'auth0':
request.session.invalidate()
request.response.headerlist.extend(forget(request))
raise LoginDenied()
request.session.invalidate()
request.session.get_csrf_token()
request.response.headerlist.extend(remember(request, 'mailto.' + userid))
properties = request.embed('/session-properties', as_user=userid)
if 'auth.userid' in request.session:
properties['auth.userid'] = request.session['auth.userid']
return properties
@view_config(route_name='logout',
permission=NO_PERMISSION_REQUIRED, http_cache=0)
def logout(request):
"""View to forget the user"""
request.session.invalidate()
request.session.get_csrf_token()
request.response.headerlist.extend(forget(request))
if asbool(request.params.get('redirect', True)):
raise HTTPFound(location=request.resource_path(request.root))
return {}
@view_config(route_name='session-properties', request_method='GET',
permission=NO_PERMISSION_REQUIRED)
def session_properties(request):
for principal in request.effective_principals:
if principal.startswith('userid.'):
break
else:
return {}
namespace, userid = principal.split('.', 1)
user = request.registry[COLLECTIONS]['user'][userid]
user_actions = calculate_properties(user, request, category='user_action')
properties = {
'user': request.embed(request.resource_path(user)),
'user_actions': [v for k, v in sorted(user_actions.items(), key=itemgetter(0))],
'admin': 'group.admin' in request.effective_principals
}
if 'auth.userid' in request.session:
properties['auth.userid'] = request.session['auth.userid']
return properties
@view_config(route_name='session', request_method='GET',
permission=NO_PERMISSION_REQUIRED)
def session(request):
request.session.get_csrf_token()
return request.session
@view_config(route_name='impersonate-user', request_method='POST',
validators=[no_validate_item_content_post],
permission='impersonate')
def impersonate_user(request):
"""As an admin, impersonate a different user."""
user = request.validated['user']
try:
user = find_resource(request.root, user)
except KeyError:
raise ValidationFailure('body', ['user'], 'User not found.')
if user.item_type != 'user':
raise ValidationFailure('body', ['user'], 'User not found.')
if user.properties.get('status') != 'current':
raise ValidationFailure('body', ['user'], 'User is not enabled.')
request.session.invalidate()
request.session.get_csrf_token()
request.response.headerlist.extend(
remember(request, 'mailto.%s' % user.uuid))
user_properties = request.embed(
'/session-properties', as_user=str(user.uuid))
if 'auth.userid' in request.session:
user_properties['auth.userid'] = request.session['auth.userid']
return user_properties
|
{
"content_hash": "7af98515d3016b21a8f01667110bb0cf",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 96,
"avg_line_length": 31.671717171717173,
"alnum_prop": 0.6479030457662255,
"repo_name": "T2DREAM/t2dream-portal",
"id": "04a2ac57105bd36e17b1d6a08147c672c93c077a",
"size": "6271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/encoded/auth0.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AngelScript",
"bytes": "741"
},
{
"name": "CSS",
"bytes": "2874"
},
{
"name": "Gherkin",
"bytes": "16776"
},
{
"name": "HTML",
"bytes": "373076"
},
{
"name": "JavaScript",
"bytes": "1320205"
},
{
"name": "Makefile",
"bytes": "106"
},
{
"name": "Python",
"bytes": "1567328"
},
{
"name": "SCSS",
"bytes": "336182"
},
{
"name": "Shell",
"bytes": "4199"
}
],
"symlink_target": ""
}
|
from .response_codes import ResponseCodes
class Response(object):
    def __init__(self, return_value, code=0, details=None):
        self._return_value = return_value
        self._code = code
        # use None as the default to avoid the shared mutable-default pitfall
        self._details = details if details is not None else {}
def __repr__(self):
        return 'Response<return_value={}, code={}, details={}>'.format(self.return_value, self.code, self.details)
@property
def return_value(self):
return self._return_value
@property
def code(self):
return self._code
@property
def details(self):
return self._details
def __bool__(self):
return self.code == 0
# python 2.x version of __bool__
__nonzero__ = __bool__
@staticmethod
    def loads(s, serializer):
        try:
            payload = serializer.loads(s)
        except (ValueError, TypeError):
            return Response(None, ResponseCodes.MALFORMED_RESPONSE, s)
        try:
            rv, code, details = payload[0:3]
        except (ValueError, TypeError):
            return Response(None, ResponseCodes.MALFORMED_RESPONSE, s)
        else:
            return Response(rv, code, details)
def dumps(self, serializer):
return serializer.dumps([self.return_value, self.code, self.details])
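# Illustrative round-trip (a sketch, assuming a JSON-style serializer such as
# the stdlib ``json`` module):
#
#     import json
#     ok = Response({'id': 1})
#     wire = ok.dumps(json)              # '[{"id": 1}, 0, {}]'
#     back = Response.loads(wire, json)
#     assert bool(back) and back.return_value == {'id': 1}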
|
{
"content_hash": "32b4219a855aa5de0d443bd4d8560e5e",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 111,
"avg_line_length": 26.543478260869566,
"alnum_prop": 0.5921375921375921,
"repo_name": "cbigler/jackrabbit",
"id": "828f83f417f6c0540d30616f66ce6c3d5e528853",
"size": "1221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jackrabbit/response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19998"
}
],
"symlink_target": ""
}
|
"""
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
"""
import copy
import os
import pathlib
import shutil
import sys
import tarfile
import tempfile
import jinja2
import pytest
import salt.exceptions
import salt.utils.hashutils
import salt.utils.json
import salt.utils.platform
import salt.utils.stringutils
from salt.utils import thin
from salt.utils.stringutils import to_bytes as bts
from tests.support.helpers import TstSuiteLoggingHandler, VirtualEnv
from tests.support.mock import MagicMock, patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
try:
import pytest
except ImportError:
pytest = None
def patch_if(condition, *args, **kwargs):
"""
Return a patch decorator if the provided condition is met
"""
if condition:
return patch(*args, **kwargs)
def inner(func):
return func
return inner
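# Example (sketch): patch_if applies the patch only when the condition holds,
# mirroring how it is used for optional modules further below:
#
#     @patch_if(salt.utils.thin.has_immutables,
#               "salt.utils.thin.immutables", MagicMock())
#     def test_something(self):
#         ...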
@skipIf(pytest is None, "PyTest is missing")
class SSHThinTestCase(TestCase):
"""
TestCase for SaltSSH-related parts.
"""
def setUp(self):
self.jinja_fp = os.path.dirname(jinja2.__file__)
self.ext_conf = {
"test": {
"py-version": [2, 7],
"path": RUNTIME_VARS.SALT_CODE_DIR,
"dependencies": {"jinja2": self.jinja_fp},
}
}
self.tops = copy.deepcopy(self.ext_conf)
self.tops["test"]["dependencies"] = [self.jinja_fp]
self.tar = self._tarfile(None).open()
self.digest = salt.utils.hashutils.DigestCollector()
self.exp_files = [
os.path.join("salt", "payload.py"),
os.path.join("jinja2", "__init__.py"),
]
lib_root = os.path.join(RUNTIME_VARS.TMP, "fake-libs")
self.fake_libs = {
"distro": os.path.join(lib_root, "distro"),
"jinja2": os.path.join(lib_root, "jinja2"),
"yaml": os.path.join(lib_root, "yaml"),
"tornado": os.path.join(lib_root, "tornado"),
"msgpack": os.path.join(lib_root, "msgpack"),
}
code_dir = pathlib.Path(RUNTIME_VARS.CODE_DIR).resolve()
self.exp_ret = {
"distro": str(code_dir / "distro.py"),
"jinja2": str(code_dir / "jinja2"),
"yaml": str(code_dir / "yaml"),
"tornado": str(code_dir / "tornado"),
"msgpack": str(code_dir / "msgpack"),
"certifi": str(code_dir / "certifi"),
"singledispatch": str(code_dir / "singledispatch.py"),
}
self.exc_libs = ["jinja2", "yaml"]
def tearDown(self):
for lib, fp in self.fake_libs.items():
if os.path.exists(fp):
shutil.rmtree(fp)
self.exc_libs = None
self.jinja_fp = None
self.ext_conf = None
self.tops = None
self.tar = None
self.digest = None
self.exp_files = None
self.fake_libs = None
self.exp_ret = None
def _popen(self, return_value=None, side_effect=None, returncode=0):
"""
Fake subprocess.Popen
:return:
"""
proc = MagicMock()
proc.communicate = MagicMock(return_value=return_value, side_effect=side_effect)
proc.returncode = returncode
popen = MagicMock(return_value=proc)
return popen
def _version_info(self, major=None, minor=None):
"""
Fake version info.
:param major:
:param minor:
:return:
"""
class VersionInfo(tuple):
pass
vi = VersionInfo([major, minor])
vi.major = major or sys.version_info.major
vi.minor = minor or sys.version_info.minor
return vi
def _tarfile(self, getinfo=False):
"""
Fake tarfile handler.
:return:
"""
spec = ["add", "close"]
if getinfo:
spec.append("getinfo")
tf = MagicMock()
tf.open = MagicMock(return_value=MagicMock(spec=spec))
return tf
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.path.isfile", MagicMock(return_value=False))
def test_get_ext_tops_cfg_missing_dependencies(self):
"""
        Test that thin.get_ext_tops reports missing required dependencies.
:return:
"""
cfg = {"namespace": {"py-version": [0, 0], "path": "/foo", "dependencies": []}}
with pytest.raises(salt.exceptions.SaltSystemExit) as err:
thin.get_ext_tops(cfg)
self.assertIn("Missing dependencies", str(err.value))
self.assertTrue(thin.log.error.called)
self.assertIn("Missing dependencies", thin.log.error.call_args[0][0])
self.assertIn("jinja2, yaml, tornado, msgpack", thin.log.error.call_args[0][0])
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.path.isfile", MagicMock(return_value=False))
def test_get_ext_tops_cfg_missing_interpreter(self):
"""
        Test that thin.get_ext_tops requires an interpreter configuration.
:return:
"""
cfg = {"namespace": {"path": "/foo", "dependencies": []}}
with pytest.raises(salt.exceptions.SaltSystemExit) as err:
thin.get_ext_tops(cfg)
self.assertIn("missing specific locked Python version", str(err.value))
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.path.isfile", MagicMock(return_value=False))
def test_get_ext_tops_cfg_wrong_interpreter(self):
"""
        Test that thin.get_ext_tops rejects a malformed interpreter version configuration.
:return:
"""
cfg = {"namespace": {"path": "/foo", "py-version": 2, "dependencies": []}}
with pytest.raises(salt.exceptions.SaltSystemExit) as err:
thin.get_ext_tops(cfg)
self.assertIn(
"specific locked Python version should be a list of major/minor version",
str(err.value),
)
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.path.isfile", MagicMock(return_value=False))
def test_get_ext_tops_cfg_interpreter(self):
"""
Test thin.get_ext_tops interpreter configuration.
:return:
"""
cfg = {
"namespace": {
"path": "/foo",
"py-version": [2, 6],
"dependencies": {
"jinja2": "",
"yaml": "",
"tornado": "",
"msgpack": "",
},
}
}
with pytest.raises(salt.exceptions.SaltSystemExit):
thin.get_ext_tops(cfg)
assert len(thin.log.warning.mock_calls) == 4
assert sorted(x[1][1] for x in thin.log.warning.mock_calls) == [
"jinja2",
"msgpack",
"tornado",
"yaml",
]
assert (
"Module test has missing configuration"
== thin.log.warning.mock_calls[0][1][0] % "test"
)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.path.isfile", MagicMock(return_value=False))
def test_get_ext_tops_dependency_config_check(self):
"""
Test thin.get_ext_tops dependencies are importable
:return:
"""
cfg = {
"namespace": {
"path": "/foo",
"py-version": [2, 6],
"dependencies": {
"jinja2": "/jinja/foo.py",
"yaml": "/yaml/",
"tornado": "/tornado/wrong.rb",
"msgpack": "msgpack.sh",
},
}
}
with pytest.raises(salt.exceptions.SaltSystemExit) as err:
thin.get_ext_tops(cfg)
self.assertIn(
"Missing dependencies for the alternative version in the "
"external configuration",
str(err.value),
)
messages = {}
for cl in thin.log.warning.mock_calls:
messages[cl[1][1]] = cl[1][0] % (cl[1][1], cl[1][2])
for mod in ["tornado", "yaml", "msgpack"]:
self.assertIn("not a Python importable module", messages[mod])
self.assertIn(
"configured with not a file or does not exist", messages["jinja2"]
)
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.path.isfile", MagicMock(return_value=True))
def test_get_ext_tops_config_pass(self):
"""
Test thin.get_ext_tops configuration
:return:
"""
cfg = {
"namespace": {
"path": "/foo",
"py-version": [2, 6],
"dependencies": {
"jinja2": "/jinja/foo.py",
"yaml": "/yaml/",
"tornado": "/tornado/tornado.py",
"msgpack": "msgpack.py",
"distro": "distro.py",
},
}
}
out = thin.get_ext_tops(cfg)
assert out["namespace"]["py-version"] == cfg["namespace"]["py-version"]
assert out["namespace"]["path"] == cfg["namespace"]["path"]
assert sorted(out["namespace"]["dependencies"]) == sorted(
[
"/tornado/tornado.py",
"/jinja/foo.py",
"/yaml/",
"msgpack.py",
"distro.py",
]
)
@patch("salt.utils.thin.sys.argv", [None, '{"foo": "bar"}'])
@patch("salt.utils.thin.get_tops", lambda **kw: kw)
def test_gte(self):
"""
Test thin.gte external call for processing the info about tops per interpreter.
:return:
"""
assert salt.utils.json.loads(thin.gte()).get("foo") == "bar"
def test_add_dep_path(self):
"""
        Test thin._add_dependency function to set up dependency paths
:return:
"""
container = []
for pth in ["/foo/bar.py", "/something/else/__init__.py"]:
thin._add_dependency(container, type("obj", (), {"__file__": pth})())
assert "__init__" not in container[1]
assert container == ["/foo/bar.py", "/something/else"]
def test_thin_path(self):
"""
Test thin.thin_path returns the expected path.
:return:
"""
path = os.sep + os.path.join("path", "to")
expected = os.path.join(path, "thin", "thin.tgz")
self.assertEqual(thin.thin_path(path), expected)
def test_get_salt_call_script(self):
"""
Test get salt-call script rendered.
:return:
"""
out = thin._get_salt_call("foo", "bar", py26=[2, 6], py27=[2, 7], py34=[3, 4])
for line in salt.utils.stringutils.to_str(out).split(os.linesep):
if line.startswith("namespaces = {"):
data = salt.utils.json.loads(line.replace("namespaces = ", "").strip())
assert data.get("py26") == [2, 6]
assert data.get("py27") == [2, 7]
assert data.get("py34") == [3, 4]
if line.startswith("syspaths = "):
data = salt.utils.json.loads(line.replace("syspaths = ", ""))
assert data == ["foo", "bar"]
def test_get_ext_namespaces_empty(self):
"""
        Test thin._get_ext_namespaces function returns an empty dictionary for empty input
:return:
"""
for obj in [None, {}, []]:
assert thin._get_ext_namespaces(obj) == {}
def test_get_ext_namespaces(self):
"""
Test thin._get_ext_namespaces function returns namespaces properly out of the config.
:return:
"""
cfg = {"ns": {"py-version": [2, 7]}}
assert thin._get_ext_namespaces(cfg).get("ns") == (
2,
7,
)
assert isinstance(thin._get_ext_namespaces(cfg).get("ns"), tuple)
def test_get_ext_namespaces_failure(self):
"""
Test thin._get_ext_namespaces function raises an exception
if python major/minor version is not configured.
:return:
"""
with pytest.raises(salt.exceptions.SaltSystemExit):
thin._get_ext_namespaces({"ns": {}})
@patch(
"salt.utils.thin.distro",
type("distro", (), {"__file__": "/site-packages/distro"}),
)
@patch(
"salt.utils.thin.salt",
type("salt", (), {"__file__": "/site-packages/salt"}),
)
@patch(
"salt.utils.thin.jinja2",
type("jinja2", (), {"__file__": "/site-packages/jinja2"}),
)
@patch(
"salt.utils.thin.yaml",
type("yaml", (), {"__file__": "/site-packages/yaml"}),
)
@patch(
"salt.utils.thin.tornado",
type("tornado", (), {"__file__": "/site-packages/tornado"}),
)
@patch(
"salt.utils.thin.msgpack",
type("msgpack", (), {"__file__": "/site-packages/msgpack"}),
)
@patch(
"salt.utils.thin.certifi",
type("certifi", (), {"__file__": "/site-packages/certifi"}),
)
@patch(
"salt.utils.thin.singledispatch",
type("singledispatch", (), {"__file__": "/site-packages/sdp"}),
)
@patch(
"salt.utils.thin.singledispatch_helpers",
type("singledispatch_helpers", (), {"__file__": "/site-packages/sdp_hlp"}),
)
@patch(
"salt.utils.thin.ssl_match_hostname",
type("ssl_match_hostname", (), {"__file__": "/site-packages/ssl_mh"}),
)
@patch(
"salt.utils.thin.markupsafe",
type("markupsafe", (), {"__file__": "/site-packages/markupsafe"}),
)
@patch(
"salt.utils.thin.backports_abc",
type("backports_abc", (), {"__file__": "/site-packages/backports_abc"}),
)
@patch(
"salt.utils.thin.concurrent",
type("concurrent", (), {"__file__": "/site-packages/concurrent"}),
)
@patch(
"salt.utils.thin.py_contextvars",
type("contextvars", (), {"__file__": "/site-packages/contextvars"}),
)
@patch_if(
salt.utils.thin.has_immutables,
"salt.utils.thin.immutables",
type("immutables", (), {"__file__": "/site-packages/immutables"}),
)
@patch("salt.utils.thin.log", MagicMock())
def test_get_tops(self):
"""
Test thin.get_tops to get top directories, based on the interpreter.
:return:
"""
base_tops = [
"distro",
"salt",
"jinja2",
"yaml",
"tornado",
"msgpack",
"certifi",
"sdp",
"sdp_hlp",
"ssl_mh",
"markupsafe",
"backports_abc",
"concurrent",
"contextvars",
]
if salt.utils.thin.has_immutables:
base_tops.extend(["immutables"])
tops = []
for top in thin.get_tops(extra_mods="foo,bar"):
if top.find("/") != -1:
spl = "/"
else:
spl = os.sep
tops.append(top.rsplit(spl, 1)[-1])
assert len(tops) == len(base_tops)
assert sorted(tops) == sorted(base_tops), sorted(tops)
@patch(
"salt.utils.thin.distro",
type("distro", (), {"__file__": "/site-packages/distro"}),
)
@patch(
"salt.utils.thin.salt",
type("salt", (), {"__file__": "/site-packages/salt"}),
)
@patch(
"salt.utils.thin.jinja2",
type("jinja2", (), {"__file__": "/site-packages/jinja2"}),
)
@patch(
"salt.utils.thin.yaml",
type("yaml", (), {"__file__": "/site-packages/yaml"}),
)
@patch(
"salt.utils.thin.tornado",
type("tornado", (), {"__file__": "/site-packages/tornado"}),
)
@patch(
"salt.utils.thin.msgpack",
type("msgpack", (), {"__file__": "/site-packages/msgpack"}),
)
@patch(
"salt.utils.thin.certifi",
type("certifi", (), {"__file__": "/site-packages/certifi"}),
)
@patch(
"salt.utils.thin.singledispatch",
type("singledispatch", (), {"__file__": "/site-packages/sdp"}),
)
@patch(
"salt.utils.thin.singledispatch_helpers",
type("singledispatch_helpers", (), {"__file__": "/site-packages/sdp_hlp"}),
)
@patch(
"salt.utils.thin.ssl_match_hostname",
type("ssl_match_hostname", (), {"__file__": "/site-packages/ssl_mh"}),
)
@patch(
"salt.utils.thin.markupsafe",
type("markupsafe", (), {"__file__": "/site-packages/markupsafe"}),
)
@patch(
"salt.utils.thin.backports_abc",
type("backports_abc", (), {"__file__": "/site-packages/backports_abc"}),
)
@patch(
"salt.utils.thin.concurrent",
type("concurrent", (), {"__file__": "/site-packages/concurrent"}),
)
@patch(
"salt.utils.thin.py_contextvars",
type("contextvars", (), {"__file__": "/site-packages/contextvars"}),
)
@patch_if(
salt.utils.thin.has_immutables,
"salt.utils.thin.immutables",
type("immutables", (), {"__file__": "/site-packages/immutables"}),
)
@patch("salt.utils.thin.log", MagicMock())
def test_get_tops_extra_mods(self):
"""
Test thin.get_tops to get extra-modules alongside the top directories, based on the interpreter.
:return:
"""
base_tops = [
"distro",
"salt",
"jinja2",
"yaml",
"tornado",
"msgpack",
"certifi",
"sdp",
"sdp_hlp",
"ssl_mh",
"concurrent",
"markupsafe",
"backports_abc",
"contextvars",
"foo",
"bar.py",
]
if salt.utils.thin.has_immutables:
base_tops.extend(["immutables"])
libs = salt.utils.thin.find_site_modules("contextvars")
foo = {"__file__": os.sep + os.path.join("custom", "foo", "__init__.py")}
bar = {"__file__": os.sep + os.path.join("custom", "bar")}
with patch("salt.utils.thin.find_site_modules", MagicMock(side_effect=[libs])):
with patch(
"builtins.__import__",
MagicMock(side_effect=[type("foo", (), foo), type("bar", (), bar)]),
):
tops = []
for top in thin.get_tops(extra_mods="foo,bar"):
if top.find("/") != -1:
spl = "/"
else:
spl = os.sep
tops.append(top.rsplit(spl, 1)[-1])
self.assertEqual(len(tops), len(base_tops))
self.assertListEqual(sorted(tops), sorted(base_tops))
@patch(
"salt.utils.thin.distro",
type("distro", (), {"__file__": "/site-packages/distro"}),
)
@patch(
"salt.utils.thin.salt",
type("salt", (), {"__file__": "/site-packages/salt"}),
)
@patch(
"salt.utils.thin.jinja2",
type("jinja2", (), {"__file__": "/site-packages/jinja2"}),
)
@patch(
"salt.utils.thin.yaml",
type("yaml", (), {"__file__": "/site-packages/yaml"}),
)
@patch(
"salt.utils.thin.tornado",
type("tornado", (), {"__file__": "/site-packages/tornado"}),
)
@patch(
"salt.utils.thin.msgpack",
type("msgpack", (), {"__file__": "/site-packages/msgpack"}),
)
@patch(
"salt.utils.thin.certifi",
type("certifi", (), {"__file__": "/site-packages/certifi"}),
)
@patch(
"salt.utils.thin.singledispatch",
type("singledispatch", (), {"__file__": "/site-packages/sdp"}),
)
@patch(
"salt.utils.thin.singledispatch_helpers",
type("singledispatch_helpers", (), {"__file__": "/site-packages/sdp_hlp"}),
)
@patch(
"salt.utils.thin.ssl_match_hostname",
type("ssl_match_hostname", (), {"__file__": "/site-packages/ssl_mh"}),
)
@patch(
"salt.utils.thin.markupsafe",
type("markupsafe", (), {"__file__": "/site-packages/markupsafe"}),
)
@patch(
"salt.utils.thin.backports_abc",
type("backports_abc", (), {"__file__": "/site-packages/backports_abc"}),
)
@patch(
"salt.utils.thin.concurrent",
type("concurrent", (), {"__file__": "/site-packages/concurrent"}),
)
@patch(
"salt.utils.thin.py_contextvars",
type("contextvars", (), {"__file__": "/site-packages/contextvars"}),
)
@patch_if(
salt.utils.thin.has_immutables,
"salt.utils.thin.immutables",
type("immutables", (), {"__file__": "/site-packages/immutables"}),
)
@patch("salt.utils.thin.log", MagicMock())
def test_get_tops_so_mods(self):
"""
        Test thin.get_tops to get shared-object modules alongside the top directories, based on the interpreter.
:return:
"""
base_tops = [
"distro",
"salt",
"jinja2",
"yaml",
"tornado",
"msgpack",
"certifi",
"sdp",
"sdp_hlp",
"ssl_mh",
"concurrent",
"markupsafe",
"backports_abc",
"contextvars",
"foo.so",
"bar.so",
]
if salt.utils.thin.has_immutables:
base_tops.extend(["immutables"])
libs = salt.utils.thin.find_site_modules("contextvars")
with patch("salt.utils.thin.find_site_modules", MagicMock(side_effect=[libs])):
with patch(
"builtins.__import__",
MagicMock(
side_effect=[
type("salt", (), {"__file__": "/custom/foo.so"}),
type("salt", (), {"__file__": "/custom/bar.so"}),
]
),
):
tops = []
for top in thin.get_tops(so_mods="foo,bar"):
if top.find("/") != -1:
spl = "/"
else:
spl = os.sep
tops.append(top.rsplit(spl, 1)[-1])
assert len(tops) == len(base_tops)
assert sorted(tops) == sorted(base_tops)
@patch("salt.utils.thin.gen_thin", MagicMock(return_value="/path/to/thin/thin.tgz"))
@patch("salt.utils.hashutils.get_hash", MagicMock(return_value=12345))
def test_thin_sum(self):
"""
Test thin.thin_sum function.
:return:
"""
assert thin.thin_sum("/cachedir", form="sha256")[1] == 12345
thin.salt.utils.hashutils.get_hash.assert_called()
assert thin.salt.utils.hashutils.get_hash.call_count == 1
path, form = thin.salt.utils.hashutils.get_hash.call_args[0]
assert path == "/path/to/thin/thin.tgz"
assert form == "sha256"
@patch("salt.utils.thin.gen_min", MagicMock(return_value="/path/to/thin/min.tgz"))
@patch("salt.utils.hashutils.get_hash", MagicMock(return_value=12345))
def test_min_sum(self):
"""
        Test thin.min_sum function.
:return:
"""
assert thin.min_sum("/cachedir", form="sha256") == 12345
thin.salt.utils.hashutils.get_hash.assert_called()
assert thin.salt.utils.hashutils.get_hash.call_count == 1
path, form = thin.salt.utils.hashutils.get_hash.call_args[0]
assert path == "/path/to/thin/min.tgz"
assert form == "sha256"
@patch("salt.utils.thin.sys.version_info", (2, 5))
@patch("salt.exceptions.SaltSystemExit", Exception)
def test_gen_thin_fails_ancient_python_version(self):
"""
        Test that thin.gen_thin raises an exception
        if the Python major/minor version is lower than 2.6
:return:
"""
with pytest.raises(salt.exceptions.SaltSystemExit) as err:
thin.sys.exc_clear = lambda: None
thin.gen_thin("")
self.assertIn(
'The minimum required python version to run salt-ssh is "3"',
str(err.value),
)
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.makedirs", MagicMock())
@patch("salt.utils.files.fopen", MagicMock())
@patch("salt.utils.thin._get_salt_call", MagicMock())
@patch("salt.utils.thin._get_ext_namespaces", MagicMock())
@patch("salt.utils.thin.get_tops", MagicMock(return_value=["/foo3", "/bar3"]))
@patch("salt.utils.thin.get_ext_tops", MagicMock(return_value={}))
@patch("salt.utils.thin.os.path.isfile", MagicMock())
@patch("salt.utils.thin.os.path.isdir", MagicMock(return_value=True))
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.remove", MagicMock())
@patch("salt.utils.thin.os.path.exists", MagicMock())
@patch("salt.utils.path.os_walk", MagicMock(return_value=[]))
@patch(
"salt.utils.thin.subprocess.Popen",
_popen(
None,
side_effect=[(bts("2.7"), bts("")), (bts('["/foo27", "/bar27"]'), bts(""))],
),
)
@patch("salt.utils.thin.tarfile", MagicMock())
@patch("salt.utils.thin.zipfile", MagicMock())
@patch("salt.utils.thin.os.getcwd", MagicMock())
@patch("salt.utils.thin.os.access", MagicMock(return_value=True))
@patch("salt.utils.thin.os.chdir", MagicMock())
@patch("salt.utils.thin.os.close", MagicMock())
@patch("salt.utils.thin.tempfile.mkdtemp", MagicMock())
@patch(
"salt.utils.thin.tempfile.mkstemp", MagicMock(return_value=(3, ".temporary"))
)
@patch("salt.utils.thin.shutil", MagicMock())
@patch("salt.utils.thin.sys.version_info", _version_info(None, 3, 6))
@patch("salt.utils.path.which", MagicMock(return_value="/usr/bin/python"))
def test_gen_thin_compression_fallback_py3(self):
"""
Test thin.gen_thin function if fallbacks to the gzip compression, once setup wrong.
NOTE: Py2 version of this test is not required, as code shares the same spot across the versions.
:return:
"""
thin.gen_thin("", compress="arj")
thin.log.warning.assert_called()
pt, msg = thin.log.warning.mock_calls[0][1]
assert (
pt % msg
== 'Unknown compression type: "arj". Falling back to "gzip" compression.'
)
thin.zipfile.ZipFile.assert_not_called()
thin.tarfile.open.assert_called()
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.makedirs", MagicMock())
@patch("salt.utils.files.fopen", MagicMock())
@patch("salt.utils.thin._get_salt_call", MagicMock())
@patch("salt.utils.thin._get_ext_namespaces", MagicMock())
@patch("salt.utils.thin.get_tops", MagicMock(return_value=["/foo3", "/bar3"]))
@patch("salt.utils.thin.get_ext_tops", MagicMock(return_value={}))
@patch("salt.utils.thin.os.path.isfile", MagicMock())
@patch("salt.utils.thin.os.path.isdir", MagicMock(return_value=False))
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.remove", MagicMock())
@patch("salt.utils.thin.os.path.exists", MagicMock())
@patch("salt.utils.path.os_walk", MagicMock(return_value=[]))
@patch(
"salt.utils.thin.subprocess.Popen",
_popen(
None,
side_effect=[(bts("2.7"), bts("")), (bts('["/foo27", "/bar27"]'), bts(""))],
),
)
@patch("salt.utils.thin.tarfile", MagicMock())
@patch("salt.utils.thin.zipfile", MagicMock())
@patch("salt.utils.thin.os.getcwd", MagicMock())
@patch("salt.utils.thin.os.access", MagicMock(return_value=True))
@patch("salt.utils.thin.os.chdir", MagicMock())
@patch("salt.utils.thin.os.close", MagicMock())
@patch("salt.utils.thin.tempfile.mkdtemp", MagicMock(return_value=""))
@patch(
"salt.utils.thin.tempfile.mkstemp", MagicMock(return_value=(3, ".temporary"))
)
@patch("salt.utils.thin.shutil", MagicMock())
@patch("salt.utils.thin.sys.version_info", _version_info(None, 3, 6))
@patch("salt.utils.path.which", MagicMock(return_value="/usr/bin/python"))
def test_gen_thin_control_files_written_py3(self):
"""
        Test that thin.gen_thin writes the control files (version, salt-call, etc.).
:return:
"""
thin.gen_thin("")
arc_name, arc_mode = thin.tarfile.method_calls[0][1]
self.assertEqual(arc_name, ".temporary")
self.assertEqual(arc_mode, "w:gz")
for idx, fname in enumerate(
["version", ".thin-gen-py-version", "salt-call", "supported-versions"]
):
name = thin.tarfile.open().method_calls[idx + 2][1][0]
self.assertEqual(name, fname)
thin.tarfile.open().close.assert_called()
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.makedirs", MagicMock())
@patch("salt.utils.files.fopen", MagicMock())
@patch("salt.utils.thin._get_salt_call", MagicMock())
@patch("salt.utils.thin._get_ext_namespaces", MagicMock())
@patch("salt.utils.thin.get_tops", MagicMock(return_value=["/salt", "/bar3"]))
@patch("salt.utils.thin.get_ext_tops", MagicMock(return_value={}))
@patch("salt.utils.thin.os.path.isfile", MagicMock())
@patch("salt.utils.thin.os.path.isdir", MagicMock(return_value=True))
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.remove", MagicMock())
@patch("salt.utils.thin.os.path.exists", MagicMock())
@patch(
"salt.utils.path.os_walk",
MagicMock(
return_value=(
("root", [], ["r1", "r2", "r3"]),
("root2", [], ["r4", "r5", "r6"]),
)
),
)
@patch("salt.utils.thin.tarfile", _tarfile(None))
@patch("salt.utils.thin.zipfile", MagicMock())
@patch(
"salt.utils.thin.os.getcwd",
MagicMock(return_value=os.path.join(RUNTIME_VARS.TMP, "fake-cwd")),
)
@patch("salt.utils.thin.os.chdir", MagicMock())
@patch("salt.utils.thin.os.close", MagicMock())
@patch("salt.utils.thin.tempfile.mkdtemp", MagicMock(return_value=""))
@patch(
"salt.utils.thin.tempfile.mkstemp", MagicMock(return_value=(3, ".temporary"))
)
@patch("salt.utils.thin.shutil", MagicMock())
@patch("salt.utils.thin.sys.version_info", _version_info(None, 3, 6))
@patch("salt.utils.hashutils.DigestCollector", MagicMock())
@patch("salt.utils.path.which", MagicMock(return_value="/usr/bin/python"))
def test_gen_thin_main_content_files_written_py3(self):
"""
        Test that thin.gen_thin writes the main content files.
NOTE: Py2 version of this test is not required, as code shares the same spot across the versions.
:return:
"""
thin.gen_thin("")
files = []
for py in ("py3", "pyall"):
for i in range(1, 4):
files.append(os.path.join(py, "root", "r{}".format(i)))
for i in range(4, 7):
files.append(os.path.join(py, "root2", "r{}".format(i)))
for cl in thin.tarfile.open().method_calls[:-6]:
arcname = cl[2].get("arcname")
self.assertIn(arcname, files)
files.pop(files.index(arcname))
self.assertFalse(files)
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.makedirs", MagicMock())
@patch("salt.utils.files.fopen", MagicMock())
@patch("salt.utils.thin._get_salt_call", MagicMock())
@patch("salt.utils.thin._get_ext_namespaces", MagicMock())
@patch("salt.utils.thin.get_tops", MagicMock(return_value=[]))
@patch(
"salt.utils.thin.get_ext_tops",
MagicMock(
return_value={
"namespace": {
"py-version": [3, 0],
"path": "/opt/2015.8/salt",
"dependencies": ["/opt/certifi", "/opt/whatever"],
}
}
),
)
@patch("salt.utils.thin.os.path.isfile", MagicMock())
@patch("salt.utils.thin.os.path.isdir", MagicMock(return_value=True))
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.remove", MagicMock())
@patch("salt.utils.thin.os.path.exists", MagicMock())
@patch(
"salt.utils.path.os_walk",
MagicMock(
return_value=(
("root", [], ["r1", "r2", "r3"]),
("root2", [], ["r4", "r5", "r6"]),
)
),
)
@patch("salt.utils.thin.tarfile", _tarfile(None))
@patch("salt.utils.thin.zipfile", MagicMock())
@patch(
"salt.utils.thin.os.getcwd",
MagicMock(return_value=os.path.join(RUNTIME_VARS.TMP, "fake-cwd")),
)
@patch("salt.utils.thin.os.chdir", MagicMock())
@patch("salt.utils.thin.os.close", MagicMock())
@patch("salt.utils.thin.tempfile.mkdtemp", MagicMock(return_value=""))
@patch(
"salt.utils.thin.tempfile.mkstemp", MagicMock(return_value=(3, ".temporary"))
)
@patch("salt.utils.thin.shutil", MagicMock())
@patch("salt.utils.thin.sys.version_info", _version_info(None, 3, 6))
@patch("salt.utils.hashutils.DigestCollector", MagicMock())
@patch("salt.utils.path.which", MagicMock(return_value="/usr/bin/python"))
def test_gen_thin_ext_alternative_content_files_written_py3(self):
"""
        Test that thin.gen_thin writes the external alternative content files.
:return:
"""
ext_conf = {
"namespace": {
"py-version": [3, 0],
"path": "/opt/2015.8/salt",
"dependencies": {
"certifi": "/opt/certifi",
"whatever": "/opt/whatever",
},
}
}
thin.gen_thin("", extended_cfg=ext_conf)
files = []
for py in ("pyall", "pyall", "py3"):
for i in range(1, 4):
files.append(os.path.join("namespace", py, "root", "r{}".format(i)))
for i in range(4, 7):
files.append(os.path.join("namespace", py, "root2", "r{}".format(i)))
for idx, cl in enumerate(thin.tarfile.open().method_calls[:-6]):
arcname = cl[2].get("arcname")
self.assertIn(arcname, files)
files.pop(files.index(arcname))
self.assertFalse(files)
def test_get_supported_py_config_typecheck(self):
"""
Test collecting proper py-versions. Should return bytes type.
:return:
"""
tops = {}
ext_cfg = {}
out = thin._get_supported_py_config(tops=tops, extended_cfg=ext_cfg)
assert type(salt.utils.stringutils.to_bytes("")) == type(out)
def test_get_supported_py_config_base_tops(self):
"""
Test collecting proper py-versions. Should return proper base tops.
:return:
"""
tops = {"3": ["/groundkeepers", "/stole"], "2": ["/the-root", "/password"]}
ext_cfg = {}
out = (
salt.utils.stringutils.to_str(
thin._get_supported_py_config(tops=tops, extended_cfg=ext_cfg)
)
.strip()
.split(os.linesep)
)
self.assertEqual(len(out), 2)
for t_line in ["py3:3:0", "py2:2:7"]:
self.assertIn(t_line, out)
def test_get_supported_py_config_ext_tops(self):
"""
Test collecting proper py-versions. Should return proper ext conf tops.
:return:
"""
tops = {}
ext_cfg = {
"solar-interference": {"py-version": [2, 6]},
"second-system-effect": {"py-version": [2, 7]},
}
out = (
salt.utils.stringutils.to_str(
thin._get_supported_py_config(tops=tops, extended_cfg=ext_cfg)
)
.strip()
.split(os.linesep)
)
for t_line in ["second-system-effect:2:7", "solar-interference:2:6"]:
self.assertIn(t_line, out)
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.makedirs", MagicMock())
@patch("salt.utils.files.fopen", MagicMock())
@patch("salt.utils.thin._get_salt_call", MagicMock())
@patch("salt.utils.thin._get_ext_namespaces", MagicMock())
@patch("salt.utils.thin.get_tops", MagicMock(return_value=["/foo3", "/bar3"]))
@patch("salt.utils.thin.get_ext_tops", MagicMock(return_value={}))
@patch("salt.utils.thin.os.path.isfile", MagicMock())
@patch("salt.utils.thin.os.path.isdir", MagicMock(return_value=False))
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.remove", MagicMock())
@patch("salt.utils.thin.os.path.exists", MagicMock())
@patch("salt.utils.path.os_walk", MagicMock(return_value=[]))
@patch(
"salt.utils.thin.subprocess.Popen",
_popen(
None,
side_effect=[(bts("2.7"), bts("")), (bts('["/foo27", "/bar27"]'), bts(""))],
),
)
@patch("salt.utils.thin.tarfile", MagicMock())
@patch("salt.utils.thin.zipfile", MagicMock())
@patch("salt.utils.thin.os.getcwd", MagicMock())
@patch("salt.utils.thin.os.access", MagicMock(return_value=False))
@patch("salt.utils.thin.os.chdir", MagicMock())
@patch("salt.utils.thin.os.close", MagicMock())
@patch("salt.utils.thin.tempfile.mkdtemp", MagicMock(return_value=""))
@patch(
"salt.utils.thin.tempfile.mkstemp", MagicMock(return_value=(3, ".temporary"))
)
@patch("salt.utils.thin.shutil", MagicMock())
@patch("salt.utils.thin.sys.version_info", _version_info(None, 3, 6))
def test_gen_thin_control_files_written_access_denied_cwd(self):
"""
        Test that thin.gen_thin writes the control files (version, salt-call, etc.)
        when the current working directory is inaccessible, e.g. Salt is configured to run as
        a non-root user but the command is executed in a directory that the user does not
        have permission to access. Issue #54317.
NOTE: Py2 version of this test is not required, as code shares the same spot across the versions.
:return:
"""
thin.gen_thin("")
arc_name, arc_mode = thin.tarfile.method_calls[0][1]
self.assertEqual(arc_name, ".temporary")
self.assertEqual(arc_mode, "w:gz")
for idx, fname in enumerate(
["version", ".thin-gen-py-version", "salt-call", "supported-versions"]
):
name = thin.tarfile.open().method_calls[idx + 2][1][0]
self.assertEqual(name, fname)
thin.tarfile.open().close.assert_called()
def test_get_tops_python(self):
"""
test get_tops_python
"""
patch_proc = patch(
"salt.utils.thin.subprocess.Popen",
self._popen(
None,
side_effect=[
(bts("jinja2/__init__.py"), bts("")),
(bts("yaml/__init__.py"), bts("")),
(bts("tornado/__init__.py"), bts("")),
(bts("msgpack/__init__.py"), bts("")),
(bts("certifi/__init__.py"), bts("")),
(bts("singledispatch.py"), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts("distro.py"), bts("")),
],
),
)
patch_os = patch("os.path.exists", return_value=True)
patch_which = patch("salt.utils.path.which", return_value=True)
with patch_proc, patch_os, patch_which:
with TstSuiteLoggingHandler() as log_handler:
exp_ret = copy.deepcopy(self.exp_ret)
ret = thin.get_tops_python("python3.7", ext_py_ver=[3, 7])
if salt.utils.platform.is_windows():
for key, value in ret.items():
ret[key] = str(pathlib.Path(value).resolve(strict=False))
for key, value in exp_ret.items():
exp_ret[key] = str(pathlib.Path(value).resolve(strict=False))
assert ret == exp_ret
assert (
"ERROR:Could not auto detect file location for module concurrent"
" for python version python3.7" in log_handler.messages
)
def test_get_tops_python_exclude(self):
"""
test get_tops_python when excluding modules
"""
patch_proc = patch(
"salt.utils.thin.subprocess.Popen",
self._popen(
None,
side_effect=[
(bts("tornado/__init__.py"), bts("")),
(bts("msgpack/__init__.py"), bts("")),
(bts("certifi/__init__.py"), bts("")),
(bts("singledispatch.py"), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts("distro.py"), bts("")),
],
),
)
exp_ret = copy.deepcopy(self.exp_ret)
for lib in self.exc_libs:
exp_ret.pop(lib)
patch_os = patch("os.path.exists", return_value=True)
patch_which = patch("salt.utils.path.which", return_value=True)
with patch_proc, patch_os, patch_which:
ret = thin.get_tops_python(
"python3.7", exclude=self.exc_libs, ext_py_ver=[3, 7]
)
if salt.utils.platform.is_windows():
for key, value in ret.items():
ret[key] = str(pathlib.Path(value).resolve(strict=False))
for key, value in exp_ret.items():
exp_ret[key] = str(pathlib.Path(value).resolve(strict=False))
assert ret == exp_ret
def test_pack_alternatives_exclude(self):
"""
        test pack_alternatives when mixing
        manually set dependencies with
        auto-detection of other modules.
"""
patch_proc = patch(
"salt.utils.thin.subprocess.Popen",
self._popen(
None,
side_effect=[
(bts(self.fake_libs["distro"]), bts("")),
(bts(self.fake_libs["yaml"]), bts("")),
(bts(self.fake_libs["tornado"]), bts("")),
(bts(self.fake_libs["msgpack"]), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
],
),
)
patch_os = patch("os.path.exists", return_value=True)
ext_conf = copy.deepcopy(self.ext_conf)
ext_conf["test"]["auto_detect"] = True
for lib in self.fake_libs.values():
os.makedirs(lib)
with salt.utils.files.fopen(os.path.join(lib, "__init__.py"), "w+") as fp_:
fp_.write("test")
exp_files = self.exp_files.copy()
exp_files.extend(
[
os.path.join("yaml", "__init__.py"),
os.path.join("tornado", "__init__.py"),
os.path.join("msgpack", "__init__.py"),
]
)
patch_which = patch("salt.utils.path.which", return_value=True)
with patch_os, patch_proc, patch_which:
thin._pack_alternative(ext_conf, self.digest, self.tar)
calls = self.tar.mock_calls
for _file in exp_files:
assert [x for x in calls if "{}".format(_file) in x[-2]]
def test_pack_alternatives(self):
"""
test thin._pack_alternatives
"""
with patch("salt.utils.thin.get_ext_tops", MagicMock(return_value=self.tops)):
thin._pack_alternative(self.ext_conf, self.digest, self.tar)
calls = self.tar.mock_calls
for _file in self.exp_files:
assert [x for x in calls if "{}".format(_file) in x[-2]]
assert [
x
for x in calls
if os.path.join("test", "pyall", _file) in x[-1]["arcname"]
]
def test_pack_alternatives_not_normalized(self):
"""
test thin._pack_alternatives when the path
is not normalized
"""
tops = copy.deepcopy(self.tops)
tops["test"]["dependencies"] = [self.jinja_fp + "/"]
with patch("salt.utils.thin.get_ext_tops", MagicMock(return_value=tops)):
thin._pack_alternative(self.ext_conf, self.digest, self.tar)
calls = self.tar.mock_calls
for _file in self.exp_files:
assert [x for x in calls if "{}".format(_file) in x[-2]]
assert [
x
for x in calls
if os.path.join("test", "pyall", _file) in x[-1]["arcname"]
]
def test_pack_alternatives_path_doesnot_exist(self):
"""
        test thin._pack_alternatives when the path
        does not exist. Check the error log message
        and expect that, because the directory
        does not exist, jinja2 does not get
        added to the tar
"""
bad_path = os.path.join(tempfile.gettempdir(), "doesnotexisthere")
tops = copy.deepcopy(self.tops)
tops["test"]["dependencies"] = [bad_path]
with patch("salt.utils.thin.get_ext_tops", MagicMock(return_value=tops)):
with TstSuiteLoggingHandler() as log_handler:
thin._pack_alternative(self.ext_conf, self.digest, self.tar)
msg = "ERROR:File path {} does not exist. Unable to add to salt-ssh thin".format(
bad_path
)
assert msg in log_handler.messages
calls = self.tar.mock_calls
for _file in self.exp_files:
arg = [x for x in calls if "{}".format(_file) in x[-2]]
kwargs = [
x
for x in calls
if os.path.join("test", "pyall", _file) in x[-1]["arcname"]
]
if "jinja2" in _file:
assert not arg
assert not kwargs
else:
assert arg
assert kwargs
def test_pack_alternatives_auto_detect(self):
"""
test thin._pack_alternatives when auto_detect
is enabled
"""
ext_conf = copy.deepcopy(self.ext_conf)
ext_conf["test"]["auto_detect"] = True
for lib in self.fake_libs.values():
os.makedirs(lib)
with salt.utils.files.fopen(os.path.join(lib, "__init__.py"), "w+") as fp_:
fp_.write("test")
patch_tops_py = patch(
"salt.utils.thin.get_tops_python", return_value=self.fake_libs
)
exp_files = self.exp_files.copy()
exp_files.extend(
[
os.path.join("yaml", "__init__.py"),
os.path.join("tornado", "__init__.py"),
os.path.join("msgpack", "__init__.py"),
]
)
with patch_tops_py:
thin._pack_alternative(ext_conf, self.digest, self.tar)
calls = self.tar.mock_calls
for _file in exp_files:
assert [x for x in calls if "{}".format(_file) in x[-2]]
def test_pack_alternatives_empty_dependencies(self):
"""
        test _pack_alternatives when the dependencies key is not
        set in the config.
"""
ext_conf = copy.deepcopy(self.ext_conf)
ext_conf["test"]["auto_detect"] = True
ext_conf["test"].pop("dependencies")
for lib in self.fake_libs.values():
os.makedirs(lib)
with salt.utils.files.fopen(os.path.join(lib, "__init__.py"), "w+") as fp_:
fp_.write("test")
patch_tops_py = patch(
"salt.utils.thin.get_tops_python", return_value=self.fake_libs
)
exp_files = self.exp_files.copy()
exp_files.extend(
[
os.path.join("yaml", "__init__.py"),
os.path.join("tornado", "__init__.py"),
os.path.join("msgpack", "__init__.py"),
]
)
with patch_tops_py:
thin._pack_alternative(ext_conf, self.digest, self.tar)
calls = self.tar.mock_calls
for _file in exp_files:
assert [x for x in calls if "{}".format(_file) in x[-2]]
@pytest.mark.slow_test
@skipIf(
salt.utils.platform.is_windows(), "salt-ssh does not deploy to/from windows"
)
def test_thin_dir(self):
"""
        Test the thin dir to make sure salt-call can run.
        Run salt-call via a Python interpreter in a new virtual environment to ensure
        salt-call has all needed dependencies.
"""
# This was previously an integration test and is now here, as a unit test.
# Should actually be a functional test
with VirtualEnv() as venv:
salt.utils.thin.gen_thin(str(venv.venv_dir))
thin_dir = venv.venv_dir / "thin"
thin_archive = thin_dir / "thin.tgz"
tar = tarfile.open(str(thin_archive))
tar.extractall(str(thin_dir))
tar.close()
ret = venv.run(
venv.venv_python,
str(thin_dir / "salt-call"),
"--version",
check=False,
)
assert ret.returncode == 0, ret
|
{
"content_hash": "2f39d25660da11438f9c21b40909d386",
"timestamp": "",
"source": "github",
"line_count": 1375,
"max_line_length": 105,
"avg_line_length": 36.58472727272727,
"alnum_prop": 0.5286458333333334,
"repo_name": "saltstack/salt",
"id": "cb3cf7e521ed74dad827132aec78023ac3f3f9f5",
"size": "50304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/utils/test_thin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
from common import write_fake_link
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
write_fake_link(test)
test.write('myfortran.py', r"""
import getopt
import sys
comment = ('#' + sys.argv[1]).encode()  # bytes, since the files are opened in binary mode
opts, args = getopt.getopt(sys.argv[2:], 'co:')
for opt, arg in opts:
if opt == '-o': out = arg
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
for line in infile.readlines():
    if line[:len(comment)] != comment:
        outfile.write(line)
sys.exit(0)
""")
# Test default file suffix: .f/.F for FORTRAN
test.write('SConstruct', """
env = Environment(LINK = r'%(_python_)s mylink.py',
LINKFLAGS = [],
FORTRAN = r'%(_python_)s myfortran.py fortran')
env.Program(target = 'test01', source = 'test01.f')
env.Program(target = 'test02', source = 'test02.F')
env.Program(target = 'test03', source = 'test03.for')
env.Program(target = 'test04', source = 'test04.FOR')
env.Program(target = 'test05', source = 'test05.ftn')
env.Program(target = 'test06', source = 'test06.FTN')
env.Program(target = 'test07', source = 'test07.fpp')
env.Program(target = 'test08', source = 'test08.FPP')
""" % locals())
test.write('test01.f', "This is a .f file.\n#link\n#fortran\n")
test.write('test02.F', "This is a .F file.\n#link\n#fortran\n")
test.write('test03.for', "This is a .for file.\n#link\n#fortran\n")
test.write('test04.FOR', "This is a .FOR file.\n#link\n#fortran\n")
test.write('test05.ftn', "This is a .ftn file.\n#link\n#fortran\n")
test.write('test06.FTN', "This is a .FTN file.\n#link\n#fortran\n")
test.write('test07.fpp', "This is a .fpp file.\n#link\n#fortran\n")
test.write('test08.FPP', "This is a .FPP file.\n#link\n#fortran\n")
test.run(arguments = '.', stderr = None)
test.must_match('test01' + _exe, "This is a .f file.\n")
test.must_match('test02' + _exe, "This is a .F file.\n")
test.must_match('test03' + _exe, "This is a .for file.\n")
test.must_match('test04' + _exe, "This is a .FOR file.\n")
test.must_match('test05' + _exe, "This is a .ftn file.\n")
test.must_match('test06' + _exe, "This is a .FTN file.\n")
test.must_match('test07' + _exe, "This is a .fpp file.\n")
test.must_match('test08' + _exe, "This is a .FPP file.\n")
test.pass_test()
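# A hypothetical invocation, assuming SCons's standard runtest harness at the
# repository root (not part of the original test):
#     python runtest.py test/Fortran/FORTRANFILESUFFIXES.py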
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "db03e5a8b493230bd3bb7f1f3ac9d842",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 67,
"avg_line_length": 34.614285714285714,
"alnum_prop": 0.6413536937680562,
"repo_name": "timj/scons",
"id": "92e30ead5717198222b3477233d796201ca016ea",
"size": "3525",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/Fortran/FORTRANFILESUFFIXES.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2437"
},
{
"name": "C",
"bytes": "593"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1817"
},
{
"name": "DTrace",
"bytes": "180"
},
{
"name": "HTML",
"bytes": "857084"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7393581"
},
{
"name": "Ruby",
"bytes": "10888"
},
{
"name": "Shell",
"bytes": "52480"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
}
|
from regeste.serializers import RegesteSerializer
from regeste.models import Regeste
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
class RegestDetail(APIView):
def get(self, request, pk, format=None):
        regeste = get_object_or_404(Regeste, pk=pk)
serializer = RegesteSerializer(regeste)
return Response(serializer.data)
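# A minimal wiring sketch (not part of the original file); the module path and
# URL pattern below are assumptions for illustration:
#     # regeste/urls.py
#     from django.urls import path
#     from regeste.views import RegestDetail
#     urlpatterns = [
#         path('regeste/<int:pk>/', RegestDetail.as_view(), name='regest-detail'),
#     ]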
|
{
"content_hash": "1272afe01fc3a538eb2144fbc4cdef55",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 66,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.7752577319587629,
"repo_name": "danielbaak/imperii-viz",
"id": "ada9e13d60edd634374af5a14f7431b173910475",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "regeste/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "136109"
},
{
"name": "HTML",
"bytes": "43041"
},
{
"name": "JavaScript",
"bytes": "285759"
},
{
"name": "Python",
"bytes": "33594"
},
{
"name": "Swift",
"bytes": "8940"
}
],
"symlink_target": ""
}
|
import pytest,datetime,json,os.path
from fixture.application import Application
import importlib,jsonpickle
from fixture.db import DbFixture
now_time = datetime.datetime.now()
fixture = None
target = None
@pytest.fixture
def app(request):
global fixture
browser = request.config.getoption("--browser")
    # read everything web-related from the configuration file
web_config = load_config(request.config.getoption("--target"))['web']
    # create the fixture if it does not exist yet or is no longer valid
if fixture is None or not fixture.fixture_is_valid():
fixture = Application(browser=browser,base_url=web_config['base_url'])
fixture.session.ensure_login(username=web_config['username'],password=web_config['password'])
return fixture
@pytest.fixture(scope="session", autouse=True)
def stop(request):
def final():
fixture.session.ensure_logout()
fixture.destroy()
request.addfinalizer(final)
return fixture
def pytest_addoption(parser):
parser.addoption("--browser", action="store", default="firefox")
parser.addoption("--target", action="store", default="target.json") # имя файла с опциями по умолчанию
parser.addoption("--check_ui", action="store_true")
def load_from_module(module):
return importlib.import_module("data.%s" % module).testdata
def pytest_generate_tests(metafunc):
for fixture in metafunc.fixturenames:
if fixture.startswith("data_"):
            # load test data from a module, e.g. from C:\python_traning\python_traning\data\groups.py
            testdata = load_from_module(fixture[5:])  # strip the "data_" prefix (first 5 characters)
metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
if fixture.startswith("json_"):
            # load test data from a JSON file
            testdata = load_from_json(fixture[5:])  # strip the "json_" prefix (first 5 characters)
metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
def load_from_json(file):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),"data/%s.json" % file)) as f:
return jsonpickle.decode(f.read())
@pytest.fixture(scope="session")
def db(request):
    # read everything db-related from the configuration file
db_config = load_config(request.config.getoption("--target"))['db']
    dbfixture = DbFixture(host=db_config['host'], name=db_config['name'], user=db_config['user'], password=db_config['password'])  # the DbFixture class handles the db connection
def fin():
dbfixture.destroy()
request.addfinalizer(fin)
return dbfixture
@pytest.fixture
def check_ui(request):
return request.config.getoption("--check_ui")
# loads the configuration from a file (cached in the global `target`)
def load_config(from_file):
global target
if target is None:
path_to_config = os.path.join(os.path.dirname(os.path.abspath(__file__)),from_file)
with open(path_to_config) as config_file:
target=json.load(config_file)
return target
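# For illustration only: a hypothetical target.json matching the keys read
# above ('web' and 'db'); every value is a made-up placeholder.
#     {
#         "web": {
#             "base_url": "http://localhost/addressbook/",
#             "username": "admin",
#             "password": "secret"
#         },
#         "db": {
#             "host": "127.0.0.1",
#             "name": "addressbook",
#             "user": "root",
#             "password": ""
#         }
#     }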
|
{
"content_hash": "d0dc4d33886c15bba0c9b708270e7ddc",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 152,
"avg_line_length": 37.82716049382716,
"alnum_prop": 0.6693864229765013,
"repo_name": "maximencia/python_traning",
"id": "6d20c5685acea5f5206893383cbbfda093530c91",
"size": "3368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "73712"
}
],
"symlink_target": ""
}
|
"""Dell Force10 Driver - supports DNOS9."""
from netmiko.cisco_base_connection import CiscoSSHConnection
class DellForce10SSH(CiscoSSHConnection):
"""Dell Force10 Driver - supports DNOS9."""
def save_config(
self,
cmd: str = "copy running-configuration startup-configuration",
confirm: bool = False,
confirm_response: str = "",
) -> str:
"""Saves Config"""
return super().save_config(
cmd=cmd, confirm=confirm, confirm_response=confirm_response
)
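# A minimal usage sketch (not part of the original module); the host and
# credentials below are placeholders:
#     from netmiko import ConnectHandler
#     conn = ConnectHandler(
#         device_type="dell_force10",
#         host="192.0.2.1",
#         username="admin",
#         password="secret",
#     )
#     output = conn.save_config()  # issues "copy running-configuration startup-configuration"
#     conn.disconnect()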
|
{
"content_hash": "a3a902482a5d7e96635134a8ca828554",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 71,
"avg_line_length": 31.176470588235293,
"alnum_prop": 0.6358490566037736,
"repo_name": "ktbyers/netmiko",
"id": "9ce811f66082410671c30f6eab4b029766adf701",
"size": "530",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netmiko/dell/dell_force10_ssh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "384"
},
{
"name": "Python",
"bytes": "726727"
},
{
"name": "Shell",
"bytes": "21540"
}
],
"symlink_target": ""
}
|
import sys
import os
from PySide import QtCore, QtGui
from PySide.QtGui import QFileDialog
from juma.core import app, signals
from juma.qt.TopEditorModule import TopEditorModule, QtMainWindow, SubEditorModule
##----------------------------------------------------------------##
class AssetEditor( TopEditorModule ):
_name = 'asset_editor'
_dependency = [ 'qt', 'moai' ]
def __init__(self):
super(AssetEditor, self).__init__()
self.runtime = None
def getWindowTitle( self ):
return 'Asset Editor'
def getRuntime(self):
if not self.runtime:
self.runtime = self.affirmModule('moai')
return self.runtime
def onLoad( self ):
self.mainWindow.setMenuWidget( self.getQtSupport().getSharedMenubar() )
self.findMenu( 'main/asset' ).addChild([
dict( name = 'refresh_assets', label = 'Refresh Assets', shortcut = 'ctrl+G' ),
], self )
self.findMenu('main/window').addChild([
'Mesh Exporter',
'Mesh Preview',
'----',
],
self )
return True
##----------------------------------------------------------------##
def openFile( self, fileformat, title, folder = None ):
if folder is None:
if self.getProject().getPath():
folder = self.getProject().getPath()
else:
folder = '~'
return QFileDialog.getOpenFileName(self.getMainWindow(), title, folder, fileformat)
##----------------------------------------------------------------##
def onMenu(self, node):
name = node.name
if name == 'mesh_exporter':
self.getModule('mesh_exporter').show()
elif name == 'mesh_preview':
self.getModule('mesh_preview').show()
elif name == 'refresh_assets':
self.getProject().assetLibrary.clearAssets()
runtime = self.getRuntime()
runtime.refreshAssets()
##----------------------------------------------------------------##
class AssetEditorModule( SubEditorModule ):
def getParentModuleId( self ):
return 'asset_editor'
def getSceneEditor( self ):
return self.getParentModule()
##----------------------------------------------------------------##
AssetEditor().register()
|
{
"content_hash": "ff99de6b5d01ff10a13964d61d37113d",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 91,
"avg_line_length": 30.961038961038962,
"alnum_prop": 0.49538590604026844,
"repo_name": "cloudteampro/juma-editor",
"id": "a895bdf98f4911c243379aa8300ac2d22ca0f664",
"size": "2407",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "editor/lib/juma/AssetEditor/AssetEditor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "490405"
},
{
"name": "C++",
"bytes": "15076"
},
{
"name": "Lua",
"bytes": "223218"
},
{
"name": "Makefile",
"bytes": "6088"
},
{
"name": "Objective-C",
"bytes": "25470"
},
{
"name": "Python",
"bytes": "1033362"
},
{
"name": "Shell",
"bytes": "2792"
}
],
"symlink_target": ""
}
|
"""Remove id column from xcom
Revision ID: bbf4a7ad0465
Revises: cf5dc11e79ad
Create Date: 2019-10-29 13:53:09.445943
"""
from collections import defaultdict
from alembic import op
from sqlalchemy import Column, Integer
from airflow.compat.sqlalchemy import inspect
# revision identifiers, used by Alembic.
revision = 'bbf4a7ad0465'
down_revision = 'cf5dc11e79ad'
branch_labels = None
depends_on = None
airflow_version = '2.0.0'
def get_table_constraints(conn, table_name):
"""
This function return primary and unique constraint
along with column name. Some tables like `task_instance`
is missing the primary key constraint name and the name is
auto-generated by the SQL server. so this function helps to
retrieve any primary or unique constraint name.
:param conn: sql connection object
:param table_name: table name
:return: a dictionary of ((constraint name, constraint type), column name) of table
:rtype: defaultdict(list)
"""
query = f"""SELECT tc.CONSTRAINT_NAME , tc.CONSTRAINT_TYPE, ccu.COLUMN_NAME
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS ccu ON ccu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
WHERE tc.TABLE_NAME = '{table_name}' AND
(tc.CONSTRAINT_TYPE = 'PRIMARY KEY' or UPPER(tc.CONSTRAINT_TYPE) = 'UNIQUE')
"""
result = conn.execute(query).fetchall()
constraint_dict = defaultdict(list)
for constraint, constraint_type, column in result:
constraint_dict[(constraint, constraint_type)].append(column)
return constraint_dict
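# Illustration only (hypothetical auto-generated MSSQL names): the mapping
# returned above might look like
#     {('PK__xcom__3213E83F', 'PRIMARY KEY'): ['id'],
#      ('UQ__xcom__dag_task', 'UNIQUE'): ['dag_id', 'task_id']}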
def drop_column_constraints(operator, column_name, constraint_dict):
"""
Drop a primary key or unique constraint
    :param operator: batch_alter_table for the table
    :param column_name: name of the column whose constraints should be dropped
    :param constraint_dict: a dictionary of ((constraint name, constraint type), column name) of table
"""
for constraint, columns in constraint_dict.items():
if column_name in columns:
if constraint[1].lower().startswith("primary"):
operator.drop_constraint(constraint[0], type_='primary')
elif constraint[1].lower().startswith("unique"):
operator.drop_constraint(constraint[0], type_='unique')
def create_constraints(operator, column_name, constraint_dict):
"""
Create a primary key or unique constraint
    :param operator: batch_alter_table for the table
    :param column_name: name of the column whose constraints should be recreated
    :param constraint_dict: a dictionary of ((constraint name, constraint type), column name) of table
"""
for constraint, columns in constraint_dict.items():
if column_name in columns:
if constraint[1].lower().startswith("primary"):
operator.create_primary_key(constraint_name=constraint[0], columns=columns)
elif constraint[1].lower().startswith("unique"):
operator.create_unique_constraint(constraint_name=constraint[0], columns=columns)
def upgrade():
"""Apply Remove id column from xcom"""
conn = op.get_bind()
inspector = inspect(conn)
with op.batch_alter_table('xcom') as bop:
xcom_columns = [col.get('name') for col in inspector.get_columns("xcom")]
if "id" in xcom_columns:
if conn.dialect.name == 'mssql':
constraint_dict = get_table_constraints(conn, "xcom")
drop_column_constraints(bop, 'id', constraint_dict)
bop.drop_column('id')
bop.drop_index('idx_xcom_dag_task_date')
# mssql doesn't allow primary keys with nullable columns
if conn.dialect.name != 'mssql':
bop.create_primary_key('pk_xcom', ['dag_id', 'task_id', 'key', 'execution_date'])
def downgrade():
"""Unapply Remove id column from xcom"""
conn = op.get_bind()
with op.batch_alter_table('xcom') as bop:
if conn.dialect.name != 'mssql':
bop.drop_constraint('pk_xcom', type_='primary')
bop.add_column(Column('id', Integer, nullable=False))
bop.create_primary_key('id', ['id'])
bop.create_index('idx_xcom_dag_task_date', ['dag_id', 'task_id', 'key', 'execution_date'])
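# Operational note (assumption, not part of the original migration): in an
# Airflow 2.x deployment this revision is applied with the rest of the chain
# via the standard database upgrade command:
#     airflow db upgrade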
|
{
"content_hash": "4a8ff70f4b1aef671829dbc58e1bc89b",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 103,
"avg_line_length": 38.93396226415094,
"alnum_prop": 0.668524351829416,
"repo_name": "danielvdende/incubator-airflow",
"id": "a588af5c53917ab2b2ebac2bb3a660f608bd29b9",
"size": "4915",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "airflow/migrations/versions/0060_2_0_0_remove_id_column_from_xcom.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
}
|
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import numpy as np
SEED = 5
np.random.seed(SEED)
import pandas as pd
uri = 'https://gist.githubusercontent.com/guilhermesilveira/4d1d4a16ccbf6ea4e0a64a38a24ec884/raw/afd05cb0c796d18f3f5a6537053ded308ba94bf7/car-prices.csv'
dados = pd.read_csv(uri)
"""
Tratamento dos dados
"""
dados = dados.rename(columns={
'mileage_per_year' : 'milhas_por_ano',
'model_year' : 'ano_do_modelo',
'price' : 'preco',
'sold' : 'vendido'
})
dados.vendido = dados.vendido.map({
'no' : 0,
'yes' : 1
})
from datetime import datetime
ano_atual = datetime.today().year
dados['idade_do_modelo'] = ano_atual - dados.ano_do_modelo
dados['km_por_ano'] = dados.milhas_por_ano * 1.60934
dados = dados.drop(columns = ["Unnamed: 0", "milhas_por_ano","ano_do_modelo"], axis=1) # drop the columns (axis=1; otherwise only rows would be removed)
x = dados[["preco", "idade_do_modelo","km_por_ano"]]
y = dados["vendido"]
"""
Aplica modelos dummies para termos um baseline
"""
treino_x, teste_x, treino_y, teste_y = train_test_split(x, y, test_size = 0.25, stratify = y)
print("Treinaremos com %d elementos e testaremos com %d elementos" % (len(treino_x), len(teste_x)))
from sklearn.dummy import DummyClassifier
# Guesses classes in the same proportions as found in the training set
dummy_stratified = DummyClassifier(strategy="stratified")
dummy_stratified.fit(treino_x, treino_y)
acuracia = dummy_stratified.score(teste_x, teste_y) * 100
print("A acurácia do dummy stratified foi %.2f%%" % acuracia)
# Chuta a classe mais frequente encontrada no treino
dummy_mostfrequent = DummyClassifier(strategy="most_frequent")
dummy_mostfrequent.fit(treino_x, treino_y)
acuracia = dummy_mostfrequent.score(teste_x, teste_y) * 100
print("A acurácia do dummy mostfrequent foi %.2f%%" % acuracia)
"""
Aplica o modelo LinearSVC
"""
from sklearn.svm import LinearSVC
treino_x, teste_x, treino_y, teste_y = train_test_split(x, y, test_size = 0.25, stratify = y)
print("Treinaremos com %d elementos e testaremos com %d elementos" % (len(treino_x), len(teste_x)))
modelo = LinearSVC()
modelo.fit(treino_x, treino_y)
previsoes = modelo.predict(teste_x)
acuracia = accuracy_score(teste_y, previsoes) * 100
print("A acurácia do LinearSVC foi %.2f%%" % acuracia)
"""
Reescala os dados e aplica modelo SVC
"""
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
raw_treino_x, raw_teste_x, treino_y, teste_y = train_test_split(x, y, test_size = 0.25, stratify = y)
print("Treinaremos com %d elementos e testaremos com %d elementos" % (len(treino_x), len(teste_x)))
scaler = StandardScaler()
scaler.fit(raw_treino_x)
treino_x = scaler.transform(raw_treino_x)
teste_x = scaler.transform(raw_teste_x)
modelo = SVC()
modelo.fit(treino_x, treino_y)
previsoes = modelo.predict(teste_x)
acuracia = accuracy_score(teste_y, previsoes) * 100
print("A acurácia do SVC foi %.2f%%" % acuracia)
"""
Aplica árvore de decisão utilizando o Gini
"""
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
treino_x, teste_x, treino_y, teste_y = train_test_split(x, y, test_size = 0.25, stratify = y)
print("Treinaremos com %d elementos e testaremos com %d elementos" % (len(treino_x), len(teste_x)))
modelo = DecisionTreeClassifier(max_depth=5)
modelo.fit(treino_x, treino_y)
previsoes = modelo.predict(teste_x)
acuracia = accuracy_score(teste_y, previsoes) * 100
print("A acurácia da árvore de decisão foi %.2f%%" % acuracia)
"""
Gera o grafo da árvore de decisão
"""
from sklearn.tree import export_graphviz
import graphviz
# generate the graph from the fitted model
dot_data = export_graphviz(modelo, out_file=None,
               filled=True, rounded=True, # color and shape formatting
               feature_names = x.columns, # feature names (so X[0] etc. are not shown)
               class_names = ["no", "yes"])
# render the visualization
grafico_arvore_decisao = graphviz.Source(dot_data)
grafico_arvore_decisao.render('arvore-decisao-grafo.gv', view=True)
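# A minimal extension sketch (not in the original script): k-fold
# cross-validation gives an accuracy estimate that depends less on a single
# train/test split; it reuses the estimator already imported above.
from sklearn.model_selection import cross_val_score
scores = cross_val_score(DecisionTreeClassifier(max_depth=5), x, y, cv=5)
print("Decision tree CV accuracy: %.2f%% +/- %.2f%%" % (scores.mean() * 100, scores.std() * 100))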
|
{
"content_hash": "43a36cc7e1705df72ae6e412194cada1",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 155,
"avg_line_length": 30.992481203007518,
"alnum_prop": 0.7147016011644832,
"repo_name": "wesleyegberto/courses-projects",
"id": "5810e379301c06dce3e6441eb07ed18d62d76283",
"size": "4165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ia/machine-learning-sklearn-classificacao/5_arvore_decisao.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "160809"
},
{
"name": "Dockerfile",
"bytes": "2215"
},
{
"name": "EJS",
"bytes": "4300"
},
{
"name": "HTML",
"bytes": "577164"
},
{
"name": "Java",
"bytes": "617578"
},
{
"name": "JavaScript",
"bytes": "5560654"
},
{
"name": "Jupyter Notebook",
"bytes": "176930"
},
{
"name": "Procfile",
"bytes": "117"
},
{
"name": "Puppet",
"bytes": "2496"
},
{
"name": "Python",
"bytes": "50952"
},
{
"name": "SCSS",
"bytes": "18154"
},
{
"name": "Shell",
"bytes": "2489"
},
{
"name": "TypeScript",
"bytes": "825418"
},
{
"name": "Vue",
"bytes": "3210"
},
{
"name": "XSLT",
"bytes": "1733"
}
],
"symlink_target": ""
}
|
"""Generic mapping to Select statements"""
from sqlalchemy.testing import assert_raises, assert_raises_message
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import String, Integer, select
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import mapper, Session
from sqlalchemy.testing import eq_, AssertsCompiledSQL
from sqlalchemy.testing import fixtures
# TODO: more tests mapping to selects
class SelectableNoFromsTest(fixtures.MappedTest, AssertsCompiledSQL):
@classmethod
def define_tables(cls, metadata):
Table('common', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', Integer),
Column('extra', String(45)))
@classmethod
def setup_classes(cls):
class Subset(cls.Comparable):
pass
def test_no_tables(self):
Subset = self.classes.Subset
selectable = select(["x", "y", "z"]).alias()
mapper(Subset, selectable, primary_key=[selectable.c.x])
self.assert_compile(
Session().query(Subset),
"SELECT anon_1.x, anon_1.y, anon_1.z FROM (SELECT x, y, z) AS anon_1",
use_default_dialect=True
)
def test_no_table_needs_pl(self):
Subset = self.classes.Subset
selectable = select(["x", "y", "z"]).alias()
assert_raises_message(
sa.exc.ArgumentError,
"could not assemble any primary key columns",
mapper, Subset, selectable
)
def test_no_selects(self):
Subset, common = self.classes.Subset, self.tables.common
subset_select = select([common.c.id, common.c.data])
assert_raises(sa.exc.InvalidRequestError, mapper, Subset, subset_select)
def test_basic(self):
Subset, common = self.classes.Subset, self.tables.common
subset_select = select([common.c.id, common.c.data]).alias()
subset_mapper = mapper(Subset, subset_select)
sess = Session(bind=testing.db)
sess.add(Subset(data=1))
sess.flush()
sess.expunge_all()
eq_(sess.query(Subset).all(), [Subset(data=1)])
eq_(sess.query(Subset).filter(Subset.data==1).one(), Subset(data=1))
eq_(sess.query(Subset).filter(Subset.data!=1).first(), None)
subset_select = sa.orm.class_mapper(Subset).mapped_table
eq_(sess.query(Subset).filter(subset_select.c.data==1).one(),
Subset(data=1))
|
{
"content_hash": "2036a1ab4de6342b7563452095e16ba3",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 85,
"avg_line_length": 32.98684210526316,
"alnum_prop": 0.6382130035899481,
"repo_name": "Abi1ity/uniclust2.0",
"id": "42347213e186eb74684615c5075a2a49f3922923",
"size": "2507",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "SQLAlchemy-0.9.9/test/orm/test_selectable.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "52411"
},
{
"name": "CSS",
"bytes": "69780"
},
{
"name": "Groff",
"bytes": "28"
},
{
"name": "HTML",
"bytes": "12680251"
},
{
"name": "JavaScript",
"bytes": "161113"
},
{
"name": "Makefile",
"bytes": "12078"
},
{
"name": "Python",
"bytes": "22767232"
},
{
"name": "Shell",
"bytes": "8093"
},
{
"name": "TeX",
"bytes": "17033"
}
],
"symlink_target": ""
}
|
from pandac.PandaModules import *
from direct.directnotify import DirectNotifyGlobal
from direct.showbase import PythonUtil
from otp.otpbase import OTPLocalizer
import HTTPUtil
import RemoteValueSet
import copy
accountServer = ''
accountServer = launcher.getAccountServer()
print 'TTAccount: accountServer from launcher: ', accountServer
configAccountServer = base.config.GetString('account-server', '')
if configAccountServer:
accountServer = configAccountServer
print 'TTAccount: overriding accountServer from config: ', accountServer
if not accountServer:
accountServer = 'https://toontown.go.com'
print 'TTAccount: default accountServer: ', accountServer
accountServer = URLSpec(accountServer, 1)
def getAccountServer():
return accountServer
TTAccountException = HTTPUtil.HTTPUtilException
class TTAccount:
notify = DirectNotifyGlobal.directNotify.newCategory('TTAccount')
def __init__(self, cr):
self.cr = cr
self.response = None
return
def createAccount(self, loginName, password, data):
return self.talk('create', data=self.__makeLoginDict(loginName, password, data))
def authorize(self, loginName, password):
return self.talk('play', data=self.__makeLoginDict(loginName, password))
def createBilling(self, loginName, password, data):
return self.talk('purchase', data=self.__makeLoginDict(loginName, password, data))
def setParentPassword(self, loginName, password, parentPassword):
return self.talk('setParentPassword', data=self.__makeLoginDict(loginName, password, {'parentPassword': parentPassword}))
def supportsParentPassword(self):
return 1
def authenticateParentPassword(self, loginName, password, parentPassword):
try:
errorMsg = self.talk('authenticateParentPassword', data=self.__makeLoginDict(loginName, parentPassword))
if not errorMsg:
return (1, None)
if self.response.getInt('errorCode') in (5, 72):
return (0, None)
return (0, errorMsg)
except TTAccountException, e:
return (0, str(e))
return None
def supportsAuthenticateDelete(self):
return 1
def authenticateDelete(self, loginName, password):
try:
errorMsg = self.talk('authenticateDelete', data=self.__makeLoginDict(loginName, password))
if not errorMsg:
return (1, None)
if self.response.getInt('errorCode') in (5, 72):
return (0, None)
return (0, errorMsg)
except TTAccountException, e:
return (0, str(e))
return None
def enableSecretFriends(self, loginName, password, parentPassword, enable = 1):
try:
errorMsg = self.talk('setSecretChat', data=self.__makeLoginDict(loginName, parentPassword, {'chat': base.cr.secretChatAllowed,
'secretsNeedParentPassword': base.cr.secretChatNeedsParentPassword}))
if not errorMsg:
return (1, None)
if self.response.getInt('errorCode') in (5, 72):
return (0, None)
return (0, errorMsg)
except TTAccountException, e:
return (0, str(e))
return None
def changePassword(self, loginName, password, newPassword):
return self.talk('purchase', data=self.__makeLoginDict(loginName, password, {'newPassword': newPassword}))
def requestPwdReminder(self, email = None, acctName = None):
data = {}
if email is not None:
data['email'] = email
else:
data['accountName'] = acctName
return self.talk('forgotPassword', data)
def cancelAccount(self, loginName, password):
return self.talk('cancel', data=self.__makeLoginDict(loginName, password))
def getAccountData(self, loginName, password):
errorMsg = self.talk('get', data=self.__makeLoginDict(loginName, password))
if errorMsg:
self.notify.warning('getAccountData error: %s' % errorMsg)
return errorMsg
if self.response.hasKey('errorMsg'):
self.notify.warning("error field is: '%s'" % self.response.getString('errorMsg'))
self.accountData = copy.deepcopy(self.response)
fieldNameMap = {'em': 'email',
'l1': 'addr1',
'l2': 'addr2',
'l3': 'addr3'}
dict = self.accountData.dict
for fieldName in dict.keys():
if fieldNameMap.has_key(fieldName):
dict[fieldNameMap[fieldName]] = dict[fieldName]
del dict[fieldName]
return None
def getLastErrorMsg(self, forceCustServNum = 0):
errCode = self.response.getInt('errorCode')
if errCode < 100:
msg = self.response.getString('errorMsg')
if forceCustServNum:
msg += ' ' + OTPLocalizer.TTAccountCustomerServiceHelp % self.cr.accountServerConstants.getString('customerServicePhoneNumber')
elif errCode < 200:
msg = self.response.getString('errorMsg')
msg += ' ' + OTPLocalizer.TTAccountCustomerServiceHelp % self.cr.accountServerConstants.getString('customerServicePhoneNumber')
elif errCode >= 500:
msg = OTPLocalizer.TTAccountIntractibleError
msg += ' ' + OTPLocalizer.TTAccountCallCustomerService % self.cr.accountServerConstants.getString('customerServicePhoneNumber')
else:
self.notify.warning('unknown error code class: %s: %s' % (self.response.getInt('errorCode'), self.response.getString('errorMsg')))
msg = self.response.getString('errorMsg')
msg += ' ' + OTPLocalizer.TTAccountCallCustomerService % self.cr.accountServerConstants.getString('customerServicePhoneNumber')
return msg
def __makeLoginDict(self, loginName, password, data = None):
dict = {'accountName': loginName,
'password': password}
if data:
dict.update(data)
return dict
def makeLoginDict(self, loginName, password, data = None):
return self.__makeLoginDict(loginName, password, data)
def talk(self, operation, data = {}):
self.notify.debug('TTAccount.talk()')
for key in data.keys():
data[key] = str(data[key])
if operation in ('play', 'get', 'cancel', 'authenticateParentPassword', 'authenticateDelete', 'authenticateParentPasswordNewStyle', 'authenticateDeleteNewStyle'):
pass
elif operation == 'authenticateParentUsernameAndPassword':
pass
elif operation == 'forgotPassword':
pass
elif operation == 'setParentPassword':
pass
elif operation == 'setSecretChat':
pass
elif operation == 'create':
pass
elif operation == 'purchase':
if data.has_key('newPassword'):
pass
else:
self.notify.error("Internal TTAccount error: need to add 'required data' checking for %s operation" % operation)
op2Php = {'play': 'play',
'get': 'get',
'cancel': 'cancel',
'create': 'create',
'purchase': 'purchase',
'setParentPassword': 'setSecrets',
'authenticateParentPassword': 'authenticateChat',
'authenticateDelete': 'authDelete',
'setSecretChat': 'setChat',
'forgotPassword': 'forgotPw',
'authenticateParentPasswordNewStyle': 'api/authChat',
'authenticateParentUsernameAndPassword': 'api/authParentChat',
'authenticateDeleteNewStyle': 'api/authDelete'}
newWebOperations = ('authenticateParentPasswordNewStyle', 'authenticateParentUsernameAndPassword', 'authenticateDeleteNewStyle')
url = URLSpec(getAccountServer())
if operation in newWebOperations:
url.setPath('/%s' % op2Php[operation])
else:
url.setPath('/%s.php' % op2Php[operation])
body = ''
if data.has_key('accountName'):
if operation not in newWebOperations:
url.setQuery('n=%s' % URLSpec.quote(data['accountName']))
serverFields = {'accountName': 'n',
'password': 'p',
'parentPassword': 'sp',
'newPassword': 'np',
'chat': 'chat',
'email': 'em',
'dobYear': 'doby',
'dobMonth': 'dobm',
'dobDay': 'dobd',
'ccNumber': 'ccn',
'ccMonth': 'ccm',
'ccYear': 'ccy',
'nameOnCard': 'noc',
'addr1': 'l1',
'addr2': 'l2',
'addr3': 'l3',
'city': 'city',
'state': 'state',
'country': 'country',
'zip': 'zip',
'referrer': 'ref',
'secretsNeedParentPassword': 'secretsNeedsParentPassword',
'parentPasswordNewStyle': 'pp',
'parentUsername': 'pu',
'userid': 'userid'}
ignoredFields = ('ccType',)
outBoundFields = {}
for fieldName in data.keys():
if not serverFields.has_key(fieldName):
if fieldName not in ignoredFields:
self.notify.error('unknown data field: %s' % fieldName)
else:
outBoundFields[serverFields[fieldName]] = data[fieldName]
orderedFields = outBoundFields.keys()
orderedFields.sort()
for fieldName in orderedFields:
if len(body):
body += '&'
body += '%s=%s' % (fieldName, URLSpec.quotePlus(outBoundFields[fieldName]))
self.notify.debug('url=' + url.cStr())
self.notify.debug('body=' + body)
if operation in ('get',):
expectedHeader = 'ACCOUNT INFO'
elif operation in ('play', 'cancel', 'create', 'purchase', 'setParentPassword', 'setSecretChat', 'authenticateParentPassword', 'authenticateDelete', 'forgotPassword', 'authenticateParentPasswordNewStyle', 'authenticateParentUsernameAndPassword', 'authenticateDeleteNewStyle'):
expectedHeader = 'ACCOUNT SERVER RESPONSE'
else:
self.notify.error("Internal TTAccount error: need to set expected response header for '%s' operation" % operation)
self.response = RemoteValueSet.RemoteValueSet(url, self.cr.http, body=body, expectedHeader=expectedHeader)
self.notify.debug(' self.response=' + str(self.response))
if self.response.hasKey('errorCode'):
errorCode = self.response.getInt('errorCode')
self.notify.info('account server error code: %s' % errorCode)
if errorCode == 10:
self.cr.freeTimeExpiresAt = 0
if self.response.hasKey('errorMsg'):
return self.getLastErrorMsg()
if operation in ('get', 'forgotPassword', 'authenticateDelete', 'play', 'cancel', 'create', 'purchase', 'setParentPassword', 'authenticateParentPassword', 'authenticateParentPasswordNewStyle', 'authenticateParentUsernameAndPassword', 'authenticateDeleteNewStyle'):
pass
elif operation == 'setSecretChat':
self.playToken = self.response.getString('playToken')
self.playTokenIsEncrypted = 1
else:
self.notify.error('Internal TTAccount error: need to extract useful data for %s operation' % operation)
return None
def authenticateParentUsernameAndPassword(self, loginName, password, parentUsername, parentPassword):
try:
errorMsg = self.talk('authenticateParentUsernameAndPassword', data=self.__makeLoginDict(loginName, password, {'parentUsername': parentUsername,
'parentPasswordNewStyle': parentPassword,
'userid': loginName}))
if not errorMsg:
return (1, None)
if self.response.getInt('errorCode') in (5, 72):
return (0, None)
return (0, errorMsg)
except TTAccountException, e:
return (0, str(e))
return None
|
{
"content_hash": "851e9f925333e1c59f51f6b64f074006",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 284,
"avg_line_length": 42.84642857142857,
"alnum_prop": 0.6246561640410102,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "eaac4460c56bcc52fa5a808cecf538978119970f",
"size": "11997",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "otp/login/TTAccount.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
}
|
import contextlib
import mock
import webob.exc
from neutron.api.v2 import attributes
from neutron import context
from neutron.plugins.nec.common import exceptions as nexc
from neutron.plugins.nec.extensions import packetfilter as ext_pf
from neutron.tests.unit.nec import test_nec_plugin
from neutron.tests.unit import test_db_plugin as test_plugin
NEC_PLUGIN_PF_INI = """
[DEFAULT]
api_extensions_path = neutron/plugins/nec/extensions
[OFC]
driver = neutron.tests.unit.nec.stub_ofc_driver.StubOFCDriver
enable_packet_filter = True
"""
class PacketfilterExtensionManager(ext_pf.Packetfilter):
@classmethod
def get_resources(cls):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
{'packet_filters': ext_pf.PACKET_FILTER_ATTR_MAP})
return super(PacketfilterExtensionManager, cls).get_resources()
class TestNecPluginPacketFilterBase(test_nec_plugin.NecPluginV2TestCase):
_nec_ini = NEC_PLUGIN_PF_INI
def setUp(self):
ext_mgr = PacketfilterExtensionManager()
super(TestNecPluginPacketFilterBase, self).setUp(ext_mgr=ext_mgr)
def _create_packet_filter(self, fmt, net_id, expected_res_status=None,
arg_list=None, **kwargs):
data = {'packet_filter': {'network_id': net_id,
'tenant_id': self._tenant_id,
'priority': '1',
'action': 'ALLOW'}}
for arg in (('name', 'admin_state_up', 'action', 'priority', 'in_port',
'src_mac', 'dst_mac', 'eth_type', 'src_cidr', 'dst_cidr',
'protocol', 'src_port', 'dst_port') +
(arg_list or ())):
# Arg must be present
if arg in kwargs:
data['packet_filter'][arg] = kwargs[arg]
pf_req = self.new_create_request('packet_filters', data, fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
pf_req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
pf_res = pf_req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(pf_res.status_int, expected_res_status)
return pf_res
def _make_packet_filter(self, fmt, net_id, expected_res_status=None,
**kwargs):
res = self._create_packet_filter(fmt, net_id, expected_res_status,
**kwargs)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
@contextlib.contextmanager
def packet_filter_on_network(self, network=None, fmt=None, do_delete=True,
**kwargs):
with test_plugin.optional_ctx(network, self.network) as network_to_use:
net_id = network_to_use['network']['id']
pf = self._make_packet_filter(fmt or self.fmt, net_id, **kwargs)
yield pf
if do_delete:
self._delete('packet_filters', pf['packet_filter']['id'])
@contextlib.contextmanager
def packet_filter_on_port(self, port=None, fmt=None, do_delete=True,
set_portinfo=True, **kwargs):
with test_plugin.optional_ctx(port, self.port) as port_to_use:
net_id = port_to_use['port']['network_id']
port_id = port_to_use['port']['id']
if set_portinfo:
portinfo = {'id': port_id,
'port_no': kwargs.get('port_no', 123)}
kw = {'added': [portinfo]}
if 'datapath_id' in kwargs:
kw['datapath_id'] = kwargs['datapath_id']
self.rpcapi_update_ports(**kw)
kwargs['in_port'] = port_id
pf = self._make_packet_filter(fmt or self.fmt, net_id, **kwargs)
self.assertEqual(port_id, pf['packet_filter']['in_port'])
yield pf
if do_delete:
self._delete('packet_filters', pf['packet_filter']['id'])
class TestNecPluginPacketFilter(TestNecPluginPacketFilterBase):
def setUp(self):
super(TestNecPluginPacketFilter, self).setUp()
# Remove attributes explicitly from mock object to check
# a case where there are no update_filter and validate_*.
del self.ofc.driver.update_filter
del self.ofc.driver.validate_filter_create
del self.ofc.driver.validate_filter_update
def test_list_packet_filters(self):
self._list('packet_filters')
def test_create_pf_on_network_no_ofc_creation(self):
with self.packet_filter_on_network(admin_state_up=False) as pf:
self.assertEqual(pf['packet_filter']['status'], 'DOWN')
self.assertFalse(self.ofc.create_ofc_packet_filter.called)
self.assertFalse(self.ofc.delete_ofc_packet_filter.called)
def test_create_pf_on_port_no_ofc_creation(self):
with self.packet_filter_on_port(admin_state_up=False,
set_portinfo=False) as pf:
self.assertEqual(pf['packet_filter']['status'], 'DOWN')
self.assertFalse(self.ofc.create_ofc_packet_filter.called)
self.assertFalse(self.ofc.delete_ofc_packet_filter.called)
def test_create_pf_on_network_with_ofc_creation(self):
with self.packet_filter_on_network() as pf:
pf_id = pf['packet_filter']['id']
self.assertEqual(pf['packet_filter']['status'], 'ACTIVE')
ctx = mock.ANY
pf_dict = mock.ANY
expected = [
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 1)
def test_create_pf_on_port_with_ofc_creation(self):
with self.packet_filter_on_port() as pf:
pf_id = pf['packet_filter']['id']
self.assertEqual(pf['packet_filter']['status'], 'ACTIVE')
ctx = mock.ANY
pf_dict = mock.ANY
expected = [
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 1)
def _test_create_pf_with_protocol(self, protocol, expected_eth_type):
with self.packet_filter_on_network(protocol=protocol) as pf:
pf_data = pf['packet_filter']
self.assertEqual(protocol, pf_data['protocol'])
self.assertEqual(expected_eth_type, pf_data['eth_type'])
def test_create_pf_with_protocol_tcp(self):
self._test_create_pf_with_protocol('TCP', 0x800)
def test_create_pf_with_protocol_udp(self):
self._test_create_pf_with_protocol('UDP', 0x800)
def test_create_pf_with_protocol_icmp(self):
self._test_create_pf_with_protocol('ICMP', 0x800)
def test_create_pf_with_protocol_arp(self):
self._test_create_pf_with_protocol('ARP', 0x806)
def test_create_pf_with_inconsistent_protocol_and_eth_type(self):
with self.packet_filter_on_network(protocol='TCP') as pf:
pf_data = pf['packet_filter']
pf_id = pf_data['id']
self.assertEqual('TCP', pf_data['protocol'])
self.assertEqual(0x800, pf_data['eth_type'])
data = {'packet_filter': {'eth_type': 0x806}}
self._update('packet_filters', pf_id, data,
expected_code=409)
def test_create_pf_with_invalid_priority(self):
with self.network() as net:
net_id = net['network']['id']
kwargs = {'priority': 'high'}
self._create_packet_filter(self.fmt, net_id,
webob.exc.HTTPBadRequest.code,
**kwargs)
self.assertFalse(self.ofc.create_ofc_packet_filter.called)
def test_create_pf_with_ofc_creation_failure(self):
self.ofc.set_raise_exc('create_ofc_packet_filter',
nexc.OFCException(reason='hoge'))
with self.packet_filter_on_network() as pf:
pf_id = pf['packet_filter']['id']
pf_ref = self._show('packet_filters', pf_id)
self.assertEqual(pf_ref['packet_filter']['status'], 'ERROR')
self.ofc.set_raise_exc('create_ofc_packet_filter', None)
# Retry activate packet_filter (even if there is no change).
data = {'packet_filter': {}}
self._update('packet_filters', pf_id, data)
pf_ref = self._show('packet_filters', pf_id)
self.assertEqual(pf_ref['packet_filter']['status'], 'ACTIVE')
ctx = mock.ANY
pf_dict = mock.ANY
expected = [
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 2)
def test_show_pf_on_network(self):
kwargs = {
'name': 'test-pf-net',
'admin_state_up': False,
'action': 'DENY',
'priority': '102',
'src_mac': '00:11:22:33:44:55',
'dst_mac': '66:77:88:99:aa:bb',
'eth_type': '2048',
'src_cidr': '192.168.1.0/24',
'dst_cidr': '192.168.2.0/24',
'protocol': 'TCP',
'src_port': '35001',
'dst_port': '22'
}
with self.packet_filter_on_network(**kwargs) as pf:
pf_id = pf['packet_filter']['id']
pf_ref = self._show('packet_filters', pf_id)
# convert string to int.
kwargs.update({'priority': 102, 'eth_type': 2048,
'src_port': 35001, 'dst_port': 22,
'in_port': None})
self.assertEqual(pf_id, pf_ref['packet_filter']['id'])
for key in kwargs:
self.assertEqual(kwargs[key], pf_ref['packet_filter'][key])
def test_show_pf_on_network_with_wildcards(self):
kwargs = {
'name': 'test-pf-net',
'admin_state_up': False,
'action': 'DENY',
'priority': '102',
}
with self.packet_filter_on_network(**kwargs) as pf:
pf_id = pf['packet_filter']['id']
pf_ref = self._show('packet_filters', pf_id)
# convert string to int.
kwargs.update({'priority': 102,
'in_port': None,
'src_mac': None,
'dst_mac': None,
'eth_type': None,
'src_cidr': None,
'dst_cidr': None,
'protocol': None,
'src_port': None,
'dst_port': None})
self.assertEqual(pf_id, pf_ref['packet_filter']['id'])
for key in kwargs:
self.assertEqual(kwargs[key], pf_ref['packet_filter'][key])
def test_show_pf_on_port(self):
kwargs = {
'name': 'test-pf-port',
'admin_state_up': False,
'action': 'DENY',
'priority': '0o147',
'src_mac': '00:11:22:33:44:55',
'dst_mac': '66:77:88:99:aa:bb',
'eth_type': 2048,
'src_cidr': '192.168.1.0/24',
'dst_cidr': '192.168.2.0/24',
'protocol': 'TCP',
'dst_port': '0x50'
}
with self.packet_filter_on_port(**kwargs) as pf:
pf_id = pf['packet_filter']['id']
pf_ref = self._show('packet_filters', pf_id)
# convert string to int.
kwargs.update({'priority': 103, 'eth_type': 2048,
'dst_port': 80,
# wildcard field is None in a response.
'src_port': None})
self.assertEqual(pf_id, pf_ref['packet_filter']['id'])
self.assertTrue(pf_ref['packet_filter']['in_port'])
for key in kwargs:
self.assertEqual(kwargs[key], pf_ref['packet_filter'][key])
def test_show_pf_not_found(self):
pf_id = '00000000-ffff-ffff-ffff-000000000000'
self._show('packet_filters', pf_id,
expected_code=webob.exc.HTTPNotFound.code)
def test_update_pf_on_network(self):
ctx = mock.ANY
pf_dict = mock.ANY
with self.packet_filter_on_network(admin_state_up=False) as pf:
pf_id = pf['packet_filter']['id']
self.assertFalse(self.ofc.create_ofc_packet_filter.called)
data = {'packet_filter': {'admin_state_up': True}}
self._update('packet_filters', pf_id, data)
self.ofc.create_ofc_packet_filter.assert_called_once_with(
ctx, pf_id, pf_dict)
self.assertFalse(self.ofc.delete_ofc_packet_filter.called)
data = {'packet_filter': {'admin_state_up': False}}
self._update('packet_filters', pf_id, data)
self.ofc.delete_ofc_packet_filter.assert_called_once_with(
ctx, pf_id)
def test_update_pf_on_port(self):
ctx = mock.ANY
pf_dict = mock.ANY
with self.packet_filter_on_port(admin_state_up=False) as pf:
pf_id = pf['packet_filter']['id']
self.assertFalse(self.ofc.create_ofc_packet_filter.called)
data = {'packet_filter': {'admin_state_up': True}}
self._update('packet_filters', pf_id, data)
self.ofc.create_ofc_packet_filter.assert_called_once_with(
ctx, pf_id, pf_dict)
self.assertFalse(self.ofc.delete_ofc_packet_filter.called)
data = {'packet_filter': {'admin_state_up': False}}
self._update('packet_filters', pf_id, data)
self.ofc.delete_ofc_packet_filter.assert_called_once_with(
ctx, pf_id)
def test_delete_pf_with_error_status(self):
self.ofc.set_raise_exc('create_ofc_packet_filter',
nexc.OFCException(reason='fake'))
with self.packet_filter_on_network() as pf:
pf_id = pf['packet_filter']['id']
pf_ref = self._show('packet_filters', pf_id)
self.assertEqual(pf_ref['packet_filter']['status'], 'ERROR')
ctx = mock.ANY
pf_dict = mock.ANY
expected = [
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(1, self.ofc.create_ofc_packet_filter.call_count)
self.assertEqual(0, self.ofc.delete_ofc_packet_filter.call_count)
def test_activate_pf_on_port_triggered_by_update_port(self):
ctx = mock.ANY
pf_dict = mock.ANY
with self.packet_filter_on_port(set_portinfo=False) as pf:
pf_id = pf['packet_filter']['id']
in_port_id = pf['packet_filter']['in_port']
self.assertFalse(self.ofc.create_ofc_packet_filter.called)
portinfo = {'id': in_port_id, 'port_no': 123}
kw = {'added': [portinfo]}
self.rpcapi_update_ports(**kw)
self.ofc.create_ofc_packet_filter.assert_called_once_with(
ctx, pf_id, pf_dict)
self.assertFalse(self.ofc.delete_ofc_packet_filter.called)
kw = {'removed': [in_port_id]}
self.rpcapi_update_ports(**kw)
self.ofc.delete_ofc_packet_filter.assert_called_once_with(
ctx, pf_id)
# Ensure pf was created before in_port has activated.
ctx = mock.ANY
pf_dict = mock.ANY
port_dict = mock.ANY
expected = [
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_port(ctx, in_port_id),
mock.call.create_ofc_port(ctx, in_port_id, port_dict),
mock.call.exists_ofc_port(ctx, in_port_id),
mock.call.delete_ofc_port(ctx, in_port_id, port_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 1)
def test_activate_pf_while_exists_on_ofc(self):
ctx = mock.ANY
with self.packet_filter_on_network() as pf:
pf_id = pf['packet_filter']['id']
self.ofc.set_raise_exc('delete_ofc_packet_filter',
nexc.OFCException(reason='hoge'))
# This update request will make plugin reactivate pf.
data = {'packet_filter': {'priority': 1000}}
self._update('packet_filters', pf_id, data,
expected_code=webob.exc.HTTPInternalServerError.code)
self.ofc.set_raise_exc('delete_ofc_packet_filter', None)
ctx = mock.ANY
pf_dict = mock.ANY
expected = [
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 2)
def test_deactivate_pf_with_ofc_deletion_failure(self):
ctx = mock.ANY
with self.packet_filter_on_network() as pf:
pf_id = pf['packet_filter']['id']
self.ofc.set_raise_exc('delete_ofc_packet_filter',
nexc.OFCException(reason='hoge'))
data = {'packet_filter': {'admin_state_up': False}}
self._update('packet_filters', pf_id, data,
expected_code=webob.exc.HTTPInternalServerError.code)
pf_ref = self._show('packet_filters', pf_id)
self.assertEqual(pf_ref['packet_filter']['status'], 'ERROR')
self.ofc.set_raise_exc('delete_ofc_packet_filter', None)
data = {'packet_filter': {'priority': 1000}}
self._update('packet_filters', pf_id, data)
pf_ref = self._show('packet_filters', pf_id)
self.assertEqual(pf_ref['packet_filter']['status'], 'DOWN')
ctx = mock.ANY
pf_dict = mock.ANY
expected = [
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 2)
def test_delete_pf_with_ofc_deletion_failure(self):
self.ofc.set_raise_exc('delete_ofc_packet_filter',
nexc.OFCException(reason='hoge'))
with self.packet_filter_on_network() as pf:
pf_id = pf['packet_filter']['id']
self._delete('packet_filters', pf_id,
expected_code=webob.exc.HTTPInternalServerError.code)
pf_ref = self._show('packet_filters', pf_id)
self.assertEqual(pf_ref['packet_filter']['status'], 'ERROR')
self.ofc.set_raise_exc('delete_ofc_packet_filter', None)
# Then, self._delete('packet_filters', pf_id) will success.
ctx = mock.ANY
pf_dict = mock.ANY
expected = [
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 2)
def test_auto_delete_pf_in_network_deletion(self):
with self.packet_filter_on_network(admin_state_up=False,
do_delete=False) as pf:
pf_id = pf['packet_filter']['id']
self._show('packet_filters', pf_id,
expected_code=webob.exc.HTTPNotFound.code)
def test_auto_delete_pf_in_port_deletion(self):
with self.port(do_delete=False) as port:
network = self._show('networks', port['port']['network_id'])
with self.packet_filter_on_network(network=network) as pfn:
with self.packet_filter_on_port(port=port, do_delete=False,
set_portinfo=False) as pf:
pf_id = pf['packet_filter']['id']
in_port_id = pf['packet_filter']['in_port']
self._delete('ports', in_port_id)
# Check the packet filter on the port is deleted.
self._show('packet_filters', pf_id,
expected_code=webob.exc.HTTPNotFound.code)
# Check the packet filter on the network is not deleted.
self._show('packet_filters', pfn['packet_filter']['id'])
def test_no_pf_activation_while_port_operations(self):
with self.packet_filter_on_port() as pf:
in_port_id = pf['packet_filter']['in_port']
self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 0)
data = {'port': {'admin_state_up': False}}
self._update('ports', in_port_id, data)
self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 0)
data = {'port': {'admin_state_up': True}}
self._update('ports', in_port_id, data)
self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 0)
class TestNecPluginPacketFilterWithValidate(TestNecPluginPacketFilterBase):
def setUp(self):
super(TestNecPluginPacketFilterWithValidate, self).setUp()
# Remove attributes explicitly from mock object to check
# a case where there are no update_filter.
del self.ofc.driver.update_filter
self.validate_create = self.ofc.driver.validate_filter_create
self.validate_update = self.ofc.driver.validate_filter_update
def test_create_pf_on_network(self):
with self.packet_filter_on_network() as pf:
pf_id = pf['packet_filter']['id']
self.assertEqual(pf['packet_filter']['status'], 'ACTIVE')
ctx = mock.ANY
pf_dict = mock.ANY
expected = [
mock.call.driver.validate_filter_create(ctx, pf_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict),
mock.call.exists_ofc_packet_filter(ctx, pf_id),
mock.call.delete_ofc_packet_filter(ctx, pf_id),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 1)
def test_update_pf_on_network(self):
ctx = mock.ANY
pf_dict = mock.ANY
with self.packet_filter_on_network(admin_state_up=False) as pf:
pf_id = pf['packet_filter']['id']
self.assertFalse(self.ofc.create_ofc_packet_filter.called)
data = {'packet_filter': {'admin_state_up': True}}
self._update('packet_filters', pf_id, data)
self.ofc.create_ofc_packet_filter.assert_called_once_with(
ctx, pf_id, pf_dict)
self.ofc.driver.validate_filter_update.assert_called_once_with(
ctx, data['packet_filter'])
self.assertFalse(self.ofc.delete_ofc_packet_filter.called)
data = {'packet_filter': {'admin_state_up': False}}
self._update('packet_filters', pf_id, data)
self.ofc.delete_ofc_packet_filter.assert_called_once_with(
ctx, pf_id)
self.assertEqual(
2, self.ofc.driver.validate_filter_update.call_count)
def test_create_pf_on_network_with_validation_error(self):
self.validate_create.side_effect = ext_pf.PacketFilterInvalidPriority(
min=1, max=65535)
with self.network() as net:
net_id = net['network']['id']
e = self.assertRaises(webob.exc.HTTPClientError,
self._make_packet_filter,
self.fmt, net_id, expected_res_status=400)
self.assertEqual(400, e.status_int)
def test_update_pf_on_network_with_validation_error(self):
self.validate_update.side_effect = (
ext_pf.PacketFilterUpdateNotSupported(field='priority'))
with self.packet_filter_on_network() as pf:
pf_id = pf['packet_filter']['id']
pf_ref = self._show('packet_filters', pf_id)
self.assertEqual(pf_ref['packet_filter']['status'], 'ACTIVE')
data = {'packet_filter': {'priority': 1000}}
self._update('packet_filters', pf_id, data,
expected_code=400)
class TestNecPluginPacketFilterWithFilterUpdate(TestNecPluginPacketFilterBase):
def setUp(self):
super(TestNecPluginPacketFilterWithFilterUpdate, self).setUp()
# Remove attributes explicitly from mock object to check
# a case where there are no update_filter and validate_*.
del self.ofc.driver.validate_filter_create
del self.ofc.driver.validate_filter_update
def test_update_pf_toggle_admin_state(self):
ctx = mock.ANY
pf_dict = mock.ANY
with self.packet_filter_on_network(admin_state_up=False) as pf:
pf_id = pf['packet_filter']['id']
self.assertFalse(self.ofc.create_ofc_packet_filter.called)
data = {'packet_filter': {'admin_state_up': True}}
self._update('packet_filters', pf_id, data)
self.ofc.create_ofc_packet_filter.assert_called_once_with(
ctx, pf_id, pf_dict)
self.assertFalse(self.ofc.delete_ofc_packet_filter.called)
data = {'packet_filter': {'admin_state_up': False}}
self._update('packet_filters', pf_id, data)
self.ofc.delete_ofc_packet_filter.assert_called_once_with(
ctx, pf_id)
def test_update_pf_change_field(self):
ctx = mock.ANY
with self.packet_filter_on_network(admin_state_up=True) as pf:
pf_id = pf['packet_filter']['id']
self.assertTrue(self.ofc.create_ofc_packet_filter.called)
data = {'packet_filter': {'src_mac': '12:34:56:78:9a:bc'}}
self._update('packet_filters', pf_id, data)
self.ofc.update_ofc_packet_filter.assert_called_once_with(
ctx, pf_id, data['packet_filter'])
self.assertEqual(1, self.ofc.update_ofc_packet_filter.call_count)
self.assertFalse(self.ofc.delete_ofc_packet_filter.called)
data = {'packet_filter': {'admin_state_up': False}}
self._update('packet_filters', pf_id, data)
self.ofc.delete_ofc_packet_filter.assert_called_once_with(
ctx, pf_id)
data = {'packet_filter': {'src_mac': '11:22:33:44:55:66'}}
self._update('packet_filters', pf_id, data)
self.assertEqual(1, self.ofc.update_ofc_packet_filter.call_count)
data = {'packet_filter': {'admin_state_up': True}}
self._update('packet_filters', pf_id, data)
data = {'packet_filter': {'src_mac': '66:55:44:33:22:11'}}
self._update('packet_filters', pf_id, data)
self.assertEqual(2, self.ofc.update_ofc_packet_filter.call_count)
|
{
"content_hash": "fcf496eea076d3e9a338da1e1b53d753",
"timestamp": "",
"source": "github",
"line_count": 699,
"max_line_length": 79,
"avg_line_length": 42.693848354792564,
"alnum_prop": 0.5712897496900445,
"repo_name": "aaron-fz/neutron_full_sync",
"id": "24c2f9aa14fe4a52c41bc1b2a12bf6e885cc7791",
"size": "30434",
"binary": false,
"copies": "6",
"ref": "refs/heads/full-sync",
"path": "neutron/tests/unit/nec/test_packet_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""Support for UK Met Office weather service."""
from __future__ import annotations
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription
from homeassistant.const import (
ATTR_ATTRIBUTION,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
LENGTH_KILOMETERS,
PERCENTAGE,
SPEED_MILES_PER_HOUR,
TEMP_CELSIUS,
UV_INDEX,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
ATTRIBUTION,
CONDITION_CLASSES,
DOMAIN,
METOFFICE_COORDINATES,
METOFFICE_DAILY_COORDINATOR,
METOFFICE_HOURLY_COORDINATOR,
METOFFICE_NAME,
MODE_3HOURLY_LABEL,
MODE_DAILY,
MODE_DAILY_LABEL,
VISIBILITY_CLASSES,
VISIBILITY_DISTANCE_CLASSES,
)
ATTR_LAST_UPDATE = "last_update"
ATTR_SENSOR_ID = "sensor_id"
ATTR_SITE_ID = "site_id"
ATTR_SITE_NAME = "site_name"
SENSOR_TYPES: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key="name",
name="Station Name",
device_class=None,
native_unit_of_measurement=None,
icon="mdi:label-outline",
entity_registry_enabled_default=False,
),
SensorEntityDescription(
key="weather",
name="Weather",
device_class=None,
native_unit_of_measurement=None,
icon="mdi:weather-sunny", # but will adapt to current conditions
entity_registry_enabled_default=True,
),
SensorEntityDescription(
key="temperature",
name="Temperature",
device_class=DEVICE_CLASS_TEMPERATURE,
native_unit_of_measurement=TEMP_CELSIUS,
icon=None,
entity_registry_enabled_default=True,
),
SensorEntityDescription(
key="feels_like_temperature",
name="Feels Like Temperature",
device_class=DEVICE_CLASS_TEMPERATURE,
native_unit_of_measurement=TEMP_CELSIUS,
icon=None,
entity_registry_enabled_default=False,
),
SensorEntityDescription(
key="wind_speed",
name="Wind Speed",
device_class=None,
native_unit_of_measurement=SPEED_MILES_PER_HOUR,
icon="mdi:weather-windy",
entity_registry_enabled_default=True,
),
SensorEntityDescription(
key="wind_direction",
name="Wind Direction",
device_class=None,
native_unit_of_measurement=None,
icon="mdi:compass-outline",
entity_registry_enabled_default=False,
),
SensorEntityDescription(
key="wind_gust",
name="Wind Gust",
device_class=None,
native_unit_of_measurement=SPEED_MILES_PER_HOUR,
icon="mdi:weather-windy",
entity_registry_enabled_default=False,
),
SensorEntityDescription(
key="visibility",
name="Visibility",
device_class=None,
native_unit_of_measurement=None,
icon="mdi:eye",
entity_registry_enabled_default=False,
),
SensorEntityDescription(
key="visibility_distance",
name="Visibility Distance",
device_class=None,
native_unit_of_measurement=LENGTH_KILOMETERS,
icon="mdi:eye",
entity_registry_enabled_default=False,
),
SensorEntityDescription(
key="uv",
name="UV Index",
device_class=None,
native_unit_of_measurement=UV_INDEX,
icon="mdi:weather-sunny-alert",
entity_registry_enabled_default=True,
),
SensorEntityDescription(
key="precipitation",
name="Probability of Precipitation",
device_class=None,
native_unit_of_measurement=PERCENTAGE,
icon="mdi:weather-rainy",
entity_registry_enabled_default=True,
),
SensorEntityDescription(
key="humidity",
name="Humidity",
device_class=DEVICE_CLASS_HUMIDITY,
native_unit_of_measurement=PERCENTAGE,
icon=None,
entity_registry_enabled_default=False,
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigType, async_add_entities
) -> None:
"""Set up the Met Office weather sensor platform."""
hass_data = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
[
MetOfficeCurrentSensor(
hass_data[METOFFICE_HOURLY_COORDINATOR],
hass_data,
True,
description,
)
for description in SENSOR_TYPES
]
+ [
MetOfficeCurrentSensor(
hass_data[METOFFICE_DAILY_COORDINATOR],
hass_data,
False,
description,
)
for description in SENSOR_TYPES
],
False,
)
class MetOfficeCurrentSensor(CoordinatorEntity, SensorEntity):
"""Implementation of a Met Office current weather condition sensor."""
def __init__(
self,
coordinator,
hass_data,
use_3hourly,
description: SensorEntityDescription,
):
"""Initialize the sensor."""
super().__init__(coordinator)
self.entity_description = description
mode_label = MODE_3HOURLY_LABEL if use_3hourly else MODE_DAILY_LABEL
self._attr_name = f"{hass_data[METOFFICE_NAME]} {description.name} {mode_label}"
self._attr_unique_id = f"{description.name}_{hass_data[METOFFICE_COORDINATES]}"
if not use_3hourly:
self._attr_unique_id = f"{self._attr_unique_id}_{MODE_DAILY}"
self.use_3hourly = use_3hourly
@property
def native_value(self):
"""Return the state of the sensor."""
value = None
if self.entity_description.key == "visibility_distance" and hasattr(
self.coordinator.data.now, "visibility"
):
value = VISIBILITY_DISTANCE_CLASSES.get(
self.coordinator.data.now.visibility.value
)
if self.entity_description.key == "visibility" and hasattr(
self.coordinator.data.now, "visibility"
):
value = VISIBILITY_CLASSES.get(self.coordinator.data.now.visibility.value)
elif self.entity_description.key == "weather" and hasattr(
self.coordinator.data.now, self.entity_description.key
):
value = [
k
for k, v in CONDITION_CLASSES.items()
if self.coordinator.data.now.weather.value in v
][0]
elif hasattr(self.coordinator.data.now, self.entity_description.key):
value = getattr(self.coordinator.data.now, self.entity_description.key)
if hasattr(value, "value"):
value = value.value
return value
@property
def icon(self):
"""Return the icon for the entity card."""
value = self.entity_description.icon
if self.entity_description.key == "weather":
value = self.state
if value is None:
value = "sunny"
elif value == "partlycloudy":
value = "partly-cloudy"
value = f"mdi:weather-{value}"
return value
@property
def extra_state_attributes(self):
"""Return the state attributes of the device."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_LAST_UPDATE: self.coordinator.data.now.date,
ATTR_SENSOR_ID: self.entity_description.key,
ATTR_SITE_ID: self.coordinator.data.site.id,
ATTR_SITE_NAME: self.coordinator.data.site.name,
}
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return (
self.entity_description.entity_registry_enabled_default and self.use_3hourly
)
|
{
"content_hash": "cdf6f4a86e465ab2842888dd5b31c3f7",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 93,
"avg_line_length": 31.019607843137255,
"alnum_prop": 0.6127686472819216,
"repo_name": "sander76/home-assistant",
"id": "4919e36bd5888cd73891195c85651a0ab2da447b",
"size": "7910",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/metoffice/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "36548768"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
import collections
import random
from representations.embedding import Embedding, SVDEmbedding
class SequentialEmbedding:
def __init__(self, year_embeds, **kwargs):
self.embeds = year_embeds
@classmethod
def load(cls, path, years, **kwargs):
embeds = collections.OrderedDict()
for year in years:
embeds[year] = Embedding.load(path + "/" + str(year), **kwargs)
return SequentialEmbedding(embeds)
def get_embed(self, year):
return self.embeds[year]
def get_subembeds(self, words, normalize=True):
embeds = collections.OrderedDict()
for year, embed in self.embeds.iteritems():
embeds[year] = embed.get_subembed(words, normalize=normalize)
return SequentialEmbedding(embeds)
def get_time_sims(self, word1, word2):
time_sims = collections.OrderedDict()
for year, embed in self.embeds.iteritems():
time_sims[year] = embed.similarity(word1, word2)
return time_sims
def get_seq_neighbour_set(self, word, n=3):
neighbour_set = set([])
for embed in self.embeds.itervalues():
closest = embed.closest(word, n=n)
for _, neighbour in closest:
neighbour_set.add(neighbour)
return neighbour_set
def get_seq_closest(self, word, start_year, num_years=10, n=10):
closest = collections.defaultdict(float)
for year in range(start_year, start_year + num_years):
embed = self.embeds[year]
year_closest = embed.closest(word, n=n*10)
            for score, neigh in year_closest:
closest[neigh] += score
return sorted(closest, key = lambda word : closest[word], reverse=True)[0:n]
    def get_word_subembeds(self, word, n=3, num_rand=None, word_list=None):
        if word_list is None:
            word_set = self.get_seq_neighbour_set(word, n=n)
            if num_rand is not None:
                word_set = word_set.union(set(random.sample(self.embeds.values()[-1].iw, num_rand)))
            word_list = list(word_set)
year_subembeds = collections.OrderedDict()
for year,embed in self.embeds.iteritems():
year_subembeds[year] = embed.get_subembed(word_list)
        return SequentialEmbedding(year_subembeds)
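# Illustrative usage sketch (the path and year range below are hypothetical,
# not part of this module): load one embedding per decade and trace how the
# similarity between two words drifts over time.
#
#   seq = SequentialEmbedding.load("embeddings/sgns", range(1900, 2000, 10))
#   for year, sim in seq.get_time_sims("broadcast", "transmit").iteritems():
#       print year, sim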
class SequentialSVDEmbedding(SequentialEmbedding):
def __init__(self, path, years, **kwargs):
self.embeds = collections.OrderedDict()
for year in years:
self.embeds[year] = SVDEmbedding(path + "/" + str(year), **kwargs)
|
{
"content_hash": "bc910245b0da95782a9da42211821410",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 100,
"avg_line_length": 38.10294117647059,
"alnum_prop": 0.6256271709764569,
"repo_name": "ruhulsbu/WEAT4TwitterGroups",
"id": "733bd01626a6a167cfb685270ae004ff3bea88d7",
"size": "2591",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "histwords/representations/sequentialembedding.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "802"
},
{
"name": "Makefile",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "248596"
},
{
"name": "Shell",
"bytes": "8093"
}
],
"symlink_target": ""
}
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import sys
import time
import os
import platform
import subprocess
from socket import socket
CARBON_SERVER = '127.0.0.1'
CARBON_PORT = 2003
delay = 60
if len(sys.argv) > 1:
delay = int( sys.argv[1] )
def get_loadavg():
# For more details, "man proc" and "man uptime"
if platform.system() == "Linux":
return open('/proc/loadavg').read().strip().split()[:3]
else:
command = "uptime"
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
os.waitpid(process.pid, 0)
output = process.stdout.read().replace(',', ' ').strip().split()
length = len(output)
return output[length - 3:length]
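# Each report sent to Carbon's plaintext listener is a single line of the
# form "<metric path> <value> <unix timestamp>". The loop below builds
# exactly such lines, e.g. (illustrative values):
#
#   system.loadavg_1min 0.42 1300000000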
sock = socket()
try:
sock.connect( (CARBON_SERVER,CARBON_PORT) )
except Exception:
print("Couldn't connect to %(server)s on port %(port)d, is carbon-agent.py running?" % {
'server': CARBON_SERVER, 'port': CARBON_PORT
})
sys.exit(1)
while True:
now = int( time.time() )
lines = []
#We're gonna report all three loadavg values
loadavg = get_loadavg()
lines.append("system.loadavg_1min %s %d" % (loadavg[0],now))
lines.append("system.loadavg_5min %s %d" % (loadavg[1],now))
lines.append("system.loadavg_15min %s %d" % (loadavg[2],now))
message = '\n'.join(lines) + '\n' #all lines must end in a newline
print("sending message\n")
print('-' * 80)
print(message)
print()
sock.sendall(message)
time.sleep(delay)
|
{
"content_hash": "661d4a57fb5c0ab8efef08f52a86281f",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 92,
"avg_line_length": 30.71212121212121,
"alnum_prop": 0.6684755796743956,
"repo_name": "deniszh/graphite-web",
"id": "ce1659565ffd690ac02bf84382aa3511dbed383f",
"size": "2045",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "examples/example-client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "150191"
},
{
"name": "HTML",
"bytes": "21435"
},
{
"name": "JavaScript",
"bytes": "1691623"
},
{
"name": "Perl",
"bytes": "857"
},
{
"name": "Python",
"bytes": "1344125"
},
{
"name": "Ruby",
"bytes": "1950"
},
{
"name": "Shell",
"bytes": "1112"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import json
import ConfigParser
import random
from optparse import OptionParser
def configuring ():
print("Configuring Wizard: ")
configer = ConfigParser.RawConfigParser()
try:
config_file = open(config_path, 'w')
except IOError as e:
print("Creating the config file was unsuccesful")
raise
valid = False
while not valid:
wall_path = raw_input("Where are your wallpapers located : ")
if not os.path.isdir(os.path.expandvars(wall_path)):
ans = raw_input("The directory entered is not valid\nR to rentry, any to exit")
if ans != 'R':
config_file.close()
os.remove(config_path)
exit(0)
else:
valid = True
configer.add_section("PATHS")
configer.set("PATHS", "wall_path", wall_path)
json_path = raw_input("Where do you want your json tag structure to be located ? (enter for default) : ")
if not json_path:
json_path = wall_path + "/tags.json"
configer.set("PATHS", "json_path", json_path)
#Check whether the file exists
keep = 'N'
if os.path.isfile(json_path):
keep = raw_input("An index file exists already at " + json_path + ".\nDo you want to keep it ? (Y/N)")
if keep != 'Y': #Create a new Json file.
with open(json_path, "w") as json_file:
json_file.write("{}")
configer.add_section("COMMANDS")
configer.set("COMMANDS", "cmd_set_wp", "wal")
configer.write(config_file)
print("The configuration is now complete")
config_file.close()
exit(0)
def listing (a, b, c, d):
#Open the json file
with open(os.path.expandvars(json_path), "r") as data_json:
tags_dict = json.load(data_json)
print(json.dumps(tags_dict, indent=4, sort_keys=True))
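# The json index maps each tag to the list of wallpapers carrying it, e.g.
# (illustrative content):
#
#   {
#       "nature": ["forest.jpg", "lake.png"],
#       "dark": ["forest.jpg"]
#   }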
#look for config file
config_path = os.environ['HOME'] + "/.config/walchoser"
if os.path.isfile(config_path) :
try:
config_file = open(config_path, "r")
except:
exit(1)
configer = ConfigParser.RawConfigParser()
configer.readfp(config_file)
global json_path
json_path = configer.get("PATHS", "json_path")
cmd_set_wp = configer.get("COMMANDS", "cmd_set_wp")
wall_path = configer.get("PATHS", "wall_path")
with open(os.path.expandvars(json_path), "r") as data_json:
tags_dict = json.load(data_json)
usage = "usage: %prog [Options] args"
parser = OptionParser(usage)
parser.add_option("-a", "--add", dest="path_tags", help="-a [PATH] [TAGS] \n Reference a wallpaper at path relative to the set wallpapers directory. The tags must be separated by comas (CSV).", nargs=2)
parser.add_option("-l", "--list", action="callback", callback=listing, help="List your json index file.", nargs=0)
parser.add_option("-c", "--config", action="callback", callback=configuring, help="Calls the config wizard.")
parser.add_option("-p", "--print", action="store_true", dest="printing", default=False, help="-p [TAGS] To use with tags to print what wallpapers match.")
parser.add_option("--get-wp", action="store_true", dest="wall_path", default=False)
parser.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False)
parser.add_option("-t", "--get-tags", dest="wp_path", nargs=1, help="Output the tags of a given wallpaper")
(option, args) = parser.parse_args()
if option.path_tags:
user_tag_list = option.path_tags[1].split(',')
filename = option.path_tags[0]
if not os.path.isfile(os.path.expandvars(wall_path + "/" + filename)):
print("your filename is incorrect, exiting")
exit(1)
for tag in user_tag_list:
toadd = []
toadd.append(filename)
if tag in tags_dict:
toadd.extend(tags_dict[tag])
toadd = list(set(toadd)) #remove potential duplicate reference
tags_dict.update({tag:toadd})
data_json = open(os.path.expandvars(json_path), "w")
json.dump(tags_dict, data_json)
data_json.close()
#Change the wallpaper according to tags given
    # optparse's Values object is always truthy, so "not option" never fires;
    # check the individual options instead.
    if not args and not (option.path_tags or option.printing or
                         option.wall_path or option.wp_path):
        os.system(cmd_set_wp + " -t -i" + wall_path)
else:
#In case of the wp printing option, useful for the albert extension
if option.wall_path:
print(os.path.expandvars(wall_path))
exit(0)
#in case of tags wanted
elif option.wp_path:
            #NOT efficient, but efficiency is not compulsory here
tags = {k: v for k, v in tags_dict.items() if option.wp_path in v}
print(json.dumps(tags.keys()))
exit(0)
if args and args[0] in tags_dict:
matching = set(tags_dict[args[0]])
for tag in args:
if tag in tags_dict:
matching = matching.intersection(set(tags_dict[tag]))
#In case of printing option
if option.printing:
if not option.quiet:
print("Those Wallpapers are matching your tags:")
print(json.dumps(list(matching), indent=4, sort_keys=True))
exit(0)
        #Else a wallpaper is set
else:
if not option.quiet:
print("Chosing a wallpaper at random among matching")
chosen_wp = random.choice(list(matching))
os.system(cmd_set_wp + " -t -i" + wall_path + "/" + chosen_wp)
elif args:
if not option.quiet:
print("No wallpaper is matching your tags, exiting.")
exit(0)
else:
ans = raw_input("Your config file doesn't exist yet, do you want to create it (Y/N) ? ")
if ans == 'Y':
configuring()
else:
print("aborting")
exit(0)
exit(0)
|
{
"content_hash": "a8bd08a4f5cda8c5d8435a3194b7b049",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 203,
"avg_line_length": 33.57236842105263,
"alnum_prop": 0.6807760141093474,
"repo_name": "Emilien-P/scripts-dump",
"id": "2f6e10d84321a572841a3f59ce14af3225a0ae84",
"size": "5127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "walchoser.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7438"
},
{
"name": "Shell",
"bytes": "275"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
import asyncio
from flowirc.log import log
class MiddleWareBase:
_listeners = defaultdict(list)
def on(self, event, callback):
self.add_listener(event, callback, event)
@asyncio.coroutine
def trigger(self, event):
log.debug("Received event: %s", event)
listeners = self._listeners[event]
for future in asyncio.as_completed(
                [callback(event) for callback, event_type in
listeners if self.matches(event, event_type)]):
result = yield from future
self.dispatch(result)
def add_listener(self, event, callback, callback_argument):
self._listeners[event].append((asyncio.coroutine(callback),
callback_argument))
def dispatch(self, data):
raise NotImplementedError()
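# Minimal usage sketch (hypothetical subclass): ``trigger`` calls
# ``self.matches(event, event_type)``, so a concrete middleware must define
# ``matches`` as well as ``dispatch``.
#
#   class EchoMiddleWare(MiddleWareBase):
#       def matches(self, event, event_type):
#           return event == event_type
#
#       def dispatch(self, data):
#           log.debug("dispatched: %s", data)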
|
{
"content_hash": "f13cd469810a03b93518527e9e3da0a5",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 67,
"avg_line_length": 31.107142857142858,
"alnum_prop": 0.6291618828932262,
"repo_name": "lndbrg/flowirc",
"id": "238eebb72145b39a95deb88d7c089ee26fd00e8f",
"size": "871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flowirc/middleware/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27304"
}
],
"symlink_target": ""
}
|
"""Tests for contrib.resampler.python.ops.resampler_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import resampler
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def _bilinearly_interpolate(data, x, y):
"""Performs bilinenar interpolation of grid data at user defined coordinates.
This interpolation function:
a) implicitly pads the input data with 0s.
b) returns 0 when sampling outside the (padded) image.
The effect is that the sampled signal smoothly goes to 0 outside the original
input domain, rather than producing a jump discontinuity at the image
boundaries.
Args:
data: numpy array of shape `[data_height, data_width]` containing data
samples assumed to be defined at the corresponding pixel coordinates.
x: numpy array of shape `[warp_height, warp_width]` containing x coordinates
at which interpolation will be performed.
y: numpy array of shape `[warp_height, warp_width]` containing y coordinates
at which interpolation will be performed.
Returns:
Numpy array of shape `[warp_height, warp_width]` containing interpolated
values.
"""
shape = x.shape
x = np.asarray(x) + 1
y = np.asarray(y) + 1
data = np.lib.pad(data, 1, "constant", constant_values=0)
x_0 = np.floor(x).astype(int)
x_1 = x_0 + 1
y_0 = np.floor(y).astype(int)
y_1 = y_0 + 1
x_0 = np.clip(x_0, 0, data.shape[1] - 1)
x_1 = np.clip(x_1, 0, data.shape[1] - 1)
y_0 = np.clip(y_0, 0, data.shape[0] - 1)
y_1 = np.clip(y_1, 0, data.shape[0] - 1)
i_a = data[y_0, x_0]
i_b = data[y_1, x_0]
i_c = data[y_0, x_1]
i_d = data[y_1, x_1]
w_a = (x_1 - x) * (y_1 - y)
w_b = (x_1 - x) * (y - y_0)
w_c = (x - x_0) * (y_1 - y)
w_d = (x - x_0) * (y - y_0)
samples = (w_a * i_a + w_b * i_b + w_c * i_c + w_d * i_d)
  samples = samples.reshape(shape)
return samples
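# Quick illustrative check (not part of the original tests): with a 2x2 grid,
# sampling exactly at an integer grid point reproduces the data value, while
# coordinates far outside the (padded) grid yield 0.
#
#   data = np.array([[0., 1.], [2., 3.]])
#   _bilinearly_interpolate(data, np.array([[1.]]), np.array([[0.]]))  # -> [[1.]]
#   _bilinearly_interpolate(data, np.array([[5.]]), np.array([[5.]]))  # -> [[0.]]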
def _make_warp(batch_size, warp_height, warp_width, dtype):
"""Creates batch of warping coordinates."""
x, y = np.meshgrid(np.linspace(0, warp_width - 1, warp_width),
np.linspace(0, warp_height - 1, warp_height))
warp = np.concatenate((x.reshape([warp_height, warp_width, 1]),
y.reshape([warp_height, warp_width, 1])), 2)
warp = np.tile(warp.reshape([1, warp_height, warp_width, 2]),
[batch_size, 1, 1, 1])
warp += np.random.randn(*warp.shape)
return warp.astype(dtype)
class ResamplerTest(test.TestCase):
def test_op_forward_pass_gpu_float32(self):
self._test_op_forward_pass(True, dtypes.float32, 1e-4)
def test_op_forward_pass_gpu_float64(self):
self._test_op_forward_pass(True, dtypes.float64, 1e-5)
def test_op_forward_pass_cpu_float16(self):
self._test_op_forward_pass(False, dtypes.float16, 1e-2)
def test_op_forward_pass_cpu_float32(self):
self._test_op_forward_pass(False, dtypes.float32, 1e-4)
def test_op_forward_pass_cpu_float64(self):
self._test_op_forward_pass(False, dtypes.float64, 1e-5)
def test_op_backward_pass_gpu_float32(self):
self._test_op_backward_pass(True, dtypes.float32, 1e-3)
def test_op_backward_pass_cpu_float16(self):
self._test_op_backward_pass(False, dtypes.float16, 1e-3)
def test_op_backward_pass_cpu_float32(self):
self._test_op_backward_pass(False, dtypes.float32, 1e-4)
def test_op_backward_pass_cpu_float64(self):
self._test_op_backward_pass(False, dtypes.float64, 1e-6)
def _test_op_forward_pass(self, on_gpu, dtype, tol):
np.random.seed(0)
data_width = 7
data_height = 9
data_channels = 5
warp_width = 4
warp_height = 8
batch_size = 10
warp = _make_warp(batch_size, warp_height, warp_width, dtype.as_numpy_dtype)
data_shape = (batch_size, data_height, data_width, data_channels)
data = np.random.rand(*data_shape).astype(dtype.as_numpy_dtype)
with self.test_session(use_gpu=on_gpu, force_gpu=False) as sess:
data_ph = array_ops.placeholder(dtype, shape=(None,) + data.shape[1:])
warp_ph = array_ops.placeholder(dtype, shape=(None,) + warp.shape[1:])
outputs = resampler.resampler(data=data_ph, warp=warp_ph)
self.assertEqual(outputs.get_shape().as_list(),
[None, warp_height, warp_width, data_channels])
out = sess.run(outputs, feed_dict={data_ph: data, warp_ph: warp})
# Generate reference output via bilinear interpolation in numpy
reference_output = np.zeros_like(out)
for batch in xrange(batch_size):
for c in xrange(data_channels):
reference_output[batch, :, :, c] = _bilinearly_interpolate(
data[batch, :, :, c],
warp[batch, :, :, 0],
warp[batch, :, :, 1])
self.assertAllClose(out, reference_output, rtol=tol, atol=tol)
def _test_op_backward_pass(self, on_gpu, dtype, tol):
np.random.seed(13)
data_width = 5
data_height = 4
data_channels = 3
warp_width = 2
warp_height = 6
batch_size = 10
warp = _make_warp(batch_size, warp_height, warp_width, dtype.as_numpy_dtype)
data_shape = (batch_size, data_height, data_width, data_channels)
data = np.random.rand(*data_shape).astype(dtype.as_numpy_dtype)
with self.test_session(use_gpu=on_gpu, force_gpu=False):
data_tensor = constant_op.constant(data)
warp_tensor = constant_op.constant(warp)
output_tensor = resampler.resampler(data=data_tensor, warp=warp_tensor)
grads = test.compute_gradient([data_tensor, warp_tensor], [
data_tensor.get_shape().as_list(),
warp_tensor.get_shape().as_list()
], output_tensor, output_tensor.get_shape().as_list(), [data, warp])
if not on_gpu:
# On CPU we perform numerical differentiation at the best available
      # precision, and compare against that. This is necessary for the
      # test to pass for float16.
data_tensor_64 = constant_op.constant(data, dtype=dtypes.float64)
warp_tensor_64 = constant_op.constant(warp, dtype=dtypes.float64)
output_tensor_64 = resampler.resampler(data=data_tensor_64,
warp=warp_tensor_64)
grads_64 = test.compute_gradient([data_tensor_64, warp_tensor_64], [
data_tensor.get_shape().as_list(),
warp_tensor.get_shape().as_list()
], output_tensor_64, output_tensor.get_shape().as_list(), [data, warp])
for g, g_64 in zip(grads, grads_64):
self.assertLess(np.fabs(g[0] - g_64[1]).max(), tol)
else:
for g in grads:
self.assertLess(np.fabs(g[0] - g[1]).max(), tol)
def test_op_errors(self):
data_width = 7
data_height = 9
data_depth = 3
data_channels = 5
warp_width = 4
warp_height = 8
batch_size = 10
# Input data shape is not defined over a 2D grid, i.e. its shape is not like
# (batch_size, data_height, data_width, data_channels).
with self.test_session() as sess:
data_shape = (batch_size, data_height, data_width, data_depth,
data_channels)
data = np.zeros(data_shape)
warp_shape = (batch_size, warp_height, warp_width, 2)
warp = np.zeros(warp_shape)
outputs = resampler.resampler(constant_op.constant(data),
constant_op.constant(warp))
with self.assertRaisesRegexp(errors_impl.UnimplementedError,
"Only bilinear interpolation is currently "
"supported."):
sess.run(outputs)
# Warp tensor must be at least a matrix, with shape [batch_size, 2].
with self.test_session() as sess:
data_shape = (batch_size, data_height, data_width, data_channels)
data = np.zeros(data_shape)
warp_shape = (batch_size,)
warp = np.zeros(warp_shape)
outputs = resampler.resampler(constant_op.constant(data),
constant_op.constant(warp))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"warp should be at least a matrix"):
sess.run(outputs)
# The batch size of the data and warp tensors must be the same.
with self.test_session() as sess:
data_shape = (batch_size, data_height, data_width, data_channels)
data = np.zeros(data_shape)
warp_shape = (batch_size+1, warp_height, warp_width, 2)
warp = np.zeros(warp_shape)
outputs = resampler.resampler(constant_op.constant(data),
constant_op.constant(warp))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Batch size of data and warp tensor"):
sess.run(outputs)
    # The warp tensor must contain 2D coordinates, i.e. the last dimension of
    # its shape must be 2.
with self.test_session() as sess:
data_shape = (batch_size, data_height, data_width, data_channels)
data = np.zeros(data_shape)
warp_shape = (batch_size, warp_height, warp_width, 3)
warp = np.zeros(warp_shape)
outputs = resampler.resampler(constant_op.constant(data),
constant_op.constant(warp))
with self.assertRaisesRegexp(errors_impl.UnimplementedError,
"Only bilinear interpolation is supported, "
"warping"):
sess.run(outputs)
if __name__ == "__main__":
test.main()
|
{
"content_hash": "ec3019016d77ac818fe08bb24ca6fe1b",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 80,
"avg_line_length": 38.41732283464567,
"alnum_prop": 0.6295347407255585,
"repo_name": "xuleiboy1234/autoTitle",
"id": "9aa1e0562844cf8fed0eadb038599f02d94d0cd6",
"size": "10477",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tensorflow/tensorflow/contrib/resampler/python/ops/resampler_ops_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "196965"
},
{
"name": "C++",
"bytes": "28230132"
},
{
"name": "CMake",
"bytes": "624472"
},
{
"name": "Go",
"bytes": "941453"
},
{
"name": "Java",
"bytes": "380704"
},
{
"name": "Jupyter Notebook",
"bytes": "1833674"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37232"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "5350"
},
{
"name": "Perl 6",
"bytes": "1365"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "25123920"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "358280"
}
],
"symlink_target": ""
}
|
import flask
import jinja2
def repo_visibility(repo):
"""Convert repo to its visibility attribute as string
:param repo: Repository to show its visibility as string
:type repo: ``repocribro.models.Repository``
:return: Text representation of repo visibility
:rtype: str
"""
if repo.is_public:
return 'Public'
if repo.is_hidden:
return 'Hidden'
if repo.is_private:
return 'Private'
def repo_link(repo, show_secret=False):
"""Convert repo to link to the detail page of that repo
:param repo: Repository to show its link
:type repo: ``repocribro.models.Repository``
:param show_secret: If secret links should be returned
:type show_secret: bool
:return: HTML code with link to repository detail page
:rtype: ``jinja2.Markup``
"""
url = None
if repo.is_public:
url = flask.url_for('core.repo_detail',
login=repo.owner_login, reponame=repo.name)
elif repo.is_hidden and show_secret:
url = flask.url_for('core.repo_detail_hidden', secret=repo.secret)
elif repo.is_private and show_secret:
url = flask.url_for('core.repo_detail',
login=repo.owner_login, reponame=repo.name)
if url is None:
return 'Top secret'
return jinja2.Markup('<a href="{0}">{0}</a>'.format(url))
def repo_languages(repo):
"""Filter for languages to get rid of unrecognized as None
:param repo: Repository to show its languages
:type repo: ``repocribro.models.Repository``
:return: string representation of languages
:rtype: str
"""
if not isinstance(repo.languages, str):
return 'unrecognized'
return repo.languages
def repo_topics(repo):
"""Filter for topics as list of badges
:param repo: Repository to show its topics
:type repo: ``repocribro.models.Repository``
:return: HTML code with topics as badges
:rtype: ``jinja2.Markup``
"""
if repo.topics is None:
return ''
ds_topics = repo.topics.split(' ')
return jinja2.Markup(' '.join([
'<span class ="badge badge-secondary">{}</span>'.format(topic)
for topic in ds_topics
]))
def gh_user_link(user):
"""Convert user/org to its GitHub URL
    :param user: User to show its GitHub URL
    :type user: ``repocribro.models.RepositoryOwner``
:return: HTML code with hyperlink to GitHub user/org page
:rtype: ``jinja2.Markup``
"""
return jinja2.Markup(
'<a href="https://github.com/{0}" target="_blank">{0}</a>'.format(
user.login
)
)
def gh_push_url(push):
"""Convert push to compare GitHub URL
:param push: Push to be converted
:type push: ``repocribro.models.Push``
:return: URL to GitHub compare page
:rtype: str
"""
before = push.before[:10]
after = push.after[:10]
return 'https://github.com/{0}/compare/{1}...{2}'.format(
push.repository.full_name, before, after
)
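# e.g. a push from "0123456789abcdef" to "fedcba9876543210" on repository
# "octocat/hello-world" yields (values illustrative):
# https://github.com/octocat/hello-world/compare/0123456789...fedcba9876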
def gh_repo_link(repo):
"""Convert repo to its GitHub URL
:param repo: Repository to show its GitHub URL
:type repo: ``repocribro.models.Repository``
:return: HTML code with hyperlink to GitHub repo page
:rtype: ``jinja2.Markup``
"""
return jinja2.Markup(
'<a href="https://github.com/{0}" target="_blank">{0}</a>'.format(
repo.full_name
)
)
def gh_repo_visibility(repo):
"""Convert repo to its GitHub visibility attribute as string
:param repo: Repository to show its GitHub visibility as string
:type repo: ``repocribro.models.Repository``
:return: Text representation of repo GitHub visibility
:rtype: str
"""
return 'Private' if repo.private else 'Public'
#: Container with all model filters with their names in views
model_filters = {
'repo_visibility': repo_visibility,
'repo_link': repo_link,
'repo_languages': repo_languages,
'repo_topics': repo_topics,
'gh_user_link': gh_user_link,
'gh_repo_link': gh_repo_link,
'gh_push_url': gh_push_url,
'gh_repo_visibility': gh_repo_visibility
}
|
{
"content_hash": "ea9ab6e5f9e6478748a5cecd4460d92e",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 74,
"avg_line_length": 29.29078014184397,
"alnum_prop": 0.6365617433414044,
"repo_name": "MarekSuchanek/repocribro",
"id": "f49504456182c39363f517a00261c91fe63e5756",
"size": "4130",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "repocribro/filters/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1996"
},
{
"name": "Dockerfile",
"bytes": "378"
},
{
"name": "HTML",
"bytes": "57114"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "223534"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, division
from theano import gof
class TypedListType(gof.Type):
"""
Parameters
----------
ttype
        Type of theano variable this list will contain; can be another list.
depth
        Optional parameter; any value above 0 will create a nested list of
        this depth (0-based).
"""
def __init__(self, ttype, depth=0):
if depth < 0:
            raise ValueError('Please specify a depth greater than or '
                             'equal to 0')
if not isinstance(ttype, gof.Type):
raise TypeError('Expected a Theano Type')
if depth == 0:
self.ttype = ttype
else:
self.ttype = TypedListType(ttype, depth - 1)
def filter(self, x, strict=False, allow_downcast=None):
"""
Parameters
----------
x
Value to filter.
strict
            If true, only a native python list will be accepted.
allow_downcast
Does not have any utility at the moment.
"""
if strict:
if not isinstance(x, list):
raise TypeError('Expected a python list')
else:
x = [self.ttype.filter(y) for y in x]
if all(self.ttype.is_valid_value(y) for y in x):
return x
else:
raise TypeError('Expected all elements to'
' be %s' % str(self.ttype))
def __eq__(self, other):
"""
Two lists are equal if they contain the same type.
"""
return type(self) == type(other) and self.ttype == other.ttype
def __hash__(self):
return gof.hashtype(self) ^ hash(self.ttype)
def __str__(self):
return 'TypedList <' + str(self.ttype) + '>'
def get_depth(self):
"""
        Utility function to get the 0-based depth of the list.
"""
if isinstance(self.ttype, TypedListType):
return self.ttype.get_depth() + 1
else:
return 0
def values_eq(self, a, b):
if not len(a) == len(b):
return False
for x in range(len(a)):
if not self.ttype.values_eq(a[x], b[x]):
return False
return True
def may_share_memory(self, a, b):
if a is b:
return True
        # As a list contains other elements, if a or b isn't a list, we
# still need to check if that element is contained in the
# other list.
if not isinstance(a, list):
a = [a]
if not isinstance(b, list):
b = [b]
        for idx1 in range(len(a)):
            for idx2 in range(len(b)):
                if self.ttype.may_share_memory(a[idx1], b[idx2]):
                    return True
        return False
def c_declare(self, name, sub, check_input=True):
return """
PyListObject* %(name)s;
""" % dict(name=name)
def c_init(self, name, sub):
return """
%(name)s = NULL;
""" % dict(name=name)
def c_extract(self, name, sub, check_input=True):
if check_input:
pre = """
if (!PyList_Check(py_%(name)s)) {
PyErr_SetString(PyExc_TypeError, "expected a list");
%(fail)s
}""" % dict(name=name, fail=sub['fail'])
else:
pre = ""
return pre + """
%(name)s = (PyListObject*) (py_%(name)s);
""" % dict(name=name, fail=sub['fail'])
def c_sync(self, name, sub):
return """
Py_XDECREF(py_%(name)s);
py_%(name)s = (PyObject*)(%(name)s);
Py_INCREF(py_%(name)s);
""" % dict(name=name)
def c_cleanup(self, name, sub):
return ""
def c_code_cache_version(self):
return (2,)
dtype = property(lambda self: self.ttype)
ndim = property(lambda self: self.ttype.ndim + 1)
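# Illustrative usage sketch (the ``theano.tensor.TensorType`` import is an
# assumption; any ``gof.Type`` works as the element type):
#
#   from theano.tensor import TensorType
#   t = TypedListType(TensorType('float64', (False,)), depth=1)
#   # t describes a list of lists of float64 vectors; t.get_depth() == 1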
|
{
"content_hash": "b71ef8dd2089c9920cff2afee71350e7",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 77,
"avg_line_length": 27.517482517482517,
"alnum_prop": 0.5064803049555273,
"repo_name": "JazzeYoung/VeryDeepAutoEncoder",
"id": "a565562b1e06a44322ea416aa9745e8d4576a7e6",
"size": "3935",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "theano/typed_list/type.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "260790"
},
{
"name": "C++",
"bytes": "323987"
},
{
"name": "CSS",
"bytes": "1750"
},
{
"name": "Cuda",
"bytes": "2767955"
},
{
"name": "HTML",
"bytes": "4611"
},
{
"name": "Jupyter Notebook",
"bytes": "4603376"
},
{
"name": "Makefile",
"bytes": "116"
},
{
"name": "Python",
"bytes": "16514506"
},
{
"name": "Shell",
"bytes": "16447"
}
],
"symlink_target": ""
}
|
import sys
sys.path.append('dependencies/ipyxact')
import argparse
from ipyxact.ipyxact import Component
from mako.lookup import TemplateLookup
from mako.template import Template
from mako.runtime import Context
parser = argparse.ArgumentParser(description='Generate registers')
parser.add_argument('xml_path', metavar='<xml>', type=argparse.FileType('r'), nargs=1,
help='path to IP-XACT XML file to be read')
args = parser.parse_args()
component = Component()
component.load(args.xml_path[0].name)
addressBlock = component.memoryMaps.memoryMap[0].addressBlock[0]
lookup = TemplateLookup('templates')
buffer = open('reg_block.svh', 'w')
for reg in addressBlock.register:
template = lookup.get_template('uvm_reg.mako')
ctx = Context(buffer, reg=reg)
template.render_context(ctx)
buffer.write('\n\n\n\n')
template = lookup.get_template('uvm_reg_block.mako')
ctx = Context(buffer, addressBlock=addressBlock)
template.render_context(ctx)
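# Example invocation (the XML file name is hypothetical):
#
#   python rgen.py my_regs.xml
#
# which reads the IP-XACT description and writes the generated UVM register
# model to reg_block.svh.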
|
{
"content_hash": "acb8125e4f459b36df6891e144afa4e5",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 86,
"avg_line_length": 28.823529411764707,
"alnum_prop": 0.7418367346938776,
"repo_name": "tudortimi/rgen",
"id": "4dedbf16f6e1b7a5c81c1c9fdc0fdc40dae6c21e",
"size": "1003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rgen.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1683"
},
{
"name": "Python",
"bytes": "1189"
},
{
"name": "Shell",
"bytes": "638"
}
],
"symlink_target": ""
}
|
from flask import send_from_directory
from backend import app
import os
def serve_static_path(directory, path):
return app.send_static_file(os.path.join(directory, path).replace('\\','/'))
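# Example: a request for /img/logo.png below resolves to
# serve_static_path('img', 'logo.png') and is served from the app's static
# folder (the file name is illustrative).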
@app.route('/')
def root():
return app.send_static_file('index.html')
@app.route('/js/<path:path>')
def static_js(path):
return serve_static_path('js', path)
@app.route('/css/<path:path>')
def static_css(path):
return serve_static_path('css', path)
@app.route('/img/<path:path>')
def static_img(path):
return serve_static_path('img', path)
@app.route('/api/avatar/<path:path>')
def avatar_proxy(path):
return send_from_directory((app.config['AVATAR_UPLOADS'].replace('\\','/')), path)
@app.route('/api/forums/postsimages/<path:path>')
def forums_images_proxy(path):
return send_from_directory((app.config['FORUMS_IMG_UPLOADS'].replace('\\','/')), path)
@app.route('/api/gamedownloads/<path:path>')
def game_download_proxy(path):
return send_from_directory((app.config['GAME_DOWNLOADS'].replace('\\','/')), path)
|
{
"content_hash": "546541d266398a7cfd7fc35620d468ad",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 90,
"avg_line_length": 26.794871794871796,
"alnum_prop": 0.676555023923445,
"repo_name": "ghostsp15/Fort-Nitta",
"id": "f5a190f69ad4d26d649c8e9093eb20a8eec799f6",
"size": "1045",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "backend/staticfiles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "702440"
},
{
"name": "Erlang",
"bytes": "1055"
},
{
"name": "HTML",
"bytes": "425083"
},
{
"name": "JavaScript",
"bytes": "2435422"
},
{
"name": "Python",
"bytes": "87662"
},
{
"name": "Ruby",
"bytes": "161"
}
],
"symlink_target": ""
}
|
import os
def get_default_config_path():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "atcodertools-default.toml")
|
{
"content_hash": "6e7950a01172a9eb0c99b6bd087eed70",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 96,
"avg_line_length": 28,
"alnum_prop": 0.7142857142857143,
"repo_name": "kyuridenamida/ToolsForAtCoder",
"id": "c415a7e04dc01ff5c57783b2d736fe91ccd84516",
"size": "140",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "atcodertools/tools/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "291563"
},
{
"name": "Python",
"bytes": "54947"
}
],
"symlink_target": ""
}
|
from __future__ import division, unicode_literals, print_function
import json
import glob
import itertools
import logging
import math
import os
import re
import warnings
import xml.etree.cElementTree as ET
from collections import defaultdict
from io import StringIO
import numpy as np
from monty.io import zopen, reverse_readfile
from monty.json import MSONable
from monty.json import jsanitize
from monty.re import regrep
from six import string_types
from six.moves import map, zip
from pymatgen.analysis.nmr import NMRChemicalShiftNotation
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.core.units import unitized
from pymatgen.electronic_structure.bandstructure import BandStructure, \
BandStructureSymmLine, get_reconstructed_band_structure
from pymatgen.electronic_structure.core import Spin, Orbital, OrbitalType, Magmom
from pymatgen.electronic_structure.dos import CompleteDos, Dos
from pymatgen.entries.computed_entries import \
ComputedEntry, ComputedStructureEntry
from pymatgen.io.vasp.inputs import Incar, Kpoints, Poscar, Potcar
from pymatgen.util.io_utils import clean_lines, micro_pyawk
"""
Classes for reading/manipulating/writing VASP output files.
"""
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Rickard Armiento, " + \
"Vincent L Chevrier, Ioannis Petousis, Stephen Dacek, Mark Turiansky"
__credits__ = "Anubhav Jain"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.2"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 30, 2012"
logger = logging.getLogger(__name__)
def _parse_parameters(val_type, val):
"""
Helper function to convert a Vasprun parameter into the proper type.
Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
        val: Actual string value parsed from vasprun.xml.
"""
if val_type == "logical":
return val == "T"
elif val_type == "int":
return int(val)
elif val_type == "string":
return val.strip()
else:
return float(val)
def _parse_v_parameters(val_type, val, filename, param_name):
"""
Helper function to convert a Vasprun array-type parameter into the proper
type. Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
        val: Actual string value parsed from vasprun.xml.
filename: Fullpath of vasprun.xml. Used for robust error handling.
E.g., if vasprun.xml contains \\*\\*\\* for some Incar parameters,
the code will try to read from an INCAR file present in the same
directory.
param_name: Name of parameter.
Returns:
Parsed value.
"""
if val_type == "logical":
val = [i == "T" for i in val.split()]
elif val_type == "int":
try:
val = [int(i) for i in val.split()]
except ValueError:
# Fix for stupid error in vasprun sometimes which displays
# LDAUL/J as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise IOError("Error in parsing vasprun.xml")
elif val_type == "string":
val = val.split()
else:
try:
val = [float(i) for i in val.split()]
except ValueError:
# Fix for stupid error in vasprun sometimes which displays
# MAGMOM as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise IOError("Error in parsing vasprun.xml")
return val
def _parse_varray(elem):
if elem.get("type", None) == 'logical':
        m = [[i == 'T' for i in v.text.split()] for v in elem]
else:
m = [[_vasprun_float(i) for i in v.text.split()] for v in elem]
return m
def _parse_from_incar(filename, key):
"""
Helper function to parse a parameter from the INCAR.
"""
dirname = os.path.dirname(filename)
for f in os.listdir(dirname):
if re.search(r"INCAR", f):
warnings.warn("INCAR found. Using " + key + " from INCAR.")
incar = Incar.from_file(os.path.join(dirname, f))
if key in incar:
return incar[key]
else:
return None
return None
def _vasprun_float(f):
"""
Large numbers are often represented as ********* in the vasprun.
This function parses these values as np.nan
"""
try:
return float(f)
except ValueError as e:
f = f.strip()
if f == '*' * len(f):
warnings.warn('Float overflow (*******) encountered in vasprun')
return np.nan
raise e
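# e.g. _vasprun_float("0.5") -> 0.5, while _vasprun_float("********") warns
# and returns np.nan.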
class Vasprun(MSONable):
"""
Vastly improved cElementTree-based parser for vasprun.xml files. Uses
iterparse to support incremental parsing of large files.
Speedup over Dom is at least 2x for smallish files (~1Mb) to orders of
magnitude for larger files (~10Mb).
Args:
filename (str): Filename to parse
ionic_step_skip (int): If ionic_step_skip is a number > 1,
only every ionic_step_skip ionic steps will be read for
structure and energies. This is very useful if you are parsing
very large vasprun.xml files and you are not interested in every
single ionic step. Note that the final energies may not be the
actual final energy in the vasprun.
ionic_step_offset (int): Used together with ionic_step_skip. If set,
the first ionic step read will be offset by the amount of
ionic_step_offset. For example, if you want to start reading
every 10th structure but only from the 3rd structure onwards,
set ionic_step_skip to 10 and ionic_step_offset to 3. Main use
case is when doing statistical structure analysis with
extremely long time scale multiple VASP calculations of
varying numbers of steps.
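            For example, ionic_step_skip=10 with ionic_step_offset=3 reads
            the ionic steps 3, 13, 23, ... of the run.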
parse_dos (bool): Whether to parse the dos. Defaults to True. Set
to False to shave off significant time from the parsing if you
are not interested in getting those data.
parse_eigen (bool): Whether to parse the eigenvalues. Defaults to
True. Set to False to shave off significant time from the
parsing if you are not interested in getting those data.
parse_projected_eigen (bool): Whether to parse the projected
eigenvalues. Defaults to False. Set to True to obtain projected
eigenvalues. **Note that this can take an extreme amount of time
and memory.** So use this wisely.
        parse_potcar_file (bool/str): Whether to parse the potcar file to read
            the potcar hashes for the potcar_spec attribute. Defaults to True.
            If False, no hashes will be determined and the potcar_spec
            dictionaries will read {"symbol": ElSymbol, "hash": None}. By
            default, looks in the same directory as the vasprun.xml, with the
            same extension as vasprun.xml. If a string is provided, looks at
            that filepath.
occu_tol (float): Sets the minimum tol for the determination of the
vbm and cbm. Usually the default of 1e-8 works well enough,
but there may be pathological cases.
exception_on_bad_xml (bool): Whether to throw a ParseException if a
malformed XML is detected. Default to True, which ensures only
proper vasprun.xml are parsed. You can set to False if you want
partial results (e.g., if you are monitoring a calculation during a
run), but use the results with care. A warning is issued.
**Vasp results**
.. attribute:: ionic_steps
All ionic steps in the run as a list of
{"structure": structure at end of run,
"electronic_steps": {All electronic step data in vasprun file},
"stresses": stress matrix}
.. attribute:: structures
List of Structure objects for the structure at each ionic step.
.. attribute:: tdos
Total dos calculated at the end of run.
.. attribute:: idos
Integrated dos calculated at the end of run.
.. attribute:: pdos
List of list of PDos objects. Access as pdos[atomindex][orbitalindex]
.. attribute:: efermi
Fermi energy
.. attribute:: eigenvalues
Available only if parse_eigen=True. Final eigenvalues as a dict of
{(spin, kpoint index):[[eigenvalue, occu]]}.
This representation is based on actual ordering in VASP and is meant as
an intermediate representation to be converted into proper objects. The
kpoint index is 0-based (unlike the 1-based indexing in VASP).
.. attribute:: projected_eigenvalues
Final projected eigenvalues as a dict of {spin: nd-array}. To access
a particular value, you need to do
Vasprun.projected_eigenvalues[spin][kpoint index][band index][atom index][orbital_index]
This representation is based on actual ordering in VASP and is meant as
an intermediate representation to be converted into proper objects. The
kpoint, band and atom indices are 0-based (unlike the 1-based indexing
in VASP).
.. attribute:: dielectric
        The real and imaginary parts of the dielectric constant (e.g., computed
        by RPA) as a function of the energy (frequency). Optical properties (e.g.
absorption coefficient) can be obtained through this.
The data is given as a tuple of 3 values containing each of them
the energy, the real part tensor, and the imaginary part tensor
([energies],[[real_partxx,real_partyy,real_partzz,real_partxy,
real_partyz,real_partxz]],[[imag_partxx,imag_partyy,imag_partzz,
imag_partxy, imag_partyz, imag_partxz]])
.. attribute:: other_dielectric
Dictionary, with the tag comment as key, containing other variants of
        the real and imaginary parts of the dielectric constant (e.g., computed
        by RPA) as a function of the energy (frequency). Optical properties (e.g.
absorption coefficient) can be obtained through this.
The data is given as a tuple of 3 values containing each of them
the energy, the real part tensor, and the imaginary part tensor
([energies],[[real_partxx,real_partyy,real_partzz,real_partxy,
real_partyz,real_partxz]],[[imag_partxx,imag_partyy,imag_partzz,
imag_partxy, imag_partyz, imag_partxz]])
.. attribute:: epsilon_static
The static part of the dielectric constant. Present when it's a DFPT run
(LEPSILON=TRUE)
.. attribute:: epsilon_static_wolfe
The static part of the dielectric constant without any local field
effects. Present when it's a DFPT run (LEPSILON=TRUE)
.. attribute:: epsilon_ionic
The ionic part of the static dielectric constant. Present when it's a
DFPT run (LEPSILON=TRUE) and IBRION=5, 6, 7 or 8
.. attribute:: nionic_steps
The total number of ionic steps. This number is always equal
to the total number of steps in the actual run even if
ionic_step_skip is used.
.. attribute:: force_constants
Force constants computed in phonon DFPT run(IBRION = 8).
The data is a 4D numpy array of shape (natoms, natoms, 3, 3).
.. attribute:: normalmode_eigenvals
Normal mode frequencies.
1D numpy array of size 3*natoms.
.. attribute:: normalmode_eigenvecs
Normal mode eigen vectors.
3D numpy array of shape (3*natoms, natoms, 3).
**Vasp inputs**
.. attribute:: incar
Incar object for parameters specified in INCAR file.
.. attribute:: parameters
Incar object with parameters that vasp actually used, including all
defaults.
.. attribute:: kpoints
Kpoints object for KPOINTS specified in run.
.. attribute:: actual_kpoints
List of actual kpoints, e.g.,
[[0.25, 0.125, 0.08333333], [-0.25, 0.125, 0.08333333],
[0.25, 0.375, 0.08333333], ....]
.. attribute:: actual_kpoints_weights
List of kpoint weights, E.g.,
[0.04166667, 0.04166667, 0.04166667, 0.04166667, 0.04166667, ....]
.. attribute:: atomic_symbols
List of atomic symbols, e.g., ["Li", "Fe", "Fe", "P", "P", "P"]
.. attribute:: potcar_symbols
List of POTCAR symbols. e.g.,
["PAW_PBE Li 17Jan2003", "PAW_PBE Fe 06Sep2000", ..]
Author: Shyue Ping Ong
"""
def __init__(self, filename, ionic_step_skip=None,
ionic_step_offset=0, parse_dos=True,
parse_eigen=True, parse_projected_eigen=False,
parse_potcar_file=True, occu_tol=1e-8,
exception_on_bad_xml=True):
self.filename = filename
self.ionic_step_skip = ionic_step_skip
self.ionic_step_offset = ionic_step_offset
self.occu_tol = occu_tol
self.exception_on_bad_xml = exception_on_bad_xml
with zopen(filename, "rt") as f:
if ionic_step_skip or ionic_step_offset:
# remove parts of the xml file and parse the string
run = f.read()
steps = run.split("<calculation>")
# The text before the first <calculation> is the preamble!
preamble = steps.pop(0)
self.nionic_steps = len(steps)
new_steps = steps[ionic_step_offset::int(ionic_step_skip)]
                # add the trailing information of the last step from the run
to_parse = "<calculation>".join(new_steps)
if steps[-1] != new_steps[-1]:
to_parse = "{}<calculation>{}{}".format(
preamble, to_parse,
steps[-1].split("</calculation>")[-1])
else:
to_parse = "{}<calculation>{}".format(preamble, to_parse)
self._parse(StringIO(to_parse), parse_dos=parse_dos,
parse_eigen=parse_eigen,
parse_projected_eigen=parse_projected_eigen)
else:
self._parse(f, parse_dos=parse_dos, parse_eigen=parse_eigen,
parse_projected_eigen=parse_projected_eigen)
self.nionic_steps = len(self.ionic_steps)
if parse_potcar_file:
self.update_potcar_spec(parse_potcar_file)
self.update_charge_from_potcar(parse_potcar_file)
if self.incar.get("ALGO", "") != "BSE" and (not self.converged):
msg = "%s is an unconverged VASP run.\n" % filename
msg += "Electronic convergence reached: %s.\n" % \
self.converged_electronic
msg += "Ionic convergence reached: %s." % self.converged_ionic
warnings.warn(msg, UnconvergedVASPWarning)
def _parse(self, stream, parse_dos, parse_eigen, parse_projected_eigen):
self.efermi = None
self.eigenvalues = None
self.projected_eigenvalues = None
self.dielectric_data = {}
self.other_dielectric = {}
ionic_steps = []
parsed_header = False
try:
for event, elem in ET.iterparse(stream):
tag = elem.tag
if not parsed_header:
if tag == "generator":
self.generator = self._parse_params(elem)
elif tag == "incar":
self.incar = self._parse_params(elem)
elif tag == "kpoints":
self.kpoints, self.actual_kpoints, \
self.actual_kpoints_weights = self._parse_kpoints(
elem)
elif tag == "parameters":
self.parameters = self._parse_params(elem)
elif tag == "structure" and elem.attrib.get("name") == \
"initialpos":
self.initial_structure = self._parse_structure(elem)
elif tag == "atominfo":
self.atomic_symbols, self.potcar_symbols = \
self._parse_atominfo(elem)
self.potcar_spec = [{"titel": p,
"hash": None} for
p in self.potcar_symbols]
if tag == "calculation":
parsed_header = True
if not self.parameters.get("LCHIMAG", False):
ionic_steps.append(self._parse_calculation(elem))
else:
ionic_steps.extend(self._parse_chemical_shift_calculation(elem))
elif parse_dos and tag == "dos":
try:
self.tdos, self.idos, self.pdos = self._parse_dos(elem)
self.efermi = self.tdos.efermi
self.dos_has_errors = False
except Exception as ex:
self.dos_has_errors = True
elif parse_eigen and tag == "eigenvalues":
self.eigenvalues = self._parse_eigen(elem)
elif parse_projected_eigen and tag == "projected":
self.projected_eigenvalues = self._parse_projected_eigen(
elem)
elif tag == "dielectricfunction":
if ("comment" not in elem.attrib) or \
elem.attrib["comment"] == "INVERSE MACROSCOPIC DIELECTRIC TENSOR (including local field effects in RPA (Hartree))":
                        if 'density' not in self.dielectric_data:
self.dielectric_data['density'] = self._parse_diel(elem)
# "velocity-velocity" is also named "current-current"
# in OUTCAR
                        elif 'velocity' not in self.dielectric_data:
self.dielectric_data['velocity'] = self._parse_diel(elem)
else:
raise NotImplementedError('This vasprun.xml has >2 unlabelled dielectric functions')
else:
comment = elem.attrib["comment"]
self.other_dielectric[comment] = self._parse_diel(elem)
elif tag == "varray" and elem.attrib.get("name") == 'opticaltransitions':
self.optical_transition = np.array(_parse_varray(elem))
elif tag == "structure" and elem.attrib.get("name") == \
"finalpos":
self.final_structure = self._parse_structure(elem)
elif tag == "dynmat":
hessian, eigenvalues, eigenvectors = self._parse_dynmat(elem)
natoms = len(self.atomic_symbols)
hessian = np.array(hessian)
self.force_constants = np.zeros((natoms, natoms, 3, 3), dtype='double')
for i in range(natoms):
for j in range(natoms):
self.force_constants[i, j] = hessian[i*3:(i+1)*3,j*3:(j+1)*3]
phonon_eigenvectors = []
for ev in eigenvectors:
phonon_eigenvectors.append(np.array(ev).reshape(natoms, 3))
self.normalmode_eigenvals = np.array(eigenvalues)
self.normalmode_eigenvecs = np.array(phonon_eigenvectors)
except ET.ParseError as ex:
if self.exception_on_bad_xml:
raise ex
else:
warnings.warn(
"XML is malformed. Parsing has stopped but partial data"
"is available.", UserWarning)
self.ionic_steps = ionic_steps
self.vasp_version = self.generator["version"]
@property
def structures(self):
return [step["structure"] for step in self.ionic_steps]
@property
def epsilon_static(self):
"""
Property only available for DFPT calculations.
"""
return self.ionic_steps[-1].get("epsilon", [])
@property
def epsilon_static_wolfe(self):
"""
Property only available for DFPT calculations.
"""
return self.ionic_steps[-1].get("epsilon_rpa", [])
@property
def epsilon_ionic(self):
"""
Property only available for DFPT calculations and when IBRION=5, 6, 7 or 8.
"""
return self.ionic_steps[-1].get("epsilon_ion", [])
@property
def dielectric(self):
return self.dielectric_data['density']
@property
def optical_absorption_coeff(self):
"""
Calculate the optical absorption coefficient
from the dielectric constants. Note that this method is only
implemented for optical properties calculated with GGA and BSE.
Returns:
    optical absorption coefficient as a list, one value per frequency
"""
if self.dielectric_data["density"]:
real_avg = [sum(self.dielectric_data["density"][1][i][0:3]) / 3
for i in range(len(self.dielectric_data["density"][0]))]
imag_avg = [sum(self.dielectric_data["density"][2][i][0:3]) / 3
for i in range(len(self.dielectric_data["density"][0]))]
def f(freq, real, imag):
"""
The optical absorption coefficient calculated in terms of
equation
"""
hbar = 6.582119514e-16 # eV/K
coeff = np.sqrt(
np.sqrt(real ** 2 + imag ** 2) - real) * \
np.sqrt(2) / hbar * freq
return coeff
absorption_coeff = [f(freq, real, imag) for freq, real, imag in
zip(self.dielectric_data["density"][0],
real_avg, imag_avg)]
return absorption_coeff
@property
def lattice(self):
return self.final_structure.lattice
@property
def lattice_rec(self):
return self.final_structure.lattice.reciprocal_lattice
@property
def converged_electronic(self):
"""
Checks that electronic step convergence has been reached in the final
ionic step
"""
final_esteps = self.ionic_steps[-1]["electronic_steps"]
if 'LEPSILON' in self.incar and self.incar['LEPSILON']:
i = 1
to_check = set(['e_wo_entrp', 'e_fr_energy', 'e_0_energy'])
while set(final_esteps[i].keys()) == to_check:
i += 1
return i + 1 != self.parameters["NELM"]
return len(final_esteps) < self.parameters["NELM"]
@property
def converged_ionic(self):
"""
Checks that ionic step convergence has been reached, i.e. that vasp
exited before reaching the max ionic steps for a relaxation run
"""
nsw = self.parameters.get("NSW", 0)
return nsw <= 1 or len(self.ionic_steps) < nsw
@property
def converged(self):
"""
Returns true if a relaxation run is converged.
"""
return self.converged_electronic and self.converged_ionic
@property
@unitized("eV")
def final_energy(self):
"""
Final energy from the vasp run.
"""
try:
final_istep = self.ionic_steps[-1]
if final_istep["e_wo_entrp"] != final_istep[
'electronic_steps'][-1]["e_0_energy"]:
warnings.warn("Final e_wo_entrp differs from the final "
"electronic step. VASP may have included some "
"corrections, e.g., vdw. Vasprun will return "
"the final e_wo_entrp, i.e., including "
"corrections in such instances.")
return final_istep["e_wo_entrp"]
return final_istep['electronic_steps'][-1]["e_0_energy"]
except (IndexError, KeyError):
warnings.warn("Calculation does not have a total energy. "
"Possibly a GW or similar kind of run. A value of "
"infinity is returned.")
return float('inf')
@property
def complete_dos(self):
"""
A complete dos object which incorporates the total dos and all
projected dos.
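Example (illustrative; ``vr`` is a parsed Vasprun with DOS data)::
    cdos = vr.complete_dos
    spd_dos = cdos.get_spd_dos()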
"""
final_struct = self.final_structure
pdoss = {final_struct[i]: pdos for i, pdos in enumerate(self.pdos)}
return CompleteDos(self.final_structure, self.tdos, pdoss)
@property
def hubbards(self):
"""
Hubbard U values used if a vasprun is a GGA+U run. {} otherwise.
"""
symbols = [s.split()[1] for s in self.potcar_symbols]
symbols = [re.split(r"_", s)[0] for s in symbols]
if not self.incar.get("LDAU", False):
return {}
us = self.incar.get("LDAUU", self.parameters.get("LDAUU"))
js = self.incar.get("LDAUJ", self.parameters.get("LDAUJ"))
if len(js) != len(us):
js = [0] * len(us)
if len(us) == len(symbols):
return {symbols[i]: us[i] - js[i] for i in range(len(symbols))}
elif sum(us) == 0 and sum(js) == 0:
return {}
else:
raise VaspParserError("Length of U value parameters and atomic "
"symbols are mismatched")
@property
def run_type(self):
"""
Returns the run type. Currently supports LDA, GGA, vdW-DF and HF calcs.
TODO: Fix for other functional types like PW91, other vdW types, etc.
"""
if self.parameters.get("LHFCALC", False):
rt = "HF"
elif self.parameters.get("LUSE_VDW", False):
vdw_gga = {"RE": "DF", "OR": "optPBE", "BO": "optB88",
"MK": "optB86b", "ML": "DF2"}
gga = self.parameters.get("GGA").upper()
rt = "vdW-" + vdw_gga[gga]
elif self.potcar_symbols[0].split()[0] == 'PAW':
rt = "LDA"
else:
rt = "GGA"
if self.is_hubbard:
rt += "+U"
return rt
@property
def is_hubbard(self):
"""
True if run is a DFT+U run.
"""
if len(self.hubbards) == 0:
return False
return sum(self.hubbards.values()) > 1e-8
@property
def is_spin(self):
"""
True if run is spin-polarized.
"""
return self.parameters.get("ISPIN", 1) == 2
def get_computed_entry(self, inc_structure=True, parameters=None,
data=None):
"""
Returns a ComputedStructureEntry from the vasprun.
Args:
inc_structure (bool): Set to True if you want
ComputedStructureEntries to be returned instead of
ComputedEntries.
parameters (list): Input parameters to include. It has to be one of
the properties supported by the Vasprun object. If
parameters is None, a default set of parameters that are
necessary for typical post-processing will be set.
data (list): Output data to include. Has to be one of the properties
supported by the Vasprun object.
Returns:
ComputedStructureEntry/ComputedEntry
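Example (an illustrative sketch; the filename is hypothetical)::
    entry = Vasprun("vasprun.xml").get_computed_entry(
        inc_structure=True, parameters=["final_energy", "converged"])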
"""
param_names = {"is_hubbard", "hubbards", "potcar_symbols",
"potcar_spec", "run_type"}
if parameters:
param_names.update(parameters)
params = {p: getattr(self, p) for p in param_names}
data = {p: getattr(self, p) for p in data} if data is not None else {}
if inc_structure:
return ComputedStructureEntry(self.final_structure,
self.final_energy, parameters=params,
data=data)
else:
return ComputedEntry(self.final_structure.composition,
self.final_energy, parameters=params,
data=data)
def get_band_structure(self, kpoints_filename=None, efermi=None,
line_mode=False):
"""
Returns the band structure as a BandStructure object
Args:
kpoints_filename (str): Full path of the KPOINTS file from which
the band structure is generated.
If none is provided, the code will try to intelligently
determine the appropriate KPOINTS file by substituting the
filename of the vasprun.xml with KPOINTS.
The latter is the default behavior.
efermi (float): If you want to manually specify the Fermi energy,
    this is where you should do it. By default, the None value
    means the code will get it from the vasprun.
line_mode (bool): Force the band structure to be considered as
a run along symmetry lines.
Returns:
a BandStructure object (or more specifically a
BandStructureSymmLine object if the run is detected to be a run
along symmetry lines)
Two types of runs along symmetry lines are accepted: non-SC with
Line-Mode in the KPOINTS file, or hybrid, self-consistent with a
uniform grid plus a few kpoints along symmetry lines (explicit
KPOINTS file) (it's not possible to run a non-SC band structure
with hybrid functionals). The explicit KPOINTS file needs to have
data on the kpoint label as commentary.
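Example (an illustrative sketch; assumes a line-mode run with a
KPOINTS file next to the vasprun.xml)::
    vr = Vasprun("vasprun.xml", parse_projected_eigen=True)
    bs = vr.get_band_structure(line_mode=True)
    print(bs.get_band_gap())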
"""
if not kpoints_filename:
kpoints_filename = self.filename.replace('vasprun.xml', 'KPOINTS')
if not os.path.exists(kpoints_filename) and line_mode is True:
raise VaspParserError('KPOINTS needed to obtain band structure '
'along symmetry lines.')
if efermi is None:
efermi = self.efermi
kpoint_file = None
if os.path.exists(kpoints_filename):
kpoint_file = Kpoints.from_file(kpoints_filename)
lattice_new = Lattice(self.lattice_rec.matrix)
kpoints = [np.array(self.actual_kpoints[i])
for i in range(len(self.actual_kpoints))]
p_eigenvals = defaultdict(list)
eigenvals = defaultdict(list)
nkpts = len(kpoints)
neigenvalues = [len(v) for v in self.eigenvalues[Spin.up]]
min_eigenvalues = min(neigenvalues)
for spin, v in self.eigenvalues.items():
v = np.swapaxes(v, 0, 1)
eigenvals[spin] = v[:, :, 0]
if self.projected_eigenvalues:
peigen = self.projected_eigenvalues[spin]
# Original axes for self.projected_eigenvalues are kpoints,
# band, ion, orb.
# For BS input, we need band, kpoints, orb, ion.
peigen = np.swapaxes(peigen, 0, 1) # Swap kpoint and band axes
peigen = np.swapaxes(peigen, 2, 3) # Swap ion and orb axes
p_eigenvals[spin] = peigen
# for b in range(min_eigenvalues):
# p_eigenvals[spin].append(
# [{Orbital(orb): v for orb, v in enumerate(peigen[b, k])}
# for k in range(nkpts)])
# check if we have a hybrid band structure computation
# for this we look at the presence of the LHFCALC tag
hybrid_band = False
if self.parameters.get('LHFCALC', False):
hybrid_band = True
if kpoint_file is not None:
if kpoint_file.style == Kpoints.supported_modes.Line_mode:
line_mode = True
if line_mode:
labels_dict = {}
if hybrid_band:
start_bs_index = 0
for i in range(len(self.actual_kpoints)):
if self.actual_kpoints_weights[i] == 0.0:
start_bs_index = i
break
for i in range(start_bs_index, len(kpoint_file.kpts)):
if kpoint_file.labels[i] is not None:
labels_dict[kpoint_file.labels[i]] = \
kpoint_file.kpts[i]
# remake the data only considering line band structure k-points
# (weight = 0.0 kpoints)
nbands = len(eigenvals[Spin.up])
kpoints = kpoints[start_bs_index:nkpts]
up_eigen = [eigenvals[Spin.up][i][start_bs_index:nkpts]
for i in range(nbands)]
if self.projected_eigenvalues:
p_eigenvals[Spin.up] = [p_eigenvals[Spin.up][i][
start_bs_index:nkpts]
for i in range(nbands)]
if self.is_spin:
down_eigen = [eigenvals[Spin.down][i][start_bs_index:nkpts]
for i in range(nbands)]
eigenvals = {Spin.up: up_eigen, Spin.down: down_eigen}
if self.projected_eigenvalues:
p_eigenvals[Spin.down] = [p_eigenvals[Spin.down][i][
start_bs_index:nkpts]
for i in range(nbands)]
else:
eigenvals = {Spin.up: up_eigen}
else:
if '' in kpoint_file.labels:
raise Exception("A band structure along symmetry lines "
"requires a label for each kpoint. "
"Check your KPOINTS file")
labels_dict = dict(zip(kpoint_file.labels, kpoint_file.kpts))
labels_dict.pop(None, None)
return BandStructureSymmLine(kpoints, eigenvals, lattice_new,
efermi, labels_dict,
structure=self.final_structure,
projections=p_eigenvals)
else:
return BandStructure(kpoints, eigenvals, lattice_new, efermi,
structure=self.final_structure,
projections=p_eigenvals)
@property
def eigenvalue_band_properties(self):
"""
Band properties from the eigenvalues as a tuple,
(band gap, cbm, vbm, is_band_gap_direct).
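Example (illustrative; ``vr`` is a parsed Vasprun)::
    gap, cbm, vbm, is_direct = vr.eigenvalue_band_properties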
"""
vbm = -float("inf")
vbm_kpoint = None
cbm = float("inf")
cbm_kpoint = None
for spin, d in self.eigenvalues.items():
for k, val in enumerate(d):
for (eigenval, occu) in val:
if occu > self.occu_tol and eigenval > vbm:
vbm = eigenval
vbm_kpoint = k
elif occu <= self.occu_tol and eigenval < cbm:
cbm = eigenval
cbm_kpoint = k
return max(cbm - vbm, 0), cbm, vbm, vbm_kpoint == cbm_kpoint
def get_potcars(self, path):
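"""
Locate and return a POTCAR matching this run, or None.
Args:
    path: A POTCAR file path, a directory to search for a file
        starting with "POTCAR", or True to search the directory
        containing the vasprun.xml.
Returns:
    Potcar with TITELs matching potcar_symbols, else None.
"""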
def get_potcar_in_path(p):
for fn in os.listdir(os.path.abspath(p)):
if fn.startswith('POTCAR'):
pc = Potcar.from_file(os.path.join(p, fn))
if {d.header for d in pc} == \
{sym for sym in self.potcar_symbols}:
return pc
warnings.warn("No POTCAR file with matching TITEL fields"
" was found in {}".format(os.path.abspath(p)))
if isinstance(path, string_types):
if "POTCAR" in path:
potcar = Potcar.from_file(path)
if {d.TITEL for d in potcar} != \
{sym for sym in self.potcar_symbols}:
raise ValueError("Potcar TITELs do not match Vasprun")
else:
potcar = get_potcar_in_path(path)
elif isinstance(path, bool) and path:
potcar = get_potcar_in_path(os.path.split(self.filename)[0])
else:
potcar = None
return potcar
def update_potcar_spec(self, path):
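"""
Update the potcar_spec (TITEL + hash) using a POTCAR located via
get_potcars(path).
Args:
    path: See get_potcars; a POTCAR path, a directory, or True.
"""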
potcar = self.get_potcars(path)
if potcar:
self.potcar_spec = [{"titel": sym, "hash": ps.get_potcar_hash()}
for sym in self.potcar_symbols
for ps in potcar if
ps.symbol == sym.split()[1]]
def update_charge_from_potcar(self, path):
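"""
Set the overall charge on the structures from NELECT minus the
electron count implied by the POTCAR ZVALs. Skipped for GW/BSE-type
runs (ALGO in GW0/G0W0/GW/BSE).
Args:
    path: See get_potcars; a POTCAR path, a directory, or True.
"""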
potcar = self.get_potcars(path)
if potcar and self.incar.get("ALGO", "") not in ["GW0", "G0W0", "GW", "BSE"]:
nelect = self.parameters["NELECT"]
potcar_nelect = int(round(sum([self.structures[0].composition.element_composition[
ps.element] * ps.ZVAL for ps in potcar])))
charge = nelect - potcar_nelect
if charge:
for s in self.structures:
s._charge = charge
def as_dict(self):
"""
Json-serializable dict representation.
"""
d = {"vasp_version": self.vasp_version,
"has_vasp_completed": self.converged,
"nsites": len(self.final_structure)}
comp = self.final_structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
symbols = [s.split()[1] for s in self.potcar_symbols]
symbols = [re.split(r"_", s)[0] for s in symbols]
d["is_hubbard"] = self.is_hubbard
d["hubbards"] = self.hubbards
unique_symbols = sorted(list(set(self.atomic_symbols)))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["run_type"] = self.run_type
vin = {"incar": {k: v for k, v in self.incar.items()},
"crystal": self.initial_structure.as_dict(),
"kpoints": self.kpoints.as_dict()}
actual_kpts = [{"abc": list(self.actual_kpoints[i]),
"weight": self.actual_kpoints_weights[i]}
for i in range(len(self.actual_kpoints))]
vin["kpoints"]["actual_points"] = actual_kpts
vin["potcar"] = [s.split(" ")[1] for s in self.potcar_symbols]
vin["potcar_spec"] = self.potcar_spec
vin["potcar_type"] = [s.split(" ")[0] for s in self.potcar_symbols]
vin["parameters"] = {k: v for k, v in self.parameters.items()}
vin["lattice_rec"] = self.lattice_rec.as_dict()
d["input"] = vin
nsites = len(self.final_structure)
try:
vout = {"ionic_steps": self.ionic_steps,
"final_energy": self.final_energy,
"final_energy_per_atom": self.final_energy / nsites,
"crystal": self.final_structure.as_dict(),
"efermi": self.efermi}
except (ArithmeticError, TypeError):
vout = {"ionic_steps": self.ionic_steps,
"final_energy": self.final_energy,
"final_energy_per_atom": None,
"crystal": self.final_structure.as_dict(),
"efermi": self.efermi}
if self.eigenvalues:
eigen = {str(spin): v.tolist()
for spin, v in self.eigenvalues.items()}
vout["eigenvalues"] = eigen
(gap, cbm, vbm, is_direct) = self.eigenvalue_band_properties
vout.update(dict(bandgap=gap, cbm=cbm, vbm=vbm,
is_gap_direct=is_direct))
if self.projected_eigenvalues:
vout['projected_eigenvalues'] = {
str(spin): v.tolist()
for spin, v in self.projected_eigenvalues.items()}
vout['epsilon_static'] = self.epsilon_static
vout['epsilon_static_wolfe'] = self.epsilon_static_wolfe
vout['epsilon_ionic'] = self.epsilon_ionic
d['output'] = vout
return jsanitize(d, strict=True)
def _parse_params(self, elem):
params = {}
for c in elem:
name = c.attrib.get("name")
if c.tag not in ("i", "v"):
p = self._parse_params(c)
if name == "response functions":
# Delete duplicate fields from "response functions",
# which would otherwise override the values in the root params.
p = {k: v for k, v in p.items() if k not in params}
params.update(p)
else:
ptype = c.attrib.get("type")
val = c.text.strip() if c.text else ""
if c.tag == "i":
params[name] = _parse_parameters(ptype, val)
else:
params[name] = _parse_v_parameters(ptype, val,
self.filename, name)
elem.clear()
return Incar(params)
def _parse_atominfo(self, elem):
for a in elem.findall("array"):
if a.attrib["name"] == "atoms":
atomic_symbols = [rc.find("c").text.strip()
for rc in a.find("set")]
elif a.attrib["name"] == "atomtypes":
potcar_symbols = [rc.findall("c")[4].text.strip()
for rc in a.find("set")]
# ensure atomic symbols are valid elements
def parse_atomic_symbol(symbol):
try:
return str(Element(symbol))
# vasprun.xml uses X instead of Xe for xenon
except ValueError as e:
if symbol == "X":
return "Xe"
elif symbol == "r":
return "Zr"
raise e
elem.clear()
return [parse_atomic_symbol(sym) for
sym in atomic_symbols], potcar_symbols
def _parse_kpoints(self, elem):
e = elem
if elem.find("generation") is not None:
e = elem.find("generation")
k = Kpoints("Kpoints from vasprun.xml")
k.style = Kpoints.supported_modes.from_string(
e.attrib["param"] if "param" in e.attrib else "Reciprocal")
for v in e.findall("v"):
name = v.attrib.get("name")
toks = v.text.split()
if name == "divisions":
k.kpts = [[int(i) for i in toks]]
elif name == "usershift":
k.kpts_shift = [float(i) for i in toks]
elif name in {"genvec1", "genvec2", "genvec3", "shift"}:
setattr(k, name, [float(i) for i in toks])
for va in elem.findall("varray"):
name = va.attrib["name"]
if name == "kpointlist":
actual_kpoints = _parse_varray(va)
elif name == "weights":
weights = [i[0] for i in _parse_varray(va)]
elem.clear()
if k.style == Kpoints.supported_modes.Reciprocal:
k = Kpoints(comment="Kpoints from vasprun.xml",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(k.kpts),
kpts=actual_kpoints, kpts_weights=weights)
return k, actual_kpoints, weights
def _parse_structure(self, elem):
latt = _parse_varray(elem.find("crystal").find("varray"))
pos = _parse_varray(elem.find("varray"))
struct = Structure(latt, self.atomic_symbols, pos)
sdyn = elem.find("varray/[@name='selective']")
if sdyn is not None:
struct.add_site_property('selective_dynamics',
_parse_varray(sdyn))
return struct
def _parse_diel(self, elem):
imag = [[_vasprun_float(l) for l in r.text.split()]
for r in elem.find("imag").find("array")
.find("set").findall("r")]
real = [[_vasprun_float(l) for l in r.text.split()]
for r in elem.find("real")
.find("array").find("set").findall("r")]
elem.clear()
return [e[0] for e in imag], \
[e[1:] for e in real], [e[1:] for e in imag]
def _parse_optical_transition(self, elem):
for va in elem.findall("varray"):
if va.attrib.get("name") == "opticaltransitions":
# opticaltransitions array contains oscillator strength and probability of transition
oscillator_strength = np.array(_parse_varray(va))[0:,]
probability_transition = np.array(_parse_varray(va))[0:,1]
return oscillator_strength, probability_transition
def _parse_chemical_shift_calculation(self, elem):
calculation = []
istep = {}
try:
s = self._parse_structure(elem.find("structure"))
except AttributeError: # not all calculations have a structure
s = None
for va in elem.findall("varray"):
istep[va.attrib["name"]] = _parse_varray(va)
istep["structure"] = s
istep["electronic_steps"] = []
calculation.append(istep)
for scstep in elem.findall("scstep"):
try:
d = {i.attrib["name"]: _vasprun_float(i.text)
for i in scstep.find("energy").findall("i")}
cur_ene = d['e_fr_energy']
min_steps = 1 if len(calculation) >= 1 else self.parameters.get("NELMIN", 5)
if len(calculation[-1]["electronic_steps"]) <= min_steps:
calculation[-1]["electronic_steps"].append(d)
else:
last_ene = calculation[-1]["electronic_steps"][-1]["e_fr_energy"]
if abs(cur_ene - last_ene) < 1.0:
calculation[-1]["electronic_steps"].append(d)
else:
calculation.append({"electronic_steps": [d]})
except AttributeError: # not all calculations have an energy
pass
calculation[-1].update(calculation[-1]["electronic_steps"][-1])
return calculation
def _parse_calculation(self, elem):
try:
istep = {i.attrib["name"]: float(i.text)
for i in elem.find("energy").findall("i")}
except AttributeError: # not all calculations have an energy
istep = {}
esteps = []
for scstep in elem.findall("scstep"):
try:
d = {i.attrib["name"]: _vasprun_float(i.text)
for i in scstep.find("energy").findall("i")}
esteps.append(d)
except AttributeError: # not all calculations have an energy
pass
try:
s = self._parse_structure(elem.find("structure"))
except AttributeError: # not all calculations have a structure
s = None
for va in elem.findall("varray"):
istep[va.attrib["name"]] = _parse_varray(va)
istep["electronic_steps"] = esteps
istep["structure"] = s
elem.clear()
return istep
def _parse_dos(self, elem):
efermi = float(elem.find("i").text)
energies = None
tdensities = {}
idensities = {}
for s in elem.find("total").find("array").find("set").findall("set"):
data = np.array(_parse_varray(s))
energies = data[:, 0]
spin = Spin.up if s.attrib["comment"] == "spin 1" else Spin.down
tdensities[spin] = data[:, 1]
idensities[spin] = data[:, 2]
pdoss = []
partial = elem.find("partial")
if partial is not None:
orbs = [ss.text for ss in partial.find("array").findall("field")]
orbs.pop(0)
lm = any(["x" in s for s in orbs])
for s in partial.find("array").find("set").findall("set"):
pdos = defaultdict(dict)
for ss in s.findall("set"):
spin = Spin.up if ss.attrib["comment"] == "spin 1" else \
Spin.down
data = np.array(_parse_varray(ss))
nrow, ncol = data.shape
for j in range(1, ncol):
if lm:
orb = Orbital(j - 1)
else:
orb = OrbitalType(j - 1)
pdos[orb][spin] = data[:, j]
pdoss.append(pdos)
elem.clear()
return Dos(efermi, energies, tdensities), \
Dos(efermi, energies, idensities), pdoss
def _parse_eigen(self, elem):
eigenvalues = defaultdict(list)
for s in elem.find("array").find("set").findall("set"):
spin = Spin.up if s.attrib["comment"] == "spin 1" else Spin.down
for ss in s.findall("set"):
eigenvalues[spin].append(_parse_varray(ss))
eigenvalues = {spin: np.array(v) for spin, v in eigenvalues.items()}
elem.clear()
return eigenvalues
def _parse_projected_eigen(self, elem):
root = elem.find("array").find("set")
proj_eigen = defaultdict(list)
for s in root.findall("set"):
spin = int(re.match(r"spin(\d+)", s.attrib["comment"]).group(1))
# Force spin to be +1 or -1
spin = Spin.up if spin == 1 else Spin.down
for kpt, ss in enumerate(s.findall("set")):
dk = []
for band, sss in enumerate(ss.findall("set")):
db = _parse_varray(sss)
dk.append(db)
proj_eigen[spin].append(dk)
proj_eigen = {spin: np.array(v) for spin, v in proj_eigen.items()}
elem.clear()
return proj_eigen
def _parse_dynmat(self, elem):
hessian = []
eigenvalues = []
eigenvectors = []
for v in elem.findall("v"):
if v.attrib["name"] == "eigenvalues":
eigenvalues = [float(i) for i in v.text.split()]
for va in elem.findall("varray"):
if va.attrib["name"] == "hessian":
for v in va.findall("v"):
hessian.append([float(i) for i in v.text.split()])
elif va.attrib["name"] == "eigenvectors":
for v in va.findall("v"):
eigenvectors.append([float(i) for i in v.text.split()])
return hessian, eigenvalues, eigenvectors
class BSVasprun(Vasprun):
"""
A highly optimized version of Vasprun that parses only eigenvalues for
bandstructures. All other properties like structures, parameters,
etc. are ignored.
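Example (an illustrative sketch; the filename is hypothetical)::
    vr = BSVasprun("vasprun.xml", parse_projected_eigen=True)
    bs = vr.get_band_structure(line_mode=True)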
"""
def __init__(self, filename, parse_projected_eigen=False,
parse_potcar_file=False, occu_tol=1e-8):
self.filename = filename
self.occu_tol = occu_tol
with zopen(filename, "rt") as f:
self.efermi = None
parsed_header = False
self.eigenvalues = None
self.projected_eigenvalues = None
for event, elem in ET.iterparse(f):
tag = elem.tag
if not parsed_header:
if tag == "generator":
self.generator = self._parse_params(elem)
elif tag == "incar":
self.incar = self._parse_params(elem)
elif tag == "kpoints":
self.kpoints, self.actual_kpoints, \
self.actual_kpoints_weights = self._parse_kpoints(
elem)
elif tag == "parameters":
self.parameters = self._parse_params(elem)
elif tag == "atominfo":
self.atomic_symbols, self.potcar_symbols = \
self._parse_atominfo(elem)
self.potcar_spec = [{"titel": p,
"hash": None} for
p in self.potcar_symbols]
parsed_header = True
elif tag == "i" and elem.attrib.get("name") == "efermi":
self.efermi = float(elem.text)
elif tag == "eigenvalues":
self.eigenvalues = self._parse_eigen(elem)
elif parse_projected_eigen and tag == "projected":
self.projected_eigenvalues = self._parse_projected_eigen(
elem)
elif tag == "structure" and elem.attrib.get("name") == \
"finalpos":
self.final_structure = self._parse_structure(elem)
self.vasp_version = self.generator["version"]
if parse_potcar_file:
self.update_potcar_spec(parse_potcar_file)
def as_dict(self):
"""
Json-serializable dict representation.
"""
d = {"vasp_version": self.vasp_version,
"has_vasp_completed": True,
"nsites": len(self.final_structure)}
comp = self.final_structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
symbols = [s.split()[1] for s in self.potcar_symbols]
symbols = [re.split(r"_", s)[0] for s in symbols]
d["is_hubbard"] = self.is_hubbard
d["hubbards"] = self.hubbards
unique_symbols = sorted(list(set(self.atomic_symbols)))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["run_type"] = self.run_type
vin = {"incar": {k: v for k, v in self.incar.items()},
"crystal": self.final_structure.as_dict(),
"kpoints": self.kpoints.as_dict()}
actual_kpts = [{"abc": list(self.actual_kpoints[i]),
"weight": self.actual_kpoints_weights[i]}
for i in range(len(self.actual_kpoints))]
vin["kpoints"]["actual_points"] = actual_kpts
vin["potcar"] = [s.split(" ")[1] for s in self.potcar_symbols]
vin["potcar_spec"] = self.potcar_spec
vin["potcar_type"] = [s.split(" ")[0] for s in self.potcar_symbols]
vin["parameters"] = {k: v for k, v in self.parameters.items()}
vin["lattice_rec"] = self.lattice_rec.as_dict()
d["input"] = vin
vout = {"crystal": self.final_structure.as_dict(),
"efermi": self.efermi}
if self.eigenvalues:
eigen = defaultdict(dict)
for spin, values in self.eigenvalues.items():
for i, v in enumerate(values):
eigen[i][str(spin)] = v
vout["eigenvalues"] = eigen
(gap, cbm, vbm, is_direct) = self.eigenvalue_band_properties
vout.update(dict(bandgap=gap, cbm=cbm, vbm=vbm,
is_gap_direct=is_direct))
if self.projected_eigenvalues:
peigen = []
for i in range(len(eigen)):
peigen.append({})
for spin, v in self.projected_eigenvalues.items():
for kpoint_index, vv in enumerate(v):
if str(spin) not in peigen[kpoint_index]:
peigen[kpoint_index][str(spin)] = vv
vout['projected_eigenvalues'] = peigen
d['output'] = vout
return jsanitize(d, strict=True)
class Outcar(MSONable):
"""
Parser for data in OUTCAR that is not available in Vasprun.xml
Note, this class works a bit differently than most of the other
VaspObjects, since the OUTCAR can be very different depending on the
type of run performed.
Creating the OUTCAR class with a filename reads "regular parameters" that
are always present.
Args:
filename (str): OUTCAR filename to parse.
.. attribute:: magnetization
Magnetization on each ion as a tuple of dict, e.g.,
({"d": 0.0, "p": 0.003, "s": 0.002, "tot": 0.005}, ... )
Note that this data is not always present. LORBIT must be set to some
other value than the default.
.. attribute:: chemical_shifts
Chemical Shift on each ion as a tuple of ChemicalShiftNotation, e.g.,
(cs1, cs2, ...)
.. attribute:: unsym_cs_tensor
Unsymmetrized Chemical Shift tensor matrixes on each ion as a list.
e.g.,
[[[sigma11, sigma12, sigma13],
[sigma21, sigma22, sigma23],
[sigma31, sigma32, sigma33]],
...
[[sigma11, sigma12, sigma13],
[sigma21, sigma22, sigma23],
[sigma31, sigma32, sigma33]]]
.. attribute:: cs_g0_contribution
G=0 contribution to chemical shift. 2D rank 3 matrix.
.. attribute:: cs_core_contribution
Core contribution to chemical shift. dict. e.g.,
{'Mg': -412.8, 'C': -200.5, 'O': -271.1}
.. attribute:: efg
Electric Field Gradient (EFG) tensor on each ion as a tuple of dict, e.g.,
({"cq": 0.1, "eta", 0.2, "nuclear_quadrupole_moment": 0.3},
{"cq": 0.7, "eta", 0.8, "nuclear_quadrupole_moment": 0.9},
...)
.. attribute:: charge
Charge on each ion as a tuple of dict, e.g.,
({"p": 0.154, "s": 0.078, "d": 0.0, "tot": 0.232}, ...)
Note that this data is not always present. LORBIT must be set to some
other value than the default.
.. attribute:: is_stopped
True if OUTCAR is from a stopped run (using STOPCAR, see Vasp Manual).
.. attribute:: run_stats
Various useful run stats as a dict including "System time (sec)",
"Total CPU time used (sec)", "Elapsed time (sec)",
"Maximum memory used (kb)", "Average memory used (kb)",
"User time (sec)".
.. attribute:: elastic_tensor
Total elastic moduli (Kbar) is given in a 6x6 array matrix.
.. attribute:: drift
Total drift for each step in eV/Atom
.. attribute:: ngf
Dimensions for the Augmentation grid
.. attribute:: sampling_radii
Size of the sampling radii in VASP for the test charges for
the electrostatic potential at each atom. Total array size is the number
of elements present in the calculation.
.. attribute:: electrostatic_potential
Average electrostatic potential at each atomic position in order
of the atoms in POSCAR.
One can then call a specific reader depending on the type of run being
performed. These are currently: read_igpar(), read_lepsilon(),
read_lcalcpol(), read_core_state_eigen() and read_avg_core_poten().
See the documentation of those methods for more details.
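Example (an illustrative sketch; the filename is hypothetical)::
    outcar = Outcar("OUTCAR")
    print(outcar.final_energy, outcar.run_stats["Elapsed time (sec)"])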
Authors: Rickard Armiento, Shyue Ping Ong
"""
def __init__(self, filename):
self.filename = filename
self.is_stopped = False
# data from end of OUTCAR
charge = []
mag_x = []
mag_y = []
mag_z = []
header = []
run_stats = {}
total_mag = None
nelect = None
efermi = None
total_energy = None
time_patt = re.compile(r"\((sec|kb)\)")
efermi_patt = re.compile(r"E-fermi\s*:\s*(\S+)")
nelect_patt = re.compile(r"number of electron\s+(\S+)\s+magnetization")
mag_patt = re.compile(r"number of electron\s+\S+\s+magnetization\s+("
r"\S+)")
toten_pattern = re.compile(r"free energy TOTEN\s+=\s+([\d\-\.]+)")
all_lines = []
for line in reverse_readfile(self.filename):
clean = line.strip()
all_lines.append(clean)
if clean.find("soft stop encountered! aborting job") != -1:
self.is_stopped = True
else:
if time_patt.search(line):
tok = line.strip().split(":")
run_stats[tok[0].strip()] = float(tok[1].strip())
continue
m = efermi_patt.search(clean)
if m:
try:
# try-catch because VASP sometimes prints
# 'E-fermi: ******** XC(G=0): -6.1327
# alpha+bet : -1.8238'
efermi = float(m.group(1))
continue
except ValueError:
efermi = None
continue
m = nelect_patt.search(clean)
if m:
nelect = float(m.group(1))
m = mag_patt.search(clean)
if m:
total_mag = float(m.group(1))
if total_energy is None:
m = toten_pattern.search(clean)
if m:
total_energy = float(m.group(1))
if all([nelect, total_mag is not None, efermi is not None,
run_stats]):
break
# For single atom systems, VASP doesn't print a total line, so
# reverse parsing is very difficult
read_charge = False
read_mag_x = False
read_mag_y = False # for SOC calculations only
read_mag_z = False
all_lines.reverse()
for clean in all_lines:
if read_charge or read_mag_x or read_mag_y or read_mag_z:
if clean.startswith("# of ion"):
header = re.split(r"\s{2,}", clean.strip())
header.pop(0)
else:
m = re.match(r"\s*(\d+)\s+(([\d\.\-]+)\s+)+", clean)
if m:
toks = [float(i)
for i in re.findall(r"[\d\.\-]+", clean)]
toks.pop(0)
if read_charge:
charge.append(dict(zip(header, toks)))
elif read_mag_x:
mag_x.append(dict(zip(header, toks)))
elif read_mag_y:
mag_y.append(dict(zip(header, toks)))
elif read_mag_z:
mag_z.append(dict(zip(header, toks)))
elif clean.startswith('tot'):
read_charge = False
read_mag_x = False
read_mag_y = False
read_mag_z = False
if clean == "total charge":
charge = []
read_charge = True
read_mag_x, read_mag_y, read_mag_z = False, False, False
elif clean == "magnetization (x)":
mag_x = []
read_mag_x = True
read_charge, read_mag_y, read_mag_z = False, False, False
elif clean == "magnetization (y)":
mag_y = []
read_mag_y = True
read_charge, read_mag_x, read_mag_z = False, False, False
elif clean == "magnetization (z)":
mag_z = []
read_mag_z = True
read_charge, read_mag_x, read_mag_y = False, False, False
# merge x, y and z components of magmoms if present (SOC calculation)
if mag_y and mag_z:
# TODO: detect spin axis
mag = []
for idx in range(len(mag_x)):
mag.append({
key: Magmom([mag_x[idx][key], mag_y[idx][key], mag_z[idx][key]])
for key in mag_x[0].keys()
})
else:
mag = mag_x
# data from beginning of OUTCAR
run_stats['cores'] = 0
with zopen(filename, "rt") as f:
for line in f:
if "running" in line:
run_stats['cores'] = line.split()[2]
break
self.run_stats = run_stats
self.magnetization = tuple(mag)
self.charge = tuple(charge)
self.efermi = efermi
self.nelect = nelect
self.total_mag = total_mag
self.final_energy = total_energy
self.data = {}
# Read the drift:
self.read_pattern({
"drift": r"total drift:\s+([\.\-\d]+)\s+([\.\-\d]+)\s+([\.\-\d]+)"},
terminate_on_match=False,
postprocess=float)
self.drift = self.data.get('drift', [])
# Check if calculation is spin polarized
self.spin = False
self.read_pattern({'spin': 'ISPIN = 2'})
if self.data.get('spin', []):
    self.spin = True
# Check if calculation is noncollinear
self.noncollinear = False
self.read_pattern({'noncollinear': 'LNONCOLLINEAR = T'})
if self.data.get('noncollinear', []):
    self.noncollinear = True
# Check to see if LEPSILON is true and read piezo data if so
self.lepsilon = False
self.read_pattern({'epsilon': 'LEPSILON= T'})
if self.data.get('epsilon', []):
self.lepsilon = True
self.read_lepsilon()
self.read_lepsilon_ionic()
# Check to see if LCALCPOL is true and read polarization data if so
self.lcalcpol = False
self.read_pattern({'calcpol': 'LCALCPOL = T'})
if self.data.get('calcpol', []):
self.lcalcpol = True
self.read_lcalcpol()
self.read_pseudo_zval()
# Read electrostatic potential
self.read_pattern({
'electrostatic': r"average \(electrostatic\) potential at core"})
if self.data.get('electrostatic', []):
self.read_electrostatic_potential()
def read_pattern(self, patterns, reverse=False, terminate_on_match=False,
postprocess=str):
"""
General pattern reading. Uses monty's regrep method. Takes the same
arguments.
Args:
patterns (dict): A dict of patterns, e.g.,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"}.
reverse (bool): Read files in reverse. Defaults to false. Useful for
large files, esp OUTCARs, especially when used with
terminate_on_match.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
Renders accessible:
Any attribute in patterns. For example,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"} will set the
value of self.data["energy"] = [[-1234], [-3453], ...], to the
results from regex and postprocess. Note that the returned values
are lists of lists, because you can grep multiple items on one line.
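Example (illustrative, reusing the pattern above; ``outcar`` is a
parsed Outcar)::
    outcar.read_pattern(
        {"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"},
        postprocess=float)
    energies = [m[0] for m in outcar.data["energy"]]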
"""
matches = regrep(self.filename, patterns, reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=postprocess)
for k in patterns.keys():
self.data[k] = [i[0] for i in matches.get(k, [])]
def read_table_pattern(self, header_pattern, row_pattern, footer_pattern,
postprocess=str, attribute_name=None,
last_one_only=True):
"""
Parse table-like data. A table composes of three parts: header,
main body, footer. All the data matches "row pattern" in the main body
will be returned.
Args:
header_pattern (str): The regular expression pattern matches the
table header. This pattern should match all the text
immediately before the main body of the table. For multiple
sections table match the text until the section of
interest. MULTILINE and DOTALL options are enforced, as a
result, the "." meta-character will also match "\n" in this
section.
row_pattern (str): The regular expression matches a single line in
the table. Capture interested field using regular expression
groups.
footer_pattern (str): The regular expression matches the end of the
table. E.g. a long dash line.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
attribute_name (str): Name of this table. If present, the parsed data
    will be attached to self.data, e.g. self.data["efg"] = [...]
last_one_only (bool): All the tables will be parsed, if this option
is set to True, only the last table will be returned. The
enclosing list will be removed. i.e. Only a single table will
be returned. Default to be True.
Returns:
List of tables. 1) A table is a list of rows. 2) A row is either a list
of attribute values, in case the capturing groups are defined without
names in row_pattern, or a dict, in case named capturing groups are
defined by row_pattern.
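Example (an illustrative sketch for a three-column table of floats
delimited by dashed lines; the patterns are hypothetical)::
    rows = outcar.read_table_pattern(
        header_pattern=r"-{10,}",
        row_pattern=r"\\s+".join([r"([\\d\\-.]+)"] * 3),
        footer_pattern=r"-{10,}",
        postprocess=float)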
"""
with zopen(self.filename, 'rt') as f:
text = f.read()
table_pattern_text = header_pattern + r"\s*^(?P<table_body>(?:\s+" + \
row_pattern + r")+)\s+" + footer_pattern
table_pattern = re.compile(table_pattern_text, re.MULTILINE | re.DOTALL)
rp = re.compile(row_pattern)
tables = []
for mt in table_pattern.finditer(text):
table_body_text = mt.group("table_body")
table_contents = []
for line in table_body_text.split("\n"):
ml = rp.search(line)
d = ml.groupdict()
if len(d) > 0:
processed_line = {k: postprocess(v) for k, v in d.items()}
else:
processed_line = [postprocess(v) for v in ml.groups()]
table_contents.append(processed_line)
tables.append(table_contents)
if last_one_only:
retained_data = tables[-1]
else:
retained_data = tables
if attribute_name is not None:
self.data[attribute_name] = retained_data
return retained_data
def read_electrostatic_potential(self):
"""
Parses the electrostatic potential for the last ionic step.
"""
pattern = {"ngf": r"\s+dimension x,y,z NGXF=\s+([\.\-\d]+)\sNGYF=\s+([\.\-\d]+)\sNGZF=\s+([\.\-\d]+)"}
self.read_pattern(pattern, postprocess=int)
self.ngf = self.data.get("ngf", [[]])[0]
pattern = {"radii": r"the test charge radii are((?:\s+[\.\-\d]+)+)"}
self.read_pattern(pattern, reverse=True, terminate_on_match=True, postprocess=str)
self.sampling_radii = [float(f) for f in self.data["radii"][0][0].split()]
header_pattern = r"\(the norm of the test charge is\s+[\.\-\d]+\)"
table_pattern = r"((?:\s+\d+\s?[\.\-\d]+)+)"
footer_pattern = r"\s+E-fermi :"
pots = self.read_table_pattern(header_pattern, table_pattern, footer_pattern)
pots = "".join(itertools.chain.from_iterable(pots))
pots = re.findall(r"\s+\d+\s?([\.\-\d]+)+", pots)
pots = [float(f) for f in pots]
self.electrostatic_potential = pots
def read_freq_dielectric(self):
"""
Parses the frequency dependent dielectric function (obtained with
LOPTICS). Frequencies (in eV) are in self.frequencies, and dielectric
tensor function is given as self.dielectric_tensor_function.
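Example (illustrative; requires an OUTCAR from a LOPTICS run)::
    outcar.read_freq_dielectric()
    omega = outcar.frequencies  # eV
    eps = outcar.dielectric_tensor_function  # complex, (nfreq, 3, 3)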
"""
header_pattern = r"\s+frequency dependent\s+IMAGINARY " \
r"DIELECTRIC FUNCTION \(independent particle, " \
r"no local field effects\)(\sdensity-density)*$"
row_pattern = r"\s+".join([r"([\.\-\d]+)"] * 7)
lines = []
for l in reverse_readfile(self.filename):
lines.append(l)
if re.match(header_pattern, l):
break
freq = []
data = {"REAL": [], "IMAGINARY": []}
lines.reverse()
count = 0
component = "IMAGINARY"
for l in lines[3:]: # Skip the preamble.
if re.match(row_pattern, l.strip()):
toks = l.strip().split()
if component == "IMAGINARY":
freq.append(float(toks[0]))
xx, yy, zz, xy, yz, xz = [float(t) for t in toks[1:]]
matrix = [[xx, xy, xz], [xy, yy, yz], [xz, yz, zz]]
data[component].append(matrix)
elif re.match(r"\s*-+\s*", l):
count += 1
if count == 1:
component = "REAL"
elif count == 2:
break
self.frequencies = np.array(freq)
self.dielectric_tensor_function = np.array(data["REAL"]) + \
1j * np.array(data["IMAGINARY"])
def read_chemical_shifts(self):
"""
Parse the NMR chemical shifts data. Only the second part, "absolute, valence and core",
will be parsed, and only the three rightmost fields (ISO_SHIFT, SPAN, SKEW) will be retrieved.
Returns:
List of chemical shifts in the order of atoms from the OUTCAR. Maryland notation is adopted.
"""
header_pattern = r"\s+CSA tensor \(J\. Mason, Solid State Nucl\. Magn\. Reson\. 2, " \
r"285 \(1993\)\)\s+" \
r"\s+-{50,}\s+" \
r"\s+EXCLUDING G=0 CONTRIBUTION\s+INCLUDING G=0 CONTRIBUTION\s+" \
r"\s+-{20,}\s+-{20,}\s+" \
r"\s+ATOM\s+ISO_SHIFT\s+SPAN\s+SKEW\s+ISO_SHIFT\s+SPAN\s+SKEW\s+" \
r"-{50,}\s*$"
first_part_pattern = r"\s+\(absolute, valence only\)\s+$"
valence_and_core_pattern = r".+?\(absolute, valence and core\)\s+$"
row_pattern = r"\d+(?:\s+[-]?\d+\.\d+){3}\s+" + r'\s+'.join(
[r"([-]?\d+\.\d+)"] * 3)
footer_pattern = r"-{50,}\s*$"
h1 = header_pattern + first_part_pattern
cs_valence_only = self.read_table_pattern(
h1, row_pattern, footer_pattern, postprocess=float,
last_one_only=True)
h2 = header_pattern + valence_and_core_pattern
cs_valence_and_core = self.read_table_pattern(
h2, row_pattern, footer_pattern, postprocess=float,
last_one_only=True)
all_cs = {}
for name, cs_table in [["valence_only", cs_valence_only],
["valence_and_core", cs_valence_and_core]]:
cs = []
for sigma_iso, omega, kappa in cs_table:
tensor = NMRChemicalShiftNotation.from_maryland_notation(sigma_iso, omega, kappa)
cs.append(tensor)
all_cs[name] = tuple(cs)
self.data["chemical_shifts"] = all_cs
def read_cs_g0_contribution(self):
"""
Parse the G0 contribution of NMR chemical shift.
Returns:
G0 contribution matrix as list of list.
"""
header_pattern = r'^\s+G\=0 CONTRIBUTION TO CHEMICAL SHIFT \(field along BDIR\)\s+$\n' \
r'^\s+-{50,}$\n' \
r'^\s+BDIR\s+X\s+Y\s+Z\s*$\n' \
r'^\s+-{50,}\s*$\n'
row_pattern = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 3)
footer_pattern = r'\s+-{50,}\s*$'
self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float,
last_one_only=True, attribute_name="cs_g0_contribution")
return self.data["cs_g0_contribution"]
def read_cs_core_contribution(self):
"""
Parse the core contribution of NMR chemical shift.
Returns:
    Dict of core contributions to the chemical shift, e.g.,
    {'Mg': -412.8, 'C': -200.5, 'O': -271.1}.
"""
"""
header_pattern = r'^\s+Core NMR properties\s*$\n' \
r'\n' \
r'^\s+typ\s+El\s+Core shift \(ppm\)\s*$\n' \
r'^\s+-{20,}$\n'
row_pattern = r'\d+\s+(?P<element>[A-Z][a-z]?\w?)\s+(?P<shift>[-]?\d+\.\d+)'
footer_pattern = r'\s+-{20,}\s*$'
self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=str,
last_one_only=True, attribute_name="cs_core_contribution")
core_contrib = {d['element']: float(d['shift'])
for d in self.data["cs_core_contribution"]}
self.data["cs_core_contribution"] = core_contrib
return self.data["cs_core_contribution"]
def read_cs_raw_symmetrized_tensors(self):
"""
Parse the matrix form of the NMR tensors before they are corrected
and tabulated.
Returns:
    List of unsymmetrized tensors in the order of atoms.
"""
header_pattern = r"\s+-{50,}\s+" \
r"\s+Absolute Chemical Shift tensors\s+" \
r"\s+-{50,}$"
first_part_pattern = r"\s+UNSYMMETRIZED TENSORS\s+$"
row_pattern = r"\s+".join([r"([-]?\d+\.\d+)"]*3)
unsym_footer_pattern = r"^\s+SYMMETRIZED TENSORS\s+$"
with zopen(self.filename, 'rt') as f:
text = f.read()
unsym_table_pattern_text = header_pattern + first_part_pattern + \
r"(?P<table_body>.+)" + unsym_footer_pattern
table_pattern = re.compile(unsym_table_pattern_text,
re.MULTILINE | re.DOTALL)
rp = re.compile(row_pattern)
m = table_pattern.search(text)
if m:
table_text = m.group("table_body")
micro_header_pattern = r"ion\s+\d+"
micro_table_pattern_text = micro_header_pattern + \
r"\s*^(?P<table_body>(?:\s*" + \
row_pattern + r")+)\s+"
micro_table_pattern = re.compile(micro_table_pattern_text,
re.MULTILINE | re.DOTALL)
unsym_tensors = []
for mt in micro_table_pattern.finditer(table_text):
table_body_text = mt.group("table_body")
tensor_matrix = []
for line in table_body_text.rstrip().split("\n"):
ml = rp.search(line)
processed_line = [float(v) for v in ml.groups()]
tensor_matrix.append(processed_line)
unsym_tensors.append(tensor_matrix)
self.data["unsym_cs_tensor"] = unsym_tensors
return unsym_tensors
else:
raise ValueError("NMR UNSYMMETRIZED TENSORS is not found")
def read_nmr_efg(self):
"""
Parse the NMR Electric Field Gradient tensors.
Returns:
Electric Field Gradient tensors as a list of dict in the order of atoms from OUTCAR.
Each dict key/value pair corresponds to a component of the tensors.
"""
header_pattern = r'^\s+NMR quadrupolar parameters\s+$\n' \
r'^\s+Cq : quadrupolar parameter\s+Cq=e[*]Q[*]V_zz/h$\n' \
r'^\s+eta: asymmetry parameters\s+\(V_yy - V_xx\)/ V_zz$\n' \
r'^\s+Q : nuclear electric quadrupole moment in mb \(millibarn\)$\n' \
r'^-{50,}$\n' \
r'^\s+ion\s+Cq\(MHz\)\s+eta\s+Q \(mb\)\s+$\n' \
r'^-{50,}\s*$\n'
row_pattern = r'\d+\s+(?P<cq>[-]?\d+\.\d+)\s+(?P<eta>[-]?\d+\.\d+)\s+' \
r'(?P<nuclear_quadrupole_moment>[-]?\d+\.\d+)'
footer_pattern = r'-{50,}\s*$'
self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float,
last_one_only=True, attribute_name="efg")
def read_elastic_tensor(self):
"""
Parse the elastic tensor data.
Returns:
6x6 array corresponding to the elastic tensor from the OUTCAR.
"""
header_pattern = r"TOTAL ELASTIC MODULI \(kBar\)\s+"\
r"Direction\s+([X-Z][X-Z]\s+)+"\
r"\-+"
row_pattern = r"[X-Z][X-Z]\s+"+r"\s+".join([r"(\-*[\.\d]+)"] * 6)
footer_pattern = r"\-+"
et_table = self.read_table_pattern(header_pattern, row_pattern,
footer_pattern, postprocess=float)
self.data["elastic_tensor"] = et_table
def read_piezo_tensor(self):
"""
Parse the piezo tensor data
"""
header_pattern = r"PIEZOELECTRIC TENSOR for field in x, y, " \
r"z\s+\(C/m\^2\)\s+([X-Z][X-Z]\s+)+\-+"
row_pattern = r"[x-z]\s+"+r"\s+".join([r"(\-*[\.\d]+)"] * 6)
footer_pattern = r"BORN EFFECTIVE"
pt_table = self.read_table_pattern(header_pattern, row_pattern,
footer_pattern, postprocess=float)
self.data["piezo_tensor"] = pt_table
def read_corrections(self, reverse=True, terminate_on_match=True):
patterns = {
"dipol_quadrupol_correction": r"dipol\+quadrupol energy "
r"correction\s+([\d\-\.]+)"
}
self.read_pattern(patterns, reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=float)
self.data["dipol_quadrupol_correction"] = self.data["dipol_quadrupol_correction"][0][0]
def read_neb(self, reverse=True, terminate_on_match=True):
"""
Reads NEB data. This only works with OUTCARs from both normal
VASP NEB calculations and the CI-NEB method implemented by
Henkelman et al.
Args:
    reverse (bool): Read files in reverse. Useful for large files,
        esp OUTCARs, especially when used with terminate_on_match.
        Defaults to True here since we usually want only the final
        value.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern. Defaults to True here
since we usually want only the final value.
Renders accessible:
tangent_force - Final tangent force.
energy - Final energy.
These can be accessed under Outcar.data[key].
"""
"""
patterns = {
"energy": r"energy\(sigma->0\)\s+=\s+([\d\-\.]+)",
"tangent_force": r"(NEB: projections on to tangent \(spring, REAL\)\s+\S+|tangential force \(eV/A\))\s+([\d\-\.]+)"
}
self.read_pattern(patterns, reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=str)
self.data["energy"] = float(self.data["energy"][0][0])
if self.data.get("tangent_force"):
self.data["tangent_force"] = float(
self.data["tangent_force"][0][1])
def read_igpar(self):
"""
Renders accessible:
er_ev = e<r>_ev (dictionary with Spin.up/Spin.down as keys)
er_bp = e<r>_bp (dictionary with Spin.up/Spin.down as keys)
er_ev_tot = spin up + spin down summed
er_bp_tot = spin up + spin down summed
p_elc = spin up + spin down summed
p_ion = spin up + spin down summed
(See VASP section "LBERRY, IGPAR, NPPSTR, DIPOL tags" for info on
what these are).
"""
# variables to be filled
self.er_ev = {} # will be dict (Spin.up/down) of array(3*float)
self.er_bp = {}  # will be dict (Spin.up/down) of array(3*float)
self.er_ev_tot = None # will be array(3*float)
self.er_bp_tot = None # will be array(3*float)
self.p_elc = None
self.p_ion = None
try:
search = []
# Nonspin cases
def er_ev(results, match):
results.er_ev[Spin.up] = np.array(map(float,
match.groups()[1:4])) / 2
results.er_ev[Spin.down] = results.er_ev[Spin.up]
results.context = 2
search.append([r"^ *e<r>_ev=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None, er_ev])
def er_bp(results, match):
results.er_bp[Spin.up] = np.array([float(match.group(i))
for i in range(1, 4)]) / 2
results.er_bp[Spin.down] = results.er_bp[Spin.up]
search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
lambda results, line: results.context == 2, er_bp])
# Spin cases
def er_ev_up(results, match):
results.er_ev[Spin.up] = np.array([float(match.group(i))
for i in range(1, 4)])
results.context = Spin.up
search.append([r"^.*Spin component 1 *e<r>_ev=\( *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None, er_ev_up])
def er_bp_up(results, match):
results.er_bp[Spin.up] = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
lambda results,
line: results.context == Spin.up, er_bp_up])
def er_ev_dn(results, match):
results.er_ev[Spin.down] = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
results.context = Spin.down
search.append([r"^.*Spin component 2 *e<r>_ev=\( *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None, er_ev_dn])
def er_bp_dn(results, match):
results.er_bp[Spin.down] = np.array([float(match.group(i))
for i in range(1, 4)])
search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
lambda results,
line: results.context == Spin.down, er_bp_dn])
# Always present spin/non-spin
def p_elc(results, match):
results.p_elc = np.array([float(match.group(i))
for i in range(1, 4)])
search.append([r"^.*Total electronic dipole moment: "
r"*p\[elc\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)", None, p_elc])
def p_ion(results, match):
results.p_ion = np.array([float(match.group(i))
for i in range(1, 4)])
search.append([r"^.*ionic dipole moment: "
r"*p\[ion\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)", None, p_ion])
self.context = None
self.er_ev = {Spin.up: None, Spin.down: None}
self.er_bp = {Spin.up: None, Spin.down: None}
micro_pyawk(self.filename, search, self)
if self.er_ev[Spin.up] is not None and \
self.er_ev[Spin.down] is not None:
self.er_ev_tot = self.er_ev[Spin.up] + self.er_ev[Spin.down]
if self.er_bp[Spin.up] is not None and \
self.er_bp[Spin.down] is not None:
self.er_bp_tot = self.er_bp[Spin.up] + self.er_bp[Spin.down]
except Exception:
self.er_ev_tot = None
self.er_bp_tot = None
raise Exception("IGPAR OUTCAR could not be parsed.")
def read_lepsilon(self):
# variables to be filled
try:
search = []
def dielectric_section_start(results, match):
results.dielectric_index = -1
search.append([r"MACROSCOPIC STATIC DIELECTRIC TENSOR \(", None,
dielectric_section_start])
def dielectric_section_start2(results, match):
results.dielectric_index = 0
search.append(
[r"-------------------------------------",
lambda results, line: results.dielectric_index == -1,
dielectric_section_start2])
def dielectric_data(results, match):
results.dielectric_tensor[results.dielectric_index, :] = \
np.array([float(match.group(i)) for i in range(1, 4)])
results.dielectric_index += 1
search.append(
[r"^ *([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) *$",
lambda results, line: results.dielectric_index >= 0
if results.dielectric_index is not None
else None,
dielectric_data])
def dielectric_section_stop(results, match):
results.dielectric_index = None
search.append(
[r"-------------------------------------",
lambda results, line: results.dielectric_index >= 1
if results.dielectric_index is not None
else None,
dielectric_section_stop])
self.dielectric_index = None
self.dielectric_tensor = np.zeros((3, 3))
def piezo_section_start(results, match):
results.piezo_index = 0
search.append([r"PIEZOELECTRIC TENSOR for field in x, y, z "
r"\(C/m\^2\)",
None, piezo_section_start])
def piezo_data(results, match):
results.piezo_tensor[results.piezo_index, :] = \
np.array([float(match.group(i)) for i in range(1, 7)])
results.piezo_index += 1
search.append(
[r"^ *[xyz] +([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
r" +([-0-9.Ee+]+) *([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
r" +([-0-9.Ee+]+)*$",
lambda results, line: results.piezo_index >= 0
if results.piezo_index is not None
else None,
piezo_data])
def piezo_section_stop(results, match):
results.piezo_index = None
search.append(
[r"-------------------------------------",
lambda results, line: results.piezo_index >= 1
if results.piezo_index is not None
else None,
piezo_section_stop])
self.piezo_index = None
self.piezo_tensor = np.zeros((3, 6))
def born_section_start(results, match):
results.born_ion = -1
search.append([r"BORN EFFECTIVE CHARGES " +
r"\(in e, cummulative output\)",
None, born_section_start])
def born_ion(results, match):
results.born_ion = int(match.group(1)) - 1
results.born.append(np.zeros((3, 3)))
search.append([r"ion +([0-9]+)", lambda results,
line: results.born_ion is not None, born_ion])
def born_data(results, match):
results.born[results.born_ion][int(match.group(1)) - 1, :] = \
np.array([float(match.group(i)) for i in range(2, 5)])
search.append(
[r"^ *([1-3]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+)$",
lambda results, line: results.born_ion >= 0
if results.born_ion is not None
else results.born_ion,
born_data])
def born_section_stop(results, match):
results.born_index = None
search.append(
[r"-------------------------------------",
lambda results, line: results.born_ion >= 1
if results.born_ion is not None
else results.born_ion,
born_section_stop])
self.born_ion = None
self.born = []
micro_pyawk(self.filename, search, self)
self.born = np.array(self.born)
self.dielectric_tensor = self.dielectric_tensor.tolist()
self.piezo_tensor = self.piezo_tensor.tolist()
except Exception:
raise Exception("LEPSILON OUTCAR could not be parsed.")
def read_lepsilon_ionic(self):
# variables to be filled
try:
search = []
def dielectric_section_start(results, match):
results.dielectric_ionic_index = -1
search.append([r"MACROSCOPIC STATIC DIELECTRIC TENSOR IONIC", None,
dielectric_section_start])
def dielectric_section_start2(results, match):
results.dielectric_ionic_index = 0
search.append(
[r"-------------------------------------",
lambda results, line: results.dielectric_ionic_index == -1
if results.dielectric_ionic_index is not None
else results.dielectric_ionic_index,
dielectric_section_start2])
def dielectric_data(results, match):
results.dielectric_ionic_tensor[results.dielectric_ionic_index, :] = \
np.array([float(match.group(i)) for i in range(1, 4)])
results.dielectric_ionic_index += 1
search.append(
[r"^ *([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) *$",
lambda results, line: results.dielectric_ionic_index >= 0
if results.dielectric_ionic_index is not None
else results.dielectric_ionic_index,
dielectric_data])
def dielectric_section_stop(results, match):
results.dielectric_ionic_index = None
search.append(
[r"-------------------------------------",
lambda results, line: results.dielectric_ionic_index >= 1
if results.dielectric_ionic_index is not None
else results.dielectric_ionic_index,
dielectric_section_stop])
self.dielectric_ionic_index = None
self.dielectric_ionic_tensor = np.zeros((3, 3))
def piezo_section_start(results, match):
results.piezo_ionic_index = 0
search.append([r"PIEZOELECTRIC TENSOR IONIC CONTR for field in "
r"x, y, z ",
None, piezo_section_start])
def piezo_data(results, match):
results.piezo_ionic_tensor[results.piezo_ionic_index, :] = \
np.array([float(match.group(i)) for i in range(1, 7)])
results.piezo_ionic_index += 1
search.append(
[r"^ *[xyz] +([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
r" +([-0-9.Ee+]+) *([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
r" +([-0-9.Ee+]+)*$",
lambda results, line: results.piezo_ionic_index >= 0
if results.piezo_ionic_index is not None
else results.piezo_ionic_index,
piezo_data])
def piezo_section_stop(results, match):
results.piezo_ionic_index = None
search.append(
["-------------------------------------",
lambda results, line: results.piezo_ionic_index >= 1
if results.piezo_ionic_index is not None
else results.piezo_ionic_index,
piezo_section_stop])
self.piezo_ionic_index = None
self.piezo_ionic_tensor = np.zeros((3, 6))
micro_pyawk(self.filename, search, self)
self.dielectric_ionic_tensor = self.dielectric_ionic_tensor.tolist()
self.piezo_ionic_tensor = self.piezo_ionic_tensor.tolist()
except Exception:
raise Exception(
"ionic part of LEPSILON OUTCAR could not be parsed.")
def read_lcalcpol(self):
# variables to be filled
self.p_elec = None
self.p_sp1 = None
self.p_sp2 = None
self.p_ion = None
try:
search = []
# Always present spin/non-spin
def p_elec(results, match):
results.p_elec = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^.*Total electronic dipole moment: "
r"*p\[elc\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None, p_elec])
# If spin-polarized (and not noncollinear)
# save spin-polarized electronic values
if self.spin and not self.noncollinear:
def p_sp1(results, match):
results.p_sp1 = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^.*p\[sp1\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None, p_sp1])
def p_sp2(results, match):
results.p_sp2 = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^.*p\[sp2\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None, p_sp2])
def p_ion(results, match):
results.p_ion = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^.*Ionic dipole moment: *p\[ion\]="
r"\( *([-0-9.Ee+]*)"
r" *([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None, p_ion])
micro_pyawk(self.filename, search, self)
except Exception:
raise Exception("LCALCPOL OUTCAR could not be parsed.")
def read_pseudo_zval(self):
"""
Create pseudopotential ZVAL dictionary.
"""
try:
def poscar_line(results, match):
poscar_line = match.group(1)
results.poscar_line = re.findall(r'[A-Z][a-z]?', poscar_line)
def zvals(results, match):
zvals = match.group(1)
results.zvals = map(float, re.findall(r'-?\d+\.\d*', zvals))
search = []
search.append([r'^.*POSCAR.*=(.*)', None, poscar_line])
search.append([r'^\s+ZVAL.*=(.*)', None, zvals])
micro_pyawk(self.filename, search, self)
zval_dict = {}
for x,y in zip(self.poscar_line, self.zvals):
zval_dict.update({x:y})
self.zval_dict = zval_dict
# Clean-up
            del self.poscar_line
            del self.zvals
        except Exception:
            raise Exception("ZVAL dict could not be parsed.")
def read_core_state_eigen(self):
"""
Read the core state eigenenergies at each ionic step.
        Returns:
            A list of dicts, one per atom, such as [{"AO": [core state eig]}].
            The core state eigenenergy list for each AO spans all ionic
            steps.
        Example:
            The core state eigenenergy of the 2s AO of the 6th atom of the
            structure at the last ionic step is [5]["2s"][-1]
"""
with zopen(self.filename, "rt") as foutcar:
line = foutcar.readline()
while line != "":
line = foutcar.readline()
if "NIONS =" in line:
natom = int(line.split("NIONS =")[1])
cl = [defaultdict(list) for i in range(natom)]
if "the core state eigen" in line:
iat = -1
while line != "":
line = foutcar.readline()
# don't know number of lines to parse without knowing
# specific species, so stop parsing when we reach
# "E-fermi" instead
if "E-fermi" in line:
break
data = line.split()
# data will contain odd number of elements if it is
# the start of a new entry, or even number of elements
# if it continues the previous entry
if len(data) % 2 == 1:
iat += 1 # started parsing a new ion
data = data[1:] # remove element with ion number
for i in range(0, len(data), 2):
cl[iat][data[i]].append(float(data[i + 1]))
return cl
def read_avg_core_poten(self):
"""
Read the core potential at each ionic step.
Returns:
A list for each ionic step containing a list of the average core
potentials for each atom: [[avg core pot]].
Example:
The average core potential of the 2nd atom of the structure at the
last ionic step is: [-1][1]
"""
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a = iter(iterable)
return zip(a, a)
with zopen(self.filename, "rt") as foutcar:
line = foutcar.readline()
aps = []
while line != "":
line = foutcar.readline()
if "the norm of the test charge is" in line:
ap = []
while line != "":
line = foutcar.readline()
# don't know number of lines to parse without knowing
# specific species, so stop parsing when we reach
# "E-fermi" instead
if "E-fermi" in line:
aps.append(ap)
break
data = line.split()
# the average core potentials of up to 5 elements are
# given per line
for i, pot in pairwise(data):
ap.append(float(pot))
return aps
def as_dict(self):
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__, "efermi": self.efermi,
"run_stats": self.run_stats, "magnetization": self.magnetization,
"charge": self.charge, "total_magnetization": self.total_mag,
"nelect": self.nelect, "is_stopped": self.is_stopped,
"drift": self.drift, "ngf": self.ngf,
"sampling_radii": self.sampling_radii,
"electrostatic_potential": self.electrostatic_potential}
if self.lepsilon:
d.update({'piezo_tensor': self.piezo_tensor,
'piezo_ionic_tensor': self.piezo_ionic_tensor,
'dielectric_tensor': self.dielectric_tensor,
'dielectric_ionic_tensor': self.dielectric_ionic_tensor,
'born_ion': self.born_ion,
'born': self.born})
if self.lcalcpol:
d.update({'p_elec': self.p_elec,
'p_ion': self.p_ion})
if self.spin and not self.noncollinear:
d.update({'p_sp1': self.p_sp1,
'p_sp2': self.p_sp2})
d.update({'zval_dict': self.zval_dict})
return d
def read_fermi_contact_shift(self):
'''
output example:
Fermi contact (isotropic) hyperfine coupling parameter (MHz)
-------------------------------------------------------------
ion A_pw A_1PS A_1AE A_1c A_tot
-------------------------------------------------------------
1 -0.002 -0.002 -0.051 0.000 -0.052
2 -0.002 -0.002 -0.051 0.000 -0.052
3 0.056 0.056 0.321 -0.048 0.321
-------------------------------------------------------------
, which corresponds to
[[-0.002, -0.002, -0.051, 0.0, -0.052],
[-0.002, -0.002, -0.051, 0.0, -0.052],
[0.056, 0.056, 0.321, -0.048, 0.321]] from 'fch' data
'''
# Fermi contact (isotropic) hyperfine coupling parameter (MHz)
header_pattern1 = r"\s*Fermi contact \(isotropic\) hyperfine coupling parameter \(MHz\)\s+" \
r"\s*\-+" \
r"\s*ion\s+A_pw\s+A_1PS\s+A_1AE\s+A_1c\s+A_tot\s+" \
r"\s*\-+"
row_pattern1 = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 5)
footer_pattern = r"\-+"
fch_table = self.read_table_pattern(header_pattern1, row_pattern1,
footer_pattern, postprocess=float,
last_one_only=True)
# Dipolar hyperfine coupling parameters (MHz)
header_pattern2 = r"\s*Dipolar hyperfine coupling parameters \(MHz\)\s+" \
r"\s*\-+" \
r"\s*ion\s+A_xx\s+A_yy\s+A_zz\s+A_xy\s+A_xz\s+A_yz\s+" \
r"\s*\-+"
row_pattern2 = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 6)
dh_table = self.read_table_pattern(header_pattern2, row_pattern2,
footer_pattern, postprocess=float,
last_one_only=True)
# Total hyperfine coupling parameters after diagonalization (MHz)
header_pattern3 = r"\s*Total hyperfine coupling parameters after diagonalization \(MHz\)\s+" \
r"\s*\(convention: \|A_zz\| > \|A_xx\| > \|A_yy\|\)\s+" \
r"\s*\-+" \
r"\s*ion\s+A_xx\s+A_yy\s+A_zz\s+asymmetry \(A_yy - A_xx\)/ A_zz\s+" \
r"\s*\-+"
row_pattern3 = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 4)
th_table = self.read_table_pattern(header_pattern3, row_pattern3,
footer_pattern, postprocess=float,
last_one_only=True)
fc_shift_table = {'fch': fch_table, 'dh': dh_table, 'th': th_table}
self.data["fermi_contact_shift"] = fc_shift_table
class VolumetricData(object):
"""
Simple volumetric object for reading LOCPOT and CHGCAR type files.
.. attribute:: structure
Structure associated with the Volumetric Data object
    .. attribute:: is_spin_polarized
        True if run is spin polarized
    .. attribute:: dim
        Tuple of dimensions of volumetric grid in each direction (nx, ny, nz).
    .. attribute:: data
        Actual data as a dict of {string: np.array}. The strings are "total"
        and "diff", in accordance with the output format of vasp LOCPOT and
        CHGCAR files where the total spin density is written first, followed
        by the difference spin density.
.. attribute:: ngridpts
Total number of grid points in volumetric data.
"""
def __init__(self, structure, data, distance_matrix=None, data_aug=None):
"""
Typically, this constructor is not used directly and the static
from_file constructor is used. This constructor is designed to allow
summation and other operations between VolumetricData objects.
Args:
structure: Structure associated with the volumetric data
data: Actual volumetric data.
data_aug: Any extra information associated with volumetric data
(typically augmentation charges)
distance_matrix: A pre-computed distance matrix if available.
                Useful to pass distance matrices between sums,
                short-circuiting an otherwise expensive operation.
"""
self.structure = structure
self.is_spin_polarized = len(data) >= 2
self.is_soc = len(data) >= 4
self.dim = data["total"].shape
self.data = data
self.data_aug = data_aug if data_aug else {}
self.ngridpts = self.dim[0] * self.dim[1] * self.dim[2]
# lazy init the spin data since this is not always needed.
self._spin_data = {}
self._distance_matrix = {} if not distance_matrix else distance_matrix
@property
def spin_data(self):
"""
The data decomposed into actual spin data as {spin: data}.
Essentially, this provides the actual Spin.up and Spin.down data
instead of the total and diff. Note that by definition, a
non-spin-polarized run would have Spin.up data == Spin.down data.
"""
if not self._spin_data:
spin_data = dict()
spin_data[Spin.up] = 0.5 * (self.data["total"] +
self.data.get("diff", 0))
spin_data[Spin.down] = 0.5 * (self.data["total"] -
self.data.get("diff", 0))
self._spin_data = spin_data
return self._spin_data
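    # Example (illustrative; ``vd`` is a hypothetical spin-polarized
    # VolumetricData): the net moment can be recovered from the decomposition,
    #     net = np.sum(vd.spin_data[Spin.up] - vd.spin_data[Spin.down])
    # which equals np.sum(vd.data["diff"]) by construction.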
def get_axis_grid(self, ind):
"""
Returns the grid for a particular axis.
Args:
ind (int): Axis index.
"""
ng = self.dim
num_pts = ng[ind]
lengths = self.structure.lattice.abc
return [i / num_pts * lengths[ind] for i in range(num_pts)]
def __add__(self, other):
return self.linear_add(other, 1.0)
def __sub__(self, other):
return self.linear_add(other, -1.0)
def linear_add(self, other, scale_factor=1.0):
"""
Method to do a linear sum of volumetric objects. Used by + and -
operators as well. Returns a VolumetricData object containing the
linear sum.
Args:
other (VolumetricData): Another VolumetricData object
scale_factor (float): Factor to scale the other data by.
Returns:
VolumetricData corresponding to self + scale_factor * other.
"""
if self.structure != other.structure:
raise ValueError("Adding or subtraction operations can only be "
"performed for volumetric data with the exact "
"same structure.")
        # TODO: add checks that the grid dimensions of both objects match
data = {}
for k in self.data.keys():
data[k] = self.data[k] + scale_factor * other.data[k]
return VolumetricData(self.structure, data, self._distance_matrix)
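    # Example (illustrative; chg_ab, chg_a and chg_b are hypothetical Chgcar
    # objects on the same structure and grid): a charge-density difference,
    #     rho_diff = chg_ab - chg_a.linear_add(chg_b)
    # uses __sub__ and linear_add above, i.e. rho_AB - (rho_A + rho_B).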
@staticmethod
def parse_file(filename):
"""
Convenience method to parse a generic volumetric data file in the vasp
like format. Used by subclasses for parsing file.
Args:
filename (str): Path of file to parse
Returns:
(poscar, data)
"""
poscar_read = False
poscar_string = []
dataset = []
all_dataset = []
# for holding any strings in input that are not Poscar
# or VolumetricData (typically augmentation charges)
all_dataset_aug = {}
dim = None
dimline = None
read_dataset = False
ngrid_pts = 0
data_count = 0
poscar = None
with zopen(filename, "rt") as f:
for line in f:
original_line = line
line = line.strip()
if read_dataset:
toks = line.split()
for tok in toks:
if data_count < ngrid_pts:
# This complicated procedure is necessary because
# vasp outputs x as the fastest index, followed by y
# then z.
x = data_count % dim[0]
y = int(math.floor(data_count / dim[0])) % dim[1]
z = int(math.floor(data_count / dim[0] / dim[1]))
dataset[x, y, z] = float(tok)
data_count += 1
if data_count >= ngrid_pts:
read_dataset = False
data_count = 0
all_dataset.append(dataset)
elif not poscar_read:
if line != "" or len(poscar_string) == 0:
poscar_string.append(line)
elif line == "":
poscar = Poscar.from_string("\n".join(poscar_string))
poscar_read = True
elif not dim:
dim = [int(i) for i in line.split()]
ngrid_pts = dim[0] * dim[1] * dim[2]
dimline = line
read_dataset = True
dataset = np.zeros(dim)
elif line == dimline:
# when line == dimline, expect volumetric data to follow
# so set read_dataset to True
read_dataset = True
dataset = np.zeros(dim)
else:
# store any extra lines that were not part of the
# volumetric data so we know which set of data the extra
# lines are associated with
key = len(all_dataset) - 1
if key not in all_dataset_aug:
all_dataset_aug[key] = []
all_dataset_aug[key].append(original_line)
if len(all_dataset) == 4:
data = {"total": all_dataset[0], "diff_x": all_dataset[1],
"diff_y": all_dataset[2], "diff_z": all_dataset[3]}
data_aug = {"total": all_dataset_aug.get(0, None),
"diff_x": all_dataset_aug.get(1, None),
"diff_y": all_dataset_aug.get(2, None),
"diff_z": all_dataset_aug.get(3, None)}
# construct a "diff" dict for scalar-like magnetization density,
# referenced to an arbitrary direction (using same method as
# pymatgen.electronic_structure.core.Magmom, see
# Magmom documentation for justification for this)
# TODO: re-examine this, and also similar behavior in
# Magmom - @mkhorton
# TODO: does CHGCAR change with different SAXIS?
diff_xyz = np.array([data["diff_x"], data["diff_y"],
data["diff_z"]])
diff_xyz = diff_xyz.reshape((3, dim[0]*dim[1]*dim[2]))
ref_direction = np.array([1.01, 1.02, 1.03])
ref_sign = np.sign(np.dot(ref_direction, diff_xyz))
diff = np.multiply(np.linalg.norm(diff_xyz, axis=0), ref_sign)
data["diff"] = diff.reshape((dim[0], dim[1], dim[2]))
elif len(all_dataset) == 2:
data = {"total": all_dataset[0], "diff": all_dataset[1]}
data_aug = {"total": all_dataset_aug.get(0, None),
"diff": all_dataset_aug.get(1, None)}
else:
data = {"total": all_dataset[0]}
data_aug = {"total": all_dataset_aug.get(0, None)}
return poscar, data, data_aug
def write_file(self, file_name, vasp4_compatible=False):
"""
Write the VolumetricData object to a vasp compatible file.
Args:
file_name (str): Path to a file
vasp4_compatible (bool): True if the format is vasp4 compatible
"""
def _print_fortran_float(f):
"""
Fortran codes print floats with a leading zero in scientific
notation. When writing CHGCAR files, we adopt this convention
to ensure written CHGCAR files are byte-to-byte identical to
their input files as far as possible.
:param f: float
:return: str
"""
s = "{:.10E}".format(f)
            if f >= 0:
return "0."+s[0]+s[2:12]+'E'+"{:+03}".format(int(s[13:])+1)
else:
return "-."+s[1]+s[3:13]+'E'+"{:+03}".format(int(s[14:])+1)
with zopen(file_name, "wt") as f:
p = Poscar(self.structure)
# use original name if it's been set (e.g. from Chgcar)
comment = getattr(self, 'name', p.comment)
lines = comment + "\n"
lines += " 1.00000000000000\n"
latt = self.structure.lattice.matrix
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[0, :])
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[1, :])
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[2, :])
if not vasp4_compatible:
lines += "".join(["%5s" % s for s in p.site_symbols]) + "\n"
lines += "".join(["%6d" % x for x in p.natoms]) + "\n"
lines += "Direct\n"
for site in self.structure:
lines += "%10.6f%10.6f%10.6f\n" % tuple(site.frac_coords)
lines += " \n"
f.write(lines)
a = self.dim
def write_spin(data_type):
lines = []
count = 0
f.write(" {} {} {}\n".format(a[0], a[1], a[2]))
for (k, j, i) in itertools.product(list(range(a[2])),
list(range(a[1])),
list(range(a[0]))):
lines.append(_print_fortran_float(self.data[data_type][i, j, k]))
count += 1
if count % 5 == 0:
f.write(" " + "".join(lines) + "\n")
lines = []
else:
lines.append(" ")
f.write(" " + "".join(lines) + " \n")
f.write("".join(self.data_aug.get(data_type, [])))
write_spin("total")
if self.is_spin_polarized and self.is_soc:
write_spin("diff_x")
write_spin("diff_y")
write_spin("diff_z")
elif self.is_spin_polarized:
write_spin("diff")
def get_integrated_diff(self, ind, radius, nbins=1):
"""
Get integrated difference of atom index ind up to radius. This can be
an extremely computationally intensive process, depending on how many
grid points are in the VolumetricData.
Args:
ind (int): Index of atom.
radius (float): Radius of integration.
            nbins (int): Number of bins. Defaults to 1. This allows one to
                obtain a list of cumulative charge integration values for
                radii of [radius/nbins, 2 * radius/nbins, ...].
Returns:
Differential integrated charge as a np array of [[radius, value],
...]. Format is for ease of plotting. E.g., plt.plot(data[:,0],
data[:,1])
"""
# For non-spin-polarized runs, this is zero by definition.
if not self.is_spin_polarized:
radii = [radius / nbins * (i + 1) for i in range(nbins)]
data = np.zeros((nbins, 2))
data[:, 0] = radii
return data
struct = self.structure
a = self.dim
if ind not in self._distance_matrix or\
self._distance_matrix[ind]["max_radius"] < radius:
coords = []
for (x, y, z) in itertools.product(*[list(range(i)) for i in a]):
coords.append([x / a[0], y / a[1], z / a[2]])
sites_dist = struct.lattice.get_points_in_sphere(
coords, struct[ind].coords, radius)
self._distance_matrix[ind] = {"max_radius": radius,
"data": np.array(sites_dist)}
data = self._distance_matrix[ind]["data"]
# Use boolean indexing to find all charges within the desired distance.
inds = data[:, 1] <= radius
dists = data[inds, 1]
data_inds = np.rint(np.mod(list(data[inds, 0]), 1) *
np.tile(a, (len(dists), 1))).astype(int)
vals = [self.data["diff"][x, y, z] for x, y, z in data_inds]
hist, edges = np.histogram(dists, bins=nbins,
range=[0, radius],
weights=vals)
data = np.zeros((nbins, 2))
data[:, 0] = edges[1:]
data[:, 1] = [sum(hist[0:i + 1]) / self.ngridpts
for i in range(nbins)]
return data
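    # Example (illustrative; ``chgcar`` is a hypothetical spin-polarized
    # Chgcar and matplotlib.pyplot is assumed imported as plt):
    #     data = chgcar.get_integrated_diff(0, radius=2.0, nbins=20)
    #     plt.plot(data[:, 0], data[:, 1])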
def get_average_along_axis(self, ind):
"""
        Get the averaged total of the volumetric data along a certain axis
        direction.
For example, useful for visualizing Hartree Potentials from a LOCPOT
file.
Args:
ind (int): Index of axis.
Returns:
Average total along axis
"""
m = self.data["total"]
ng = self.dim
if ind == 0:
total = np.sum(np.sum(m, axis=1), 1)
elif ind == 1:
total = np.sum(np.sum(m, axis=0), 1)
else:
total = np.sum(np.sum(m, axis=0), 0)
return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]
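    # Example (illustrative; the "LOCPOT" path is an assumption):
    # planar-averaged potential along the c axis, e.g. for work function
    # estimates,
    #     locpot = Locpot.from_file("LOCPOT")
    #     z = locpot.get_axis_grid(2)
    #     v_avg = locpot.get_average_along_axis(2)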
def to_hdf5(self, filename):
"""
        Writes the VolumetricData to an HDF5 file, a format optimized for
        reading and storing large data. The mapping of the VolumetricData
to this file format is as follows:
VolumetricData.data -> f["vdata"]
VolumetricData.structure ->
f["Z"]: Sequence of atomic numbers
f["fcoords"]: Fractional coords
f["lattice"]: Lattice in the pymatgen.core.lattice.Lattice matrix
format
f.attrs["structure_json"]: String of json representation
Args:
filename (str): Filename to output to.
"""
import h5py
with h5py.File(filename, "w") as f:
ds = f.create_dataset("lattice", (3, 3), dtype='float')
ds[...] = self.structure.lattice.matrix
ds = f.create_dataset("Z", (len(self.structure.species), ),
dtype="i")
ds[...] = np.array([sp.Z for sp in self.structure.species])
ds = f.create_dataset("fcoords", self.structure.frac_coords.shape,
dtype='float')
ds[...] = self.structure.frac_coords
dt = h5py.special_dtype(vlen=str)
ds = f.create_dataset("species", (len(self.structure.species), ),
dtype=dt)
ds[...] = [str(sp) for sp in self.structure.species]
grp = f.create_group("vdata")
for k, v in self.data.items():
ds = grp.create_dataset(k, self.data[k].shape, dtype='float')
ds[...] = self.data[k]
f.attrs["name"] = self.name
f.attrs["structure_json"] = json.dumps(self.structure.as_dict())
@classmethod
def from_hdf5(cls, filename):
import h5py
with h5py.File(filename, "r") as f:
data = {k: np.array(v) for k, v in f["vdata"].items()}
structure = Structure.from_dict(json.loads(f.attrs["structure_json"]))
return VolumetricData(structure, data)
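# Round-trip sketch (illustrative; file names are assumptions): to_hdf5 and
# from_hdf5 preserve the grid data and the structure, so a parsed CHGCAR can
# be re-loaded as a generic VolumetricData.
def _example_hdf5_roundtrip():
    chgcar = Chgcar.from_file("CHGCAR")
    chgcar.to_hdf5("chgcar.h5")
    return VolumetricData.from_hdf5("chgcar.h5")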
class Locpot(VolumetricData):
"""
Simple object for reading a LOCPOT file.
Args:
poscar (Poscar): Poscar object containing structure.
data: Actual data.
"""
def __init__(self, poscar, data):
super(Locpot, self).__init__(poscar.structure, data)
self.name = poscar.comment
@staticmethod
def from_file(filename):
(poscar, data, data_aug) = VolumetricData.parse_file(filename)
return Locpot(poscar, data)
class Chgcar(VolumetricData):
"""
Simple object for reading a CHGCAR file.
Args:
poscar (Poscar): Poscar object containing structure.
data: Actual data.
"""
def __init__(self, poscar, data, data_aug=None):
super(Chgcar, self).__init__(poscar.structure, data, data_aug=data_aug)
self.poscar = poscar
self.name = poscar.comment
self._distance_matrix = {}
@staticmethod
def from_file(filename):
(poscar, data, data_aug) = VolumetricData.parse_file(filename)
return Chgcar(poscar, data, data_aug=data_aug)
@property
def net_magnetization(self):
if self.is_spin_polarized:
return np.sum(self.data['diff'])
else:
return None
class Procar(object):
"""
Object for reading a PROCAR file.
Args:
filename: Name of file containing PROCAR.
.. attribute:: data
        The PROCAR data of the form below. It should be noted that VASP uses
        1-based indexing, but all indices are converted to 0-based here.::
{
spin: nd.array accessed with (k-point index, band index,
ion index, orbital index)
}
.. attribute:: weights
        The weights associated with each k-point as an nd.array of length
        nkpoints.
    .. attribute:: phase_factors
Phase factors, where present (e.g. LORBIT = 12). A dict of the form:
{
spin: complex nd.array accessed with (k-point index, band index,
ion index, orbital index)
}
    .. attribute:: nbands
        Number of bands
    .. attribute:: nkpoints
        Number of k-points
    .. attribute:: nions
        Number of ions
"""
def __init__(self, filename):
headers = None
with zopen(filename, "rt") as f:
preambleexpr = re.compile(
r"# of k-points:\s*(\d+)\s+# of bands:\s*(\d+)\s+# of "
r"ions:\s*(\d+)")
kpointexpr = re.compile(r"^k-point\s+(\d+).*weight = ([0-9\.]+)")
bandexpr = re.compile(r"^band\s+(\d+)")
ionexpr = re.compile(r"^ion.*")
expr = re.compile(r"^([0-9]+)\s+")
current_kpoint = 0
current_band = 0
done = False
spin = Spin.down
for l in f:
l = l.strip()
if bandexpr.match(l):
m = bandexpr.match(l)
current_band = int(m.group(1)) - 1
done = False
elif kpointexpr.match(l):
m = kpointexpr.match(l)
current_kpoint = int(m.group(1)) - 1
weights[current_kpoint] = float(m.group(2))
if current_kpoint == 0:
spin = Spin.up if spin == Spin.down else Spin.down
done = False
elif headers is None and ionexpr.match(l):
headers = l.split()
headers.pop(0)
headers.pop(-1)
def f():
return np.zeros((nkpoints, nbands, nions, len(headers)))
data = defaultdict(f)
def f2():
return np.full((nkpoints, nbands, nions, len(headers)),
np.NaN, dtype=np.complex128)
phase_factors = defaultdict(f2)
elif expr.match(l):
toks = l.split()
index = int(toks.pop(0)) - 1
num_data = np.array([float(t)
for t in toks[:len(headers)]])
if not done:
data[spin][current_kpoint, current_band,
index, :] = num_data
else:
if np.isnan(phase_factors[spin][
current_kpoint, current_band, index, 0]):
phase_factors[spin][current_kpoint, current_band,
index, :] = num_data
else:
phase_factors[spin][current_kpoint, current_band,
index, :] += 1j * num_data
elif l.startswith("tot"):
done = True
elif preambleexpr.match(l):
m = preambleexpr.match(l)
nkpoints = int(m.group(1))
nbands = int(m.group(2))
nions = int(m.group(3))
weights = np.zeros(nkpoints)
self.nkpoints = nkpoints
self.nbands = nbands
self.nions = nions
self.weights = weights
self.orbitals = headers
self.data = data
self.phase_factors = phase_factors
def get_projection_on_elements(self, structure):
"""
Method returning a dictionary of projections on elements.
Args:
structure (Structure): Input structure.
Returns:
            a dict of the form {Spin.up: [band index][kpoint index]{Element: value}}
"""
dico = {}
for spin in self.data.keys():
dico[spin] = [[defaultdict(float)
for i in range(self.nkpoints)]
for j in range(self.nbands)]
for iat in range(self.nions):
name = structure.species[iat].symbol
for spin, d in self.data.items():
for k, b in itertools.product(range(self.nkpoints),
range(self.nbands)):
dico[spin][b][k][name] = np.sum(d[k, b, iat, :])
return dico
def get_occupation(self, atom_index, orbital):
"""
Returns the occupation for a particular orbital of a particular atom.
Args:
            atom_index (int): Index of atom in the PROCAR. It should be noted
that VASP uses 1-based indexing for atoms, but this is
converted to 0-based indexing in this parser to be
consistent with representation of structures in pymatgen.
            orbital (str): An orbital label matching one of the column
                headers parsed from the PROCAR (e.g., s, px, dxy). Only the
                occupation of that specific orbital is returned; the lookup
                is an exact match against the parsed headers.
Returns:
Sum occupation of orbital of atom.
"""
orbital_index = self.orbitals.index(orbital)
return {spin: np.sum(d[:, :, atom_index, orbital_index] * self.weights[:, None])
for spin, d in self.data.items()}
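# Usage sketch (illustrative; the "PROCAR" path, atom index and orbital label
# are assumptions): k-point-weighted occupation of one orbital of one atom.
def _example_procar_occupation():
    procar = Procar("PROCAR")
    return procar.get_occupation(atom_index=0, orbital="dxy")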
class Oszicar(object):
"""
A basic parser for an OSZICAR output from VASP. In general, while the
OSZICAR is useful for a quick look at the output from a VASP run, we
recommend that you use the Vasprun parser instead, which gives far richer
information about a run.
Args:
filename (str): Filename of file to parse
.. attribute:: electronic_steps
All electronic steps as a list of list of dict. e.g.,
[[{"rms": 160.0, "E": 4507.24605593, "dE": 4507.2, "N": 1,
"deps": -17777.0, "ncg": 16576}, ...], [....]
where electronic_steps[index] refers the list of electronic steps
in one ionic_step, electronic_steps[index][subindex] refers to a
particular electronic step at subindex in ionic step at index. The
dict of properties depends on the type of VASP run, but in general,
"E", "dE" and "rms" should be present in almost all runs.
.. attribute:: ionic_steps:
All ionic_steps as a list of dict, e.g.,
[{"dE": -526.36, "E0": -526.36024, "mag": 0.0, "F": -526.36024},
...]
This is the typical output from VASP at the end of each ionic step.
"""
def __init__(self, filename):
electronic_steps = []
ionic_steps = []
ionic_pattern = re.compile(r"(\d+)\s+F=\s*([\d\-\.E\+]+)\s+"
r"E0=\s*([\d\-\.E\+]+)\s+"
r"d\s*E\s*=\s*([\d\-\.E\+]+)$")
ionic_mag_pattern = re.compile(r"(\d+)\s+F=\s*([\d\-\.E\+]+)\s+"
r"E0=\s*([\d\-\.E\+]+)\s+"
r"d\s*E\s*=\s*([\d\-\.E\+]+)\s+"
r"mag=\s*([\d\-\.E\+]+)")
ionic_MD_pattern = re.compile(r"(\d+)\s+T=\s*([\d\-\.E\+]+)\s+"
r"E=\s*([\d\-\.E\+]+)\s+"
r"F=\s*([\d\-\.E\+]+)\s+"
r"E0=\s*([\d\-\.E\+]+)\s+"
r"EK=\s*([\d\-\.E\+]+)\s+"
r"SP=\s*([\d\-\.E\+]+)\s+"
r"SK=\s*([\d\-\.E\+]+)")
electronic_pattern = re.compile(r"\s*\w+\s*:(.*)")
def smart_convert(header, num):
try:
if header == "N" or header == "ncg":
v = int(num)
return v
v = float(num)
return v
except ValueError:
return "--"
header = []
with zopen(filename, "rt") as fid:
for line in fid:
line = line.strip()
m = electronic_pattern.match(line)
if m:
toks = m.group(1).split()
data = {header[i]: smart_convert(header[i], toks[i])
for i in range(len(toks))}
if toks[0] == "1":
electronic_steps.append([data])
else:
electronic_steps[-1].append(data)
elif ionic_pattern.match(line.strip()):
m = ionic_pattern.match(line.strip())
ionic_steps.append({"F": float(m.group(2)),
"E0": float(m.group(3)),
"dE": float(m.group(4))})
elif ionic_mag_pattern.match(line.strip()):
m = ionic_mag_pattern.match(line.strip())
ionic_steps.append({"F": float(m.group(2)),
"E0": float(m.group(3)),
"dE": float(m.group(4)),
"mag": float(m.group(5))})
elif ionic_MD_pattern.match(line.strip()):
m = ionic_MD_pattern.match(line.strip())
ionic_steps.append({"T": float(m.group(2)),
"E": float(m.group(3)),
"F": float(m.group(4)),
"E0": float(m.group(5)),
"EK": float(m.group(6)),
"SP": float(m.group(7)),
"SK": float(m.group(8))})
elif re.match(r"^\s*N\s+E\s*", line):
header = line.strip().replace("d eps", "deps").split()
self.electronic_steps = electronic_steps
self.ionic_steps = ionic_steps
@property
def all_energies(self):
"""
Compilation of all energies from all electronic steps and ionic steps
as a tuple of list of energies, e.g.,
((4507.24605593, 143.824705755, -512.073149912, ...), ...)
"""
all_energies = []
for i in range(len(self.electronic_steps)):
energies = [step["E"] for step in self.electronic_steps[i]]
energies.append(self.ionic_steps[i]["F"])
all_energies.append(tuple(energies))
return tuple(all_energies)
@property
@unitized("eV")
def final_energy(self):
"""
Final energy from run.
"""
return self.ionic_steps[-1]["E0"]
def as_dict(self):
return {"electronic_steps": self.electronic_steps,
"ionic_steps": self.ionic_steps}
class VaspParserError(Exception):
"""
Exception class for VASP parsing.
"""
pass
def get_band_structure_from_vasp_multiple_branches(dir_name, efermi=None,
projections=False):
"""
This method is used to get band structure info from a VASP directory. It
takes into account that the run can be divided in several branches named
"branch_x". If the run has not been divided in branches the method will
turn to parsing vasprun.xml directly.
    The method returns None if there's a parsing error.
Args:
dir_name: Directory containing all bandstructure runs.
efermi: Efermi for bandstructure.
projections: True if you want to get the data on site projections if
any. Note that this is sometimes very large
Returns:
A BandStructure Object
"""
# TODO: Add better error handling!!!
if os.path.exists(os.path.join(dir_name, "branch_0")):
# get all branch dir names
branch_dir_names = [os.path.abspath(d)
for d in glob.glob("{i}/branch_*"
.format(i=dir_name))
if os.path.isdir(d)]
# sort by the directory name (e.g, branch_10)
sort_by = lambda x: int(x.split("_")[-1])
sorted_branch_dir_names = sorted(branch_dir_names, key=sort_by)
# populate branches with Bandstructure instances
branches = []
for dir_name in sorted_branch_dir_names:
xml_file = os.path.join(dir_name, "vasprun.xml")
if os.path.exists(xml_file):
run = Vasprun(xml_file, parse_projected_eigen=projections)
branches.append(run.get_band_structure(efermi=efermi))
else:
# It might be better to throw an exception
warnings.warn("Skipping {}. Unable to find {}"
.format(d=dir_name, f=xml_file))
return get_reconstructed_band_structure(branches, efermi)
else:
xml_file = os.path.join(dir_name, "vasprun.xml")
# Better handling of Errors
if os.path.exists(xml_file):
return Vasprun(xml_file, parse_projected_eigen=projections)\
.get_band_structure(kpoints_filename=None, efermi=efermi)
else:
return None
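# Usage sketch (illustrative; the directory layout and efermi value are
# assumptions):
#     bs = get_band_structure_from_vasp_multiple_branches(
#         "my_bs_run", efermi=5.0, projections=False)
#     if bs is not None:
#         print(bs.get_band_gap())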
class Xdatcar(object):
"""
Class representing an XDATCAR file. Only tested with VASP 5.x files.
.. attribute:: structures
List of structures parsed from XDATCAR.
.. attribute:: comment
Optional comment string.
Authors: Ram Balachandran
"""
def __init__(self, filename, ionicstep_start=1,
ionicstep_end=None, comment=None):
"""
Init a Xdatcar.
Args:
filename (str): Filename of input XDATCAR file.
ionicstep_start (int): Starting number of ionic step.
ionicstep_end (int): Ending number of ionic step.
"""
preamble = None
coords_str = []
structures = []
preamble_done = False
if (ionicstep_start < 1):
raise Exception('Start ionic step cannot be less than 1')
        if (ionicstep_end is not None and
                ionicstep_end < 1):
raise Exception('End ionic step cannot be less than 1')
ionicstep_cnt = 1
with zopen(filename, "rt") as f:
for l in f:
l = l.strip()
if preamble is None:
preamble = [l]
elif not preamble_done:
if l == "" or "Direct configuration=" in l:
preamble_done = True
tmp_preamble = [preamble[0]]
for i in range(1, len(preamble)):
if preamble[0] != preamble[i]:
tmp_preamble.append(preamble[i])
else:
break
preamble = tmp_preamble
else:
preamble.append(l)
elif l == "" or "Direct configuration=" in l:
p = Poscar.from_string("\n".join(preamble +
["Direct"] + coords_str))
if ionicstep_end is None:
if (ionicstep_cnt >= ionicstep_start):
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
ionicstep_cnt += 1
coords_str = []
else:
coords_str.append(l)
p = Poscar.from_string("\n".join(preamble +
["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
self.structures = structures
self.comment = comment or self.structures[0].formula
@property
def site_symbols(self):
"""
Sequence of symbols associated with the Xdatcar. Similar to 6th line in
vasp 5+ Xdatcar.
"""
syms = [site.specie.symbol for site in self.structures[0]]
return [a[0] for a in itertools.groupby(syms)]
@property
def natoms(self):
"""
Sequence of number of sites of each type associated with the Poscar.
Similar to 7th line in vasp 5+ Xdatcar.
"""
syms = [site.specie.symbol for site in self.structures[0]]
return [len(tuple(a[1])) for a in itertools.groupby(syms)]
def concatenate(self, filename, ionicstep_start=1,
ionicstep_end=None):
"""
Concatenate structures in file to Xdatcar.
Args:
filename (str): Filename of XDATCAR file to be concatenated.
ionicstep_start (int): Starting number of ionic step.
ionicstep_end (int): Ending number of ionic step.
TODO(rambalachandran):
Requires a check to ensure if the new concatenating file has the
same lattice structure and atoms as the Xdatcar class.
"""
preamble = None
coords_str = []
structures = self.structures
preamble_done = False
if ionicstep_start < 1:
raise Exception('Start ionic step cannot be less than 1')
        if (ionicstep_end is not None and
                ionicstep_end < 1):
raise Exception('End ionic step cannot be less than 1')
ionicstep_cnt = 1
with zopen(filename, "rt") as f:
for l in f:
l = l.strip()
if preamble is None:
preamble = [l]
elif not preamble_done:
if l == "" or "Direct configuration=" in l:
preamble_done = True
tmp_preamble = [preamble[0]]
for i in range(1, len(preamble)):
if preamble[0] != preamble[i]:
tmp_preamble.append(preamble[i])
else:
break
preamble = tmp_preamble
else:
preamble.append(l)
elif l == "" or "Direct configuration=" in l:
p = Poscar.from_string("\n".join(preamble +
["Direct"] + coords_str))
if ionicstep_end is None:
if (ionicstep_cnt >= ionicstep_start):
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
ionicstep_cnt += 1
coords_str = []
else:
coords_str.append(l)
p = Poscar.from_string("\n".join(preamble +
["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
self.structures = structures
def get_string(self, ionicstep_start=1,
ionicstep_end=None,
significant_figures=8):
"""
        Return the Xdatcar as a string for the given range of ionic steps.
        Args:
            ionicstep_start (int): Starting number of ionic step.
            ionicstep_end (int): Ending number of ionic step.
            significant_figures (int): Number of significant figures for the
                fractional coordinates.
"""
from pymatgen.io.vasp import Poscar
if (ionicstep_start < 1):
raise Exception('Start ionic step cannot be less than 1')
        if (ionicstep_end is not None and
                ionicstep_end < 1):
raise Exception('End ionic step cannot be less than 1')
latt = self.structures[0].lattice
if np.linalg.det(latt.matrix) < 0:
latt = Lattice(-latt.matrix)
lines = [self.comment, "1.0", str(latt)]
lines.append(" ".join(self.site_symbols))
lines.append(" ".join([str(x) for x in self.natoms]))
format_str = "{{:.{0}f}}".format(significant_figures)
ionicstep_cnt = 1
output_cnt = 1
for cnt, structure in enumerate(self.structures):
ionicstep_cnt = cnt + 1
if ionicstep_end is None:
if (ionicstep_cnt >= ionicstep_start):
lines.append("Direct configuration="+
' '*(7-len(str(output_cnt)))+str(output_cnt))
for (i, site) in enumerate(structure):
coords = site.frac_coords
line = " ".join([format_str.format(c) for c in coords])
lines.append(line)
output_cnt += 1
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
lines.append("Direct configuration="+
' '*(7-len(str(output_cnt)))+str(output_cnt))
for (i, site) in enumerate(structure):
coords = site.frac_coords
line = " ".join([format_str.format(c) for c in coords])
lines.append(line)
output_cnt += 1
return "\n".join(lines) + "\n"
def write_file(self, filename, **kwargs):
"""
Write Xdatcar class into a file.
Args:
filename (str): Filename of output XDATCAR file.
The supported kwargs are the same as those for the
Xdatcar.get_string method and are passed through directly.
"""
with zopen(filename, "wt") as f:
f.write(self.get_string(**kwargs))
def __str__(self):
return self.get_string()
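# Usage sketch (illustrative; file names and step range are assumptions):
# merge two trajectories and write out the first ten ionic steps.
def _example_xdatcar_concat():
    xd = Xdatcar("XDATCAR.run1")
    xd.concatenate("XDATCAR.run2")
    xd.write_file("XDATCAR.combined", ionicstep_start=1, ionicstep_end=11)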
class Dynmat(object):
"""
Object for reading a DYNMAT file.
Args:
filename: Name of file containing DYNMAT.
.. attribute:: data
A nested dict containing the DYNMAT data of the form::
[atom <int>][disp <int>]['dispvec'] =
displacement vector (part of first line in dynmat block, e.g. "0.01 0 0")
[atom <int>][disp <int>]['dynmat'] =
<list> list of dynmat lines for this atom and this displacement
Authors: Patrick Huck
"""
def __init__(self, filename):
with zopen(filename, "rt") as f:
lines = list(clean_lines(f.readlines()))
self._nspecs, self._natoms, self._ndisps = map(int, lines[
0].split())
            self._masses = list(map(float, lines[1].split()))
self.data = defaultdict(dict)
atom, disp = None, None
for i, l in enumerate(lines[2:]):
v = list(map(float, l.split()))
if not i % (self._natoms + 1):
atom, disp = map(int, v[:2])
if atom not in self.data:
self.data[atom] = {}
if disp not in self.data[atom]:
self.data[atom][disp] = {}
self.data[atom][disp]['dispvec'] = v[2:]
else:
if 'dynmat' not in self.data[atom][disp]:
self.data[atom][disp]['dynmat'] = []
self.data[atom][disp]['dynmat'].append(v)
def get_phonon_frequencies(self):
"""calculate phonon frequencies"""
# TODO: the following is most likely not correct or suboptimal
# hence for demonstration purposes only
frequencies = []
        for k, v0 in self.data.items():
            for v1 in v0.values():
                vec = map(abs, v1['dynmat'][k - 1])
frequency = math.sqrt(sum(vec)) * 2. * \
math.pi * 15.633302 # THz
frequencies.append(frequency)
return frequencies
@property
def nspecs(self):
"""returns the number of species"""
return self._nspecs
@property
def natoms(self):
"""returns the number of atoms"""
return self._natoms
@property
def ndisps(self):
"""returns the number of displacements"""
return self._ndisps
@property
def masses(self):
"""returns the list of atomic masses"""
return list(self._masses)
def get_adjusted_fermi_level(efermi, cbm, band_structure):
"""
    When running band structure computations, the Fermi level needs to be
    taken from the static run that gave the charge density used for the
    non-self-consistent band structure run. Sometimes this Fermi level is
    slightly too low because of the mismatch between the uniform grid used in
    the static run and the band structure k-points (e.g., the VBM is on Gamma
    and the Gamma point is not in the uniform mesh). Here we use a procedure
    that looks for energy levels higher than the static Fermi level (but
    lower than the LUMO). If any of these levels makes the band structure
    appear insulating rather than metallic, we keep this adjusted Fermi
    level. This procedure has been shown to correctly detect most insulators.
Args:
efermi (float): Fermi energy of the static run
cbm (float): Conduction band minimum of the static run
        band_structure: a BandStructureSymmLine object
Returns:
a new adjusted fermi level
"""
# make a working copy of band_structure
bs_working = BandStructureSymmLine.from_dict(band_structure.as_dict())
if bs_working.is_metal():
e = efermi
while e < cbm:
e += 0.01
bs_working._efermi = e
if not bs_working.is_metal():
return e
return efermi
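# Usage sketch (illustrative; the energies and the ``bs`` object are
# assumptions taken from a static run and a band structure run):
#     new_efermi = get_adjusted_fermi_level(efermi=5.3, cbm=6.1,
#                                           band_structure=bs)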
class Wavecar:
"""
This is a class that contains the (pseudo-) wavefunctions from VASP.
Coefficients are read from the given WAVECAR file and the corresponding
G-vectors are generated using the algorithm developed in WaveTrans (see
acknowledgments below). To understand how the wavefunctions are evaluated,
please see the evaluate_wavefunc docstring.
It should be noted that the pseudopotential augmentation is not included in
the WAVECAR file. As a result, some caution should be exercised when
    deriving values from this information.
The usefulness of this class is to allow the user to do projections or band
unfolding style manipulations of the wavefunction. An example of this can
be seen in the work of Shen et al. 2017
(https://doi.org/10.1103/PhysRevMaterials.1.065001).
.. attribute:: filename
String of the input file (usually WAVECAR)
.. attribute:: nk
Number of k-points from the WAVECAR
.. attribute:: nb
Number of bands per k-point
.. attribute:: encut
Energy cutoff (used to define G_{cut})
.. attribute:: efermi
Fermi energy
.. attribute:: a
Primitive lattice vectors of the cell (e.g. a_1 = self.a[0, :])
.. attribute:: b
Reciprocal lattice vectors of the cell (e.g. b_1 = self.b[0, :])
.. attribute:: vol
The volume of the unit cell in real space
.. attribute:: kpoints
The list of k-points read from the WAVECAR file
.. attribute:: band_energy
The list of band eigenenergies (and corresponding occupancies) for
each kpoint, where the first index corresponds to the index of the
k-point (e.g. self.band_energy[kp])
.. attribute:: Gpoints
The list of generated G-points for each k-point (a double list), which
are used with the coefficients for each k-point and band to recreate
the wavefunction (e.g. self.Gpoints[kp] is the list of G-points for
k-point kp). The G-points depend on the k-point and reciprocal lattice
and therefore are identical for each band at the same k-point. Each
G-point is represented by integer multipliers (e.g. assuming
Gpoints[kp][n] == [n_1, n_2, n_3], then
G_n = n_1*b_1 + n_2*b_2 + n_3*b_3)
.. attribute:: coeffs
The list of coefficients for each k-point and band for reconstructing
the wavefunction. The first index corresponds to the kpoint and the
second corresponds to the band (e.g. self.coeffs[kp][b] corresponds
to k-point kp and band b).
Acknowledgments:
This code is based upon the Fortran program, WaveTrans, written by
R. M. Feenstra and M. Widom from the Dept. of Physics at Carnegie
Mellon University. To see the original work, please visit:
https://www.andrew.cmu.edu/user/feenstra/wavetrans/
Author: Mark Turiansky
"""
def __init__(self, filename='WAVECAR', verbose=False, precision='normal'):
"""
Information is extracted from the given WAVECAR
Args:
filename (str): input file (default: WAVECAR)
verbose (bool): determines whether processing information is shown
precision (str): determines how fine the fft mesh is (normal or
accurate), only the first letter matters
"""
self.filename = filename
# c = 0.26246582250210965422
# 2m/hbar^2 in agreement with VASP
self._C = 0.262465831
with open(self.filename, 'rb') as f:
# read the header information
recl, spin, rtag = np.fromfile(f, dtype=np.float64, count=3) \
.astype(np.int)
if verbose:
print('recl={}, spin={}, rtag={}'.format(recl, spin, rtag))
recl8 = int(recl/8)
# check that ISPIN wasn't set to 2
if spin == 2:
raise ValueError('spin polarization not currently supported')
# check to make sure we have precision correct
if rtag != 45200 and rtag != 45210:
raise ValueError('invalid rtag of {}'.format(rtag))
# padding
np.fromfile(f, dtype=np.float64, count=(recl8-3))
# extract kpoint, bands, energy, and lattice information
self.nk, self.nb, self.encut = np.fromfile(f, dtype=np.float64,
count=3).astype(np.int)
self.a = np.fromfile(f, dtype=np.float64, count=9).reshape((3, 3))
self.efermi = np.fromfile(f, dtype=np.float64, count=1)[0]
if verbose:
print('kpoints = {}, bands = {}, energy cutoff = {}, fermi '
'energy= {:.04f}\n'.format(self.nk, self.nb, self.encut,
self.efermi))
print('primitive lattice vectors = \n{}'.format(self.a))
self.vol = np.dot(self.a[0, :],
np.cross(self.a[1, :], self.a[2, :]))
if verbose:
print('volume = {}\n'.format(self.vol))
# calculate reciprocal lattice
b = np.array([np.cross(self.a[1, :], self.a[2, :]),
np.cross(self.a[2, :], self.a[0, :]),
np.cross(self.a[0, :], self.a[1, :])])
b = 2*np.pi*b/self.vol
self.b = b
if verbose:
print('reciprocal lattice vectors = \n{}'.format(b))
print('reciprocal lattice vector magnitudes = \n{}\n'
.format(np.linalg.norm(b, axis=1)))
# calculate maximum number of b vectors in each direction
self._generate_nbmax()
if verbose:
print('max number of G values = {}\n\n'.format(self._nbmax))
self.ng = self._nbmax * 3 if precision.lower()[0] == 'n' else \
self._nbmax * 4
# padding
np.fromfile(f, dtype=np.float64, count=recl8-13)
# reading records
# np.set_printoptions(precision=7, suppress=True)
self.Gpoints = [None for _ in range(self.nk)]
self.coeffs = [[None for i in range(self.nb)]
for j in range(self.nk)]
self.kpoints = []
self.band_energy = []
for ispin in range(spin):
if verbose:
print('reading spin {}'.format(ispin))
for ink in range(self.nk):
# information for this kpoint
nplane = int(np.fromfile(f, dtype=np.float64, count=1)[0])
kpoint = np.fromfile(f, dtype=np.float64, count=3)
self.kpoints.append(kpoint)
if verbose:
print('kpoint {: 4} with {: 5} plane waves at {}'
.format(ink, nplane, kpoint))
# energy and occupation information
enocc = np.fromfile(f, dtype=np.float64,
count=3*self.nb).reshape((self.nb, 3))
self.band_energy.append(enocc)
if verbose:
print(enocc[:, [0, 2]])
# padding
np.fromfile(f, dtype=np.float64, count=(recl8-4-3*self.nb))
# generate G integers
self.Gpoints[ink] = self._generate_G_points(kpoint)
if len(self.Gpoints[ink]) != nplane:
raise ValueError('failed to generate the correct '
'number of G points')
# extract coefficients
for inb in range(self.nb):
if rtag == 45200:
self.coeffs[ink][inb] = \
np.fromfile(f, dtype=np.complex64,
count=nplane)
np.fromfile(f, dtype=np.float64,
count=recl8-nplane)
elif rtag == 45210:
# this should handle double precision coefficients
# but I don't have a WAVECAR to test it with
self.coeffs[ink][inb] = \
np.fromfile(f, dtype=np.complex128,
count=nplane)
np.fromfile(f, dtype=np.float64,
count=recl8-2*nplane)
def _generate_nbmax(self):
"""
Helper function that determines maximum number of b vectors for
each direction.
This algorithm is adapted from WaveTrans (see Class docstring). There
should be no reason for this function to be called outside of
initialization.
"""
bmag = np.linalg.norm(self.b, axis=1)
b = self.b
# calculate maximum integers in each direction for G
phi12 = np.arccos(np.dot(b[0, :], b[1, :])/(bmag[0]*bmag[1]))
sphi123 = np.dot(b[2, :], np.cross(b[0, :], b[1, :])) / \
(bmag[2]*np.linalg.norm(np.cross(b[0, :], b[1, :])))
nbmaxA = np.sqrt(self.encut*self._C) / bmag
nbmaxA[0] /= np.abs(np.sin(phi12))
nbmaxA[1] /= np.abs(np.sin(phi12))
nbmaxA[2] /= np.abs(sphi123)
nbmaxA += 1
phi13 = np.arccos(np.dot(b[0, :], b[2, :])/(bmag[0]*bmag[2]))
sphi123 = np.dot(b[1, :], np.cross(b[0, :], b[2, :])) / \
(bmag[1]*np.linalg.norm(np.cross(b[0, :], b[2, :])))
nbmaxB = np.sqrt(self.encut*self._C) / bmag
nbmaxB[0] /= np.abs(np.sin(phi13))
nbmaxB[1] /= np.abs(sphi123)
nbmaxB[2] /= np.abs(np.sin(phi13))
nbmaxB += 1
phi23 = np.arccos(np.dot(b[1, :], b[2, :])/(bmag[1]*bmag[2]))
sphi123 = np.dot(b[0, :], np.cross(b[1, :], b[2, :])) / \
(bmag[0]*np.linalg.norm(np.cross(b[1, :], b[2, :])))
nbmaxC = np.sqrt(self.encut*self._C) / bmag
nbmaxC[0] /= np.abs(sphi123)
nbmaxC[1] /= np.abs(np.sin(phi23))
nbmaxC[2] /= np.abs(np.sin(phi23))
nbmaxC += 1
self._nbmax = np.max([nbmaxA, nbmaxB, nbmaxC], axis=0) \
.astype(np.int)
def _generate_G_points(self, kpoint):
"""
Helper function to generate G-points based on nbmax.
This function iterates over possible G-point values and determines
if the energy is less than G_{cut}. Valid values are appended to
the output array. This function should not be called outside of
initialization.
Args:
kpoint (np.array): the array containing the current k-point value
Returns:
a list containing valid G-points
"""
gpoints = []
for i in range(2*self._nbmax[2]+1):
i3 = i-2*self._nbmax[2]-1 if i > self._nbmax[2] else i
for j in range(2*self._nbmax[1]+1):
j2 = j-2*self._nbmax[1]-1 if j > self._nbmax[1] else j
for k in range(2*self._nbmax[0]+1):
k1 = k-2*self._nbmax[0]-1 if k > self._nbmax[0] else k
G = np.array([k1, j2, i3])
v = kpoint + G
g = np.linalg.norm(np.dot(v, self.b))
E = g**2 / self._C
if E < self.encut:
gpoints.append(G)
return np.array(gpoints, dtype=np.float64)
def evaluate_wavefunc(self, kpoint, band, r):
r"""
Evaluates the wavefunction for a given position, r.
The wavefunction is given by the k-point and band. It is evaluated
at the given position by summing over the components. Formally,
\psi_n^k (r) = \sum_{i=1}^N c_i^{n,k} \exp (i (k + G_i^{n,k}) \cdot r)
where \psi_n^k is the wavefunction for the nth band at k-point k, N is
the number of plane waves, c_i^{n,k} is the ith coefficient that
corresponds to the nth band and k-point k, and G_i^{n,k} is the ith
G-point corresponding to k-point k.
        NOTE: This function is very slow; a discrete Fourier transform is the
        preferred method of evaluation (see Wavecar.fft_mesh).
Args:
kpoint (int): the index of the kpoint where the wavefunction
will be evaluated
band (int): the index of the band where the wavefunction will be
evaluated
r (np.array): the position where the wavefunction will be evaluated
Returns:
a complex value corresponding to the evaluation of the wavefunction
"""
v = self.Gpoints[kpoint] + self.kpoints[kpoint]
u = np.dot(np.dot(v, self.b), r)
c = self.coeffs[kpoint][band]
return np.sum(np.dot(c, np.exp(1j*u, dtype=np.complex64))) / \
np.sqrt(self.vol)
def fft_mesh(self, kpoint, band, shift=True):
"""
Places the coefficients of a wavefunction onto an fft mesh.
        Once the mesh has been obtained, a discrete Fourier transform can be
        used to obtain real-space evaluation of the wavefunction. The output
        of this function can be passed directly to numpy's FFT routines. For
        example:
            mesh = Wavecar('WAVECAR').fft_mesh(kpoint, band)
            evals = np.fft.ifftn(mesh)
Args:
kpoint (int): the index of the kpoint where the wavefunction
will be evaluated
band (int): the index of the band where the wavefunction will be
evaluated
shift (bool): determines if the zero frequency coefficient is
placed at index (0, 0, 0) or centered
Returns:
a numpy ndarray representing the 3D mesh of coefficients
"""
mesh = np.zeros(tuple(self.ng), dtype=np.complex)
for gp, coeff in zip(self.Gpoints[kpoint], self.coeffs[kpoint][band]):
t = tuple(gp.astype(np.int) + (self.ng/2).astype(np.int))
mesh[t] = coeff
if shift:
return np.fft.ifftshift(mesh)
else:
return mesh
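# Usage sketch (illustrative; the "WAVECAR" path and the k-point/band indices
# are assumptions): real-space evaluation of a pseudo-wavefunction via an
# inverse FFT of the coefficient mesh.
def _example_wavecar_density():
    wv = Wavecar("WAVECAR")
    mesh = wv.fft_mesh(kpoint=0, band=0)
    psi = np.fft.ifftn(mesh)       # pseudo-wavefunction on the FFT grid
    return np.abs(psi) ** 2        # unnormalized (pseudo-) density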
class Wavederf(object):
"""
Object for reading a WAVEDERF file.
Note: This file is only produced when LOPTICS is true AND vasp has been
recompiled after uncommenting the line that calls
WRT_CDER_BETWEEN_STATES_FORMATTED in linear_optics.F
Args:
filename: Name of file containing WAVEDERF.
.. attribute:: data
A numpy array containing the WAVEDERF data of the form below. It should
be noted that VASP uses 1-based indexing for bands, but this is
converted to 0-based numpy array indexing.
For each kpoint (in the same order as in IBZKPT), and for each pair of
bands:
[ #kpoint index
[ #band 1 index
[ #band 2 index
[cdum_x_real, cdum_x_imag, cdum_y_real, cdum_y_imag, cdum_z_real, cdum_z_imag]
]
]
]
This structure follows the file format. Numpy array methods can be used
to fetch data in a more useful way (e.g., get matrix elements between
        two specific bands at each kpoint, fetch x/y/z components,
real/imaginary parts, abs/phase, etc. )
Author: Miguel Dias Costa
"""
def __init__(self, filename):
with zopen(filename, "rt") as f:
header = f.readline().split()
ispin = int(header[0])
nb_kpoints = int(header[1])
nb_bands = int(header[2])
data = np.zeros((nb_kpoints, nb_bands, nb_bands, 6))
for ik in range(nb_kpoints):
for ib1 in range(nb_bands):
for ib2 in range(nb_bands):
                        # besides the band indexes, which are redundant, each
                        # line in the file also includes each band's energy
                        # and occupation, which are already available
                        # elsewhere, so we store only the 6 matrix elements
                        # after these 6 redundant values
data[ik][ib1][ib2] = [
float(element)
for element in f.readline().split()[6:]]
self.data = data
self._nb_kpoints = nb_kpoints
self._nb_bands = nb_bands
@property
def nb_bands(self):
"""
returns the number of bands in the band structure
"""
return self._nb_bands
@property
def nb_kpoints(self):
"""
Returns the number of k-points in the band structure calculation
"""
return self._nb_kpoints
def get_elements_between_bands(self, band_i, band_j):
"""
Method returning a numpy array with elements
[cdum_x_real, cdum_x_imag, cdum_y_real, cdum_y_imag, cdum_z_real, cdum_z_imag]
between bands band_i and band_j (vasp 1-based indexing) for all kpoints.
Args:
band_i (Integer): Index of band i
band_j (Integer): Index of band j
Returns:
a numpy list of elements for each kpoint
"""
if band_i < 1 or band_i > self.nb_bands or band_j < 1 or band_j > self.nb_bands:
raise ValueError("Band index out of bounds")
return self.data[:, band_i - 1, band_j - 1, :]
class UnconvergedVASPWarning(Warning):
"""
Warning for unconverged vasp run.
"""
pass
|
{
"content_hash": "14390a57d491026a2f0d731e59b56530",
"timestamp": "",
"source": "github",
"line_count": 4152,
"max_line_length": 139,
"avg_line_length": 41.331647398843934,
"alnum_prop": 0.5115699060072607,
"repo_name": "czhengsci/pymatgen",
"id": "dbd6e6d30c60f6de823ffef27fece3973e5901e1",
"size": "171719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/io/vasp/outputs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5938"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6706935"
},
{
"name": "Roff",
"bytes": "1135003"
}
],
"symlink_target": ""
}
|
"""Gradients for operators defined in control_flow_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.control_flow_ops import *
from tensorflow.python.ops.gen_control_flow_ops import *
@ops.RegisterGradient("Switch")
def _SwitchGrad(op, *grad):
op = GetRealOp(op)
ctxt = op._get_control_flow_context() # pylint: disable=protected-access
if isinstance(ctxt, WhileContext):
merge_op = ctxt.switch_map.get(op)
if merge_op:
merge_op._update_input(1, grad[1])
return None, None
else:
merge_op = merge(grad, name="b_switch")[0]
ctxt.switch_map[op] = merge_op.op
return merge_op, None
elif isinstance(ctxt, CondContext):
good_grad = grad[ctxt.branch]
zero_grad = grad[1 - ctxt.branch]
zero_grad = switch(zero_grad, ctxt.pred, name="grad_0")[1 - ctxt.branch]
return merge([good_grad, zero_grad], name="switch_grad")[0], None
else:
false_grad = switch(grad[0], op.inputs[1])[0]
true_grad = switch(grad[1], op.inputs[1])[1]
return merge([false_grad, true_grad])[0], None
@ops.RegisterGradient("RefSwitch")
def _RefSwitchGrad(op, *grad):
return _SwitchGrad(op, *grad)
@ops.RegisterGradient("Merge")
def _MergeGrad(op, grad, _):
op = GetRealOp(op)
input_op = op.inputs[0].op
# pylint: disable=protected-access
ctxt = input_op._get_control_flow_context()
# pylint: enable=protected-access
if isinstance(ctxt, WhileContext):
grad_ctxt = ctxt.grad_context
return switch(grad, grad_ctxt.pivot)
elif isinstance(ctxt, CondContext):
return switch(grad, ctxt.pred, name="merge_grad")
else:
num_inputs = len(op.inputs)
cond = [math_ops.equal(op.outputs[1], i) for i in xrange(num_inputs)]
return [Switch(grad, cond[i])[1] for i in xrange(num_inputs)]
@ops.RegisterGradient("Exit")
def _ExitGrad(op, grad):
# pylint: disable=protected-access
forward_ctxt = op._get_control_flow_context()
# pylint: enable=protected-access
if not forward_ctxt.back_prop:
return None
grad_ctxt = forward_ctxt.grad_context
grad_ctxt.AddName(grad.name)
return enter(grad, grad_ctxt.name, is_constant=False,
parallel_iterations=forward_ctxt.parallel_iterations,
name="b_exit")
@ops.RegisterGradient("NextIteration")
def _NextIterationGrad(_, grad):
return next_iteration(grad)
@ops.RegisterGradient("Enter")
def _EnterGrad(op, grad):
op = GetRealOp(op)
# pylint: disable=protected-access
forward_ctxt = op._get_control_flow_context()
# pylint: enable=protected-access
grad_ctxt = forward_ctxt.grad_context
if grad_ctxt:
if op.get_attr("is_constant"):
# Add a gradient accumulator for every loop invariant.
result = grad_ctxt.AddBackPropAccumulateLoop(grad)
else:
result = exit(grad)
return result
else:
return grad
@ops.RegisterGradient("RefEnter")
def _RefEnterGrad(op, grad):
return _EnterGrad(op, grad)
@ops.RegisterGradient("LoopCond")
def _LoopCondGrad(_):
return None
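# Illustrative sketch (not part of this module; "MyOp" is a hypothetical op
# type): gradient functions are registered per op type and receive the
# forward op plus one incoming gradient per forward output.
#
#   @ops.RegisterGradient("MyOp")
#   def _MyOpGrad(op, grad):
#     return grad  # pass-through for a single-input, single-output op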
|
{
"content_hash": "cd824873d3cc02104b0d33ba2e415467",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 76,
"avg_line_length": 31.18095238095238,
"alnum_prop": 0.6973121563836285,
"repo_name": "MemeticParadigm/TensorFlow",
"id": "d6a0c6e6c27a3a53448e0ad1db2daace4569f91b",
"size": "3274",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/control_flow_grad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "127104"
},
{
"name": "C++",
"bytes": "4901913"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "637241"
},
{
"name": "Java",
"bytes": "44388"
},
{
"name": "JavaScript",
"bytes": "5067"
},
{
"name": "Objective-C",
"bytes": "630"
},
{
"name": "Protocol Buffer",
"bytes": "45213"
},
{
"name": "Python",
"bytes": "2473570"
},
{
"name": "Shell",
"bytes": "1714"
},
{
"name": "TypeScript",
"bytes": "237446"
}
],
"symlink_target": ""
}
|
print "hello dude!!!"
print "hello dude!!!"
print "hello dude!!!"
print "hello dude!!!"
print "hello dude!!!"
print "hello dude!!!"
print "hello dude!!!"
print "hello dude!!!"
print "hello dude!!!"
print "hello dude!!!"
print "hello dude!!!"
print "hello dude!!!"
print "hello dude!!!"
|
{
"content_hash": "672703f4873c4c9a603d57beb89f6399",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 21,
"avg_line_length": 20.5,
"alnum_prop": 0.6341463414634146,
"repo_name": "ramaganapathy1/AMuDA-Ir-back-end",
"id": "590218c05a393b9837cfde7626ff0264d6038def",
"size": "287",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "production/keyphrase/preprocess_audio/try.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5936"
},
{
"name": "CSS",
"bytes": "798083"
},
{
"name": "HTML",
"bytes": "1986444"
},
{
"name": "JavaScript",
"bytes": "2166655"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "Python",
"bytes": "3652977"
},
{
"name": "Shell",
"bytes": "20172"
}
],
"symlink_target": ""
}
|
"""Read and cache directory listings.
The listdir() routine returns a sorted list of the files in a directory,
using a cache to avoid reading the directory more often than necessary.
The annotate() routine appends slashes to directories."""
import os
__all__ = ["listdir", "opendir", "annotate", "reset"]
cache = {}
def reset():
"""Reset the cache completely."""
global cache
cache = {}
def listdir(path):
"""List directory contents, using cache."""
try:
cached_mtime, list = cache[path]
del cache[path]
except KeyError:
cached_mtime, list = -1, []
mtime = os.stat(path).st_mtime
if mtime != cached_mtime:
list = os.listdir(path)
list.sort()
cache[path] = mtime, list
return list
opendir = listdir # XXX backward compatibility
def annotate(head, list):
"""Add '/' suffixes to directories."""
for i in range(len(list)):
if os.path.isdir(os.path.join(head, list[i])):
list[i] = list[i] + '/'
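# A brief usage sketch, assuming some readable directory path (the path below
# is illustrative; any directory works):
#
# entries = listdir('/tmp')    # first call stats and reads the directory
# entries = listdir('/tmp')    # unchanged mtime: served from the cache
# annotate('/tmp', entries)    # subdirectories now carry a trailing '/'
# reset()                      # drop every cached listing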
|
{
"content_hash": "08ea49d1ea361f2bc1e2c6a2089255f4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 27.473684210526315,
"alnum_prop": 0.6015325670498084,
"repo_name": "ericlink/adms-server",
"id": "1ce80b1b6d65b661884191ae657da2e9a3f34c54",
"size": "1044",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "playframework-dist/play-1.1/python/Lib/dircache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "408"
},
{
"name": "C",
"bytes": "152256"
},
{
"name": "CSS",
"bytes": "97486"
},
{
"name": "HTML",
"bytes": "553901"
},
{
"name": "Java",
"bytes": "3086962"
},
{
"name": "JavaScript",
"bytes": "736134"
},
{
"name": "Python",
"bytes": "15750302"
},
{
"name": "SQLPL",
"bytes": "10111"
},
{
"name": "Scala",
"bytes": "1432"
},
{
"name": "Shell",
"bytes": "1369"
}
],
"symlink_target": ""
}
|
from typing import Collection, List, Optional, Set, Tuple, Union
from django.db.models.query import QuerySet
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from typing_extensions import TypedDict
from zerver.lib.exceptions import StreamAdministratorRequired
from zerver.lib.markdown import markdown_convert
from zerver.lib.request import JsonableError
from zerver.lib.stream_subscription import get_active_subscriptions_for_stream_id
from zerver.models import (
DefaultStreamGroup,
Realm,
RealmAuditLog,
Recipient,
Stream,
Subscription,
UserProfile,
active_non_guest_user_ids,
bulk_get_streams,
get_realm_stream,
get_stream,
get_stream_by_id_in_realm,
is_cross_realm_bot_email,
)
from zerver.tornado.django_api import send_event
class StreamDict(TypedDict, total=False):
"""
This type ultimately gets used in two places:
- we use it to create a stream
- we use it to specify a stream
It's possible we want a smaller type to use
for removing streams, but it would complicate
how we write the types for list_to_stream.
Note that these fields are just a subset of
the fields in the Stream model.
"""
name: str
description: str
invite_only: bool
is_web_public: bool
stream_post_policy: int
history_public_to_subscribers: Optional[bool]
message_retention_days: Optional[int]
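# A minimal sketch of populating such a dict before handing it to
# create_streams_if_needed or list_to_streams (all values are illustrative):
#
# stream_dict: StreamDict = {
#     "name": "social",
#     "description": "For non-work chatter",
#     "invite_only": False,
# }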
def get_default_value_for_history_public_to_subscribers(
realm: Realm,
invite_only: bool,
history_public_to_subscribers: Optional[bool],
) -> bool:
if invite_only:
if history_public_to_subscribers is None:
# A private stream's history is non-public by default
history_public_to_subscribers = False
else:
# If we later decide to support public streams without
# history, we can remove this code path.
history_public_to_subscribers = True
if realm.is_zephyr_mirror_realm:
# In the Zephyr mirroring model, history is unconditionally
# not public to subscribers, even for public streams.
history_public_to_subscribers = False
return history_public_to_subscribers
def render_stream_description(text: str) -> str:
return markdown_convert(text, no_previews=True).rendered_content
def send_stream_creation_event(stream: Stream, user_ids: List[int]) -> None:
event = dict(type="stream", op="create", streams=[stream.to_dict()])
send_event(stream.realm, event, user_ids)
def create_stream_if_needed(
realm: Realm,
stream_name: str,
*,
invite_only: bool = False,
is_web_public: bool = False,
stream_post_policy: int = Stream.STREAM_POST_POLICY_EVERYONE,
history_public_to_subscribers: Optional[bool] = None,
stream_description: str = "",
message_retention_days: Optional[int] = None,
acting_user: Optional[UserProfile] = None,
) -> Tuple[Stream, bool]:
history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
realm, invite_only, history_public_to_subscribers
)
(stream, created) = Stream.objects.get_or_create(
realm=realm,
name__iexact=stream_name,
defaults=dict(
name=stream_name,
description=stream_description,
invite_only=invite_only,
is_web_public=is_web_public,
stream_post_policy=stream_post_policy,
history_public_to_subscribers=history_public_to_subscribers,
is_in_zephyr_realm=realm.is_zephyr_mirror_realm,
message_retention_days=message_retention_days,
),
)
if created:
recipient = Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
stream.recipient = recipient
stream.rendered_description = render_stream_description(stream_description)
stream.save(update_fields=["recipient", "rendered_description"])
if stream.is_public():
send_stream_creation_event(stream, active_non_guest_user_ids(stream.realm_id))
else:
realm_admin_ids = [user.id for user in stream.realm.get_admin_users_and_bots()]
send_stream_creation_event(stream, realm_admin_ids)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
acting_user=acting_user,
modified_stream=stream,
event_type=RealmAuditLog.STREAM_CREATED,
event_time=event_time,
)
return stream, created
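# A hedged usage sketch (the realm and acting user objects are assumed to
# exist already; the names and values are illustrative):
#
# stream, created = create_stream_if_needed(
#     realm,
#     "design",
#     invite_only=True,
#     stream_description="Private design discussions",
#     acting_user=admin_user,
# )
# # `created` is False when a stream with that name already existed.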
def create_streams_if_needed(
realm: Realm, stream_dicts: List[StreamDict], acting_user: Optional[UserProfile] = None
) -> Tuple[List[Stream], List[Stream]]:
"""Note that stream_dict["name"] is assumed to already be stripped of
whitespace"""
added_streams: List[Stream] = []
existing_streams: List[Stream] = []
for stream_dict in stream_dicts:
stream, created = create_stream_if_needed(
realm,
stream_dict["name"],
invite_only=stream_dict.get("invite_only", False),
stream_post_policy=stream_dict.get(
"stream_post_policy", Stream.STREAM_POST_POLICY_EVERYONE
),
history_public_to_subscribers=stream_dict.get("history_public_to_subscribers"),
stream_description=stream_dict.get("description", ""),
message_retention_days=stream_dict.get("message_retention_days", None),
acting_user=acting_user,
)
if created:
added_streams.append(stream)
else:
existing_streams.append(stream)
return added_streams, existing_streams
def check_stream_name(stream_name: str) -> None:
if stream_name.strip() == "":
raise JsonableError(_("Invalid stream name '{}'").format(stream_name))
if len(stream_name) > Stream.MAX_NAME_LENGTH:
raise JsonableError(
_("Stream name too long (limit: {} characters).").format(Stream.MAX_NAME_LENGTH)
)
for i in stream_name:
if ord(i) == 0:
raise JsonableError(
_("Stream name '{}' contains NULL (0x00) characters.").format(stream_name)
)
def subscribed_to_stream(user_profile: UserProfile, stream_id: int) -> bool:
return Subscription.objects.filter(
user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM,
recipient__type_id=stream_id,
).exists()
def check_stream_access_based_on_stream_post_policy(sender: UserProfile, stream: Stream) -> None:
if sender.is_realm_admin or is_cross_realm_bot_email(sender.delivery_email):
pass
elif stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS:
raise JsonableError(_("Only organization administrators can send to this stream."))
elif (
stream.stream_post_policy == Stream.STREAM_POST_POLICY_MODERATORS
and not sender.is_moderator
):
raise JsonableError(
_("Only organization administrators and moderators can send to this stream.")
)
elif stream.stream_post_policy != Stream.STREAM_POST_POLICY_EVERYONE and sender.is_guest:
raise JsonableError(_("Guests cannot send to this stream."))
elif (
stream.stream_post_policy == Stream.STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS
and sender.is_provisional_member
):
raise JsonableError(_("New members cannot send to this stream."))
return
def access_stream_for_send_message(
sender: UserProfile, stream: Stream, forwarder_user_profile: Optional[UserProfile]
) -> None:
# Our caller is responsible for making sure that `stream` actually
# matches the realm of the sender.
try:
check_stream_access_based_on_stream_post_policy(sender, stream)
except JsonableError as e:
if sender.is_bot and sender.bot_owner is not None:
check_stream_access_based_on_stream_post_policy(sender.bot_owner, stream)
else:
raise JsonableError(e.msg)
# forwarder_user_profile cases should be analyzed first, as incorrect
# message forging is cause for denying access regardless of any other factors.
if forwarder_user_profile is not None and forwarder_user_profile != sender:
if (
forwarder_user_profile.can_forge_sender
and forwarder_user_profile.realm_id == sender.realm_id
and sender.realm_id == stream.realm_id
):
return
else:
raise JsonableError(_("User not authorized for this query"))
if is_cross_realm_bot_email(sender.delivery_email):
return
if stream.realm_id != sender.realm_id:
# Sending to other realm's streams is always disallowed,
# with the exception of cross-realm bots.
raise JsonableError(_("User not authorized for this query"))
if stream.is_web_public:
# Even guest users can write to web-public streams.
return
if not (stream.invite_only or sender.is_guest):
# This is a public stream and sender is not a guest user
return
if subscribed_to_stream(sender, stream.id):
        # It is private, but you are subscribed
return
if sender.can_forge_sender:
# can_forge_sender allows sending to any stream in the realm.
return
if sender.is_bot and (
sender.bot_owner is not None and subscribed_to_stream(sender.bot_owner, stream.id)
):
# Bots can send to any stream their owner can.
return
# All other cases are an error.
raise JsonableError(_("Not authorized to send to stream '{}'").format(stream.name))
def check_for_exactly_one_stream_arg(stream_id: Optional[int], stream: Optional[str]) -> None:
if stream_id is None and stream is None:
raise JsonableError(_("Please supply 'stream'."))
if stream_id is not None and stream is not None:
raise JsonableError(_("Please choose one: 'stream' or 'stream_id'."))
def check_stream_access_for_delete_or_update(
user_profile: UserProfile, stream: Stream, sub: Optional[Subscription] = None
) -> None:
error = _("Invalid stream id")
if stream.realm_id != user_profile.realm_id:
raise JsonableError(error)
if user_profile.is_realm_admin:
return
if sub is None and stream.invite_only:
raise JsonableError(error)
if sub is not None and sub.is_stream_admin:
return
raise StreamAdministratorRequired()
def access_stream_for_delete_or_update(
user_profile: UserProfile, stream_id: int
) -> Tuple[Stream, Optional[Subscription]]:
try:
stream = Stream.objects.get(id=stream_id)
except Stream.DoesNotExist:
raise JsonableError(_("Invalid stream id"))
try:
sub = Subscription.objects.get(
user_profile=user_profile, recipient=stream.recipient, active=True
)
except Subscription.DoesNotExist:
sub = None
check_stream_access_for_delete_or_update(user_profile, stream, sub)
return (stream, sub)
# Only set allow_realm_admin flag to True when you want to allow realm admin to
# access unsubscribed private stream content.
def access_stream_common(
user_profile: UserProfile,
stream: Stream,
error: str,
require_active: bool = True,
allow_realm_admin: bool = False,
) -> Optional[Subscription]:
"""Common function for backend code where the target use attempts to
access the target stream, returning all the data fetched along the
way. If that user does not have permission to access that stream,
we throw an exception. A design goal is that the error message is
the same for streams you can't access and streams that don't exist."""
# First, we don't allow any access to streams in other realms.
if stream.realm_id != user_profile.realm_id:
# Callers should verify this on their own, so this functions as defensive code.
raise AssertionError("user_profile and stream realms don't match")
try:
sub = Subscription.objects.get(
user_profile=user_profile, recipient_id=stream.recipient_id, active=require_active
)
except Subscription.DoesNotExist:
sub = None
# Any realm user, even guests, can access web_public streams.
if stream.is_web_public:
return sub
# If the stream is in your realm and public, you can access it.
if stream.is_public() and not user_profile.is_guest:
return sub
# Or if you are subscribed to the stream, you can access it.
if sub is not None:
return sub
# For some specific callers (e.g. getting list of subscribers,
# removing other users from a stream, and updating stream name and
# description), we allow realm admins to access stream even if
# they are not subscribed to a private stream.
if user_profile.is_realm_admin and allow_realm_admin:
return sub
# Otherwise it is a private stream and you're not on it, so throw
# an error.
raise JsonableError(error)
def access_stream_by_id(
user_profile: UserProfile,
stream_id: int,
require_active: bool = True,
allow_realm_admin: bool = False,
) -> Tuple[Stream, Optional[Subscription]]:
error = _("Invalid stream id")
try:
stream = get_stream_by_id_in_realm(stream_id, user_profile.realm)
except Stream.DoesNotExist:
raise JsonableError(error)
sub = access_stream_common(
user_profile,
stream,
error,
require_active=require_active,
allow_realm_admin=allow_realm_admin,
)
return (stream, sub)
def get_public_streams_queryset(realm: Realm) -> "QuerySet[Stream]":
return Stream.objects.filter(realm=realm, invite_only=False, history_public_to_subscribers=True)
def get_web_public_streams_queryset(realm: Realm) -> "QuerySet[Stream]":
# In theory, is_web_public=True implies invite_only=False and
# history_public_to_subscribers=True, but it's safer to include
# this in the query.
return Stream.objects.filter(
realm=realm,
deactivated=False,
invite_only=False,
history_public_to_subscribers=True,
is_web_public=True,
)
def check_stream_name_available(realm: Realm, name: str) -> None:
check_stream_name(name)
try:
get_stream(name, realm)
raise JsonableError(_("Stream name '{}' is already taken.").format(name))
except Stream.DoesNotExist:
pass
def access_stream_by_name(
user_profile: UserProfile, stream_name: str, allow_realm_admin: bool = False
) -> Tuple[Stream, Optional[Subscription]]:
error = _("Invalid stream name '{}'").format(stream_name)
try:
stream = get_realm_stream(stream_name, user_profile.realm_id)
except Stream.DoesNotExist:
raise JsonableError(error)
sub = access_stream_common(
user_profile,
stream,
error,
allow_realm_admin=allow_realm_admin,
)
return (stream, sub)
def access_web_public_stream(stream_id: int, realm: Realm) -> Stream:
error = _("Invalid stream id")
try:
stream = get_stream_by_id_in_realm(stream_id, realm)
except Stream.DoesNotExist:
raise JsonableError(error)
if not stream.is_web_public:
raise JsonableError(error)
return stream
def access_stream_for_unmute_topic_by_name(
user_profile: UserProfile, stream_name: str, error: str
) -> Stream:
"""
It may seem a little silly to have this helper function for unmuting
topics, but it gets around a linter warning, and it helps to be able
to review all security-related stuff in one place.
Our policy for accessing streams when you unmute a topic is that you
don't necessarily need to have an active subscription or even "legal"
access to the stream. Instead, we just verify the stream_id has been
muted in the past (not here, but in the caller).
Long term, we'll probably have folks just pass us in the id of the
MutedTopic row to unmute topics.
"""
try:
stream = get_stream(stream_name, user_profile.realm)
except Stream.DoesNotExist:
raise JsonableError(error)
return stream
def access_stream_for_unmute_topic_by_id(
user_profile: UserProfile, stream_id: int, error: str
) -> Stream:
try:
stream = Stream.objects.get(id=stream_id, realm_id=user_profile.realm_id)
except Stream.DoesNotExist:
raise JsonableError(error)
return stream
def private_stream_user_ids(stream_id: int) -> Set[int]:
subscriptions = get_active_subscriptions_for_stream_id(
stream_id, include_deactivated_users=False
)
return {sub["user_profile_id"] for sub in subscriptions.values("user_profile_id")}
def public_stream_user_ids(stream: Stream) -> Set[int]:
guest_subscriptions = get_active_subscriptions_for_stream_id(
stream.id, include_deactivated_users=False
).filter(user_profile__role=UserProfile.ROLE_GUEST)
guest_subscriptions = {
sub["user_profile_id"] for sub in guest_subscriptions.values("user_profile_id")
}
return set(active_non_guest_user_ids(stream.realm_id)) | guest_subscriptions
def can_access_stream_user_ids(stream: Stream) -> Set[int]:
# return user ids of users who can access the attributes of a
# stream, such as its name/description. Useful for sending events
# to all users with access to a stream's attributes.
if stream.is_public():
# For a public stream, this is everyone in the realm
# except unsubscribed guest users
return public_stream_user_ids(stream)
else:
# for a private stream, it's subscribers plus realm admins.
return private_stream_user_ids(stream.id) | {
user.id for user in stream.realm.get_admin_users_and_bots()
}
def can_access_stream_history(user_profile: UserProfile, stream: Stream) -> bool:
"""Determine whether the provided user is allowed to access the
    history of the target stream.
This is used by the caller to determine whether this user can get
historical messages before they joined for a narrowing search.
Because of the way our search is currently structured,
we may be passed an invalid stream here. We return
False in that situation, and subsequent code will do
validation and raise the appropriate JsonableError.
Note that this function should only be used in contexts where
access_stream is being called elsewhere to confirm that the user
can actually see this stream.
"""
if user_profile.realm_id != stream.realm_id:
raise AssertionError("user_profile and stream realms don't match")
if stream.is_web_public:
return True
if stream.is_history_realm_public() and not user_profile.is_guest:
return True
if stream.is_history_public_to_subscribers():
# In this case, we check if the user is subscribed.
error = _("Invalid stream name '{}'").format(stream.name)
try:
access_stream_common(user_profile, stream, error)
except JsonableError:
return False
return True
return False
def can_access_stream_history_by_name(user_profile: UserProfile, stream_name: str) -> bool:
try:
stream = get_stream(stream_name, user_profile.realm)
except Stream.DoesNotExist:
return False
return can_access_stream_history(user_profile, stream)
def can_access_stream_history_by_id(user_profile: UserProfile, stream_id: int) -> bool:
try:
stream = get_stream_by_id_in_realm(stream_id, user_profile.realm)
except Stream.DoesNotExist:
return False
return can_access_stream_history(user_profile, stream)
def filter_stream_authorization(
user_profile: UserProfile, streams: Collection[Stream]
) -> Tuple[List[Stream], List[Stream]]:
recipient_ids = [stream.recipient_id for stream in streams]
subscribed_recipient_ids = set(
Subscription.objects.filter(
user_profile=user_profile, recipient_id__in=recipient_ids, active=True
).values_list("recipient_id", flat=True)
)
unauthorized_streams: List[Stream] = []
for stream in streams:
# The user is authorized for their own streams
if stream.recipient_id in subscribed_recipient_ids:
continue
# Web public streams are accessible even to guests
if stream.is_web_public:
continue
# Members and administrators are authorized for public streams
if not stream.invite_only and not user_profile.is_guest:
continue
unauthorized_streams.append(stream)
authorized_streams = [
stream
for stream in streams
if stream.id not in {stream.id for stream in unauthorized_streams}
]
return authorized_streams, unauthorized_streams
def list_to_streams(
streams_raw: Collection[StreamDict],
user_profile: UserProfile,
autocreate: bool = False,
admin_access_required: bool = False,
) -> Tuple[List[Stream], List[Stream]]:
"""Converts list of dicts to a list of Streams, validating input in the process
For each stream name, we validate it to ensure it meets our
requirements for a proper stream name using check_stream_name.
This function in autocreate mode should be atomic: either an exception will be raised
during a precheck, or all the streams specified will have been created if applicable.
@param streams_raw The list of stream dictionaries to process;
names should already be stripped of whitespace by the caller.
@param user_profile The user for whom we are retrieving the streams
@param autocreate Whether we should create streams if they don't already exist
"""
# Validate all streams, getting extant ones, then get-or-creating the rest.
stream_set = {stream_dict["name"] for stream_dict in streams_raw}
for stream_name in stream_set:
# Stream names should already have been stripped by the
# caller, but it makes sense to verify anyway.
assert stream_name == stream_name.strip()
check_stream_name(stream_name)
existing_streams: List[Stream] = []
missing_stream_dicts: List[StreamDict] = []
existing_stream_map = bulk_get_streams(user_profile.realm, stream_set)
if admin_access_required:
existing_recipient_ids = [stream.recipient_id for stream in existing_stream_map.values()]
subs = Subscription.objects.filter(
user_profile=user_profile, recipient_id__in=existing_recipient_ids, active=True
)
sub_map = {sub.recipient_id: sub for sub in subs}
for stream in existing_stream_map.values():
sub = sub_map.get(stream.recipient_id, None)
check_stream_access_for_delete_or_update(user_profile, stream, sub)
message_retention_days_not_none = False
for stream_dict in streams_raw:
stream_name = stream_dict["name"]
stream = existing_stream_map.get(stream_name.lower())
if stream is None:
if stream_dict.get("message_retention_days", None) is not None:
message_retention_days_not_none = True
missing_stream_dicts.append(stream_dict)
else:
existing_streams.append(stream)
if len(missing_stream_dicts) == 0:
# This is the happy path for callers who expected all of these
# streams to exist already.
created_streams: List[Stream] = []
else:
# autocreate=True path starts here
if not user_profile.can_create_streams():
            # The guest user case is not handled here, as it is
            # handled by the decorator in add_subscriptions_backend.
raise JsonableError(_("Insufficient permission"))
elif not autocreate:
raise JsonableError(
_("Stream(s) ({}) do not exist").format(
", ".join(stream_dict["name"] for stream_dict in missing_stream_dicts),
)
)
elif message_retention_days_not_none:
if not user_profile.is_realm_owner:
raise JsonableError(_("User cannot create stream with this settings."))
user_profile.realm.ensure_not_on_limited_plan()
# We already filtered out existing streams, so dup_streams
# will normally be an empty list below, but we protect against somebody
# else racing to create the same stream. (This is not an entirely
# paranoid approach, since often on Zulip two people will discuss
# creating a new stream, and both people eagerly do it.)
created_streams, dup_streams = create_streams_if_needed(
realm=user_profile.realm, stream_dicts=missing_stream_dicts, acting_user=user_profile
)
existing_streams += dup_streams
return existing_streams, created_streams
def access_default_stream_group_by_id(realm: Realm, group_id: int) -> DefaultStreamGroup:
try:
return DefaultStreamGroup.objects.get(realm=realm, id=group_id)
except DefaultStreamGroup.DoesNotExist:
raise JsonableError(_("Default stream group with id '{}' does not exist.").format(group_id))
def get_stream_by_narrow_operand_access_unchecked(operand: Union[str, int], realm: Realm) -> Stream:
"""This is required over access_stream_* in certain cases where
we need the stream data only to prepare a response that user can access
and not send it out to unauthorized recipients.
"""
if isinstance(operand, str):
return get_stream(operand, realm)
return get_stream_by_id_in_realm(operand, realm)
|
{
"content_hash": "975b4bb226ac5e814eafe6832774d214",
"timestamp": "",
"source": "github",
"line_count": 703,
"max_line_length": 100,
"avg_line_length": 36.61735419630156,
"alnum_prop": 0.6699168673762722,
"repo_name": "punchagan/zulip",
"id": "d945663e4a0d068b6891dce023e9c7c02bbf575b",
"size": "25742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/lib/streams.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "453615"
},
{
"name": "Dockerfile",
"bytes": "4898"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "607321"
},
{
"name": "Handlebars",
"bytes": "315160"
},
{
"name": "JavaScript",
"bytes": "3572990"
},
{
"name": "Perl",
"bytes": "9884"
},
{
"name": "Puppet",
"bytes": "94991"
},
{
"name": "Python",
"bytes": "8750579"
},
{
"name": "Ruby",
"bytes": "3875"
},
{
"name": "Shell",
"bytes": "134468"
},
{
"name": "TypeScript",
"bytes": "223296"
}
],
"symlink_target": ""
}
|
import os
from resolverapi import create_app
app = create_app(os.environ.get('RESOLVER_ENV', 'prod'))
if __name__ == '__main__':
app.run()
|
{
"content_hash": "643a139d4b842d0bc637d1ff4096ebf0",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 56,
"avg_line_length": 20.714285714285715,
"alnum_prop": 0.6413793103448275,
"repo_name": "opendns/OpenResolve",
"id": "429ecd0023d7557341af3c32b423bee194862e4e",
"size": "145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "26803"
}
],
"symlink_target": ""
}
|
def find_all_around(row, column, matrix):
    # Collect the 2x2 square anchored at (row, column), reading the cell
    # itself (c), its right (r), down (d) and down-right (dr) neighbours.
    # Returns None when the square would fall outside the matrix.
    try:
        c = matrix[row][column]
        d = matrix[row + 1][column]
        r = matrix[row][column + 1]
        dr = matrix[row + 1][column + 1]
        return [c, r, d, dr]
    except IndexError:
        return None
def differentSquares(matrix):
row = 0
arr = []
while row < len(matrix):
column = 0
while column < len(matrix[row]):
arr.append(find_all_around(row, column, matrix))
column += 1
row += 1
a = set(tuple(element) for element in arr if element is not None)
return len(a)
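# A small worked example of the counting semantics above; the matrix values
# are illustrative.
if __name__ == '__main__':
    example = [[1, 2, 1],
               [2, 2, 2],
               [2, 2, 2]]
    # Distinct 2x2 sub-squares: [1, 2, 2, 2], [2, 1, 2, 2], [2, 2, 2, 2]
    print(differentSquares(example))  # -> 3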
|
{
"content_hash": "61f2a644f82c62518de2e8e11cce3b6e",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 69,
"avg_line_length": 28.09090909090909,
"alnum_prop": 0.5226537216828478,
"repo_name": "emirot/codefights",
"id": "b9ccdae379211c71e05125f49c0fb71519957cd6",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intro/differentSquares.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "104702"
}
],
"symlink_target": ""
}
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from .._common_models import (
WindowsAzureData,
)
class Entity(WindowsAzureData):
    ''' Entity class. The attributes of the entity are created dynamically. '''
pass
class EntityProperty(WindowsAzureData):
    ''' Entity property. Contains a type and a value. '''
def __init__(self, type=None, value=None):
self.type = type
self.value = value
class Table(WindowsAzureData):
    ''' Only for IntelliSense and telling the user the return type. '''
pass
class TableSharedAccessPermissions(object):
'''Permissions for a table.'''
'''Get entities and query entities.'''
QUERY = 'r'
'''Add entities.'''
ADD = 'a'
'''Update entities.'''
UPDATE = 'u'
'''Delete entities.'''
DELETE = 'd'
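# A minimal sketch of combining the flags above into the permission string
# used for a shared access signature (the combination shown is illustrative):
#
# permissions = TableSharedAccessPermissions.QUERY + TableSharedAccessPermissions.ADD
# # -> 'ra': query plus add access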
|
{
"content_hash": "fd21e04cfa3e781e8d5bfd4ab478b1df",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 79,
"avg_line_length": 28.796296296296298,
"alnum_prop": 0.594855305466238,
"repo_name": "rjhunter8285/nsc-cloudproject-s22016",
"id": "1961b196a4aa397fe5010a68f5f11c4da03b4c9e",
"size": "1557",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "prototype/api/FlaskApp/FlaskApp/python_modules/azure/storage/table/models.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5101"
},
{
"name": "HTML",
"bytes": "50636"
},
{
"name": "Java",
"bytes": "48035"
},
{
"name": "JavaScript",
"bytes": "1359974"
},
{
"name": "PHP",
"bytes": "2175"
},
{
"name": "Python",
"bytes": "13485797"
},
{
"name": "Shell",
"bytes": "6524"
}
],
"symlink_target": ""
}
|
import numpy as np
from functools import reduce
from string import ascii_uppercase
from ..externals.six import string_types
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def _f_oneway(*args):
"""Perform a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test
p-value : float
The associated p-value from the F-distribution
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
    3. The population standard deviations of the groups are all equal. This
       property is known as homoscedasticity.
    If these assumptions are not true for a given set of data, it may still be
    possible to use the Kruskal-Wallis H-test (`stats.kruskal`_) although with
    some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See scipy.stats.f_oneway that should give the same results while
being less efficient
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
from scipy import stats
sf = stats.f.sf
n_classes = len(args)
n_samples_per_class = np.array([len(a) for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = reduce(lambda x, y: x + y,
[np.sum(a ** 2, axis=0) for a in args])
sums_args = [np.sum(a, axis=0) for a in args]
square_of_sums_alldata = reduce(lambda x, y: x + y, sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
    prob = sf(f, dfbn, dfwn)
return f, prob
def f_oneway(*args):
"""Call scipy.stats.f_oneway, but return only f-value."""
return _f_oneway(*args)[0]
def _map_effects(n_factors, effects):
"""Map effects to indices."""
if n_factors > len(ascii_uppercase):
raise ValueError('Maximum number of factors supported is 26')
factor_names = list(ascii_uppercase[:n_factors])
if isinstance(effects, string_types):
if '*' in effects and ':' in effects:
raise ValueError('Not "*" and ":" permitted in effects')
elif '+' in effects and ':' in effects:
raise ValueError('Not "+" and ":" permitted in effects')
elif effects == 'all':
effects = None
elif len(effects) == 1 or ':' in effects:
effects = [effects]
elif '+' in effects:
# all main effects
effects = effects.split('+')
elif '*' in effects:
pass # handle later
else:
raise ValueError('"{0}" is not a valid option for "effects"'
.format(effects))
if isinstance(effects, list):
bad_names = [e for e in effects if e not in factor_names]
        if bad_names:
            raise ValueError('Effect names: {0} are not valid. They should '
                             'be the first `n_factors` ({1}) characters from '
                             'the alphabet'.format(bad_names, n_factors))
indices = list(np.arange(2 ** n_factors - 1))
names = list()
for this_effect in indices:
contrast_idx = _get_contrast_indices(this_effect + 1, n_factors)
this_code = (n_factors - 1) - np.where(contrast_idx == 1)[0]
this_name = [factor_names[e] for e in this_code]
this_name.sort()
names.append(':'.join(this_name))
if effects is None or isinstance(effects, string_types):
effects_ = names
else:
effects_ = effects
selection = [names.index(sel) for sel in effects_]
names = [names[sel] for sel in selection]
if isinstance(effects, string_types):
if '*' in effects:
# hierarchical order of effects
# the * based effect can be used as stop index
sel_ind = names.index(effects.replace('*', ':')) + 1
names = names[:sel_ind]
selection = selection[:sel_ind]
return selection, names
def _get_contrast_indices(effect_idx, n_factors): # noqa: D401
"""Henson's factor coding, see num2binvec."""
binrepr = np.binary_repr(effect_idx, n_factors)
return np.array([int(i) for i in binrepr], dtype=int)
def _iter_contrasts(n_subjects, factor_levels, effect_picks):
"""Set up contrasts."""
from scipy.signal import detrend
sc = []
n_factors = len(factor_levels)
# prepare computation of Kronecker products
for n_levels in factor_levels:
# for each factor append
# 1) column vector of length == number of levels,
# 2) square matrix with diagonal == number of levels
# main + interaction effects for contrasts
sc.append([np.ones([n_levels, 1]),
detrend(np.eye(n_levels), type='constant')])
for this_effect in effect_picks:
contrast_idx = _get_contrast_indices(this_effect + 1, n_factors)
c_ = sc[0][contrast_idx[n_factors - 1]]
for i_contrast in range(1, n_factors):
this_contrast = contrast_idx[(n_factors - 1) - i_contrast]
c_ = np.kron(c_, sc[i_contrast][this_contrast])
df1 = np.linalg.matrix_rank(c_)
df2 = df1 * (n_subjects - 1)
yield c_, df1, df2
def f_threshold_mway_rm(n_subjects, factor_levels, effects='A*B',
pvalue=0.05):
"""Compute f-value thesholds for a two-way ANOVA.
Parameters
----------
n_subjects : int
The number of subjects to be analyzed.
factor_levels : list-like
The number of levels per factor.
effects : str
A string denoting the effect to be returned. The following
mapping is currently supported:
* ``'A'``: main effect of A
* ``'B'``: main effect of B
* ``'A:B'``: interaction effect
* ``'A+B'``: both main effects
* ``'A*B'``: all three effects
pvalue : float
The p-value to be thresholded.
Returns
-------
f_threshold : list | float
list of f-values for each effect if the number of effects
        requested > 1, else float.
See Also
--------
f_oneway
f_mway_rm
Notes
-----
.. versionadded:: 0.10
"""
from scipy.stats import f
effect_picks, _ = _map_effects(len(factor_levels), effects)
f_threshold = []
for _, df1, df2 in _iter_contrasts(n_subjects, factor_levels,
effect_picks):
f_threshold.append(f(df1, df2).isf(pvalue))
return f_threshold if len(f_threshold) > 1 else f_threshold[0]
def f_mway_rm(data, factor_levels, effects='all', alpha=0.05,
correction=False, return_pvals=True):
"""Compute M-way repeated measures ANOVA for fully balanced designs.
Parameters
----------
data : ndarray
3D array where the first two dimensions are compliant
with a subjects X conditions scheme where the first
factor repeats slowest::
A1B1 A1B2 A2B1 A2B2
subject 1 1.34 2.53 0.97 1.74
subject ... .... .... .... ....
subject k 2.45 7.90 3.09 4.76
The last dimensions is thought to carry the observations
for mass univariate analysis.
factor_levels : list-like
The number of levels per factor.
effects : str | list
A string denoting the effect to be returned. The following
mapping is currently supported (example with 2 factors):
* ``'A'``: main effect of A
* ``'B'``: main effect of B
* ``'A:B'``: interaction effect
* ``'A+B'``: both main effects
* ``'A*B'``: all three effects
* ``'all'``: all effects (equals 'A*B' in a 2 way design)
If list, effect names are used: ``['A', 'B', 'A:B']``.
alpha : float
The significance threshold.
correction : bool
The correction method to be employed if one factor has more than two
levels. If True, sphericity correction using the Greenhouse-Geisser
method will be applied.
return_pvals : bool
If True, return p values corresponding to f values.
Returns
-------
f_vals : ndarray
An array of f values with length corresponding to the number
of effects estimated. The shape depends on the number of effects
estimated.
p_vals : ndarray
If not requested via return_pvals, defaults to an empty array.
See Also
--------
f_oneway
f_threshold_mway_rm
Notes
-----
.. versionadded:: 0.10
"""
from scipy.stats import f
if data.ndim == 2: # general purpose support, e.g. behavioural data
data = data[:, :, np.newaxis]
elif data.ndim > 3: # let's allow for some magic here.
data = data.reshape(
data.shape[0], data.shape[1], np.prod(data.shape[2:]))
effect_picks, _ = _map_effects(len(factor_levels), effects)
n_obs = data.shape[2]
n_replications = data.shape[0]
# put last axis in front to 'iterate' over mass univariate instances.
data = np.rollaxis(data, 2)
fvalues, pvalues = [], []
for c_, df1, df2 in _iter_contrasts(n_replications, factor_levels,
effect_picks):
y = np.dot(data, c_)
b = np.mean(y, axis=1)[:, np.newaxis, :]
ss = np.sum(np.sum(y * b, axis=2), axis=1)
mse = (np.sum(np.sum(y * y, axis=2), axis=1) - ss) / (df2 / df1)
fvals = ss / mse
fvalues.append(fvals)
if correction:
# sample covariances, leave off "/ (y.shape[1] - 1)" norm because
# it falls out.
v = np.array([np.dot(y_.T, y_) for y_ in y])
v = (np.array([np.trace(vv) for vv in v]) ** 2 /
(df1 * np.sum(np.sum(v * v, axis=2), axis=1)))
eps = v
df1, df2 = np.zeros(n_obs) + df1, np.zeros(n_obs) + df2
if correction:
# numerical imprecision can cause eps=0.99999999999999989
# even with a single category, so never let our degrees of
# freedom drop below 1.
df1, df2 = [np.maximum(d[None, :] * eps, 1.) for d in (df1, df2)]
if return_pvals:
pvals = f(df1, df2).sf(fvals)
else:
pvals = np.empty(0)
pvalues.append(pvals)
# handle single effect returns
return [np.squeeze(np.asarray(vv)) for vv in (fvalues, pvalues)]
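# A hedged usage sketch for a fully balanced 2 x 2 repeated-measures design
# (all shapes and values are illustrative): 10 subjects, 4 condition cells
# ordered A1B1 A1B2 A2B1 A2B2, and 50 mass-univariate observations.
#
# data = np.random.randn(10, 4, 50)
# fvals, pvals = f_mway_rm(data, factor_levels=[2, 2], effects='A*B')
# thresholds = f_threshold_mway_rm(10, [2, 2], effects='A*B', pvalue=0.05)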
|
{
"content_hash": "eac7bf4d20f8a9f90eb65ae512b52e1a",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 78,
"avg_line_length": 34.7289156626506,
"alnum_prop": 0.5853425845620122,
"repo_name": "nicproulx/mne-python",
"id": "831c8baa49f2a1c7b861e0ff7e04453734e88573",
"size": "11733",
"binary": false,
"copies": "3",
"ref": "refs/heads/placeholder",
"path": "mne/stats/parametric.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3723"
},
{
"name": "Python",
"bytes": "5866703"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from icontrol.exceptions import iControlUnexpectedHTTPError
def test_cluster_load(request, mgmt_root):
    # Load will produce an exception on a non-cluster BIG-IP.
# iControlUnexpectedHTTPError: 404 Unexpected Error: Not Found for uri:
try:
assert str(mgmt_root.tm.sys.cluster.default.load().kind) == 'tm:sys:cluster:clusterstate'
except iControlUnexpectedHTTPError as err:
assert ('01020036:3: The requested cluster (default) was not found.' in str(err))
def test_cluster_stats_load(request, mgmt_root):
    # Load will give a result even on a non-cluster BIG-IP; however, the payload will be almost empty.
assert str(mgmt_root.tm.sys.cluster.stats.load().kind) == 'tm:sys:cluster:clustercollectionstats'
def test_cluster_default_stats_load(request, mgmt_root):
    # Load will produce an exception on a non-cluster BIG-IP.
# iControlUnexpectedHTTPError: 404 Unexpected Error: Not Found for uri:
try:
assert str(mgmt_root.tm.sys.cluster.default.stats.load().kind) == 'tm:sys:cluster:clusterstats'
except iControlUnexpectedHTTPError as err:
assert ('01020036:3: The requested cluster (default) was not found.' in str(err))
|
{
"content_hash": "0e67f9b5f0e197bdd8d56c1e0948f12b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 103,
"avg_line_length": 49,
"alnum_prop": 0.7363945578231292,
"repo_name": "F5Networks/f5-common-python",
"id": "5ade5b4208a834fd4ba8058803bef7fe6e94ddca",
"size": "1758",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "f5/bigip/tm/sys/test/functional/test_cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "713"
},
{
"name": "Groovy",
"bytes": "4321"
},
{
"name": "Python",
"bytes": "2705690"
},
{
"name": "Shell",
"bytes": "6398"
}
],
"symlink_target": ""
}
|
"""Regenerates all the .isolated test data files.
Keep in sync with ../run_isolated_smoke_test.py.
"""
import glob
import hashlib
import json
import os
import sys
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Ordering is important to keep this script simple.
INCLUDES_TO_FIX = [
('manifest2.isolated', ['manifest1.isolated']),
('check_files.isolated', ['gtest_fake.isolated', 'manifest2.isolated']),
]
def sha1(filename):
with open(filename, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
def load(filename):
with open(filename, 'r') as f:
return json.load(f)
def save(filename, data):
"""Saves data as json properly formatted.
  Strips the spurious whitespace json.dump likes to add at the end of lines
  and adds a trailing \n.
"""
out = ''.join(
'%s\n' % l.rstrip()
for l in json.dumps(data, indent=2, sort_keys=True).splitlines())
with open(filename, 'wb') as f:
f.write(out)
def main():
# Simplify our life.
os.chdir(ROOT_DIR)
# First, reformat all the files.
for filename in glob.glob('*.isolated'):
save(filename, load(filename))
# Then update the SHA-1s.
for manifest, includes in INCLUDES_TO_FIX:
data = load(manifest)
data['includes'] = [sha1(f) for f in includes]
save(manifest, data)
return 0
if __name__ == '__main__':
sys.exit(main())
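# A small sketch of what save() produces, assuming a trivial payload (the
# file name and contents are illustrative):
#
# save('example.isolated', {'version': 1, 'includes': []})
# # -> pretty-printed, key-sorted JSON with rstripped lines and a trailing \n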
|
{
"content_hash": "43dacc62ce1af6fc73cf0184b8167a87",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 79,
"avg_line_length": 21.85483870967742,
"alnum_prop": 0.6583025830258302,
"repo_name": "espadrine/opera",
"id": "bfe3da3b660b248a2594de3f6e1427b7658a9452",
"size": "1544",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chromium/src/tools/swarm_client/tests/run_isolated/regen_test_data.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
'''
IMPORTS
'''
from datetime import timedelta, datetime
import requests
import os
import re
import copy
import json
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
'''
GLOBAL VARS
'''
API_KEY = demisto.params().get('api_key')
BASE_PATH = '{}/api/v1'.format(demisto.params().get('server'))
HTTP_HEADERS = {
'Content-Type': 'application/json'
}
USE_SSL = not demisto.params().get('unsecure')
MESSAGE_STATUS = demisto.params().get('message_status')
'''
SEARCH ATTRIBUTES VALID VALUES
'''
REJECTION_REASONS = ['ETP102', 'ETP103', 'ETP104', 'ETP200', 'ETP201', 'ETP203', 'ETP204', 'ETP205',
'ETP300', 'ETP301', 'ETP302', 'ETP401', 'ETP402', 'ETP403', 'ETP404', 'ETP405']
STATUS_VALUES = ["accepted", "deleted", "delivered", "delivered (retroactive)", "dropped",
"dropped oob", "dropped (oob retroactive)", "permanent failure", "processing",
"quarantined", "rejected", "temporary failure"]
'''
BASIC FUNCTIONS
'''
def set_proxies():
    if not demisto.params().get('proxy', False):
        # pop() with a default avoids a KeyError when a variable is not set
        os.environ.pop('HTTP_PROXY', None)
        os.environ.pop('HTTPS_PROXY', None)
        os.environ.pop('http_proxy', None)
        os.environ.pop('https_proxy', None)
def listify(comma_separated_list):
if isinstance(comma_separated_list, list):
return comma_separated_list
return comma_separated_list.split(',')
def http_request(method, url, body=None, headers=None, url_params=None):
    '''
    returns the http response
    '''
    # copy the headers to avoid mutating a shared dict, then add the API key
    headers = dict(headers or {})
    headers['x-fireeye-api-key'] = API_KEY
request_kwargs = {
'headers': headers,
'verify': USE_SSL
}
# add optional arguments if specified
if body is not None:
request_kwargs['data'] = json.dumps(body)
if url_params is not None:
request_kwargs['params'] = json.dumps(url_params)
    LOG('attempting {} request to {} with body:\n{}'.format(method, url, json.dumps(body, indent=4)))
response = requests.request(
method,
url,
**request_kwargs
)
# handle request failure
if response.status_code not in range(200, 205):
raise ValueError('Request failed with status code {}\n{}'.format(response.status_code, response.text))
return response.json()
def return_error_entry(message):
entry = {
'Type': entryTypes['error'],
'Contents': str(message),
'ContentsFormat': formats['text'],
}
demisto.results(entry)
def to_search_attribute_object(value, filter=None, is_list=False, valid_values=None):
values = listify(value) if is_list else value
if valid_values:
for val in values:
if val not in valid_values:
raise ValueError('{} is not a valid value'.format(val))
attribute = {
'value': values,
'includes': ['SMTP', 'HEADER']
}
if filter:
attribute['filter'] = filter
return attribute
def format_search_attributes(from_email=None, from_email_not_in=None, recipients=None,
recipients_not_in=None, subject=None, from_accepted_date_time=None,
to_accepted_date_time=None, rejection_reason=None, sender_ip=None, status=None,
status_not_in=None, last_modified_date_time=None, domains=None):
search_attributes = {} # type: Dict
# handle from_email attribute
if from_email and from_email_not_in:
raise ValueError('Only one of the followings can be specified: from_email, from_email_not_in')
if from_email:
search_attributes['fromEmail'] = to_search_attribute_object(from_email, filter='in', is_list=True)
elif from_email_not_in:
search_attributes['fromEmail'] = to_search_attribute_object(from_email_not_in, filter='not in', is_list=True)
# handle recipients attributes
if recipients and recipients_not_in:
raise ValueError('Only one of the followings can be specified: recipients, recipients_not_in')
if recipients:
search_attributes['recipients'] = to_search_attribute_object(recipients, filter='in', is_list=True)
elif recipients_not_in:
search_attributes['recipients'] = to_search_attribute_object(recipients_not_in, filter='not in', is_list=True)
# handle status attributes
if status and status_not_in:
raise ValueError('Only one of the followings can be specified: status, status_not_in')
if status:
search_attributes['status'] = to_search_attribute_object(status, filter='in', is_list=True,
valid_values=STATUS_VALUES)
elif status_not_in:
        search_attributes['status'] = to_search_attribute_object(status_not_in, filter='not in', is_list=True,
                                                                 valid_values=STATUS_VALUES)
if subject:
search_attributes['subject'] = to_search_attribute_object(subject, filter='in', is_list=True)
if rejection_reason:
search_attributes['rejectionReason'] = to_search_attribute_object(rejection_reason, is_list=True,
valid_values=REJECTION_REASONS)
if sender_ip:
search_attributes['senderIP'] = to_search_attribute_object(sender_ip, filter='in', is_list=True)
if domains:
search_attributes['domains'] = to_search_attribute_object(domains, is_list=True)
if from_accepted_date_time and to_accepted_date_time:
search_attributes['period'] = {
'range': {
'fromAcceptedDateTime': from_accepted_date_time,
'toAcceptedDateTime': to_accepted_date_time
}
}
if last_modified_date_time:
# try to parse '>timestamp' | '>=timestamp' | '<timestamp' | '<=timestamp'
        operator_ends_at = 2 if last_modified_date_time.find('=') == 1 else 1
search_attributes["lastModifiedDateTime"] = {
'value': last_modified_date_time[operator_ends_at:],
'filter': last_modified_date_time[:operator_ends_at]
}
return search_attributes
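# A brief usage sketch of the operator-prefixed timestamp accepted above
# (the address and timestamp are illustrative):
#
# attrs = format_search_attributes(
#     from_email='alerts@example.com',
#     last_modified_date_time='>=2017-10-24T10:00:00.000',
# )
# # attrs['lastModifiedDateTime'] == {'value': '2017-10-24T10:00:00.000',
# #                                   'filter': '>='}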
def readable_message_data(message):
return {
'Message ID': message['id'],
'Accepted Time': message['acceptedDateTime'],
'From': message['from'],
'Recipients': message.get('recipients'),
'Subject': message['subject'],
'Message Status': message['status']
}
def message_context_data(message):
context_data = copy.deepcopy(message)
# remove 'attributes' level
context_data.update(context_data.pop('attributes', {}))
    # parse email addresses
match = re.search('<(.*)>', context_data['senderHeader'].replace('\\"', ''))
context_data['from'] = match.group() if match else context_data['senderHeader']
if context_data.get('recipientHeader') is None:
context_data['recipients'] = []
return context_data
recipients = []
for recipient_header in context_data.get('recipientHeader', []):
match = re.search('<(.*)>', recipient_header)
recipient_address = match.group() if match else recipient_header
recipients.append(recipient_address)
context_data['recipients'] = ','.join(recipients)
return context_data
def search_messages_request(attributes={}, has_attachments=None, max_message_size=None):
url = '{}/messages/trace'.format(BASE_PATH)
body = {
'attributes': attributes,
'type': 'MessageAttributes',
'size': max_message_size or 20
}
if has_attachments is not None:
body['hasAttachments'] = has_attachments
response = http_request(
'POST',
url,
body=body,
headers=HTTP_HEADERS
)
# no results
if response['meta']['total'] == 0:
return []
return response['data']
def search_messages_command():
args = demisto.args()
if 'size' in args.keys():
# parse to int
args['size'] = int(args['size'])
if args.get('has_attachments') is not None:
# parse to boolean
        args['hasAttachments'] = args['has_attachments'] == 'true'
search_attributes = format_search_attributes(
from_email=args.get('from_email'),
from_email_not_in=args.get('from_email_not_in'),
recipients=args.get('recipients'),
recipients_not_in=args.get('recipients_not_in'),
subject=args.get('subject'),
from_accepted_date_time=args.get('from_accepted_date_time'),
to_accepted_date_time=args.get('to_accepted_date_time'),
rejection_reason=args.get('rejection_reason'),
sender_ip=args.get('sender_ip'),
status=args.get('status'),
status_not_in=args.get('status_not_in'),
last_modified_date_time=args.get('last_modified_date_time'),
domains=args.get('domains')
)
# raw data
messages_raw = search_messages_request(search_attributes, args.get('hasAttachments'), args.get('size'))
# create context data
messages_context = [message_context_data(message) for message in messages_raw]
# create readable data
messages_readable_data = [readable_message_data(message) for message in messages_context]
messages_md_headers = [
'Message ID',
'Accepted Time',
'From',
'Recipients',
'Subject',
'Message Status'
]
md_table = tableToMarkdown(
'FireEye ETP - Search Messages',
messages_readable_data,
headers=messages_md_headers
)
entry = {
'Type': entryTypes['note'],
'Contents': messages_raw,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md_table,
'EntryContext': {
"FireEyeETP.Messages(obj.id==val.id)": messages_context
}
}
demisto.results(entry)
def get_message_request(message_id):
url = '{}/messages/{}'.format(BASE_PATH, message_id)
response = http_request(
'GET',
url
)
if response['meta']['total'] == 0:
return {}
return response['data'][0]
def get_message_command():
# get raw data
raw_message = get_message_request(demisto.args()['message_id'])
if raw_message:
# create context data
context_data = message_context_data(raw_message)
# create readable data
message_readable_data = readable_message_data(context_data)
messages_md_headers = [
'Message ID',
'Accepted Time',
'From',
'Recipients',
'Subject',
'Message Status'
]
md_table = tableToMarkdown(
'FireEye ETP - Get Message',
message_readable_data,
headers=messages_md_headers
)
entry = {
'Type': entryTypes['note'],
'Contents': raw_message,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md_table,
'EntryContext': {
"FireEyeETP.Messages(obj.id==val.id)": context_data
}
}
demisto.results(entry)
# no results
else:
entry = {
'Type': entryTypes['note'],
'Contents': {},
'ContentsFormat': formats['text'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': '### FireEye ETP - Get Message \n no results'
}
demisto.results(entry)
def alert_readable_data_summary(alert):
return {
'Alert ID': alert.get('id'),
'Alert Timestamp': alert.get('alert').get('timestamp'),
'From': alert.get('email').get('headers').get('from'),
'Recipients': '{}|{}'.format(alert.get('email').get('headers').get('to'), alert.get('email').get('headers').get('cc')),
'Subject': alert.get('email').get('headers').get('subject'),
'MD5': alert.get('alert').get('malware_md5'),
'URL/Attachment': alert.get('email').get('attachment'),
'Email Status': alert.get('email').get('status'),
'Email Accepted': alert.get('email').get('timestamp').get('accepted'),
'Threat Intel': alert.get('ati')
}
def alert_readable_data(alert):
return {
'Alert ID': alert.get('id'),
'Alert Timestamp': alert.get('alert').get('timestamp'),
'From': alert.get('email').get('headers').get('from'),
'Recipients': '{}|{}'.format(alert.get('email').get('headers').get('to'), alert.get('email').get('headers').get('cc')),
'Subject': alert.get('email').get('headers').get('subject'),
'MD5': alert.get('alert').get('malware_md5'),
'URL/Attachment': alert.get('email').get('attachment'),
'Email Status': alert.get('email').get('status'),
'Email Accepted': alert.get('email').get('timestamp').get('accepted'),
        'Severity': alert.get('alert').get('severity')
}
def malware_readable_data(malware):
return {
'Name': malware.get('name'),
'Domain': malware.get('domain'),
'Downloaded At': malware.get('downloaded_at'),
'Executed At': malware.get('executed_at'),
'Type': malware.get('stype'),
'Submitted At': malware.get('submitted_at'),
'SID': malware.get('sid')
}
def alert_context_data(alert):
context_data = copy.deepcopy(alert)
# remove 'attributes' level
context_data.update(context_data.pop('attributes', {}))
return context_data
def get_alerts_request(legacy_id=None, from_last_modified_on=None, etp_message_id=None, size=None, raw_response=False):
url = '{}/alerts'.format(BASE_PATH)
    # construct the body for the request
body = {}
attributes = {}
if legacy_id:
attributes['legacy_id'] = legacy_id
if etp_message_id:
attributes['etp_message_id'] = etp_message_id
if attributes:
body['attribute'] = attributes
if size:
body['size'] = size
if from_last_modified_on:
body['fromLastModifiedOn'] = from_last_modified_on
response = http_request(
'POST',
url,
body=body,
headers=HTTP_HEADERS
)
if raw_response:
return response
if response['meta']['total'] == 0:
return []
return response['data']
def get_alerts_command():
args = demisto.args()
if 'size' in args.keys():
args['size'] = int(args['size'])
if 'legacy_id' in args.keys():
args['legacy_id'] = int(args['legacy_id'])
# get raw data
alerts_raw = get_alerts_request(
legacy_id=args.get('legacy_id'),
from_last_modified_on=args.get('from_last_modified_on'),
etp_message_id=args.get('etp_message_id'),
size=args.get('size')
)
# create context data
alerts_context = [alert_context_data(alert) for alert in alerts_raw]
# create readable data
    alerts_readable_data = [alert_readable_data_summary(alert) for alert in alerts_context]
    alerts_summary_headers = [
'Alert ID',
'Alert Timestamp',
'Email Accepted',
'From',
'Recipients',
'Subject',
'MD5',
'URL/Attachment',
'Email Status',
'Threat Intel'
]
md_table = tableToMarkdown(
'FireEye ETP - Get Alerts',
alerts_readable_data,
        headers=alerts_summary_headers
)
entry = {
'Type': entryTypes['note'],
'Contents': alerts_raw,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md_table,
'EntryContext': {
"FireEyeETP.Alerts(obj.id==val.id)": alerts_context
}
}
demisto.results(entry)
def get_alert_request(alert_id):
url = '{}/alerts/{}'.format(BASE_PATH, alert_id)
response = http_request(
'GET',
url
)
if response['meta']['total'] == 0:
return {}
return response['data'][0]
def get_alert_command():
# get raw data
alert_raw = get_alert_request(demisto.args()['alert_id'])
if alert_raw:
# create context data
alert_context = alert_context_data(alert_raw)
# create readable data
readable_data = alert_readable_data(alert_context)
alert_md_table = tableToMarkdown(
'Alert Details',
readable_data
)
data = alert_context['alert']['explanation']['malware_detected']['malware']
malware_data = [malware_readable_data(malware) for malware in data]
malware_md_table = tableToMarkdown(
'Malware Details',
malware_data
)
entry = {
'Type': entryTypes['note'],
'Contents': alert_raw,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': '## FireEye ETP - Get Alert\n{}\n{}'.format(alert_md_table, malware_md_table),
'EntryContext': {
"FireEyeETP.Alerts(obj.id==val.id)": alert_context
}
}
demisto.results(entry)
# no results
else:
entry = {
'Type': entryTypes['note'],
'Contents': {},
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': '### FireEye ETP - Get Alert\nno results',
}
demisto.results(entry)
def parse_string_in_iso_format_to_datetime(iso_format_string):
alert_last_modified = None
try:
alert_last_modified = datetime.strptime(iso_format_string, "%Y-%m-%dT%H:%M:%S.%f")
except ValueError:
try:
alert_last_modified = datetime.strptime(iso_format_string, "%Y-%m-%dT%H:%M:%S")
except ValueError:
alert_last_modified = datetime.strptime(iso_format_string, "%Y-%m-%dT%H:%M")
return alert_last_modified
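# All of the following parse successfully thanks to the fallbacks above
# (timestamps are illustrative):
#
# parse_string_in_iso_format_to_datetime('2018-01-15T10:30:00.123')
# parse_string_in_iso_format_to_datetime('2018-01-15T10:30:00')
# parse_string_in_iso_format_to_datetime('2018-01-15T10:30')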
def parse_alert_to_incident(alert):
context_data = alert_context_data(alert)
incident = {
'name': context_data['email']['headers']['subject'],
'rawJSON': json.dumps(context_data)
}
return incident
def fetch_incidents():
last_run = demisto.getLastRun()
week_ago = datetime.now() - timedelta(days=7)
iso_format = "%Y-%m-%dT%H:%M:%S.%f"
if 'last_modified' not in last_run.keys():
        # parse datetime to iso format string yyyy-mm-ddThh:mm:ss.fff
last_run['last_modified'] = week_ago.strftime(iso_format)[:-3]
if 'last_created' not in last_run.keys():
last_run['last_created'] = week_ago.strftime(iso_format)
alerts_raw_response = get_alerts_request(
from_last_modified_on=last_run['last_modified'],
size=100,
raw_response=True
)
    # return early if no results were returned
if not alerts_raw_response or not alerts_raw_response.get('data'):
demisto.incidents([])
return
alerts = alerts_raw_response.get('data', [])
last_alert_created = parse_string_in_iso_format_to_datetime(last_run['last_created'])
alert_creation_limit = parse_string_in_iso_format_to_datetime(last_run['last_created'])
incidents = []
for alert in alerts:
# filter by message status if specified
if MESSAGE_STATUS and alert['attributes']['email']['status'] != MESSAGE_STATUS:
continue
# filter alerts created before 'last_created'
current_alert_created = parse_string_in_iso_format_to_datetime(alert['attributes']['alert']['timestamp'])
if current_alert_created < alert_creation_limit:
continue
# append alert to incident
incidents.append(parse_alert_to_incident(alert))
# set last created
if current_alert_created > last_alert_created:
last_alert_created = current_alert_created
last_run['last_modified'] = alerts_raw_response['meta']['fromLastModifiedOn']['end']
last_run['last_created'] = last_alert_created.strftime(iso_format)
demisto.incidents(incidents)
demisto.setLastRun(last_run)
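# Shape of the persisted last_run dict after a fetch cycle (values are
# illustrative): 'last_modified' comes from the API's meta.fromLastModifiedOn.end,
# while 'last_created' tracks the newest alert timestamp seen locally, e.g.
#   {'last_modified': '2019-01-02T03:04:05.678', 'last_created': '2019-01-02T03:04:05.678000'}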
'''
EXECUTION
'''
def main():
set_proxies()
try:
if demisto.command() == 'test-module':
get_alerts_request(size=1)
            # request was successful
demisto.results('ok')
if demisto.command() == 'fetch-incidents':
fetch_incidents()
if demisto.command() == 'fireeye-etp-search-messages':
search_messages_command()
if demisto.command() == 'fireeye-etp-get-message':
get_message_command()
if demisto.command() == 'fireeye-etp-get-alerts':
get_alerts_command()
if demisto.command() == 'fireeye-etp-get-alert':
get_alert_command()
except ValueError as e:
LOG(e)
LOG.print_log()
return_error_entry(e)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
{
"content_hash": "89553c622ffce6ae305817fb9f719975",
"timestamp": "",
"source": "github",
"line_count": 634,
"max_line_length": 127,
"avg_line_length": 33.124605678233436,
"alnum_prop": 0.597495357363935,
"repo_name": "demisto/content",
"id": "0bdf2dd8c2c2fc942ee64a33418e94b887e4fe18",
"size": "21001",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/FireEyeETP/Integrations/FireEyeETP/FireEyeETP.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47881712"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
import os
from unittest.mock import MagicMock
import cauldron
from cauldron.environ.response import Response
from cauldron.test import support
from cauldron.test.support import scaffolds
from cauldron.test.support.messages import Message
from cauldron.cli.commands.steps import actions as step_actions
class TestStepActions(scaffolds.ResultsTest):
"""..."""
def test_index_from_location_float(self):
"""Should convert float index to integer."""
result = step_actions.index_from_location(None, None, 12.2)
self.assertEqual(result, 12)
self.assertIsInstance(result, int)
def test_index_from_location_default(self):
"""Should return default if unable to parse location."""
result = step_actions.index_from_location(None, None, None, 42)
self.assertEqual(result, 42)
def test_index_from_location_default_final(self):
"""Should return default if unable to parse location."""
result = step_actions.index_from_location(None, None, self, 42)
self.assertEqual(result, 42)
def test_index_from_location_bad_string(self):
"""Should return default value if bad string is supplied."""
support.create_project(self, 'ray')
project = cauldron.project.get_internal_project()
result = step_actions.index_from_location(None, project, '12s', 42)
self.assertEqual(result, 42)
def test_index_from_location_step_name(self):
"""Should return index from step name if supplied."""
support.create_project(self, 'bradbury')
support.add_step(self)
project = cauldron.project.get_internal_project()
step = project.steps[0]
result = step_actions.index_from_location(None, project, step.filename)
self.assertEqual(result, 1)
def test_mute_no_such_step(self):
"""Should fail to mute a step that does not exist."""
support.create_project(self, 'lewis')
project = cauldron.project.get_internal_project()
r = Response()
step_actions.toggle_muting(r, project, 'not-a-step')
self.assertTrue(r.failed)
self.assertEqual(r.errors[0].code, 'NO_SUCH_STEP')
def test_toggle_muting(self):
"""Should reverse the muted state of the step."""
support.create_project(self, 'carrol')
support.add_step(self)
project = cauldron.project.get_internal_project()
step = project.steps[0]
self.assertFalse(step.is_muted)
r = Response()
step_actions.toggle_muting(r, project, step.filename)
self.assertTrue(step.is_muted)
r = Response()
step_actions.toggle_muting(r, project, step.filename)
self.assertFalse(step.is_muted)
def test_echo_steps_empty():
"""Should successfully echo no steps."""
project = MagicMock()
project.steps = []
response = Response()
step_actions.echo_steps(response, project)
assert support.has_success_code(response, 'ECHO_STEPS')
def test_clean_steps():
"""Should clean all steps in project."""
should_clean_step = MagicMock()
should_clean_step.is_dirty.return_value = True
should_clean_step.last_modified = 123
should_ignore_step = MagicMock()
should_ignore_step.is_dirty.return_value = True
should_ignore_step.last_modified = 0
project = MagicMock()
project.steps = [should_clean_step, should_ignore_step]
response = step_actions.clean_steps(Response(), project)
assert support.has_success_code(response, 'MARKED_CLEAN')
assert should_clean_step.mark_dirty.called, """
Expect the step that should be cleaned to be cleaned.
"""
assert not should_ignore_step.mark_dirty.called, """
Expect the step that should be ignored to be skipped.
"""
|
{
"content_hash": "926150abe9faf7eae167f0420efff17e",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 33.07826086956522,
"alnum_prop": 0.667192429022082,
"repo_name": "sernst/cauldron",
"id": "d776b766764b50ed5f5a5ad47e6137097d2f2b95",
"size": "3804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cauldron/test/cli/commands/test_step_actions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "36"
},
{
"name": "CSS",
"bytes": "1369"
},
{
"name": "Dockerfile",
"bytes": "842"
},
{
"name": "HTML",
"bytes": "21740"
},
{
"name": "JavaScript",
"bytes": "48753"
},
{
"name": "Python",
"bytes": "913057"
},
{
"name": "SCSS",
"bytes": "17130"
},
{
"name": "Shell",
"bytes": "300"
},
{
"name": "Vue",
"bytes": "95790"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('register', '0014_auto_20160225_2045'),
]
operations = [
migrations.AlterField(
model_name='detailapross',
name='work_done',
field=models.TextField(blank=True, max_length=250, null=True),
),
]
|
{
"content_hash": "583b9576fbb66b9d595155cda8272e16",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 22.88888888888889,
"alnum_prop": 0.6067961165048543,
"repo_name": "nanomolina/JP",
"id": "8dce5a3308ce661ad86dc423b8fc1177af90c6b4",
"size": "484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/odontology/register/migrations/0015_auto_20160225_2049.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189771"
},
{
"name": "HTML",
"bytes": "222882"
},
{
"name": "JavaScript",
"bytes": "42164"
},
{
"name": "Python",
"bytes": "191397"
}
],
"symlink_target": ""
}
|
"""
This module is for the miscellaneous GEOS routines, particularly the
ones that return the area, distance, and length.
"""
from ctypes import c_int, c_double, POINTER
from django.contrib.gis.geos.libgeos import lgeos, GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_dbl
### ctypes generator function ###
def dbl_from_geom(func, num_geom=1):
"""
Argument is a Geometry, return type is double that is passed
in by reference as the last argument.
"""
argtypes = [GEOM_PTR for i in xrange(num_geom)]
argtypes += [POINTER(c_double)]
func.argtypes = argtypes
func.restype = c_int # Status code returned
func.errcheck = check_dbl
return func
### ctypes prototypes ###
# Area, distance, and length prototypes.
geos_area = dbl_from_geom(lgeos.GEOSArea)
geos_distance = dbl_from_geom(lgeos.GEOSDistance, num_geom=2)
geos_length = dbl_from_geom(lgeos.GEOSLength)
|
{
"content_hash": "d5399891883015086d2bd21a78586137",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 69,
"avg_line_length": 35.18518518518518,
"alnum_prop": 0.7,
"repo_name": "greggian/TapdIn",
"id": "b6df25ddd649838fdc0fc42c7b034f61ab523f34",
"size": "950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/gis/geos/prototypes/misc.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "82525"
},
{
"name": "Python",
"bytes": "3585862"
},
{
"name": "Shell",
"bytes": "227"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import sys
from django import forms
from django.apps.registry import Apps
from django.core.checks import Error
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.fields import NOT_PROVIDED
from django.test import SimpleTestCase
from colorful.fields import RGBColorField
from colorful.forms import RGB_REGEX
from colorful.widgets import ColorFieldWidget
try:
from unittest.mock import patch
except ImportError: # py < 3.3
from mock import patch
class TestRBGColorField(SimpleTestCase):
def setUp(self):
self.field = RGBColorField('verbose_name', default='#123445')
self.field_with_colors = RGBColorField('verbose_name', colors=['#123445', '#000'])
def test_validate_fails(self):
self.assertRaises(ValidationError, self.field.clean, '', None)
self.assertRaises(ValidationError, self.field.clean, '12', None)
self.assertRaises(ValidationError, self.field.clean, 'GGGGGG', None)
self.assertRaises(ValidationError, self.field.clean, '#GGGGGG', None)
self.assertRaises(ValidationError, self.field.clean, 'GGG', None)
self.assertRaises(ValidationError, self.field.clean, '#GGG', None)
self.assertRaises(ValidationError, self.field.clean, '#1234567', None)
self.assertRaisesMessage(
ValidationError,
'Ensure this value has at most 7 characters (it has 8).',
self.field.clean, '#1234567', None
)
def test_validate_passes(self):
self.assertEqual('#123445', self.field.clean('#123445', None))
self.assertEqual('#123', self.field.clean('#123', None))
self.assertEqual('#ABCDEF', self.field.clean('#ABCDEF', None))
self.assertEqual('ABCDEF', self.field.clean('ABCDEF', None))
self.assertEqual('123', self.field.clean('123', None))
self.assertEqual('ABC', self.field.clean('ABC', None))
def test_deconstruct(self):
name, path, args, kwargs = self.field.deconstruct()
self.assertIsNone(name)
module, cls = path.rsplit('.', 1)
field_class = getattr(sys.modules[module], cls)
field_instance = field_class(*args, **kwargs)
self.assertIsInstance(field_instance, self.field.__class__)
self.assertEqual(field_instance.verbose_name, self.field.verbose_name)
self.assertEqual(field_instance.default, self.field.default)
self.assertIsNone(field_instance.colors)
def test_deconstruct_with_colors(self):
name, path, args, kwargs = self.field_with_colors.deconstruct()
self.assertIsNone(name)
module, cls = path.rsplit('.', 1)
field_class = getattr(sys.modules[module], cls)
field_instance = field_class(*args, **kwargs)
self.assertIsInstance(field_instance, self.field_with_colors.__class__)
self.assertEqual(field_instance.verbose_name, self.field.verbose_name)
self.assertEqual(field_instance.default, NOT_PROVIDED)
self.assertEqual(field_instance.colors, field_instance.colors)
def test_formfield(self):
formfield = self.field.formfield()
self.assertIsInstance(formfield, forms.RegexField)
self.assertIsInstance(formfield.widget, ColorFieldWidget)
self.assertEqual(formfield.regex, RGB_REGEX)
@patch('django.db.models.CharField.check')
def test_check(self, charfield_check):
test_apps = Apps()
# do not test django's charfield checks
charfield_check.side_effect = list
        # the fields from setUp should pass the checks
self.assertEqual(self.field.check(), [])
self.assertEqual(self.field_with_colors.check(), [])
# check type error
class ColorsTypeSystemCheckTestModel(models.Model):
color = RGBColorField(colors='#333,#ff00FF')
class Meta:
apps = test_apps
app_label = 'colorful'
self.assertEqual(ColorsTypeSystemCheckTestModel.check(), [
Error(
'colors is not iterable',
hint='Define the colors param as list of strings.',
obj=ColorsTypeSystemCheckTestModel._meta.get_field('color'),
id='colorful.E001'
)
])
# check item error
class ColorsItemSystemCheckTestModel(models.Model):
color = RGBColorField(colors=['#'])
class Meta:
apps = test_apps
app_label = 'colorful'
self.assertEqual(ColorsItemSystemCheckTestModel.check(), [
Error(
'colors item validation error',
hint='Each item of the colors param must be a valid color '
'string itself.',
obj=ColorsItemSystemCheckTestModel._meta.get_field('color'),
id='colorful.E002'
)
])
class TestColorFieldWidget(SimpleTestCase):
def test_render_with_id(self):
widget = ColorFieldWidget()
self.assertIn('<input id="id_color" name="test" type="color" value="#123456" />',
widget.render('test', '#123456', {'id': 'id_color'}))
self.assertIn('''<script type="text/javascript">
(function($){
$(document).ready(function(){
$('#id_color').each(function(i, elm){
// Make sure html5 color element is not replaced
if (elm.type != 'color') $(elm).colorPicker({});
});
});
})('django' in window && django.jQuery ? django.jQuery: jQuery);
</script>
''', widget.render('test', '#123456', {'id': 'id_color'}))
def test_render_no_id(self):
widget = ColorFieldWidget()
self.assertIn('<input id="id_test" name="test" type="color" value="#123456" />',
widget.render('test', '#123456'))
self.assertIn('''<script type="text/javascript">
(function($){
$(document).ready(function(){
$('#id_test').each(function(i, elm){
// Make sure html5 color element is not replaced
if (elm.type != 'color') $(elm).colorPicker({});
});
});
})('django' in window && django.jQuery ? django.jQuery: jQuery);
</script>
''', widget.render('test', '#123456'))
def test_render_with_colors(self):
widget = ColorFieldWidget(colors=['#ffffff', '#223344', '#557799'])
self.assertIn('<input id="id_test" list="datalist_for_id_test" name="test" type="color" value="#123456" />',
widget.render('test', '#123456'))
self.assertIn('''<script type="text/javascript">
(function($){
$(document).ready(function(){
$('#id_test').each(function(i, elm){
// Make sure html5 color element is not replaced
if (elm.type != 'color') $(elm).colorPicker({"colors": ["ffffff", "223344", "557799"]});
});
});
})('django' in window && django.jQuery ? django.jQuery: jQuery);
</script>
''', widget.render('test', '#123456')) # NOQA
|
{
"content_hash": "6e4f2123ebb286bda8e645468b1d1ae1",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 120,
"avg_line_length": 43.664739884393065,
"alnum_prop": 0.5741329097167064,
"repo_name": "Vitagene1/django-colorful",
"id": "776c9c725ee6d915b9e598bb83d9fe5c5f7fd0a8",
"size": "7554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "982"
},
{
"name": "JavaScript",
"bytes": "12399"
},
{
"name": "Python",
"bytes": "14072"
}
],
"symlink_target": ""
}
|
if __name__ == '__main__':
import argparse
import os
import time
parser = argparse.ArgumentParser(description='Launches a Java JBosen application.')
parser.add_argument('host_file', type=str, help='Path to the host file to use.')
parser.add_argument('class_path', type=str, help='Java classpath that contains the application.')
parser.add_argument('main_class', type=str, help='Fully qualified class name that contains the main method.')
parser.add_argument('--num_local_worker_threads', type=int, default=1, help='Number of application worker threads per client.')
parser.add_argument('--num_local_comm_channels', type=int, default=1, help='Number of network channels per client.')
parser.add_argument('--java_args', type=str, default='', help='Extra arguments to pass to Java.')
parser.add_argument('--app_args', type=str, default='', help='Extra arguments to pass to the application.')
args = parser.parse_args()
with open(args.host_file, 'r') as f:
host_ips = [line.split(':')[0] for line in f]
def kill(client_id, ip):
cmd = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' + ip + ' '
cmd += '\'pkill -f "^java .*' + args.main_class + '"\''
print(cmd)
os.system(cmd)
def launch(client_id, ip):
cmd = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' + ip + ' '
cmd += '"cd ' + os.getcwd() + '; '
cmd += 'java ' + args.java_args + ' '
cmd += '-cp ' + args.class_path + ' '
cmd += args.main_class + ' '
cmd += '-clientId %d' % client_id + ' '
cmd += '-hostFile %s' % args.host_file + ' '
cmd += '-numLocalWorkerThreads %d' % args.num_local_worker_threads + ' '
cmd += '-numLocalCommChannels %d' % args.num_local_comm_channels + ' '
cmd += args.app_args + '" &'
print(cmd)
os.system(cmd)
print("Killing previous instances of the application...")
for client_id, ip in enumerate(host_ips):
kill(client_id, ip)
print("Starting new instances of the application...")
for client_id, ip in enumerate(host_ips):
launch(client_id, ip)
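    # Example invocation (paths and class name are hypothetical):
    #   python jbosen_run.py hosts.txt 'build/classes:lib/*' org.petuum.example.Main \
    #       --num_local_worker_threads 4 --java_args '-Xmx4g'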
|
{
"content_hash": "4914d0a1f0cf4ee6528335c661c261ed",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 129,
"avg_line_length": 45.391304347826086,
"alnum_prop": 0.6460727969348659,
"repo_name": "petuum/jbosen",
"id": "1be42f8153cff19ada61ddc6403af220fdcbb06f",
"size": "2111",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/jbosen_run.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "396591"
},
{
"name": "Python",
"bytes": "8969"
},
{
"name": "Shell",
"bytes": "211"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import os
from shutil import copytree
from os.path import dirname, exists, join
import re
from subprocess import check_call, Popen, PIPE
from datetime import datetime
from distutils.core import Command
from citools.build import ReplaceTemplateFiles, RenameTemplateFiles
from citools.debian.control import ControlFile, Dependency
from citools.git import fetch_repository
from citools.version import get_git_describe, compute_version, compute_meta_version, get_git_head_hash, retrieve_current_branch
__all__ = (
"BuildDebianPackage", "UpdateDebianVersion",
"CreateDebianPackage", "CreateDebianMetaPackage",
"CreateDebianization", "UpdateDependencyVersions",
)
def return_true(*args, **kwargs):
return True
class BuildDebianPackage(Command):
""" After debianization is in place, build a package for it """
description = "run debian build wrapper dpkg-buildpackage"
user_options = [
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
check_call(['dpkg-buildpackage', '-rfakeroot-tcp', '-us', '-uc'])
def get_new_dependencies(dir, accepted_tag_pattern=None, branch="master"):
version = compute_version(get_git_describe(repository_directory=dir, fix_environment=True, accepted_tag_pattern=accepted_tag_pattern))
control = os.path.join(dir, 'debian', 'control')
version = ".".join(map(str, version))
### FIXME: We shall not do this again AND should only use templates
from citools.build import replace_template_files
replace_template_files(root_directory=dir, variables={
'branch' : branch,
'version' : version,
})
cfile = ControlFile(filename=control)
packages = cfile.get_packages()
for p in packages:
p.version = version
return packages
def fetch_new_dependencies(repository, workdir=None):
if repository.has_key('branch'):
branch = repository['branch']
else:
if workdir:
branch = retrieve_current_branch(repository_directory=workdir, fix_environment=True)
else:
branch = retrieve_current_branch()
repo = fetch_repository(
repository=repository['url'], branch=branch
)
#FIXME: This should not be hardcoded
project_pattern = "%s-[0-9]*" % repository['package_name']
deps = get_new_dependencies(repo, accepted_tag_pattern=project_pattern, branch=branch)
return deps
def replace_versioned_packages(control_path, version, workdir=None):
workdir = workdir or os.curdir
cfile = ControlFile(filename=control_path)
cfile.replace_versioned_packages(version)
cfile.dump(control_path)
def replace_versioned_debian_files(debian_path, original_version, new_version, control_file):
versioned_deps = control_file.get_versioned_dependencies()
for path, dirs, files in os.walk(debian_path):
for file in files:
for dep in versioned_deps:
s = "%s-%s" % (dep.name, original_version)
if file.startswith(s):
f = open(os.path.join(path, file))
content = f.read()
f.close()
new_name = "%s-%s%s" % (dep.name, new_version, file[len(s):])
new_content = re.sub(original_version, new_version, content)
f = open(os.path.join(path, new_name), 'w')
f.write(new_content)
f.close()
os.remove(os.path.join(path, file))
def update_dependency_versions(repositories, control_path, workdir=None, accepted_tag_pattern=None):
"""
Update control_path (presumably debian/control) with package version collected
by parsing debian/controls in dependencies.
Also updates with change of my path.
If any versioned dependencies are present, replace them too, as well as debian files
"""
workdir = workdir or os.curdir
cfile = ControlFile(filename=control_path)
deps_from_repositories = []
cfile_meta_version = '0.0.0.0'
for repository in repositories:
deps = fetch_new_dependencies(repository, workdir)
deps_from_repositories.extend(deps)
#FIXME: This will download deps again, fix it
meta_version = compute_meta_version(repositories, workdir=workdir, accepted_tag_pattern=accepted_tag_pattern)
meta_version_string = ".".join(map(str, meta_version))
# also add myself as dependency
deps = get_new_dependencies(workdir, accepted_tag_pattern=accepted_tag_pattern)
# deps are my actual version; we want to update it to metaversion
for dep in deps:
dep.version = meta_version_string
deps_from_repositories.extend(deps)
cfile.replace_dependencies(deps_from_repositories)
replace_versioned_debian_files(debian_path=dirname(control_path), original_version=cfile_meta_version, new_version=meta_version_string, control_file=cfile)
cfile.replace_versioned_packages(meta_version_string, old_version=cfile_meta_version)
cfile.dump(control_path)
class UpdateDependencyVersions(Command):
description = "parse and update versions in debian control file"
user_options = [
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
format = "%s-[0-9]*" % self.distribution.metadata.get_name()
update_dependency_versions(self.distribution.dependencies_git_repositories, os.path.join('debian', 'control'), accepted_tag_pattern=format)
        except Exception:
import traceback
traceback.print_exc()
raise
def update_debianization(version):
"""
Update Debian's changelog to current version and append "dummy" message.
"""
# we need to add string version in the whole method
if isinstance(version, (tuple, list)):
version = '.'.join(map(str, version))
changelog = 'debian/changelog'
hash = get_git_head_hash()
message = "Version %(version)s was build from revision %(hash)s by automated build system" % {
'version' : version,
'hash' : hash
}
proc = Popen(['dch', '--changelog', changelog, '--newversion', version, '"%s"' % message], stdout=PIPE)
return_code = proc.wait()
if return_code == 0:
return proc.stdout.read().strip()
else:
raise ValueError("Updating debianization failed with exit code %s" % return_code)
def get_packages_names():
control = os.path.join('debian', 'control')
if not os.path.exists(control):
raise ValueError("Cannot find debian/control")
packages = []
version_pattern = re.compile("^(Package\:){1}(\s)*(?P<name>[\w\-\.]+).*(\s)*$")
for line in open(control, 'r'):
match = re.match(version_pattern, line)
if match:
packages.append(match.groupdict()['name'])
return packages
def get_package_path(package_name, module_name, current_version=None):
""" Return filesystem path to debian package build by bdist_deb"""
if not current_version:
#FIXME: not to hardcode
format = "%s-[0-9]*" % module_name
current_version = '.'.join(map(str, compute_version(get_git_describe(accepted_tag_pattern=format))))
package_name = u"%(name)s_%(version)s_%(arch)s.deb" % {
'name' : package_name,
'version' : current_version,
'arch' : 'all'
}
return os.path.normpath(os.path.join(os.curdir, os.pardir, package_name))
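# Illustrative result (package name and version are made up):
#   get_package_path('python-foo', 'foo', current_version='1.2.3')
#   -> '../python-foo_1.2.3_all.deb'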
class UpdateDebianVersion(Command):
description = "copy version string to debian changelog"
user_options = [
('build-number=', None, "Provide a buildnumber for auto-computed version"),
]
def initialize_options(self):
self.build_number = None
def finalize_options(self):
pass
def run(self):
""" Compute current version and update debian version accordingly """
version = self.distribution.get_version()
if self.build_number:
version = '%s-%s' % (version, self.build_number)
try:
update_debianization(version)
except Exception:
import traceback
traceback.print_exc()
raise
class CreateDebianPackage(Command):
description = "run what's needed to build debian package"
user_options = [
('build-number=', None, "Provide a buildnumber for auto-computed version"),
]
def initialize_options(self):
self.build_number = None
def finalize_options(self):
pass
def run(self):
for cmd_name in self.get_sub_commands():
sub_cmd = self.reinitialize_command(cmd_name)
sub_cmd.build_number = self.build_number
self.run_command(cmd_name)
sub_commands = [
("compute_version_git", None),
("replace_templates", None),
("rename_template_files", None),
("update_debian_version", None),
("bdist_deb", None),
]
class CreateDebianMetaPackage(Command):
description = "run what's needed to build debian meta package"
user_options = [
('build-number=', None, "Provide a buildnumber for auto-computed version"),
]
def initialize_options(self):
self.build_number = None
def finalize_options(self):
pass
def run(self):
for cmd_name in self.get_sub_commands():
sub_cmd = self.reinitialize_command(cmd_name)
sub_cmd.build_number = self.build_number
self.run_command(cmd_name)
sub_commands = [
("compute_version_meta_git", None),
("replace_templates", None),
("rename_template_files", None),
("update_debian_version", None),
("update_dependency_versions", None),
("copy_dependency_images", None),
("bdist_deb", None),
]
def parse_setuppy_dependency(package):
package = 'python-' + package.replace('_', '-')
if '=' in package:
i = package.index('=')
offset = 0
if package[package.rindex('=')-1] in ('<', '>', '='):
offset = 1
name, sign, version = package[:i-offset], package[i-offset:i+1], package[i+1:]
return Dependency(name, version, sign)
return Dependency(package)
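# Example of the translation this performs (the package name is hypothetical):
#   parse_setuppy_dependency('foo>=1.2')
#   -> Dependency('python-foo', '1.2', '>=')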
def get_tzdiff(local, remote):
'''
    A little hack to work around Python's awkward time-zone difference handling.
    TODO: can the datetime module itself solve this?
'''
delta_minute = (local.hour - remote.hour) * 60 + (local.minute - remote.minute)
sign = delta_minute < 0 and '-' or '+'
    hour = abs(delta_minute) // 60
    minute = abs(delta_minute) % 60
return '%s%02d%02d' % (sign, hour, minute)
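# Sanity check for the offset formatting (times are made up):
#   local 12:30, remote (UTC) 11:00 -> delta_minute = 90  -> '+0130'
#   local 11:00, remote (UTC) 12:30 -> delta_minute = -90 -> '-0130'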
def create_debianization(distribution):
if exists('debian'):
raise NotImplementedError()
# default values
name = distribution.get_name()
name = 'python-%s' % name.replace('_', '-').lower()
maintainer = distribution.get_maintainer()
maintainer_email = distribution.get_maintainer_email()
if maintainer == 'UNKNOWN':
maintainer = 'CH content team'
if maintainer_email == 'UNKNOWN':
maintainer_email = 'pg-content-dev@chconf.com'
maintainer = '%s <%s>' % (maintainer, maintainer_email)
version = distribution.get_version()
if not version:
version = '0.0.0'
# get current date in proper format
now = datetime.now()
utcnow = datetime.utcnow()
tzdiff = get_tzdiff(now, utcnow)
nowstring = '%s %s' % (now.strftime('%a, %d %b %Y %H:%M:%S'), tzdiff)
description = distribution.get_description()
description = description.strip().replace('\n', '\n ')
architecture = 'all'
if distribution.has_ext_modules():
architecture = 'any'
# replace all occurences in debian template dir
copytree(join(dirname(__file__), 'default_debianization'), 'debian')
# do the replacement in template dir
for root, dirs, files in os.walk('debian'):
for f in files:
file = join(root, f)
with open(file) as fin:
content = fin.read()
for key, value in (
('#NAME#', name),
('#MAINTAINER#', maintainer),
('#VERSION#', version),
('#DATE#', nowstring),
):
content = content.replace(key, value)
with open(file, 'w') as fout:
fout.write(content)
# update control file
cf = ControlFile(filename='debian/control')
src = cf.source
p = cf.packages[0]
src['Source'] = p['Package'] = name
src['Maintainer'] = maintainer
p['Description'] = description
p['Architecture'] = architecture
install_requires = distribution.install_requires
if install_requires:
for package in install_requires:
p['Depends'].append(parse_setuppy_dependency(package))
cf.dump('debian/control')
class CreateDebianization(Command):
description = "Create default debian directory containg everything needed to build a debian package."
user_options = [
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# TODO: build dependencies
create_debianization(self.distribution)
|
{
"content_hash": "cad6efa20f7c79f9ce2e8ecdb33a806c",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 159,
"avg_line_length": 31.735714285714284,
"alnum_prop": 0.6308050116287793,
"repo_name": "ella/citools",
"id": "11b45ef99515650142e2b50032e5b6e48db6914e",
"size": "13329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "citools/debian/commands.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "208730"
},
{
"name": "Shell",
"bytes": "3077"
}
],
"symlink_target": ""
}
|
import numpy as np
import os
import fabio
class edfmemmap:
"""Access edf data with memmaps (cannot handle certain things like compression)"""
def __init__(self, filename, mode="r"):
# f = fabio.edfimage.EdfImage(filename)
f = fabio.open(filename)
self.dtype = f.bytecode
self.shape = f.shape
self.ndim = len(self.shape)
offset = f._frames[f.currentframe].start
self.mdata = np.memmap(
filename, dtype=self.dtype, offset=offset, shape=self.shape, order="C"
)
if f.swap_needed():
self.mdata.byteswap(True)
@property
def data(self):
return self.mdata
def __getitem__(self, index):
return self.data[index]
class edfimage:
"""Access edf data with fabio"""
def __init__(self, filename, mode="r"):
try:
self.f = fabio.open(filename)
except Exception as e:
raise OSError("Fabio cannot open file " + repr(filename)) from e
self.ndim = 2
@property
def dtype(self):
return self.f.bytecode
@property
def shape(self):
return self.f.shape
@property
def data(self):
return self.f.data
def __getitem__(self, index):
return self.data[index]
@property
def header(self):
return self.f.header
def saveedf(filename, data, header, overwrite=False):
exists = os.path.exists(filename)
if exists:
if overwrite:
os.remove(filename)
else:
raise IOError("File exists (overwrite=False): {}".format(filename))
fabio.edfimage.EdfImage(data=data, header=header).write(filename)
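# Minimal round-trip sketch, assuming fabio and numpy are available and the
# (hypothetical) path '/tmp/example.edf' is writable:
#   data = np.arange(12, dtype=np.float32).reshape(3, 4)
#   saveedf('/tmp/example.edf', data, header={'title': 'demo'}, overwrite=True)
#   img = edfimage('/tmp/example.edf')
#   assert img.shape == (3, 4) and img[0, 1] == 1.0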
|
{
"content_hash": "6e551305c48f51a32445ffe8c2612e3b",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 86,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.5955926146515783,
"repo_name": "woutdenolf/spectrocrunch",
"id": "a5188e59fefea7f9e3dba84aba5c5554ef350771",
"size": "1704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spectrocrunch/io/edf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "149"
},
{
"name": "PowerShell",
"bytes": "77398"
},
{
"name": "Python",
"bytes": "2175415"
},
{
"name": "Sage",
"bytes": "11773"
},
{
"name": "Shell",
"bytes": "108098"
}
],
"symlink_target": ""
}
|
from flask import request, url_for, g
from flask_peewee.rest import RestAPI, RestrictOwnerResource, Authentication, RestResource
from app import app
from auth import auth
from models import *
class IntApi(RestAPI):
@property
def registry(self):
return self._registry
class IntAuthentication(Authentication):
def __init__(self, auth, protected_methods=None):
super(IntAuthentication, self).__init__(protected_methods)
self.auth = auth
def authorize(self):
if request.method in self.protected_methods:
return self.auth.get_logged_in_user()
return True
class IntRestResource(RestResource):
paginate_by = 10
def get_request_metadata(self, paginated_query):
var = paginated_query.page_var
request_arguments = request.args.copy()
current_page = paginated_query.get_page()
next = previous = ''
if current_page > 1:
request_arguments[var] = current_page - 1
previous = url_for(self.get_url_name('api_list'), **request_arguments)
if current_page < paginated_query.get_pages():
request_arguments[var] = current_page + 1
next = url_for(self.get_url_name('api_list'), **request_arguments)
return {
'model': self.get_api_name(),
'page': current_page,
'pages': paginated_query.get_pages(),
'total': paginated_query.query.count(),
'previous': previous,
'next': next,
}
class IntOwnerResource(IntRestResource, RestrictOwnerResource):
owner_field = 'user'
def validate_owner(self, user, obj):
return user.admin or user == getattr(obj, self.owner_field)
class IntOnlyViewByOwnerResource(IntOwnerResource):
def restrict_get_query(self, user, query):
if not user.admin:
query = query.where(getattr(self.model, self.owner_field) == g.user)
return query
def process_query(self, query):
query = super(IntOwnerResource, self).process_query(query)
return self.restrict_get_query(g.user, query)
class UserResource(IntRestResource):
exclude = ('password',)
class QuizBookResource(IntOnlyViewByOwnerResource):
include_resources = {'user': UserResource}
class QuestionResource(IntRestResource):
pass
class ActivityResource(IntOnlyViewByOwnerResource):
include_resources = {'book': QuizBookResource, 'user': UserResource}
user_auth = IntAuthentication(auth)
api = IntApi(app, prefix='/api/v1', default_auth=user_auth, name='simple_api')
api.register(User, UserResource, auth=Authentication())
api.register(QuizBook, QuizBookResource)
api.register(Question, QuestionResource)
api.register(Activity, ActivityResource)
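# With these registrations, flask_peewee exposes REST endpoints under the
# prefix; the URL names below follow flask_peewee's default lower-cased model
# naming and are shown as an assumption:
#   GET /api/v1/user/            -> paginated user list (password excluded)
#   GET /api/v1/quizbook/?page=2 -> next page, with the metadata built above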
|
{
"content_hash": "75c6bf8cdf958201cc4b4ec9d926e3fd",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 90,
"avg_line_length": 29.361702127659573,
"alnum_prop": 0.6710144927536232,
"repo_name": "ak64th/IntQuiz",
"id": "6326c5ba3cc70c3a1ccb73b75a955372d6cf8715",
"size": "2775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28429"
},
{
"name": "HTML",
"bytes": "25489"
},
{
"name": "Python",
"bytes": "40124"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class NetworkInterfaceDnsSettings(Model):
"""DNS settings of a network interface.
:param dns_servers: List of DNS servers IP addresses. Use
'AzureProvidedDNS' to switch to azure provided DNS resolution.
'AzureProvidedDNS' value cannot be combined with other IPs, it must be the
only value in dnsServers collection.
:type dns_servers: list[str]
:param applied_dns_servers: If the VM that uses this NIC is part of an
Availability Set, then this list will have the union of all DNS servers
from all NICs that are part of the Availability Set. This property is what
is configured on each of those VMs.
:type applied_dns_servers: list[str]
:param internal_dns_name_label: Relative DNS name for this NIC used for
internal communications between VMs in the same virtual network.
:type internal_dns_name_label: str
:param internal_fqdn: Fully qualified DNS name supporting internal
communications between VMs in the same virtual network.
:type internal_fqdn: str
"""
_attribute_map = {
'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
'applied_dns_servers': {'key': 'appliedDnsServers', 'type': '[str]'},
'internal_dns_name_label': {'key': 'internalDnsNameLabel', 'type': 'str'},
'internal_fqdn': {'key': 'internalFqdn', 'type': 'str'},
}
def __init__(self, dns_servers=None, applied_dns_servers=None, internal_dns_name_label=None, internal_fqdn=None):
super(NetworkInterfaceDnsSettings, self).__init__()
self.dns_servers = dns_servers
self.applied_dns_servers = applied_dns_servers
self.internal_dns_name_label = internal_dns_name_label
self.internal_fqdn = internal_fqdn
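# A small construction sketch (addresses and label are illustrative):
#   dns = NetworkInterfaceDnsSettings(
#       dns_servers=['10.0.0.4', '10.0.0.5'],
#       internal_dns_name_label='my-nic')
#   dns.applied_dns_servers is None  # populated by the service, not the client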
|
{
"content_hash": "0497f1db5e58d59b69834900918d03f8",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 117,
"avg_line_length": 48.08108108108108,
"alnum_prop": 0.6908375491849353,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "dc60cbe8fba3c620ca37e8f44ebf1db5294dd21c",
"size": "2253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2015_06_15/models/network_interface_dns_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
import oeis
class TestChoose(TestCase):
def test_equal(self):
r = oeis.choose(9, 9)
self.assertEqual(r, 1)
|
{
"content_hash": "5fd62d5bc5662a1028cacb8dfd077bb9",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 30,
"avg_line_length": 19.875,
"alnum_prop": 0.660377358490566,
"repo_name": "GuySrinivasan/oeis",
"id": "23571de6496372c98dbc4f0ba2caaf88a0747445",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oeis/tests/test_utility.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "456"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2015 SONATA-NFV and Paderborn University
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
"""
This module implements a simple REST API that behaves like SONATA's gatekeeper.
It is only used to support the development of SONATA's SDK tools and to demonstrate
the year 1 version of the emulator until the integration with WP4's orchestrator is done.
"""
import logging
import os
import uuid
import hashlib
import zipfile
import yaml
import threading
from docker import DockerClient, APIClient
from flask import Flask, request
import flask_restful as fr
from collections import defaultdict
import pkg_resources
from subprocess import Popen
from random import randint
import ipaddress
import copy
import time
logging.basicConfig()
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.WARNING)
GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False
# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False
# should a new version of an image be pulled even if its available
FORCE_PULL = False
# Automatically deploy SAPs (endpoints) of the service as new containers
# Attention: This is not a configuration switch but a global variable! Don't change its default value.
DEPLOY_SAP = False
# flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
BIDIRECTIONAL_CHAIN = False
# override the management interfaces in the descriptors with default docker0 interfaces in the containers
USE_DOCKER_MGMT = False
# automatically deploy uploaded packages (no need to execute son-access deploy --latest separately)
AUTO_DEPLOY = False
# and also automatically terminate any other running services
AUTO_DELETE = False
def generate_subnets(prefix, base, subnet_size=50, mask=24):
# Generate a list of ipaddress in subnets
r = list()
for net in range(base, base + subnet_size):
subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
r.append(ipaddress.ip_network(unicode(subnet)))
return r
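# For example (illustrative values):
#   generate_subnets('10.30', 0, subnet_size=2, mask=30)
#   -> [ip_network(u'10.30.0.0/30'), ip_network(u'10.30.1.0/30')]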
# private subnet definitions for the generated interfaces
# 10.10.xxx.0/30
SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=30)
# 10.20.xxx.0/24
ELAN_SUBNETS = generate_subnets('10.20', 0, subnet_size=50, mask=24)
# 10.30.xxx.0/30
ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
# path to the VNFD for the SAP VNF that is deployed as internal SAP point
SAP_VNFD=None
# Time in seconds to wait for vnf stop scripts to execute fully
VNF_STOP_WAIT_TIME = 5
class Gatekeeper(object):
def __init__(self):
self.services = dict()
self.dcs = dict()
self.net = None
self.vnf_counter = 0 # used to generate short names for VNFs (Mininet limitation)
LOG.info("Create SONATA dummy gatekeeper.")
def register_service_package(self, service_uuid, service):
"""
register new service package
:param service_uuid
:param service object
"""
self.services[service_uuid] = service
# lets perform all steps needed to onboard the service
service.onboard()
def get_next_vnf_name(self):
self.vnf_counter += 1
return "vnf%d" % self.vnf_counter
class Service(object):
"""
This class represents a NS uploaded as a *.son package to the
dummy gatekeeper.
Can have multiple running instances of this service.
"""
def __init__(self,
service_uuid,
package_file_hash,
package_file_path):
self.uuid = service_uuid
self.package_file_hash = package_file_hash
self.package_file_path = package_file_path
self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
self.manifest = None
self.nsd = None
self.vnfds = dict()
self.saps = dict()
self.saps_ext = list()
self.saps_int = list()
self.local_docker_files = dict()
self.remote_docker_image_urls = dict()
self.instances = dict()
# dict to find the vnf_name for any vnf id
self.vnf_id2vnf_name = dict()
def onboard(self):
"""
Do all steps to prepare this service to be instantiated
:return:
"""
# 1. extract the contents of the package and store them in our catalog
self._unpack_service_package()
# 2. read in all descriptor files
self._load_package_descriptor()
self._load_nsd()
self._load_vnfd()
if DEPLOY_SAP:
self._load_saps()
# 3. prepare container images (e.g. download or build Dockerfile)
if BUILD_DOCKERFILE:
self._load_docker_files()
self._build_images_from_dockerfiles()
else:
self._load_docker_urls()
self._pull_predefined_dockerimages()
LOG.info("On-boarded service: %r" % self.manifest.get("name"))
def start_service(self):
"""
This methods creates and starts a new service instance.
It computes placements, iterates over all VNFDs, and starts
each VNFD as a Docker container in the data center selected
by the placement algorithm.
:return:
"""
LOG.info("Starting service %r" % self.uuid)
# 1. each service instance gets a new uuid to identify it
instance_uuid = str(uuid.uuid4())
        # build an instances dict (a bit like an NSR :))
self.instances[instance_uuid] = dict()
self.instances[instance_uuid]["vnf_instances"] = list()
# 2. compute placement of this service instance (adds DC names to VNFDs)
if not GK_STANDALONE_MODE:
#self._calculate_placement(FirstDcPlacement)
self._calculate_placement(RoundRobinDcPlacementWithSAPs)
# 3. start all vnfds that we have in the service (except SAPs)
for vnf_id in self.vnfds:
vnfd = self.vnfds[vnf_id]
vnfi = None
if not GK_STANDALONE_MODE:
vnfi = self._start_vnfd(vnfd, vnf_id)
self.instances[instance_uuid]["vnf_instances"].append(vnfi)
# 4. start all SAPs in the service
for sap in self.saps:
self._start_sap(self.saps[sap], instance_uuid)
# 5. Deploy E-Line and E_LAN links
if "virtual_links" in self.nsd:
vlinks = self.nsd["virtual_links"]
# constituent virtual links are not checked
#fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
eline_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-Line")]
elan_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-LAN")]
GK.net.deployed_elines.extend(eline_fwd_links)
GK.net.deployed_elans.extend(elan_fwd_links)
# 5a. deploy E-Line links
self._connect_elines(eline_fwd_links, instance_uuid)
# 5b. deploy E-LAN links
self._connect_elans(elan_fwd_links, instance_uuid)
# 6. run the emulator specific entrypoint scripts in the VNFIs of this service instance
self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])
LOG.info("Service started. Instance id: %r" % instance_uuid)
return instance_uuid
def stop_service(self, instance_uuid):
"""
This method stops a running service instance.
It iterates over all VNF instances, stopping them each
and removing them from their data center.
:param instance_uuid: the uuid of the service instance to be stopped
"""
LOG.info("Stopping service %r" % self.uuid)
# get relevant information
# instance_uuid = str(self.uuid.uuid4())
vnf_instances = self.instances[instance_uuid]["vnf_instances"]
        # trigger stop scripts in vnf instances and wait a few seconds for completion
self._trigger_emulator_stop_scripts_in_vnfis(vnf_instances)
time.sleep(VNF_STOP_WAIT_TIME)
for v in vnf_instances:
self._stop_vnfi(v)
for sap_name in self.saps_ext:
ext_sap = self.saps[sap_name]
target_dc = ext_sap.get("dc")
target_dc.removeExternalSAP(sap_name)
LOG.info("Stopping the SAP instance: %r in DC %r" % (sap_name, target_dc))
if not GK_STANDALONE_MODE:
# remove placement?
# self._remove_placement(RoundRobinPlacement)
None
# last step: remove the instance from the list of all instances
del self.instances[instance_uuid]
def _start_vnfd(self, vnfd, vnf_id, **kwargs):
"""
Start a single VNFD of this service
:param vnfd: vnfd descriptor dict
:param vnf_id: unique id of this vnf in the nsd
:return:
"""
# the vnf_name refers to the container image to be deployed
vnf_name = vnfd.get("name")
# iterate over all deployment units within each VNFDs
for u in vnfd.get("virtual_deployment_units"):
# 1. get the name of the docker image to start and the assigned DC
if vnf_id not in self.remote_docker_image_urls:
raise Exception("No image name for %r found. Abort." % vnf_id)
docker_name = self.remote_docker_image_urls.get(vnf_id)
target_dc = vnfd.get("dc")
# 2. perform some checks to ensure we can start the container
assert(docker_name is not None)
assert(target_dc is not None)
if not self._check_docker_image_exists(docker_name):
raise Exception("Docker image %r not found. Abort." % docker_name)
# 3. get the resource limits
res_req = u.get("resource_requirements")
cpu_list = res_req.get("cpu").get("cores")
if cpu_list is None:
cpu_list = res_req.get("cpu").get("vcpus")
if cpu_list is None:
cpu_list="1"
cpu_bw = res_req.get("cpu").get("cpu_bw")
if not cpu_bw:
cpu_bw=1
mem_num = str(res_req.get("memory").get("size"))
if len(mem_num)==0:
mem_num="2"
mem_unit = str(res_req.get("memory").get("size_unit"))
            if len(mem_unit) == 0:
mem_unit="GB"
mem_limit = float(mem_num)
if mem_unit=="GB":
mem_limit=mem_limit*1024*1024*1024
elif mem_unit=="MB":
mem_limit=mem_limit*1024*1024
elif mem_unit=="KB":
mem_limit=mem_limit*1024
mem_lim = int(mem_limit)
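            # e.g. resource_requirements memory {size: 2, size_unit: "GB"}
            # yields mem_lim = 2 * 1024**3 bytes (the values are illustrative)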
cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
            # check if we need to deploy the management ports (defined as type:management both in the vnfd and the nsd)
intfs = vnfd.get("connection_points", [])
mgmt_intf_names = []
if USE_DOCKER_MGMT:
mgmt_intfs = [vnf_id + ':' + intf['id'] for intf in intfs if intf.get('type') == 'management']
# check if any of these management interfaces are used in a management-type network in the nsd
for nsd_intf_name in mgmt_intfs:
vlinks = [ l["connection_points_reference"] for l in self.nsd.get("virtual_links", [])]
for link in vlinks:
if nsd_intf_name in link and self.check_mgmt_interface(link):
# this is indeed a management interface and can be skipped
vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(nsd_intf_name)
found_interfaces = [intf for intf in intfs if intf.get('id') == vnf_interface]
intfs.remove(found_interfaces[0])
mgmt_intf_names.append(vnf_interface)
# 4. generate the volume paths for the docker container
volumes=list()
# a volume to extract log files
docker_log_path = "/tmp/results/%s/%s"%(self.uuid,vnf_id)
LOG.debug("LOG path for vnf %s is %s."%(vnf_id,docker_log_path))
if not os.path.exists(docker_log_path):
LOG.debug("Creating folder %s"%docker_log_path)
os.makedirs(docker_log_path)
volumes.append(docker_log_path+":/mnt/share/")
# 5. do the dc.startCompute(name="foobar") call to run the container
# TODO consider flavors, and other annotations
# TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
# use the vnf_id in the nsd as docker name
# so deployed containers can be easily mapped back to the nsd
LOG.info("Starting %r as %r in DC %r" % (vnf_name, vnf_id, vnfd.get("dc")))
LOG.debug("Interfaces for %r: %r" % (vnf_id, intfs))
vnfi = target_dc.startCompute(
vnf_id,
network=intfs,
image=docker_name,
flavor_name="small",
cpu_quota=cpu_quota,
cpu_period=cpu_period,
cpuset=cpu_list,
mem_limit=mem_lim,
volumes=volumes,
type=kwargs.get('type','docker'))
# rename the docker0 interfaces (eth0) to the management port name defined in the VNFD
if USE_DOCKER_MGMT:
for intf_name in mgmt_intf_names:
self._vnf_reconfigure_network(vnfi, 'eth0', new_name=intf_name)
return vnfi
def _stop_vnfi(self, vnfi):
"""
Stop a VNF instance.
:param vnfi: vnf instance to be stopped
"""
# Find the correct datacenter
status = vnfi.getStatus()
dc = vnfi.datacenter
# stop the vnfi
LOG.info("Stopping the vnf instance contained in %r in DC %r" % (status["name"], dc))
dc.stopCompute(status["name"])
def _get_vnf_instance(self, instance_uuid, vnf_id):
"""
Returns the Docker object for the given VNF id (or Docker name).
:param instance_uuid: UUID of the service instance to search in.
:param name: VNF name or Docker name. We are fuzzy here.
:return:
"""
dn = vnf_id
for vnfi in self.instances[instance_uuid]["vnf_instances"]:
if vnfi.name == dn:
return vnfi
LOG.warning("No container with name: {0} found.".format(dn))
return None
@staticmethod
def _vnf_reconfigure_network(vnfi, if_name, net_str=None, new_name=None):
"""
Reconfigure the network configuration of a specific interface
of a running container.
:param vnfi: container instance
:param if_name: interface name
:param net_str: network configuration string, e.g., 1.2.3.4/24
:return:
"""
# assign new ip address
if net_str is not None:
intf = vnfi.intf(intf=if_name)
if intf is not None:
intf.setIP(net_str)
LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
else:
LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))
if new_name is not None:
vnfi.cmd('ip link set', if_name, 'down')
vnfi.cmd('ip link set', if_name, 'name', new_name)
vnfi.cmd('ip link set', new_name, 'up')
LOG.debug("Reconfigured interface name of %s:%s to %s" % (vnfi.name, if_name, new_name))
def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
for vnfi in vnfi_list:
config = vnfi.dcinfo.get("Config", dict())
env = config.get("Env", list())
for env_var in env:
var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
LOG.debug("%r = %r" % (var , cmd))
if var=="SON_EMU_CMD":
LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
# execute command in new thread to ensure that GK is not blocked by VNF
t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
t.daemon = True
t.start()
def _trigger_emulator_stop_scripts_in_vnfis(self, vnfi_list):
for vnfi in vnfi_list:
config = vnfi.dcinfo.get("Config", dict())
env = config.get("Env", list())
for env_var in env:
var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
if var=="SON_EMU_CMD_STOP":
LOG.info("Executing stop script in %r: %r" % (vnfi.name, cmd))
# execute command in new thread to ensure that GK is not blocked by VNF
t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
t.daemon = True
t.start()
def _unpack_service_package(self):
"""
unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
"""
LOG.info("Unzipping: %r" % self.package_file_path)
with zipfile.ZipFile(self.package_file_path, "r") as z:
z.extractall(self.package_content_path)
def _load_package_descriptor(self):
"""
Load the main package descriptor YAML and keep it as dict.
:return:
"""
self.manifest = load_yaml(
os.path.join(
self.package_content_path, "META-INF/MANIFEST.MF"))
def _load_nsd(self):
"""
Load the entry NSD YAML and keep it as dict.
:return:
"""
if "entry_service_template" in self.manifest:
nsd_path = os.path.join(
self.package_content_path,
make_relative_path(self.manifest.get("entry_service_template")))
self.nsd = load_yaml(nsd_path)
GK.net.deployed_nsds.append(self.nsd)
# create dict to find the vnf_name for any vnf id
self.vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
reduce(lambda x, y: dict(x, **y),
map(lambda d: {d["vnf_id"]: d["vnf_name"]},
self.nsd["network_functions"])))
LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
def _load_vnfd(self):
"""
Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
:return:
"""
# first make a list of all the vnfds in the package
vnfd_set = dict()
if "package_content" in self.manifest:
for pc in self.manifest.get("package_content"):
if pc.get("content-type") == "application/sonata.function_descriptor":
vnfd_path = os.path.join(
self.package_content_path,
make_relative_path(pc.get("name")))
vnfd = load_yaml(vnfd_path)
vnfd_set[vnfd.get("name")] = vnfd
# then link each vnf_id in the nsd to its vnfd
for vnf_id in self.vnf_id2vnf_name:
vnf_name = self.vnf_id2vnf_name[vnf_id]
self.vnfds[vnf_id] = vnfd_set[vnf_name]
LOG.debug("Loaded VNFD: {0} id: {1}".format(vnf_name, vnf_id))
def _load_saps(self):
# create list of all SAPs
# check if we need to deploy management ports
if USE_DOCKER_MGMT:
SAPs = [p for p in self.nsd["connection_points"] if 'management' not in p.get('type')]
else:
SAPs = [p for p in self.nsd["connection_points"]]
for sap in SAPs:
# endpoint needed in this service
sap_id, sap_interface, sap_docker_name = parse_interface(sap['id'])
# make sure SAP has type set (default internal)
sap["type"] = sap.get("type", 'internal')
# Each Service Access Point (connection_point) in the nsd is an IP address on the host
if sap["type"] == "external":
# add to vnfds to calculate placement later on
sap_net = SAP_SUBNETS.pop(0)
self.saps[sap_docker_name] = {"name": sap_docker_name , "type": "external", "net": sap_net}
# add SAP vnf to list in the NSD so it is deployed later on
# each SAP gets a unique VNFD and vnf_id in the NSD and custom type (only defined in the dummygatekeeper)
self.nsd["network_functions"].append(
{"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_ext"})
# Each Service Access Point (connection_point) in the nsd is getting its own container (default)
elif sap["type"] == "internal" or sap["type"] == "management":
# add SAP to self.vnfds
if SAP_VNFD is None:
sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
else:
sapfile = SAP_VNFD
sap_vnfd = load_yaml(sapfile)
sap_vnfd["connection_points"][0]["id"] = sap_interface
sap_vnfd["name"] = sap_docker_name
sap_vnfd["type"] = "internal"
# add to vnfds to calculate placement later on and deploy
self.saps[sap_docker_name] = sap_vnfd
# add SAP vnf to list in the NSD so it is deployed later on
                # each SAP gets a unique VNFD and vnf_id in the NSD
self.nsd["network_functions"].append(
{"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_int"})
LOG.debug("Loaded SAP: name: {0}, type: {1}".format(sap_docker_name, sap['type']))
# create sap lists
self.saps_ext = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "external"]
self.saps_int = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "internal"]
def _start_sap(self, sap, instance_uuid):
if not DEPLOY_SAP:
return
        LOG.info('start SAP: {0}, type: {1}'.format(sap['name'], sap['type']))
if sap["type"] == "internal":
vnfi = None
if not GK_STANDALONE_MODE:
vnfi = self._start_vnfd(sap, sap['name'], type='sap_int')
self.instances[instance_uuid]["vnf_instances"].append(vnfi)
elif sap["type"] == "external":
target_dc = sap.get("dc")
# add interface to dc switch
target_dc.attachExternalSAP(sap['name'], sap['net'])
def _connect_elines(self, eline_fwd_links, instance_uuid):
"""
Connect all E-LINE links in the NSD
:param eline_fwd_links: list of E-LINE links in the NSD
:param: instance_uuid of the service
:return:
"""
# cookie is used as identifier for the flowrules installed by the dummygatekeeper
        # e.g. different services get a unique cookie for their flowrules
cookie = 1
for link in eline_fwd_links:
            # check if we need to deploy this link when it's a management link:
if USE_DOCKER_MGMT:
if self.check_mgmt_interface(link["connection_points_reference"]):
continue
src_id, src_if_name, src_sap_id = parse_interface(link["connection_points_reference"][0])
dst_id, dst_if_name, dst_sap_id = parse_interface(link["connection_points_reference"][1])
setChaining = False
# check if there is a SAP in the link and chain everything together
if src_sap_id in self.saps and dst_sap_id in self.saps:
LOG.info('2 SAPs cannot be chained together : {0} - {1}'.format(src_sap_id, dst_sap_id))
continue
elif src_sap_id in self.saps_ext:
src_id = src_sap_id
# set intf name to None so the chaining function will choose the first one
src_if_name = None
dst_vnfi = self._get_vnf_instance(instance_uuid, dst_id)
if dst_vnfi is not None:
# choose first ip address in sap subnet
sap_net = self.saps[src_sap_id]['net']
sap_ip = "{0}/{1}".format(str(sap_net[2]), sap_net.prefixlen)
self._vnf_reconfigure_network(dst_vnfi, dst_if_name, sap_ip)
setChaining = True
elif dst_sap_id in self.saps_ext:
dst_id = dst_sap_id
# set intf name to None so the chaining function will choose the first one
dst_if_name = None
src_vnfi = self._get_vnf_instance(instance_uuid, src_id)
if src_vnfi is not None:
sap_net = self.saps[dst_sap_id]['net']
sap_ip = "{0}/{1}".format(str(sap_net[2]), sap_net.prefixlen)
self._vnf_reconfigure_network(src_vnfi, src_if_name, sap_ip)
setChaining = True
# Link between 2 VNFs
else:
# make sure we use the correct sap vnf name
if src_sap_id in self.saps_int:
src_id = src_sap_id
if dst_sap_id in self.saps_int:
dst_id = dst_sap_id
# re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
src_vnfi = self._get_vnf_instance(instance_uuid, src_id)
dst_vnfi = self._get_vnf_instance(instance_uuid, dst_id)
if src_vnfi is not None and dst_vnfi is not None:
eline_net = ELINE_SUBNETS.pop(0)
ip1 = "{0}/{1}".format(str(eline_net[1]), eline_net.prefixlen)
ip2 = "{0}/{1}".format(str(eline_net[2]), eline_net.prefixlen)
self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
setChaining = True
# Set the chaining
if setChaining:
ret = GK.net.setChain(
src_id, dst_id,
vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
LOG.debug(
"Setting up E-Line link. (%s:%s) -> (%s:%s)" % (
src_id, src_if_name, dst_id, dst_if_name))
def _connect_elans(self, elan_fwd_links, instance_uuid):
"""
Connect all E-LAN links in the NSD
:param elan_fwd_links: list of E-LAN links in the NSD
:param: instance_uuid of the service
:return:
"""
for link in elan_fwd_links:
            # check if we need to deploy this link when it's a management link:
if USE_DOCKER_MGMT:
if self.check_mgmt_interface(link["connection_points_reference"]):
continue
elan_vnf_list = []
# check if an external SAP is in the E-LAN (then a subnet is already defined)
intfs_elan = [intf for intf in link["connection_points_reference"]]
lan_sap = self.check_ext_saps(intfs_elan)
if lan_sap:
lan_net = self.saps[lan_sap]['net']
lan_hosts = list(lan_net.hosts())
sap_ip = str(lan_hosts.pop(0))
else:
lan_net = ELAN_SUBNETS.pop(0)
lan_hosts = list(lan_net.hosts())
# generate lan ip address for all interfaces except external SAPs
for intf in link["connection_points_reference"]:
# skip external SAPs, they already have an ip
vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(intf)
if vnf_sap_docker_name in self.saps_ext:
elan_vnf_list.append({'name': vnf_sap_docker_name, 'interface': vnf_interface})
continue
ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)), lan_net.prefixlen)
vnf_id, intf_name, vnf_sap_id = parse_interface(intf)
# make sure we use the correct sap vnf name
src_docker_name = vnf_id
if vnf_sap_id in self.saps_int:
src_docker_name = vnf_sap_id
vnf_id = vnf_sap_id
LOG.debug(
"Setting up E-LAN interface. (%s:%s) -> %s" % (
vnf_id, intf_name, ip_address))
# re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
# E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
# (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
vnfi = self._get_vnf_instance(instance_uuid, vnf_id)
if vnfi is not None:
self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
# add this vnf and interface to the E-LAN for tagging
elan_vnf_list.append({'name': src_docker_name, 'interface': intf_name})
# install the VLAN tags for this E-LAN
GK.net.setLAN(elan_vnf_list)
def _load_docker_files(self):
"""
Get all paths to Dockerfiles from VNFDs and store them in dict.
:return:
"""
for k, v in self.vnfds.iteritems():
for vu in v.get("virtual_deployment_units"):
if vu.get("vm_image_format") == "docker":
vm_image = vu.get("vm_image")
docker_path = os.path.join(
self.package_content_path,
make_relative_path(vm_image))
self.local_docker_files[k] = docker_path
LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))
def _load_docker_urls(self):
"""
        Get all URLs to pre-built Docker images in some repo.
:return:
"""
# also merge sap dicts, because internal saps also need a docker container
all_vnfs = self.vnfds.copy()
all_vnfs.update(self.saps)
for k, v in all_vnfs.iteritems():
for vu in v.get("virtual_deployment_units", {}):
if vu.get("vm_image_format") == "docker":
url = vu.get("vm_image")
if url is not None:
url = url.replace("http://", "")
self.remote_docker_image_urls[k] = url
LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))
def _build_images_from_dockerfiles(self):
"""
Build Docker images for each local Dockerfile found in the package: self.local_docker_files
"""
if GK_STANDALONE_MODE:
return # do not build anything in standalone mode
dc = DockerClient()
LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
for k, v in self.local_docker_files.iteritems():
for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
LOG.debug("DOCKER BUILD: %s" % line)
LOG.info("Docker image created: %s" % k)
def _pull_predefined_dockerimages(self):
"""
        If the package contains URLs to pre-built Docker images, we download them with this method.
"""
dc = DockerClient()
for url in self.remote_docker_image_urls.itervalues():
if not FORCE_PULL: # only pull if not present (speedup for development)
if len(dc.images.list(name=url)) > 0:
LOG.debug("Image %r present. Skipping pull." % url)
continue
LOG.info("Pulling image: %r" % url)
# this seems to fail with latest docker api version 2.0.2
# dc.images.pull(url,
# insecure_registry=True)
            # using the docker CLI instead
cmd = ["docker",
"pull",
url,
]
Popen(cmd).wait()
def _check_docker_image_exists(self, image_name):
"""
Query the docker service and check if the given image exists
:param image_name: name of the docker image
:return:
"""
return len(DockerClient().images.list(name=image_name)) > 0
def _calculate_placement(self, algorithm):
"""
        Do placement by adding a field "dc" to
each VNFD that points to one of our
data center objects known to the gatekeeper.
"""
assert(len(self.vnfds) > 0)
assert(len(GK.dcs) > 0)
        # instantiate algorithm and place
p = algorithm()
p.place(self.nsd, self.vnfds, self.saps, GK.dcs)
LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
# lets print the placement result
for name, vnfd in self.vnfds.iteritems():
LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
for sap in self.saps:
sap_dict = self.saps[sap]
LOG.info("Placed SAP %r on DC %r" % (sap, str(sap_dict.get("dc"))))
def _calculate_cpu_cfs_values(self, cpu_time_percentage):
"""
Calculate cpu period and quota for CFS
:param cpu_time_percentage: percentage of overall CPU to be used
:return: cpu_period, cpu_quota
"""
if cpu_time_percentage is None:
return -1, -1
if cpu_time_percentage < 0:
return -1, -1
# (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
        # Attention: the minimum cpu_quota is 1 ms (1000 microseconds)
        cpu_period = 1000000  # let's use a fixed period of 1000000 microseconds for now
LOG.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period, cpu_time_percentage))
cpu_quota = cpu_period * cpu_time_percentage # calculate the fraction of cpu time for this container
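        # e.g. cpu_time_percentage=0.5 -> cpu_quota = 1000000 * 0.5 = 500000,
        # i.e. the container may use half a CPU per period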
        # ATTENTION: cpu_quota must be >= 1000 to avoid an "invalid argument" system
        # error (the kernel enforces a 1 ms minimum quota, see sched-bwc.txt above)
if cpu_quota < 1000:
LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
cpu_quota = 1000
LOG.warning("Increased CPU quota to avoid system error.")
LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period, cpu_quota))
return int(cpu_period), int(cpu_quota)
def check_ext_saps(self, intf_list):
        # check if the list of interfaces contains an external SAP
saps_ext = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "external"]
for intf_name in intf_list:
vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(intf_name)
if vnf_sap_docker_name in saps_ext:
return vnf_sap_docker_name
def check_mgmt_interface(self, intf_list):
SAPs_mgmt = [p.get('id') for p in self.nsd["connection_points"] if 'management' in p.get('type')]
for intf_name in intf_list:
if intf_name in SAPs_mgmt:
return True
"""
Some (simple) placement algorithms
"""
class FirstDcPlacement(object):
"""
Placement: Always use one and the same data center from the GK.dcs dict.
"""
def place(self, nsd, vnfds, saps, dcs):
for id, vnfd in vnfds.iteritems():
vnfd["dc"] = list(dcs.itervalues())[0]
class RoundRobinDcPlacement(object):
"""
Placement: Distribute VNFs across all available DCs in a round robin fashion.
"""
def place(self, nsd, vnfds, saps, dcs):
c = 0
dcs_list = list(dcs.itervalues())
for id, vnfd in vnfds.iteritems():
vnfd["dc"] = dcs_list[c % len(dcs_list)]
c += 1 # inc. c to use next DC
class RoundRobinDcPlacementWithSAPs(object):
"""
Placement: Distribute VNFs across all available DCs in a round robin fashion,
every SAP is instantiated on the same DC as the connected VNF.
"""
def place(self, nsd, vnfds, saps, dcs):
# place vnfs
c = 0
dcs_list = list(dcs.itervalues())
for id, vnfd in vnfds.iteritems():
vnfd["dc"] = dcs_list[c % len(dcs_list)]
c += 1 # inc. c to use next DC
# place SAPs
vlinks = nsd.get("virtual_links", [])
eline_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-Line")]
elan_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-LAN")]
# SAPs on E-Line links are placed on the same DC as the VNF on the E-Line
for link in eline_fwd_links:
src_id, src_if_name, src_sap_id = parse_interface(link["connection_points_reference"][0])
dst_id, dst_if_name, dst_sap_id = parse_interface(link["connection_points_reference"][1])
# check if there is a SAP in the link
if src_sap_id in saps:
# get dc where connected vnf is mapped to
dc = vnfds[dst_id]['dc']
saps[src_sap_id]['dc'] = dc
if dst_sap_id in saps:
# get dc where connected vnf is mapped to
dc = vnfds[src_id]['dc']
saps[dst_sap_id]['dc'] = dc
# SAPs on E-LANs are placed on a random DC
dcs_list = list(dcs.itervalues())
dc_len = len(dcs_list)
for link in elan_fwd_links:
for intf in link["connection_points_reference"]:
# find SAP interfaces
intf_id, intf_name, intf_sap_id = parse_interface(intf)
if intf_sap_id in saps:
dc = dcs_list[randint(0, dc_len-1)]
saps[intf_sap_id]['dc'] = dc
"""
Resource definitions and API endpoints
"""
class Packages(fr.Resource):
def post(self):
"""
Upload a *.son service package to the dummy gatekeeper.
        We expect a request with a *.son file and store it in UPLOAD_FOLDER
:return: UUID
"""
try:
# get file contents
LOG.info("POST /packages called")
# lets search for the package in the request
is_file_object = False # make API more robust: file can be in data or in files field
if "package" in request.files:
son_file = request.files["package"]
is_file_object = True
elif len(request.data) > 0:
son_file = request.data
else:
return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            if is_file_object:
                son_file.save(upload_path)
            else:
                with open(upload_path, 'wb') as f:
                    f.write(son_file)
            size = os.path.getsize(upload_path)
            # hash the stored package contents (hashing str(son_file) would hash
            # the in-memory object's repr, not the file data)
            with open(upload_path, 'rb') as f:
                file_hash = hashlib.sha1(f.read()).hexdigest()
# first stop and delete any other running services
if AUTO_DELETE:
service_list = copy.copy(GK.services)
                for old_service_uuid in service_list:  # don't shadow the new service_uuid from above
                    instances_list = copy.copy(GK.services[old_service_uuid].instances)
                    for instance_uuid in instances_list:
                        # valid service and instance UUID, stop service
                        GK.services.get(old_service_uuid).stop_service(instance_uuid)
                        LOG.info("service instance with uuid %r stopped." % instance_uuid)
# create a service object and register it
s = Service(service_uuid, file_hash, upload_path)
GK.register_service_package(service_uuid, s)
# automatically deploy the service
if AUTO_DEPLOY:
# ok, we have a service uuid, lets start the service
reset_subnets()
service_instance_uuid = GK.services.get(service_uuid).start_service()
# generate the JSON result
return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
except Exception as ex:
LOG.exception("Service package upload failed:")
return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500
def get(self):
"""
Return a list of UUID's of uploaded service packages.
:return: dict/list
"""
LOG.info("GET /packages")
return {"service_uuid_list": list(GK.services.iterkeys())}
class Instantiations(fr.Resource):
def post(self):
"""
Instantiate a service specified by its UUID.
Will return a new UUID to identify the running service instance.
:return: UUID
"""
LOG.info("POST /instantiations (or /requests) called")
# try to extract the service uuid from the request
json_data = request.get_json(force=True)
service_uuid = json_data.get("service_uuid")
# lets be a bit fuzzy here to make testing easier
        if (service_uuid is None or service_uuid == "latest") and len(GK.services) > 0:
            # if we don't get a service uuid, we simply start the first service in the list
service_uuid = list(GK.services.iterkeys())[0]
if service_uuid in GK.services:
# ok, we have a service uuid, lets start the service
service_instance_uuid = GK.services.get(service_uuid).start_service()
return {"service_instance_uuid": service_instance_uuid}, 201
return "Service not found", 404
def get(self):
"""
Returns a list of UUIDs containing all running services.
:return: dict / list
"""
LOG.info("GET /instantiations")
return {"service_instantiations_list": [
list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
def delete(self):
"""
Stops a running service specified by its service and instance UUID.
"""
# try to extract the service and instance UUID from the request
json_data = request.get_json(force=True)
service_uuid = json_data.get("service_uuid")
instance_uuid = json_data.get("service_instance_uuid")
# try to be fuzzy
if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply stop the first service in the list
service_uuid = list(GK.services.iterkeys())[0]
if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
instance_uuid = list(GK.services[service_uuid].instances.iterkeys())[0]
if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
# valid service and instance UUID, stop service
GK.services.get(service_uuid).stop_service(instance_uuid)
return "service instance with uuid %r stopped." % instance_uuid,200
return "Service not found", 404
class Exit(fr.Resource):
def put(self):
"""
Stop the running Containernet instance regardless of data transmitted
"""
list(GK.dcs.values())[0].net.stop()
def initialize_GK():
global GK
GK = Gatekeeper()
# create a single, global GK object
GK = None
initialize_GK()
# setup Flask
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages', '/api/v2/packages')
api.add_resource(Instantiations, '/instantiations', '/api/v2/instantiations', '/api/v2/requests')
api.add_resource(Exit, '/emulator/exit')
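# Example interaction with the gatekeeper API (a sketch; assumes the REST API was
# started on port 8000 as in the standalone mode below, and a package file named
# 'example.son' -- both are illustrative assumptions):
#   curl -X POST -F package=@example.son http://127.0.0.1:8000/packages
#   curl -X POST -d '{"service_uuid": "latest"}' http://127.0.0.1:8000/instantiations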
def start_rest_api(host, port, datacenters=dict()):
GK.dcs = datacenters
GK.net = get_dc_network()
# start the Flask server (not the best performance but ok for our use case)
app.run(host=host,
port=port,
debug=True,
use_reloader=False # this is needed to run Flask in a non-main thread
)
def ensure_dir(name):
if not os.path.exists(name):
os.makedirs(name)
def load_yaml(path):
with open(path, "r") as f:
try:
            r = yaml.safe_load(f)  # safe_load avoids constructing arbitrary Python objects
except yaml.YAMLError as exc:
LOG.exception("YAML parse error")
r = dict()
return r
def make_relative_path(path):
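    # e.g. "file:///docker/vnf.Dockerfile" -> "docker/vnf.Dockerfile"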
if path.startswith("file://"):
path = path.replace("file://", "", 1)
if path.startswith("/"):
path = path.replace("/", "", 1)
return path
def get_dc_network():
"""
retrieve the DCnetwork where this dummygatekeeper (GK) connects to.
Assume at least 1 datacenter is connected to this GK, and that all datacenters belong to the same DCNetwork
:return:
"""
assert (len(GK.dcs) > 0)
    return list(GK.dcs.values())[0].net
def parse_interface(interface_name):
"""
    convert the interface name in the nsd to the corresponding vnf_id, vnf_interface names
:param interface_name:
:return:
"""
if ':' in interface_name:
vnf_id, vnf_interface = interface_name.split(':')
vnf_sap_docker_name = interface_name.replace(':', '_')
else:
vnf_id = interface_name
vnf_interface = interface_name
vnf_sap_docker_name = interface_name
return vnf_id, vnf_interface, vnf_sap_docker_name
def reset_subnets():
# private subnet definitions for the generated interfaces
    # 10.10.xxx.0/30
global SAP_SUBNETS
SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=30)
    # 10.20.xxx.0/24
global ELAN_SUBNETS
ELAN_SUBNETS = generate_subnets('10.20', 0, subnet_size=50, mask=24)
# 10.30.xxx.0/30
global ELINE_SUBNETS
ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
if __name__ == '__main__':
"""
Lets allow to run the API in standalone mode.
"""
GK_STANDALONE_MODE = True
logging.getLogger("werkzeug").setLevel(logging.INFO)
start_rest_api("0.0.0.0", 8000)
|
{
"content_hash": "6afc3ddbf84475c3213a94c46736ba1c",
"timestamp": "",
"source": "github",
"line_count": 1164,
"max_line_length": 121,
"avg_line_length": 41.28865979381443,
"alnum_prop": 0.578401997503121,
"repo_name": "knodir/son-emu",
"id": "759ed419000544f59e762aee2673f43d1a8bf882",
"size": "48060",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/emuvim/api/sonata/dummygatekeeper.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "625"
},
{
"name": "HTML",
"bytes": "6268"
},
{
"name": "JavaScript",
"bytes": "13295"
},
{
"name": "Python",
"bytes": "1601028"
},
{
"name": "Shell",
"bytes": "21871"
}
],
"symlink_target": ""
}
|
'''Arsenal API network_interfaces.'''
# Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from datetime import datetime
from pyramid.view import view_config
from sqlalchemy.orm.exc import NoResultFound
from arsenalweb.models.common import (
DBSession,
)
from arsenalweb.models.ip_addresses import (
IpAddressAudit,
)
from arsenalweb.models.nodes import (
NodeAudit,
)
from arsenalweb.models.network_interfaces import (
NetworkInterface,
NetworkInterfaceAudit,
)
from arsenalweb.views import (
get_authenticated_user,
)
from arsenalweb.views.api.common import (
api_200,
api_400,
api_404,
api_500,
api_501,
collect_params,
)
LOG = logging.getLogger(__name__)
# Functions
def find_net_if_by_unique_id(unique_id):
    '''Find a network_interface by unique_id. Returns an object if found, raises
    an exception otherwise.'''
unique_id = unique_id.lower()
LOG.debug('Searching for network_interface.unique_id: '
'{0}'.format(unique_id))
net_if = DBSession.query(NetworkInterface)
net_if = net_if.filter(NetworkInterface.unique_id == unique_id)
return net_if.one()
def find_net_if_by_id(net_if_id):
    '''Find a network_interface by id. Returns an object if found, raises
    an exception otherwise.'''
LOG.debug('Searching for network_interface.id: {0}'.format(net_if_id))
net_if = DBSession.query(NetworkInterface)
net_if = net_if.filter(NetworkInterface.id == net_if_id)
return net_if.one()
def create_net_if(name=None,
unique_id=None,
updated_by=None,
ip_address_id=None,
bond_master=None,
port_description=None,
port_number=None,
port_switch=None,
port_vlan=None,):
'''Create a new network_interface.'''
try:
# Convert everything that is defined to a string.
my_attribs = locals().copy()
for my_attr in my_attribs:
if my_attribs.get(my_attr):
my_attribs[my_attr] = str(my_attribs[my_attr])
# Guarantee unique_id is lowercase
unique_id = unique_id.lower()
# Guarantee ip_address_id is an int
try:
ip_address_id = int(ip_address_id)
except TypeError:
pass
LOG.info('Creating new network_interface name: {0} unique_id: {1} '
'ip_address_id: {2} updated_by: {3} bond_master: {4} port_description: '
'{5} port_number: {6} port_switch: {7} port_vlan: '
'{8}'.format(name,
unique_id,
ip_address_id,
updated_by,
bond_master,
port_description,
port_number,
port_switch,
port_vlan))
utcnow = datetime.utcnow()
net_if = NetworkInterface(name=name,
unique_id=unique_id,
ip_address_id=ip_address_id,
bond_master=bond_master,
port_description=port_description,
port_number=port_number,
port_switch=port_switch,
port_vlan=port_vlan,
updated_by=updated_by,
created=utcnow,
updated=utcnow)
DBSession.add(net_if)
DBSession.flush()
audit = NetworkInterfaceAudit(object_id=net_if.id,
field='unique_id',
old_value='created',
new_value=net_if.unique_id,
updated_by=updated_by,
created=utcnow)
DBSession.add(audit)
if ip_address_id:
LOG.debug('Creating audit entry for ip_address assignment '
'to network_interface...')
ip_addr_audit = IpAddressAudit(object_id=ip_address_id,
field='net_if_assignment',
old_value='created',
new_value=net_if.id,
updated_by=updated_by,
created=utcnow)
DBSession.add(ip_addr_audit)
DBSession.flush()
return net_if
except Exception as ex:
msg = 'Error creating new network_interface name: {0} unique_id: {1} ' \
'ip_address_id: {2} updated_by: {3} bond_master: {4} port_description: ' \
'{5} port_number: {6} port_switch: {7} port_vlan: {8} ' \
'exception: {9}'.format(name,
unique_id,
ip_address_id,
updated_by,
bond_master,
port_description,
port_number,
port_switch,
port_vlan,
ex)
LOG.error(msg)
return api_500(msg=msg)
def update_net_if(net_if,
name=None,
unique_id=None,
updated_by=None,
ip_address_id=None,
bond_master=None,
port_description=None,
port_number=None,
port_switch=None,
port_vlan=None,):
'''Update an existing network_interface.'''
try:
# Convert everything that is defined to a string.
my_attribs = locals().copy()
my_attribs.pop('net_if')
for my_attr in my_attribs:
if my_attribs.get(my_attr):
my_attribs[my_attr] = str(my_attribs[my_attr])
# Guarantee unique_id is lowercase
my_attribs['unique_id'] = my_attribs['unique_id'].lower()
# Guarantee ip_address_id is an int
try:
my_attribs['ip_address_id'] = int(my_attribs['ip_address_id'])
except TypeError:
pass
LOG.info('Updating network_interface.unique_id: '
'{0}'.format(my_attribs['unique_id']))
utcnow = datetime.utcnow()
for attribute in my_attribs:
if attribute == 'unique_id':
LOG.debug('Skipping update to unique_id.')
continue
old_value = getattr(net_if, attribute)
new_value = my_attribs[attribute]
if old_value != new_value and new_value:
if not old_value:
old_value = 'None'
LOG.debug('Updating network_interface: {0} attribute: '
'{1} new_value: {2}'.format(my_attribs['unique_id'],
attribute,
new_value))
net_if_audit = NetworkInterfaceAudit(object_id=net_if.id,
field=attribute,
old_value=old_value,
new_value=new_value,
updated_by=updated_by,
created=utcnow)
DBSession.add(net_if_audit)
setattr(net_if, attribute, new_value)
if attribute == 'ip_address_id':
LOG.debug('Creating audit entry for ip_address assignment '
'to network_interface...')
ip_addr_audit = IpAddressAudit(object_id=my_attribs['ip_address_id'],
field='net_if_assignment',
old_value=old_value,
new_value=new_value,
updated_by=updated_by,
created=utcnow)
DBSession.add(ip_addr_audit)
DBSession.flush()
return net_if
except Exception as ex:
msg = 'Error updating network_interface name: {0} unique_id: {1} ' \
'ip_address_id: {2} user: {3} bond_master: {4} port_description: ' \
'{5} port_number: {6} port_switch: {7} port_vlan: {8} ' \
'exception: {9}'.format(name,
my_attribs['unique_id'],
ip_address_id,
updated_by,
bond_master,
port_description,
port_number,
port_switch,
port_vlan,
ex)
LOG.error(msg)
raise
def net_ifs_to_node(network_interfaces, node, action, user_id):
    '''Manage network_interface assignments/deassignments to a node. Takes a
    list of network_interface objects and assigns/deassigns them to/from the node.
network_interfaces: a list of NetworkInterface objects to assign to a node.
node: A Node object to assign the network_interfaces to.
action: A string defining whether to assign ('PUT') or de-assign
('DELETE') the network interfaces to/from the node.
    user_id: A string representing the user_id making this change.
'''
resp = {node.name: []}
try:
for net_if in network_interfaces:
resp[node.name].append(net_if.unique_id)
utcnow = datetime.utcnow()
if action == 'PUT':
                if net_if not in node.network_interfaces:
node.network_interfaces.append(net_if)
audit = NodeAudit(object_id=node.id,
field='network_interface_id',
old_value='assigned',
new_value=net_if.id,
updated_by=user_id,
created=utcnow)
DBSession.add(audit)
if action == 'DELETE':
try:
node.network_interfaces.remove(net_if)
audit = NodeAudit(object_id=node.id,
field='network_interface_id',
old_value=net_if.id,
new_value='deassigned',
updated_by=user_id,
created=utcnow)
DBSession.add(audit)
                except (ValueError, AttributeError):
                    try:
                        # detach the pending audit entry from the session, if one
                        # was created (scoped_session.remove() takes no arguments)
                        DBSession.expunge(audit)
                    except UnboundLocalError:
                        pass
DBSession.add(node)
DBSession.flush()
except (NoResultFound, AttributeError):
return api_404(msg='node not found')
except Exception as ex:
msg = 'Error updating node: exception={0}'.format(ex)
LOG.error(msg)
return api_500(msg=msg)
return api_200(results=resp)
# Routes
@view_config(route_name='api_network_interfaces', request_method='GET', request_param='schema=true', renderer='json')
def api_network_interfaces_schema(request):
'''Schema document for the network_interfaces API.'''
network_interfaces = {
}
return network_interfaces
# FIXME: Need to create the perms if we start allowing manual updates
@view_config(route_name='api_network_interfaces', permission='network_interface_write', request_method='PUT', renderer='json')
def api_network_interfaces_write(request):
'''Process write requests for /api/network_interfaces route.'''
try:
req_params = [
'name',
'unique_id',
]
opt_params = [
'ip_address',
'bond_master',
'port_description',
'port_number',
'port_switch',
'port_vlan',
]
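        # Example payload (hypothetical values; the two required params plus any
        # of the optional ones):
        #   {"name": "eth0", "unique_id": "00:25:90:ab:cd:ef", "port_vlan": "100"}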
params = collect_params(request, req_params, opt_params)
params['unique_id'] = params['unique_id'].lower()
try:
net_if = find_net_if_by_unique_id(params['unique_id'])
update_net_if(net_if, **params)
except NoResultFound:
net_if = create_net_if(**params)
return net_if
except Exception as ex:
        msg = 'Error writing to network_interfaces API={0}, exception={1}'.format(request.url, ex)
LOG.error(msg)
return api_500(msg=msg)
# FIXME: Need to create the perms if we start allowing manual add/delete
@view_config(route_name='api_network_interface_r', permission='network_interface_delete', request_method='DELETE', renderer='json')
@view_config(route_name='api_network_interface_r', permission='network_interface_write', request_method='PUT', renderer='json')
def api_network_interface_write_attrib(request):
'''Process write requests for the /api/network_interfaces/{id}/{resource} route.'''
resource = request.matchdict['resource']
payload = request.json_body
auth_user = get_authenticated_user(request)
LOG.debug('Updating {0}'.format(request.url))
# First get the network_interfaces, then figure out what to do to it.
net_if = find_net_if_by_id(request.matchdict['id'])
LOG.debug('net_if is: {0}'.format(net_if))
# List of resources allowed
resources = [
'undef',
]
if resource in resources:
try:
actionable = payload[resource]
if resource == 'undef':
LOG.warn('Not allowed.')
resp = []
except KeyError:
msg = 'Missing required parameter: {0}'.format(resource)
return api_400(msg=msg)
except Exception as ex:
LOG.error('Error updating network_interfaces: {0} '
'exception: {1}'.format(request.url, ex))
return api_500(msg=str(ex))
else:
return api_501()
return resp
|
{
"content_hash": "4230296456d1c9429762f0ecd92162fc",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 131,
"avg_line_length": 38.484771573604064,
"alnum_prop": 0.4971311745696762,
"repo_name": "CityGrid/arsenal",
"id": "5920b99de3c4781d5c2fba697b0c62f3bcbc2ac1",
"size": "15163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/arsenalweb/views/api/network_interfaces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "33167"
},
{
"name": "JavaScript",
"bytes": "17879"
},
{
"name": "Python",
"bytes": "191013"
},
{
"name": "Shell",
"bytes": "1367"
}
],
"symlink_target": ""
}
|
import copy
import errno
import os
import time
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
import six
from six.moves.urllib import parse as urllib
from tempest.common import glance_http
from tempest import exceptions
from tempest.lib.common import rest_client
from tempest.lib.common.utils import misc as misc_utils
from tempest.lib import exceptions as lib_exc
LOG = logging.getLogger(__name__)
class ImagesClient(rest_client.RestClient):
def __init__(self, auth_provider, catalog_type, region, **kwargs):
super(ImagesClient, self).__init__(
auth_provider, catalog_type, region, **kwargs)
self._http = None
self.dscv = kwargs.get("disable_ssl_certificate_validation")
self.ca_certs = kwargs.get("ca_certs")
def _image_meta_from_headers(self, headers):
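        # e.g. {'x-image-meta-property-foo': 'bar', 'x-image-meta-size': '10'}
        # -> {'properties': {'foo': 'bar'}, 'size': 10}
        # (hypothetical header values; the prefixes stripped below are
        # 'x-image-meta-property-' and 'x-image-meta-')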
meta = {'properties': {}}
for key, value in six.iteritems(headers):
if key.startswith('x-image-meta-property-'):
_key = key[22:]
meta['properties'][_key] = value
elif key.startswith('x-image-meta-'):
_key = key[13:]
meta[_key] = value
for key in ['is_public', 'protected', 'deleted']:
if key in meta:
meta[key] = meta[key].strip().lower() in ('t', 'true', 'yes',
'1')
for key in ['size', 'min_ram', 'min_disk']:
if key in meta:
try:
meta[key] = int(meta[key])
except ValueError:
pass
return meta
def _image_meta_to_headers(self, fields):
headers = {}
fields_copy = copy.deepcopy(fields)
copy_from = fields_copy.pop('copy_from', None)
if copy_from is not None:
headers['x-glance-api-copy-from'] = copy_from
for key, value in six.iteritems(fields_copy.pop('properties', {})):
headers['x-image-meta-property-%s' % key] = str(value)
for key, value in six.iteritems(fields_copy.pop('api', {})):
headers['x-glance-api-property-%s' % key] = str(value)
for key, value in six.iteritems(fields_copy):
headers['x-image-meta-%s' % key] = str(value)
return headers
def _get_file_size(self, obj):
"""Analyze file-like object and attempt to determine its size.
:param obj: file-like object, typically redirected from stdin.
:retval The file's size or None if it cannot be determined.
"""
# For large images, we need to supply the size of the
# image file. See LP Bugs #827660 and #845788.
if hasattr(obj, 'seek') and hasattr(obj, 'tell'):
try:
obj.seek(0, os.SEEK_END)
obj_size = obj.tell()
obj.seek(0)
return obj_size
except IOError as e:
if e.errno == errno.ESPIPE:
# Illegal seek. This means the user is trying
# to pipe image data to the client, e.g.
# echo testdata | bin/glance add blah..., or
# that stdin is empty, or that a file-like
# object which doesn't support 'seek/tell' has
# been supplied.
return None
else:
raise
else:
# Cannot determine size of input image
return None
def _get_http(self):
return glance_http.HTTPClient(auth_provider=self.auth_provider,
filters=self.filters,
insecure=self.dscv,
ca_certs=self.ca_certs)
def _create_with_data(self, headers, data):
resp, body_iter = self.http.raw_request('POST', '/v1/images',
headers=headers, body=data)
self._error_checker('POST', '/v1/images', headers, data, resp,
body_iter)
body = json.loads(''.join([c for c in body_iter]))
return rest_client.ResponseBody(resp, body)
def _update_with_data(self, image_id, headers, data):
url = '/v1/images/%s' % image_id
resp, body_iter = self.http.raw_request('PUT', url, headers=headers,
body=data)
self._error_checker('PUT', url, headers, data,
resp, body_iter)
body = json.loads(''.join([c for c in body_iter]))
return rest_client.ResponseBody(resp, body)
@property
def http(self):
if self._http is None:
self._http = self._get_http()
return self._http
def create_image(self, **kwargs):
headers = {}
data = kwargs.pop('data', None)
headers.update(self._image_meta_to_headers(kwargs))
if data is not None:
return self._create_with_data(headers, data)
resp, body = self.post('v1/images', None, headers)
self.expected_success(201, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def update_image(self, image_id, **kwargs):
headers = {}
data = kwargs.pop('data', None)
headers.update(self._image_meta_to_headers(kwargs))
if data is not None:
return self._update_with_data(image_id, headers, data)
url = 'v1/images/%s' % image_id
resp, body = self.put(url, None, headers)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def delete_image(self, image_id):
url = 'v1/images/%s' % image_id
resp, body = self.delete(url)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def list_images(self, detail=False, **kwargs):
"""Return a list of all images filtered by input parameters.
Available params: see http://developer.openstack.org/
api-ref-image-v1.html#listImage-v1
Most parameters except the following are passed to the API without
any changes.
:param changes_since: The name is changed to changes-since
"""
url = 'v1/images'
if detail:
url += '/detail'
properties = kwargs.pop('properties', {})
for key, value in six.iteritems(properties):
kwargs['property-%s' % key] = value
if kwargs.get('changes_since'):
kwargs['changes-since'] = kwargs.pop('changes_since')
if len(kwargs) > 0:
url += '?%s' % urllib.urlencode(kwargs)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def get_image_meta(self, image_id):
url = 'v1/images/%s' % image_id
resp, __ = self.head(url)
self.expected_success(200, resp.status)
body = self._image_meta_from_headers(resp)
return rest_client.ResponseBody(resp, body)
def show_image(self, image_id):
url = 'v1/images/%s' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
return rest_client.ResponseBodyData(resp, body)
def is_resource_deleted(self, id):
try:
if self.get_image_meta(id)['status'] == 'deleted':
return True
except lib_exc.NotFound:
return True
return False
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'image_meta'
def list_image_members(self, image_id):
url = 'v1/images/%s/members' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def list_shared_images(self, tenant_id):
"""List shared images with the specified tenant"""
url = 'v1/shared-images/%s' % tenant_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def add_member(self, member_id, image_id, **kwargs):
"""Add a member to an image.
Available params: see http://developer.openstack.org/
api-ref-image-v1.html#addMember-v1
"""
url = 'v1/images/%s/members/%s' % (image_id, member_id)
body = json.dumps({'member': kwargs})
resp, __ = self.put(url, body)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
def delete_member(self, member_id, image_id):
url = 'v1/images/%s/members/%s' % (image_id, member_id)
resp, __ = self.delete(url)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
# NOTE(afazekas): just for the wait function
def _get_image_status(self, image_id):
meta = self.get_image_meta(image_id)
status = meta['status']
return status
    # NOTE(afazekas): Wait reinvented again. It is not in the correct layer
def wait_for_image_status(self, image_id, status):
"""Waits for a Image to reach a given status."""
start_time = time.time()
old_value = value = self._get_image_status(image_id)
while True:
dtime = time.time() - start_time
time.sleep(self.build_interval)
if value != old_value:
                LOG.info('Value transition from "%s" to "%s" '
                         'in %d second(s).', old_value,
                         value, dtime)
if value == status:
return value
if value == 'killed':
raise exceptions.ImageKilledException(image_id=image_id,
status=status)
if dtime > self.build_timeout:
                message = ('Time Limit Exceeded! (%ds) '
                           'while waiting for %s, '
                           'but we got %s.' %
                           (self.build_timeout, status, value))
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
old_value = value
value = self._get_image_status(image_id)
|
{
"content_hash": "47fd580d368a8445bb6575d36ea5fc54",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 77,
"avg_line_length": 38.21785714285714,
"alnum_prop": 0.5509765442482011,
"repo_name": "zsoltdudas/lis-tempest",
"id": "e29ff89466a1cfb488659200169187be948ce10f",
"size": "11326",
"binary": false,
"copies": "4",
"ref": "refs/heads/LIS",
"path": "tempest/services/image/v1/json/images_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3681961"
},
{
"name": "Shell",
"bytes": "106383"
}
],
"symlink_target": ""
}
|
for codec in ['latin_1', 'utf_8', 'utf_16']:
try:
print(codec, 'El 你好'.encode(codec), sep='\t')
except UnicodeEncodeError as e:
print(e)
########################
# decode demo
octets = b'Hello\xe9al'
print(octets.decode('cp1252'))
print(octets.decode('iso8859_7'))
try:
print(octets.decode('utf_8'))
except UnicodeDecodeError as e:
print(e)
###########################################
# handle text files, get default encoding
print('------------get default encoding------------')
import sys, locale
expressions = '''
locale.getpreferredencoding()
type(my_file)
my_file.encoding
sys.stdout.isatty()
sys.stdout.encoding
sys.stdin.isatty()
sys.stderr.isatty()
sys.stderr.encoding
sys.getdefaultencoding()
sys.getfilesystemencoding()
'''
my_file = open('dummy.test', 'w')
for expression in expressions.split():
value = eval(expression)
print(expression.rjust(30), '->', repr(value))
#############################################
# Normalizing Unicode for Saner Comparisons
print('---------------Normalizing Unicode----------------')
s1, s2 = 'café', 'cafe\u0301'
print(s1, s2)
print(len(s1), len(s2), s1 == s2)
from unicodedata import normalize, name
print('NFC', len(normalize('NFC', s1)), len(normalize('NFC', s2)))
print('NFD', len(normalize('NFD', s1)), len(normalize('NFD', s2)))
print('NFC==NFD', normalize('NFC', s1)==normalize('NFD', s1))
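# NFC composes code points ('e' + COMBINING ACUTE ACCENT becomes a single 'é',
# so both strings have len 4); NFD decomposes them, so both have len 5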
half = '\u00BD'  # '1⁄2'
print(half, normalize('NFKC', half))
micro = 'μ'
micro_kc = normalize('NFKC', micro)
print(micro, micro_kc)
print('ord', ord(micro), ord(micro_kc))
print('name', name(micro), ', ', name(micro_kc))
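# assuming micro is MICRO SIGN (U+00B5): NFKC maps it to its compatibility
# equivalent GREEK SMALL LETTER MU (U+03BC), hence the different ord() values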
############################################
# case folding
print('---------------case folding----------------')
micro_cf = micro.casefold()
print(micro_cf, ',', name(micro_cf))
eszett = 'ß'
print(eszett, name(eszett))
eszett_cf = eszett.casefold()
print(eszett, eszett_cf)
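# note: casefold() maps 'ß' to 'ss'; for most other characters it behaves
# like str.lower()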
|
{
"content_hash": "46bff5ac37bb64daede7e82c815483a7",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 66,
"avg_line_length": 28.36764705882353,
"alnum_prop": 0.5800933125972006,
"repo_name": "stoneflyop1/fluent_py",
"id": "d3e4dce464a340301ab57936a8345e4fa7263835",
"size": "1976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ch04/coder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59464"
}
],
"symlink_target": ""
}
|