| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
from django.conf.urls import patterns, include, url
from tastypie.api import Api
from testapp.resources import (
UserResource, HomePageResource, ThrottledHomePageResource
)
v1 = Api(api_name='v1')
v1.register(UserResource())
v1.register(HomePageResource())
v1.register(ThrottledHomePageResource())
urlpatterns = patterns(
'',
url(r'^api/', include(v1.urls))
)
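# With this registration the API is rooted at /api/v1/; each resource is exposed
# under its Meta.resource_name (e.g. /api/v1/user/ for UserResource, assuming the
# default naming -- illustrative, not verified against testapp.resources).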
|
{
"content_hash": "1eab0055dfcad3c99c2470af5bc80a9e",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 61,
"avg_line_length": 22.11764705882353,
"alnum_prop": 0.7473404255319149,
"repo_name": "uranusjr/django-tastypie-crust",
"id": "301547cc37176860a99d99f2438ef0189a198c6d",
"size": "376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testproj/testproj/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "26594"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import signal
import time
import traceback
from contextlib import contextmanager
import psutil
from pants.base.build_environment import get_buildroot
from pants.init.subprocess import Subprocess
from pants.process.lock import OwnerPrintingInterProcessFileLock
from pants.util.dirutil import read_file, rm_rf, safe_file_dump, safe_mkdir
from pants.util.memo import memoized_property
from pants.util.process_handler import subprocess
logger = logging.getLogger(__name__)
@contextmanager
def swallow_psutil_exceptions():
"""A contextmanager that swallows standard psutil access exceptions."""
try:
yield
except (psutil.AccessDenied, psutil.NoSuchProcess):
# This masks common, but usually benign psutil process access exceptions that might be seen
# when accessing attributes/methods on psutil.Process objects.
pass
class ProcessGroup(object):
"""Wraps a logical group of processes and provides convenient access to ProcessManager objects."""
def __init__(self, name, metadata_base_dir=None):
self._name = name
self._metadata_base_dir = metadata_base_dir
def _instance_from_process(self, process):
"""Default converter from psutil.Process to process instance classes for subclassing."""
return ProcessManager(name=process.name(),
pid=process.pid,
process_name=process.name(),
metadata_base_dir=self._metadata_base_dir)
def iter_processes(self, proc_filter=None):
"""Yields processes from psutil.process_iter with an optional filter and swallows psutil errors.
If a psutil exception is raised during execution of the filter, that process will not be
yielded but subsequent processes will. On the other hand, if psutil.process_iter raises
an exception, no more processes will be yielded.
"""
with swallow_psutil_exceptions(): # process_iter may raise
for proc in psutil.process_iter():
with swallow_psutil_exceptions(): # proc_filter may raise
if (proc_filter is None) or proc_filter(proc):
yield proc
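  # Illustrative usage (names are hypothetical): iterate only over processes
  # whose executable name contains 'pantsd':
  #
  #   group = ProcessGroup('pantsd-group')
  #   for proc in group.iter_processes(proc_filter=lambda p: 'pantsd' in p.name()):
  #     print(proc.pid)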
def iter_instances(self, *args, **kwargs):
for item in self.iter_processes(*args, **kwargs):
yield self._instance_from_process(item)
class ProcessMetadataManager(object):
""""Manages contextual, on-disk process metadata."""
class MetadataError(Exception): pass
class Timeout(Exception): pass
FAIL_WAIT_SEC = 10
INFO_INTERVAL_SEC = 5
WAIT_INTERVAL_SEC = .1
def __init__(self, metadata_base_dir=None):
"""
:param str metadata_base_dir: The base directory for process metadata.
"""
super(ProcessMetadataManager, self).__init__()
self._metadata_base_dir = (
metadata_base_dir or
Subprocess.Factory.global_instance().create().get_subprocess_dir()
)
@staticmethod
def _maybe_cast(item, caster):
"""Given a casting function, attempt to cast to that type while masking common cast exceptions.
N.B. This is mostly suitable for casting string types to numeric types - e.g. a port number
read from disk into an int.
:param func caster: A casting callable (e.g. `int`).
:returns: The result of caster(item) or item if TypeError or ValueError are raised during cast.
"""
try:
return caster(item)
except (TypeError, ValueError):
# N.B. the TypeError catch here (already) protects against the case that caster is None.
return item
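  # For example, _maybe_cast('1234', int) returns 1234, while _maybe_cast('80a', int)
  # fails the cast and returns the original string '80a' unchanged.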
@classmethod
def _deadline_until(cls, closure, action_msg, timeout=FAIL_WAIT_SEC,
wait_interval=WAIT_INTERVAL_SEC, info_interval=INFO_INTERVAL_SEC):
"""Execute a function/closure repeatedly until a True condition or timeout is met.
:param func closure: the function/closure to execute (should not block for long periods of time
and must return True on success).
:param str action_msg: a description of the action that is being executed, to be rendered as
info while we wait, and as part of any rendered exception.
:param float timeout: the maximum amount of time to wait for a true result from the closure in
seconds. N.B. this is timing based, so won't be exact if the runtime of
the closure exceeds the timeout.
:param float wait_interval: the amount of time to sleep between closure invocations.
:param float info_interval: the amount of time to wait before and between reports via info
logging that we're still waiting for the closure to succeed.
:raises: :class:`ProcessManager.Timeout` on execution timeout.
"""
now = time.time()
deadline = now + timeout
info_deadline = now + info_interval
while 1:
if closure():
return True
now = time.time()
if now > deadline:
raise cls.Timeout('exceeded timeout of {} seconds while waiting for {}'.format(timeout, action_msg))
if now > info_deadline:
logger.info('waiting for {}...'.format(action_msg))
info_deadline = info_deadline + info_interval
elif wait_interval:
time.sleep(wait_interval)
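  # Illustrative usage (the file name is hypothetical): poll for a pidfile to
  # appear, logging 'waiting for pantsd pidfile to appear...' every info_interval:
  #
  #   cls._deadline_until(lambda: os.path.exists('/tmp/pantsd.pid'),
  #                       'pantsd pidfile to appear',
  #                       timeout=30)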
@classmethod
def _wait_for_file(cls, filename, timeout=FAIL_WAIT_SEC, want_content=True):
"""Wait up to timeout seconds for filename to appear with a non-zero size or raise Timeout()."""
def file_waiter():
return os.path.exists(filename) and (not want_content or os.path.getsize(filename))
action_msg = 'file {} to appear'.format(filename)
return cls._deadline_until(file_waiter, action_msg, timeout=timeout)
@staticmethod
def _get_metadata_dir_by_name(name, metadata_base_dir):
"""Retrieve the metadata dir by name.
This should always live outside of the workdir to survive a clean-all.
"""
return os.path.join(metadata_base_dir, name)
def _maybe_init_metadata_dir_by_name(self, name):
"""Initialize the metadata directory for a named identity if it doesn't exist."""
safe_mkdir(self.__class__._get_metadata_dir_by_name(name, self._metadata_base_dir))
def _metadata_file_path(self, name, metadata_key):
return self.metadata_file_path(name, metadata_key, self._metadata_base_dir)
@classmethod
def metadata_file_path(cls, name, metadata_key, metadata_base_dir):
return os.path.join(cls._get_metadata_dir_by_name(name, metadata_base_dir), metadata_key)
def read_metadata_by_name(self, name, metadata_key, caster=None):
"""Read process metadata using a named identity.
:param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
:param string metadata_key: The metadata key (e.g. 'pid').
:param func caster: A casting callable to apply to the read value (e.g. `int`).
"""
file_path = self._metadata_file_path(name, metadata_key)
try:
return self._maybe_cast(read_file(file_path).strip(), caster)
except (IOError, OSError):
return None
def write_metadata_by_name(self, name, metadata_key, metadata_value):
"""Write process metadata using a named identity.
:param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
:param string metadata_key: The metadata key (e.g. 'pid').
:param string metadata_value: The metadata value (e.g. '1729').
"""
self._maybe_init_metadata_dir_by_name(name)
file_path = self._metadata_file_path(name, metadata_key)
safe_file_dump(file_path, metadata_value)
def await_metadata_by_name(self, name, metadata_key, timeout, caster=None):
"""Block up to a timeout for process metadata to arrive on disk.
:param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
:param string metadata_key: The metadata key (e.g. 'pid').
:param int timeout: The deadline to write metadata.
:param type caster: A type-casting callable to apply to the read value (e.g. int, str).
:returns: The value of the metadata key (read from disk post-write).
:raises: :class:`ProcessMetadataManager.Timeout` on timeout.
"""
file_path = self._metadata_file_path(name, metadata_key)
self._wait_for_file(file_path, timeout=timeout)
return self.read_metadata_by_name(name, metadata_key, caster)
def purge_metadata_by_name(self, name):
"""Purge a processes metadata directory.
:raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
"""
meta_dir = self._get_metadata_dir_by_name(name, self._metadata_base_dir)
logger.debug('purging metadata directory: {}'.format(meta_dir))
try:
rm_rf(meta_dir)
except OSError as e:
raise self.MetadataError('failed to purge metadata directory {}: {!r}'.format(meta_dir, e))
class ProcessManager(ProcessMetadataManager):
"""Subprocess/daemon management mixin/superclass. Not intended to be thread-safe."""
class InvalidCommandOutput(Exception): pass
class NonResponsiveProcess(Exception): pass
class ExecutionError(Exception):
def __init__(self, message, output=None):
super(ProcessManager.ExecutionError, self).__init__(message)
self.message = message
self.output = output
def __repr__(self):
return '{}(message={!r}, output={!r})'.format(type(self).__name__, self.message, self.output)
KILL_WAIT_SEC = 5
KILL_CHAIN = (signal.SIGTERM, signal.SIGKILL)
def __init__(self, name, pid=None, socket=None, process_name=None, socket_type=int,
metadata_base_dir=None):
"""
:param string name: The process identity/name (e.g. 'pantsd' or 'ng_Zinc').
:param int pid: The process pid. Overrides fetching of the self.pid @property.
:param string socket: The socket metadata. Overrides fetching of the self.socket @property.
:param string process_name: The process name for cmdline executable name matching.
:param type socket_type: The type to be used for socket type casting (e.g. int).
:param str metadata_base_dir: The overridden base directory for process metadata.
"""
super(ProcessManager, self).__init__(metadata_base_dir)
self._name = name.lower().strip()
self._pid = pid
self._socket = socket
self._socket_type = socket_type
self._process_name = process_name
self._buildroot = get_buildroot()
self._process = None
@property
def name(self):
"""The logical name/label of the process."""
return self._name
@property
def process_name(self):
"""The logical process name. If defined, this is compared to exe_name for stale pid checking."""
return self._process_name
@memoized_property
def lifecycle_lock(self):
"""An identity-keyed inter-process lock for safeguarding lifecycle and other operations."""
safe_mkdir(self._metadata_base_dir)
return OwnerPrintingInterProcessFileLock(
# N.B. This lock can't key into the actual named metadata dir (e.g. `.pids/pantsd/lock`
# via `ProcessMetadataManager._get_metadata_dir_by_name()`) because of a need to purge
# the named metadata dir on startup to avoid stale metadata reads.
os.path.join(self._metadata_base_dir, '.lock.{}'.format(self._name))
)
@property
def cmdline(self):
"""The process commandline. e.g. ['/usr/bin/python2.7', 'pants.pex'].
:returns: The command line or else `None` if the underlying process has died.
"""
with swallow_psutil_exceptions():
process = self._as_process()
if process:
return process.cmdline()
return None
@property
def cmd(self):
"""The first element of the process commandline e.g. '/usr/bin/python2.7'.
:returns: The first element of the process command line or else `None` if the underlying
process has died.
"""
return (self.cmdline or [None])[0]
@property
def pid(self):
"""The running processes pid (or None)."""
return self._pid or self.read_metadata_by_name(self._name, 'pid', int)
@property
def socket(self):
"""The running processes socket/port information (or None)."""
return self._socket or self.read_metadata_by_name(self._name, 'socket', self._socket_type)
@classmethod
def get_subprocess_output(cls, command, ignore_stderr=True, **kwargs):
"""Get the output of an executed command.
:param command: An iterable representing the command to execute (e.g. ['ls', '-al']).
:param ignore_stderr: Whether or not to ignore stderr output vs interleave it with stdout.
:raises: `ProcessManager.ExecutionError` on `OSError` or `CalledProcessError`.
:returns: The output of the command.
"""
if ignore_stderr is False:
kwargs.setdefault('stderr', subprocess.STDOUT)
try:
return subprocess.check_output(command, **kwargs)
except (OSError, subprocess.CalledProcessError) as e:
subprocess_output = getattr(e, 'output', '').strip()
raise cls.ExecutionError(str(e), subprocess_output)
def await_pid(self, timeout):
"""Wait up to a given timeout for a process to write pid metadata."""
return self.await_metadata_by_name(self._name, 'pid', timeout, int)
def await_socket(self, timeout):
"""Wait up to a given timeout for a process to write socket info."""
return self.await_metadata_by_name(self._name, 'socket', timeout, self._socket_type)
def write_pid(self, pid=None):
"""Write the current processes PID to the pidfile location"""
pid = pid or os.getpid()
self.write_metadata_by_name(self._name, 'pid', str(pid))
def write_socket(self, socket_info):
"""Write the local processes socket information (TCP port or UNIX socket)."""
self.write_metadata_by_name(self._name, 'socket', str(socket_info))
def write_named_socket(self, socket_name, socket_info):
"""A multi-tenant, named alternative to ProcessManager.write_socket()."""
self.write_metadata_by_name(self._name, 'socket_{}'.format(socket_name), str(socket_info))
def read_named_socket(self, socket_name, socket_type):
"""A multi-tenant, named alternative to ProcessManager.socket."""
return self.read_metadata_by_name(self._name, 'socket_{}'.format(socket_name), socket_type)
def _as_process(self):
"""Returns a psutil `Process` object wrapping our pid.
NB: Even with a process object in hand, subsequent method calls against it can always raise
`NoSuchProcess`. Care is needed to document the raises in the public API or else trap them and
do something sensible for the API.
:returns: a psutil Process object or else None if we have no pid.
:rtype: :class:`psutil.Process`
:raises: :class:`psutil.NoSuchProcess` if the process identified by our pid has died.
"""
if self._process is None and self.pid:
self._process = psutil.Process(self.pid)
return self._process
def is_dead(self):
"""Return a boolean indicating whether the process is dead or not."""
return not self.is_alive()
def is_alive(self, extended_check=None):
"""Return a boolean indicating whether the process is running or not.
:param func extended_check: An additional callable that will be invoked to perform an extended
liveness check. This callable should take a single argument of a
`psutil.Process` instance representing the context-local process
and return a boolean True/False to indicate alive vs not alive.
"""
try:
process = self._as_process()
return not (
# Can happen if we don't find our pid.
(not process) or
# Check for walkers.
(process.status() == psutil.STATUS_ZOMBIE) or
# Check for stale pids.
(self.process_name and self.process_name != process.name()) or
# Extended checking.
(extended_check and not extended_check(process))
)
except (psutil.NoSuchProcess, psutil.AccessDenied):
# On some platforms, accessing attributes of a zombie'd Process results in NoSuchProcess.
return False
def purge_metadata(self, force=False):
"""Instance-based version of ProcessMetadataManager.purge_metadata_by_name() that checks
for process liveness before purging metadata.
:param bool force: If True, skip process liveness check before purging metadata.
:raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
"""
if not force and self.is_alive():
raise self.MetadataError('cannot purge metadata for a running process!')
super(ProcessManager, self).purge_metadata_by_name(self._name)
def _kill(self, kill_sig):
"""Send a signal to the current process."""
if self.pid:
os.kill(self.pid, kill_sig)
def terminate(self, signal_chain=KILL_CHAIN, kill_wait=KILL_WAIT_SEC, purge=True):
"""Ensure a process is terminated by sending a chain of kill signals (SIGTERM, SIGKILL)."""
alive = self.is_alive()
if alive:
logger.debug('terminating {}'.format(self._name))
for signal_type in signal_chain:
pid = self.pid
try:
logger.debug('sending signal {} to pid {}'.format(signal_type, pid))
self._kill(signal_type)
except OSError as e:
logger.warning('caught OSError({e!s}) during attempt to kill -{signal} {pid}!'
.format(e=e, signal=signal_type, pid=pid))
# Wait up to kill_wait seconds to terminate or move onto the next signal.
try:
if self._deadline_until(self.is_dead, 'daemon to exit', timeout=kill_wait):
alive = False
logger.debug('successfully terminated pid {}'.format(pid))
break
except self.Timeout:
# Loop to the next kill signal on timeout.
pass
if alive:
raise self.NonResponsiveProcess('failed to kill pid {pid} with signals {chain}'
.format(pid=self.pid, chain=signal_chain))
if purge:
self.purge_metadata(force=True)
def daemonize(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None,
write_pid=True):
"""Perform a double-fork, execute callbacks and write the child pid file.
The double-fork here is necessary to truly daemonize the subprocess such that it can never
take control of a tty. The initial fork and setsid() creates a new, isolated process group
and also makes the first child a session leader (which can still acquire a tty). By forking a
second time, we ensure that the second child can never acquire a controlling terminal because
it's no longer a session leader - but it now has its own separate process group.
Additionally, a normal daemon implementation would typically perform an os.umask(0) to reset
    the process's file mode creation mask post-fork. We do not do this here (and in daemon_spawn
below) due to the fact that the daemons that pants would run are typically personal user
daemons. Having a disparate umask from pre-vs-post fork causes files written in each phase to
differ in their permissions without good reason - in this case, we want to inherit the umask.
"""
self.purge_metadata()
self.pre_fork(**pre_fork_opts or {})
logger.debug('forking %s', self)
pid = os.fork()
if pid == 0:
os.setsid()
second_pid = os.fork()
if second_pid == 0:
try:
os.chdir(self._buildroot)
self.post_fork_child(**post_fork_child_opts or {})
except Exception:
logger.critical(traceback.format_exc())
finally:
os._exit(0)
else:
try:
if write_pid: self.write_pid(second_pid)
self.post_fork_parent(**post_fork_parent_opts or {})
except Exception:
logger.critical(traceback.format_exc())
finally:
os._exit(0)
else:
# This prevents un-reaped, throw-away parent processes from lingering in the process table.
os.waitpid(pid, 0)
def daemon_spawn(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None):
"""Perform a single-fork to run a subprocess and write the child pid file.
Use this if your post_fork_child block invokes a subprocess via subprocess.Popen(). In this
case, a second fork such as used in daemonize() is extraneous given that Popen() also forks.
Using this daemonization method vs daemonize() leaves the responsibility of writing the pid
to the caller to allow for library-agnostic flexibility in subprocess execution.
"""
self.purge_metadata()
self.pre_fork(**pre_fork_opts or {})
pid = os.fork()
if pid == 0:
try:
os.setsid()
os.chdir(self._buildroot)
self.post_fork_child(**post_fork_child_opts or {})
except Exception:
logger.critical(traceback.format_exc())
finally:
os._exit(0)
else:
try:
self.post_fork_parent(**post_fork_parent_opts or {})
except Exception:
logger.critical(traceback.format_exc())
def pre_fork(self):
"""Pre-fork callback for subclasses."""
def post_fork_child(self):
"""Pre-fork child callback for subclasses."""
def post_fork_parent(self):
"""Post-fork parent callback for subclasses."""
class FingerprintedProcessManager(ProcessManager):
"""A `ProcessManager` subclass that provides a general strategy for process fingerprinting."""
FINGERPRINT_KEY = 'fingerprint'
FINGERPRINT_CMD_KEY = None
FINGERPRINT_CMD_SEP = '='
@property
def fingerprint(self):
"""The fingerprint of the current process.
This can either read the current fingerprint from the running process's psutil.Process.cmdline
(if the managed process supports that) or from the `ProcessManager` metadata.
:returns: The fingerprint of the running process as read from the process table, ProcessManager
metadata or `None`.
:rtype: string
"""
return (
self.parse_fingerprint(self.cmdline) or
self.read_metadata_by_name(self.name, self.FINGERPRINT_KEY)
)
def parse_fingerprint(self, cmdline, key=None, sep=None):
"""Given a psutil.Process.cmdline, parse and return a fingerprint.
:param list cmdline: The psutil.Process.cmdline of the current process.
:param string key: The key for fingerprint discovery.
:param string sep: The key/value separator for fingerprint discovery.
:returns: The parsed fingerprint or `None`.
:rtype: string or `None`
"""
key = key or self.FINGERPRINT_CMD_KEY
if key:
sep = sep or self.FINGERPRINT_CMD_SEP
cmdline = cmdline or []
for cmd_part in cmdline:
if cmd_part.startswith('{}{}'.format(key, sep)):
return cmd_part.split(sep)[1]
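  # For example, with key='--pantsd-fingerprint' and sep='=' (hypothetical values),
  # a cmdline of ['python', 'pants', '--pantsd-fingerprint=abc123'] parses to 'abc123'.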
def has_current_fingerprint(self, fingerprint):
"""Determines if a new fingerprint is the current fingerprint of the running process.
:param string fingerprint: The new fingerprint to compare to.
:rtype: bool
"""
return fingerprint == self.fingerprint
def needs_restart(self, fingerprint):
"""Determines if the current ProcessManager needs to be started or restarted.
:param string fingerprint: The new fingerprint to compare to.
:rtype: bool
"""
return self.is_dead() or not self.has_current_fingerprint(fingerprint)
|
{
"content_hash": "02aea623c2cce052b2a1191b25472194",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 108,
"avg_line_length": 40.95454545454545,
"alnum_prop": 0.6795867839153078,
"repo_name": "baroquebobcat/pants",
"id": "4d49c6a1f6c7c87af3f4add2e24360e324feeaae",
"size": "23573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/pantsd/process_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "343"
},
{
"name": "C++",
"bytes": "1138"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2347"
},
{
"name": "HTML",
"bytes": "49289"
},
{
"name": "Java",
"bytes": "490864"
},
{
"name": "JavaScript",
"bytes": "33289"
},
{
"name": "Python",
"bytes": "5907131"
},
{
"name": "Rust",
"bytes": "494664"
},
{
"name": "Scala",
"bytes": "76065"
},
{
"name": "Shell",
"bytes": "75742"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
}
|
import abc
from typing import Awaitable, Callable, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v10.services.types import (
campaign_extension_setting_service,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-ads",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class CampaignExtensionSettingServiceTransport(abc.ABC):
"""Abstract transport class for CampaignExtensionSettingService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
DEFAULT_HOST: str = "googleads.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id,
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(
service_account.Credentials, "with_always_use_jwt_access"
)
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.mutate_campaign_extension_settings: gapic_v1.method.wrap_method(
self.mutate_campaign_extension_settings,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def mutate_campaign_extension_settings(
self,
) -> Callable[
[
campaign_extension_setting_service.MutateCampaignExtensionSettingsRequest
],
Union[
campaign_extension_setting_service.MutateCampaignExtensionSettingsResponse,
Awaitable[
campaign_extension_setting_service.MutateCampaignExtensionSettingsResponse
],
],
]:
raise NotImplementedError()
__all__ = ("CampaignExtensionSettingServiceTransport",)
|
{
"content_hash": "77091404cf34f4e0043a36f39a39dbbb",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 101,
"avg_line_length": 38.17687074829932,
"alnum_prop": 0.6245545260156807,
"repo_name": "googleads/google-ads-python",
"id": "18ad0ff93a92539397465963bac67891f5028511",
"size": "6212",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v10/services/services/campaign_extension_setting_service/transports/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'battleground.settings')
app = Celery('battleground')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
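# Illustrative (not part of this file): any app listed in INSTALLED_APPS can then
# define tasks in a tasks.py module and they will be auto-discovered, e.g.
#
#   @app.task
#   def add(x, y):
#       return x + y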
|
{
"content_hash": "7072eab80a4e41c10ce1a8e7545aa6ec",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 72,
"avg_line_length": 26.833333333333332,
"alnum_prop": 0.782608695652174,
"repo_name": "rmariano/pywars",
"id": "09f2cbb825b55767059222283a87c88aa5835ae9",
"size": "483",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "battleground/celery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5782"
},
{
"name": "JavaScript",
"bytes": "24737"
},
{
"name": "Python",
"bytes": "70645"
}
],
"symlink_target": ""
}
|
"""Perturb a `LinearOperator` with a rank `K` update."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_identity
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"LinearOperatorLowRankUpdate",
]
@tf_export("linalg.LinearOperatorLowRankUpdate")
class LinearOperatorLowRankUpdate(linear_operator.LinearOperator):
"""Perturb a `LinearOperator` with a rank `K` update.
This operator acts like a [batch] matrix `A` with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `M x N` matrix.
`LinearOperatorLowRankUpdate` represents `A = L + U D V^H`, where
```
L, is a LinearOperator representing [batch] M x N matrices
U, is a [batch] M x K matrix. Typically K << M.
D, is a [batch] K x K matrix.
V, is a [batch] N x K matrix. Typically K << N.
V^H is the Hermitian transpose (adjoint) of V.
```
If `M = N`, determinants and solves are done using the matrix determinant
lemma and Woodbury identities, and thus require L and D to be non-singular.
Solves and determinants will be attempted unless the "is_non_singular"
property of L and D is False.
In the event that L and D are positive-definite, and U = V, solves and
determinants can be done using a Cholesky factorization.
```python
# Create a 3 x 3 diagonal linear operator.
diag_operator = LinearOperatorDiag(
diag_update=[1., 2., 3.], is_non_singular=True, is_self_adjoint=True,
is_positive_definite=True)
# Perturb with a rank 2 perturbation
operator = LinearOperatorLowRankUpdate(
operator=diag_operator,
u=[[1., 2.], [-1., 3.], [0., 0.]],
diag_update=[11., 12.],
v=[[1., 2.], [-1., 3.], [10., 10.]])
operator.shape
==> [3, 3]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [3, 4] Tensor
operator.matmul(x)
==> Shape [3, 4] Tensor
```
### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
### Performance
Suppose `operator` is a `LinearOperatorLowRankUpdate` of shape `[M, N]`,
made from a rank `K` update of `base_operator` which performs `.matmul(x)` on
`x` having `x.shape = [N, R]` with `O(L_matmul*N*R)` complexity (and similarly
  for `solve`, `determinant`). Then, if `x.shape = [N, R]`,
* `operator.matmul(x)` is `O(L_matmul*N*R + K*N*R)`
and if `M = N`,
* `operator.solve(x)` is `O(L_matmul*N*R + N*K*R + K^2*R + K^3)`
* `operator.determinant()` is `O(L_determinant + L_solve*N*K + K^2*N + K^3)`
If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular`, `self_adjoint`, `positive_definite`,
`diag_update_positive` and `square`. These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
base_operator,
u,
diag_update=None,
v=None,
is_diag_update_positive=None,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorLowRankUpdate"):
"""Initialize a `LinearOperatorLowRankUpdate`.
This creates a `LinearOperator` of the form `A = L + U D V^H`, with
`L` a `LinearOperator`, `U, V` both [batch] matrices, and `D` a [batch]
diagonal matrix.
If `L` is non-singular, solves and determinants are available.
Solves/determinants both involve a solve/determinant of a `K x K` system.
In the event that L and D are self-adjoint positive-definite, and U = V,
this can be done using a Cholesky factorization. The user should set the
`is_X` matrix property hints, which will trigger the appropriate code path.
Args:
base_operator: Shape `[B1,...,Bb, M, N]`.
u: Shape `[B1,...,Bb, M, K]` `Tensor` of same `dtype` as `base_operator`.
This is `U` above.
diag_update: Optional shape `[B1,...,Bb, K]` `Tensor` with same `dtype`
as `base_operator`. This is the diagonal of `D` above.
Defaults to `D` being the identity operator.
v: Optional `Tensor` of same `dtype` as `u` and shape `[B1,...,Bb, N, K]`
Defaults to `v = u`, in which case the perturbation is symmetric.
If `M != N`, then `v` must be set since the perturbation is not square.
is_diag_update_positive: Python `bool`.
If `True`, expect `diag_update > 0`.
is_non_singular: Expect that this operator is non-singular.
Default is `None`, unless `is_positive_definite` is auto-set to be
`True` (see below).
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. Default is `None`, unless `base_operator` is self-adjoint
and `v = None` (meaning `u=v`), in which case this defaults to `True`.
is_positive_definite: Expect that this operator is positive definite.
        Default is `None`, unless `base_operator` is positive-definite,
`v = None` (meaning `u=v`), and `is_diag_update_positive`, in which case
this defaults to `True`.
Note that we say an operator is positive definite when the quadratic
form `x^H A x` has positive real part for all nonzero `x`.
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
ValueError: If `is_X` flags are set in an inconsistent way.
"""
parameters = dict(
base_operator=base_operator,
u=u,
diag_update=diag_update,
v=v,
is_diag_update_positive=is_diag_update_positive,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
dtype = base_operator.dtype
if diag_update is not None:
if is_diag_update_positive and dtype.is_complex:
logging.warn("Note: setting is_diag_update_positive with a complex "
"dtype means that diagonal is real and positive.")
if diag_update is None:
if is_diag_update_positive is False:
raise ValueError(
"Default diagonal is the identity, which is positive. However, "
"user set 'is_diag_update_positive' to False.")
is_diag_update_positive = True
# In this case, we can use a Cholesky decomposition to help us solve/det.
self._use_cholesky = (
base_operator.is_positive_definite and base_operator.is_self_adjoint
and is_diag_update_positive
and v is None)
# Possibly auto-set some characteristic flags from None to True.
# If the Flags were set (by the user) incorrectly to False, then raise.
if base_operator.is_self_adjoint and v is None and not dtype.is_complex:
if is_self_adjoint is False:
raise ValueError(
"A = L + UDU^H, with L self-adjoint and D real diagonal. Since"
" UDU^H is self-adjoint, this must be a self-adjoint operator.")
is_self_adjoint = True
    # The condition for using a Cholesky factorization is sufficient for SPD,
    # and no weaker choice of these hints leads to SPD. Therefore,
    # the following line reads "if hints indicate SPD...".
if self._use_cholesky:
if (
is_positive_definite is False
or is_self_adjoint is False
or is_non_singular is False):
raise ValueError(
"Arguments imply this is self-adjoint positive-definite operator.")
is_positive_definite = True
is_self_adjoint = True
values = base_operator.graph_parents + [u, diag_update, v]
with ops.name_scope(name, values=values):
# Create U and V.
self._u = linear_operator_util.convert_nonref_to_tensor(u, name="u")
if v is None:
self._v = self._u
else:
self._v = linear_operator_util.convert_nonref_to_tensor(v, name="v")
if diag_update is None:
self._diag_update = None
else:
self._diag_update = linear_operator_util.convert_nonref_to_tensor(
diag_update, name="diag_update")
# Create base_operator L.
self._base_operator = base_operator
graph_parents = base_operator.graph_parents + [
self.u, self._diag_update, self.v]
graph_parents = [p for p in graph_parents if p is not None]
super(LinearOperatorLowRankUpdate, self).__init__(
dtype=self._base_operator.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
self._set_graph_parents(graph_parents)
# Create the diagonal operator D.
self._set_diag_operators(diag_update, is_diag_update_positive)
self._is_diag_update_positive = is_diag_update_positive
self._check_shapes()
def _check_shapes(self):
"""Static check that shapes are compatible."""
# Broadcast shape also checks that u and v are compatible.
uv_shape = array_ops.broadcast_static_shape(
self.u.shape, self.v.shape)
batch_shape = array_ops.broadcast_static_shape(
self.base_operator.batch_shape, uv_shape[:-2])
tensor_shape.Dimension(
self.base_operator.domain_dimension).assert_is_compatible_with(
uv_shape[-2])
if self._diag_update is not None:
tensor_shape.dimension_at_index(uv_shape, -1).assert_is_compatible_with(
self._diag_update.shape[-1])
array_ops.broadcast_static_shape(
batch_shape, self._diag_update.shape[:-1])
def _set_diag_operators(self, diag_update, is_diag_update_positive):
"""Set attributes self._diag_update and self._diag_operator."""
if diag_update is not None:
self._diag_operator = linear_operator_diag.LinearOperatorDiag(
self._diag_update, is_positive_definite=is_diag_update_positive)
else:
if tensor_shape.dimension_value(self.u.shape[-1]) is not None:
r = tensor_shape.dimension_value(self.u.shape[-1])
else:
r = array_ops.shape(self.u)[-1]
self._diag_operator = linear_operator_identity.LinearOperatorIdentity(
num_rows=r, dtype=self.dtype)
@property
def u(self):
"""If this operator is `A = L + U D V^H`, this is the `U`."""
return self._u
@property
def v(self):
"""If this operator is `A = L + U D V^H`, this is the `V`."""
return self._v
@property
def is_diag_update_positive(self):
"""If this operator is `A = L + U D V^H`, this hints `D > 0` elementwise."""
return self._is_diag_update_positive
@property
def diag_update(self):
"""If this operator is `A = L + U D V^H`, this is the diagonal of `D`."""
return self._diag_update
@property
def diag_operator(self):
"""If this operator is `A = L + U D V^H`, this is `D`."""
return self._diag_operator
@property
def base_operator(self):
"""If this operator is `A = L + U D V^H`, this is the `L`."""
return self._base_operator
def _shape(self):
batch_shape = array_ops.broadcast_static_shape(
self.base_operator.batch_shape,
self.diag_operator.batch_shape)
batch_shape = array_ops.broadcast_static_shape(
batch_shape,
self.u.shape[:-2])
batch_shape = array_ops.broadcast_static_shape(
batch_shape,
self.v.shape[:-2])
return batch_shape.concatenate(self.base_operator.shape[-2:])
def _shape_tensor(self):
batch_shape = array_ops.broadcast_dynamic_shape(
self.base_operator.batch_shape_tensor(),
self.diag_operator.batch_shape_tensor())
batch_shape = array_ops.broadcast_dynamic_shape(
batch_shape,
array_ops.shape(self.u)[:-2])
batch_shape = array_ops.broadcast_dynamic_shape(
batch_shape,
array_ops.shape(self.v)[:-2])
return array_ops.concat(
[batch_shape, self.base_operator.shape_tensor()[-2:]], axis=0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
u = self.u
v = self.v
l = self.base_operator
d = self.diag_operator
leading_term = l.matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
if adjoint:
uh_x = math_ops.matmul(u, x, adjoint_a=True, adjoint_b=adjoint_arg)
d_uh_x = d.matmul(uh_x, adjoint=adjoint)
v_d_uh_x = math_ops.matmul(v, d_uh_x)
return leading_term + v_d_uh_x
else:
vh_x = math_ops.matmul(v, x, adjoint_a=True, adjoint_b=adjoint_arg)
d_vh_x = d.matmul(vh_x, adjoint=adjoint)
u_d_vh_x = math_ops.matmul(u, d_vh_x)
return leading_term + u_d_vh_x
def _determinant(self):
if self.is_positive_definite:
return math_ops.exp(self.log_abs_determinant())
# The matrix determinant lemma gives
# https://en.wikipedia.org/wiki/Matrix_determinant_lemma
# det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
# = det(C) det(D) det(L)
# where C is sometimes known as the capacitance matrix,
# C := D^{-1} + V^H L^{-1} U
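    # Quick 1x1 sanity check (illustrative): L = [2], U = V = [1], D = [3] gives
    # L + UDV^H = [5]; C = 1/3 + 1*(1/2)*1 = 5/6, and det(C)*det(D)*det(L)
    # = (5/6)*3*2 = 5, as expected.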
det_c = linalg_ops.matrix_determinant(self._make_capacitance())
det_d = self.diag_operator.determinant()
det_l = self.base_operator.determinant()
return det_c * det_d * det_l
def _log_abs_determinant(self):
# Recall
# det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
# = det(C) det(D) det(L)
log_abs_det_d = self.diag_operator.log_abs_determinant()
log_abs_det_l = self.base_operator.log_abs_determinant()
if self._use_cholesky:
chol_cap_diag = array_ops.matrix_diag_part(
linalg_ops.cholesky(self._make_capacitance()))
log_abs_det_c = 2 * math_ops.reduce_sum(
math_ops.log(chol_cap_diag), axis=[-1])
else:
det_c = linalg_ops.matrix_determinant(self._make_capacitance())
log_abs_det_c = math_ops.log(math_ops.abs(det_c))
if self.dtype.is_complex:
log_abs_det_c = math_ops.cast(log_abs_det_c, dtype=self.dtype)
return log_abs_det_c + log_abs_det_d + log_abs_det_l
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
if self.base_operator.is_non_singular is False:
raise ValueError(
"Solve not implemented unless this is a perturbation of a "
"non-singular LinearOperator.")
# The Woodbury formula gives:
# https://en.wikipedia.org/wiki/Woodbury_matrix_identity
# (L + UDV^H)^{-1}
# = L^{-1} - L^{-1} U (D^{-1} + V^H L^{-1} U)^{-1} V^H L^{-1}
# = L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
# where C is the capacitance matrix, C := D^{-1} + V^H L^{-1} U
# Note also that, with ^{-H} being the inverse of the adjoint,
# (L + UDV^H)^{-H}
# = L^{-H} - L^{-H} V C^{-H} U^H L^{-H}
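    # Quick 1x1 sanity check (illustrative): with L = [2], U = V = [1], D = [3]
    # as above, C = 5/6 and the right-hand side evaluates to
    # 1/2 - (1/2)*1*(6/5)*1*(1/2) = 1/5 = (L + UDV^H)^{-1}.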
l = self.base_operator
if adjoint:
v = self.u
u = self.v
else:
v = self.v
u = self.u
# L^{-1} rhs
linv_rhs = l.solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
# V^H L^{-1} rhs
vh_linv_rhs = math_ops.matmul(v, linv_rhs, adjoint_a=True)
# C^{-1} V^H L^{-1} rhs
if self._use_cholesky:
capinv_vh_linv_rhs = linalg_ops.cholesky_solve(
linalg_ops.cholesky(self._make_capacitance()), vh_linv_rhs)
else:
capinv_vh_linv_rhs = linear_operator_util.matrix_solve_with_broadcast(
self._make_capacitance(), vh_linv_rhs, adjoint=adjoint)
    # U C^{-1} V^H L^{-1} rhs
u_capinv_vh_linv_rhs = math_ops.matmul(u, capinv_vh_linv_rhs)
# L^{-1} U C^{-1} V^H L^{-1} rhs
linv_u_capinv_vh_linv_rhs = l.solve(u_capinv_vh_linv_rhs, adjoint=adjoint)
# L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
return linv_rhs - linv_u_capinv_vh_linv_rhs
def _make_capacitance(self):
# C := D^{-1} + V^H L^{-1} U
# which is sometimes known as the "capacitance" matrix.
# L^{-1} U
linv_u = self.base_operator.solve(self.u)
# V^H L^{-1} U
vh_linv_u = math_ops.matmul(self.v, linv_u, adjoint_a=True)
    # D^{-1} + V^H L^{-1} U
capacitance = self._diag_operator.inverse().add_to_tensor(vh_linv_u)
return capacitance
|
{
"content_hash": "4a923ee3b515dad2ac491bacf04fc196",
"timestamp": "",
"source": "github",
"line_count": 455,
"max_line_length": 80,
"avg_line_length": 38.47912087912088,
"alnum_prop": 0.6282842129312315,
"repo_name": "annarev/tensorflow",
"id": "2e60a10e226c31dc01d851b321a75e29c68bd8f9",
"size": "18197",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/linalg/linear_operator_low_rank_update.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "341894"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49343974"
},
{
"name": "CMake",
"bytes": "195286"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1253646"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "863222"
},
{
"name": "Jupyter Notebook",
"bytes": "2604741"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52734"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41289329"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "469612"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
import sys
import tensorflow as tf
from sacrebleu import corpus_bleu
def bleu_score(pred_file, ref_file):
with tf.io.gfile.GFile(pred_file) as pred_stream, tf.io.gfile.GFile(ref_file) as ref_stream:
pred_stream_txt = pred_stream.readlines()
ref_stream_txt = ref_stream.readlines()
bleu = corpus_bleu(pred_stream_txt, [ref_stream_txt], force=True)
print(" bleu score: {:6.2f}".format(bleu.score))
print(" bleu counts: {}".format(bleu.counts))
print(" bleu totals: {}".format(bleu.totals))
print(" bleu precisions: {}".format(bleu.precisions))
print(" bleu sys_len: {}; ref_len: {}".format(bleu.sys_len, bleu.ref_len))
return bleu
if __name__ == "__main__":
if len(sys.argv) != 3:
print("[ERROR] bleu_score.py needs a result file and a solution file. \n e.g. python bleu_score.py f1.txt f2.txt")
sys.exit(0)
bleu_score(sys.argv[1], sys.argv[2])
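# Expected input format (assumption, based on how sacrebleu's corpus_bleu is called
# above): each file holds one detokenized hypothesis/reference per line, with the
# two files aligned line-by-line.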
|
{
"content_hash": "9aca2fb82f4c4400cca7ef89d3abf393",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 122,
"avg_line_length": 46.61904761904762,
"alnum_prop": 0.6067415730337079,
"repo_name": "NVIDIA/FasterTransformer",
"id": "a4c95adc8bf2bf28eb22200cb31324fb64c73187",
"size": "1590",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/tensorflow/decoding/utils/bleu_score.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2444"
},
{
"name": "C++",
"bytes": "3361167"
},
{
"name": "CMake",
"bytes": "117845"
},
{
"name": "Cuda",
"bytes": "1734491"
},
{
"name": "HCL",
"bytes": "1482"
},
{
"name": "Python",
"bytes": "73804"
},
{
"name": "Shell",
"bytes": "46724"
}
],
"symlink_target": ""
}
|
import lynx
def test_encode_one_section():
section = lynx.Section("mysection", [], {})
result = lynx.dumps(section)
config = lynx.loads(result)
assert(len(config) == 1)
assert(config[0].name() == "mysection")
def test_encode_multiple_sections():
sub_section = lynx.Section("mysection2", [], {})
section = lynx.Section("mysection", [sub_section], {})
result = lynx.dumps([section, section])
config = lynx.loads(result)
assert(len(config) == 2)
assert(config[0].name() == "mysection")
assert(config[0].sub_sections()[0].name() == "mysection2")
def test_encode_fields():
fields = {"myfield": "value", "field2": "hello#$%"}
section = lynx.Section("mysection", [], fields)
result = lynx.dumps(section)
config = lynx.loads(result)
r_fields = config[0].fields()
assert(len(r_fields) == 2)
assert(r_fields["myfield"] == "value")
assert(r_fields["field2"] == "hello#$%")
def test_encode_lists():
fields = {"myfield": ["hello", "world", 75, 88.32]}
section = lynx.Section("mysection", [], fields)
result = lynx.dumps(section)
config = lynx.loads(result)
r_fields = config[0].fields()
assert(len(r_fields) == 1)
assert(r_fields["myfield"] == ["hello", "world", 75, 88.32])
def test_encode_nums():
fields = {"myfield": 52, "field2": 37.68}
section = lynx.Section("mysection", [], fields)
result = lynx.dumps(section)
config = lynx.loads(result)
r_fields = config[0].fields()
assert(len(r_fields) == 2)
assert(r_fields["myfield"] == 52)
assert(r_fields["field2"] == 37.68)
|
{
"content_hash": "8039999415e18e9045b8965812625ad9",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 64,
"avg_line_length": 34.891304347826086,
"alnum_prop": 0.609968847352025,
"repo_name": "omershelef/lynx",
"id": "02a993a98ded3b19fd1cbeec6dd676441e34e38e",
"size": "1605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_encode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10560"
}
],
"symlink_target": ""
}
|
"""Tests for make_template."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import traceback
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def variable_scoped_function(trainable=True):
return variable_scope.get_variable(
"dummy", shape=[1], trainable=trainable,
initializer=init_ops.zeros_initializer())
def internally_variable_scoped_function(scope_name):
with variable_scope.variable_scope(scope_name):
return variable_scope.get_variable(
"dummy", shape=[1], initializer=init_ops.zeros_initializer())
def function_with_create(trainable):
"""Creates a variable as a side effect using tf.Variable."""
variables.Variable(0, trainable=trainable)
return variable_scope.get_variable(
"dummy", shape=[1], initializer=init_ops.zeros_initializer())
def function_with_side_create(trainable, name="side"):
"""Creates a variable as a side effect using tf.get_variable."""
variable_scope.get_variable(name, shape=[1], trainable=trainable)
return variable_scope.get_variable(
"dummy", shape=[1], initializer=init_ops.zeros_initializer())
def variable_scoped_function_with_local_variable():
variable_scope.get_local_variable(
"local", shape=[1], initializer=init_ops.zeros_initializer())
return variable_scope.get_variable(
"dummy", shape=[1], initializer=init_ops.zeros_initializer())
class TemplateTest(test.TestCase):
@test_util.run_deprecated_v1
def test_end_to_end(self):
"""This test shows a very simple line model with test_loss.
The template is used to share parameters between a training and test model.
"""
# y = 2x + 1
training_input, training_output = ([1., 2., 3., 4.], [2.8, 5.1, 7.2, 8.7])
test_input, test_output = ([5., 6., 7., 8.], [11, 13, 15, 17])
random_seed.set_random_seed(1234)
def test_line(x):
m = variable_scope.get_variable(
"w", shape=[], initializer=init_ops.truncated_normal_initializer())
b = variable_scope.get_variable(
"b", shape=[], initializer=init_ops.truncated_normal_initializer())
return x * m + b
line_template = template.make_template("line", test_line)
train_prediction = line_template(training_input)
test_prediction = line_template(test_input)
train_loss = math_ops.reduce_mean(
math_ops.square(train_prediction - training_output))
test_loss = math_ops.reduce_mean(
math_ops.square(test_prediction - test_output))
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
train_op = optimizer.minimize(train_loss)
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
initial_test_loss = self.evaluate(test_loss)
self.evaluate(train_op)
final_test_loss = self.evaluate(test_loss)
# Parameters are tied, so the loss should have gone down when we trained it.
self.assertLess(final_test_loss, initial_test_loss)
def test_end_to_end_eager(self):
"""This test shows a very simple line model with test_loss in eager mode.
The template is used to share parameters between a training and test model.
"""
with context.eager_mode():
# y = 2x + 1
training_input, training_output = ([1., 2., 3., 4.], [2.8, 5.1, 7.2, 8.7])
test_input, test_output = ([5., 6., 7., 8.], [11, 13, 15, 17])
random_seed.set_random_seed(1234)
def test_line(x):
m = variable_scope.get_variable(
"w", shape=[], initializer=init_ops.truncated_normal_initializer())
b = variable_scope.get_variable(
"b", shape=[], initializer=init_ops.truncated_normal_initializer())
return x * m + b
line_template = template.make_template("line", test_line)
def train_loss():
train_prediction = line_template(training_input)
return math_ops.reduce_mean(
math_ops.square(train_prediction - training_output))
def test_loss():
test_prediction = line_template(test_input)
return math_ops.reduce_mean(
math_ops.square(test_prediction - test_output))
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
initial_test_loss = test_loss()
optimizer.minimize(train_loss)
final_test_loss = test_loss()
# Parameters are tied, so the loss should have gone down after training.
self.assertLess(final_test_loss.numpy(), initial_test_loss.numpy())
def test_eager_delayed_store_pickup(self):
"""This test shows a very simple line model with test_loss in eager mode.
The template is used to share parameters between a training and test model.
This test also shows that it can pick up explicitly set variable stores
even if they are only set before the first template usage.
"""
with context.eager_mode():
training_input, training_output = ([1., 2., 3., 4.], [2.8, 5.1, 7.2, 8.7])
test_input, test_output = ([5., 6., 7., 8.], [11, 13, 15, 17])
random_seed.set_random_seed(1234)
def test_line(x):
m = variable_scope.get_variable(
"w", shape=[], initializer=init_ops.truncated_normal_initializer())
b = variable_scope.get_variable(
"b", shape=[], initializer=init_ops.truncated_normal_initializer())
return x * m + b
line_template = template.make_template("line", test_line)
def train_loss():
train_prediction = line_template(training_input)
return math_ops.reduce_mean(
math_ops.square(train_prediction - training_output))
def test_loss():
test_prediction = line_template(test_input)
return math_ops.reduce_mean(
math_ops.square(test_prediction - test_output))
store = variable_scope._VariableStore()
store._store_eager_variables = True
with variable_scope.with_variable_store(store):
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
initial_test_loss = test_loss()
optimizer.minimize(train_loss)
final_test_loss = test_loss()
# Parameters are tied, so the loss should have gone down after training.
self.assertLess(final_test_loss.numpy(), initial_test_loss.numpy())
# Verify that the explicitly set store is not empty
# and the make_template picked it up
self.assertEqual(set(store._vars.keys()), {"line/w", "line/b"})
# But the store should only get picked up once, so a second
# store will go unused:
second_store = variable_scope._VariableStore()
second_store._store_eager_variables = True
with variable_scope.with_variable_store(second_store):
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
test_loss()
optimizer.minimize(train_loss)
test_loss()
self.assertEmpty(second_store._vars)
@test_util.run_in_graph_and_eager_modes
def test_skip_stack_frames(self):
first = traceback.format_stack()
second = traceback.format_stack()
result = template._skip_common_stack_elements(first, second)
self.assertEqual(1, len(result))
self.assertNotEqual(len(first), len(result))
@test_util.run_in_graph_and_eager_modes
def test_template_with_empty_name(self):
tpl = template.make_template("", variable_scoped_function)
with variable_scope.variable_scope("outer"):
x = variable_scope.get_variable("x", [])
v = tpl()
self.assertEqual("outer/", tpl.variable_scope_name)
self.assertEqual("outer//dummy:0", v.name)
if context.executing_eagerly():
# In eager mode `x` is not visible to the template since the template does
# not rely on global collections.
self.assertEqual(1, len(tpl.variables))
self.assertIs(v, tpl.variables[0])
else:
self.assertEqual([x, v], tpl.variables)
@test_util.run_in_graph_and_eager_modes
def test_template_with_name(self):
tmpl1 = template.make_template("s1", variable_scoped_function)
tmpl2 = template.make_template("s1", variable_scoped_function)
v1 = tmpl1()
v2 = tmpl1()
v3 = tmpl2()
self.assertIs(v1, v2)
self.assertIsNot(v1, v3)
self.assertEqual("s1/dummy:0", v1.name)
self.assertEqual("s1_1/dummy:0", v3.name)
@test_util.run_deprecated_v1
def test_same_unique_name_raise_error(self):
tmpl1 = template.make_template(
"_", variable_scoped_function, unique_name_="s1")
tmpl1()
tmpl2 = template.make_template(
"_", variable_scoped_function, unique_name_="s1")
with self.assertRaisesRegex(
ValueError, "Variable s1/dummy already exists, disallowed.*"):
tmpl2()
def test_unique_name_raise_error_in_eager(self):
with context.eager_mode():
with self.assertRaisesRegex(
ValueError,
"unique_name_ cannot be used when eager execution is enabled."):
template.make_template(
"_", variable_scoped_function, unique_name_="s1")
@test_util.run_deprecated_v1
def test_unique_name_and_reuse(self):
tmpl1 = template.make_template(
"_", variable_scoped_function, unique_name_="s1")
v1 = tmpl1()
v2 = tmpl1()
variable_scope.get_variable_scope().reuse_variables()
tmpl2 = template.make_template(
"_", variable_scoped_function, unique_name_="s1")
v3 = tmpl2()
self.assertIs(v1, v2)
self.assertIs(v1, v3)
self.assertEqual("s1/dummy:0", v1.name)
@test_util.run_in_graph_and_eager_modes
def test_template_in_scope(self):
tmpl1 = template.make_template("s1", variable_scoped_function)
tmpl2 = template.make_template("s1", variable_scoped_function)
with variable_scope.variable_scope("scope"):
v1 = tmpl1()
v3 = tmpl2()
# The template contract requires the following to ignore scope2.
with variable_scope.variable_scope("scope2"):
v2 = tmpl1()
self.assertIs(v1, v2)
self.assertIsNot(v1, v3)
self.assertEqual("scope/s1/dummy:0", v1.name)
self.assertEqual("scope/s1_1/dummy:0", v3.name)
@test_util.run_in_graph_and_eager_modes
def test_template_with_internal_reuse(self):
tmpl1 = template.make_template("s1", internally_variable_scoped_function)
tmpl2 = template.make_template("s1", internally_variable_scoped_function)
v1 = tmpl1("test")
v2 = tmpl1("test")
v3 = tmpl2("test")
self.assertIs(v1, v2)
self.assertIsNot(v1, v3)
self.assertEqual("s1/test/dummy:0", v1.name)
self.assertEqual("s1_1/test/dummy:0", v3.name)
with self.assertRaises(ValueError):
tmpl1("not_test")
@test_util.run_in_graph_and_eager_modes
def test_template_without_name(self):
with self.assertRaisesRegex(ValueError, "name cannot be None."):
template.make_template(None, variable_scoped_function)
@test_util.run_in_graph_and_eager_modes
def test_make_template(self):
    # Test that we can call it with both positional and keyword arguments.
tmpl1 = template.make_template(
"s1", internally_variable_scoped_function, scope_name="test")
tmpl2 = template.make_template(
"s1", internally_variable_scoped_function, scope_name="test")
v1 = tmpl1()
v2 = tmpl1()
v3 = tmpl2()
self.assertIs(v1, v2)
self.assertIsNot(v1, v3)
self.assertEqual("s1/test/dummy:0", v1.name)
self.assertEqual("s1_1/test/dummy:0", v3.name)
@test_util.run_deprecated_v1
def test_enforces_no_extra_trainable_variables(self):
tmpl = template.make_template("s", function_with_create, trainable=True)
tmpl()
with self.assertRaises(ValueError):
tmpl()
@test_util.run_in_graph_and_eager_modes
def test_enforces_no_extra_trainable_variables_eager(self):
tmpl = template.make_template("s",
function_with_side_create,
trainable=True)
tmpl(name="1")
with self.assertRaises(ValueError):
tmpl(name="2")
def test_permits_extra_non_trainable_variables(self):
tmpl = template.make_template("s", function_with_create, trainable=False)
self.assertIs(tmpl(), tmpl())
def test_permits_extra_non_trainable_variables_eager(self):
with context.eager_mode():
tmpl = template.make_template("s",
function_with_side_create,
trainable=False)
self.assertIs(tmpl(name="1"), tmpl(name="2"))
@test_util.run_in_graph_and_eager_modes
def test_internal_variable_reuse(self):
def nested():
with variable_scope.variable_scope("nested") as vs:
v1 = variable_scope.get_variable(
"x", initializer=init_ops.zeros_initializer(), shape=[])
with variable_scope.variable_scope(vs, reuse=True):
v2 = variable_scope.get_variable("x")
self.assertIs(v1, v2)
return v1
tmpl1 = template.make_template("s1", nested)
tmpl2 = template.make_template("s1", nested)
v1 = tmpl1()
v2 = tmpl1()
v3 = tmpl2()
self.assertIs(v1, v2)
self.assertIsNot(v1, v3)
self.assertEqual("s1/nested/x:0", v1.name)
self.assertEqual("s1_1/nested/x:0", v3.name)
@test_util.run_in_graph_and_eager_modes
def test_nested_templates(self):
def nested_template():
nested1 = template.make_template("nested", variable_scoped_function)
nested2 = template.make_template("nested", variable_scoped_function)
v1 = nested1()
v2 = nested2()
# nested1 and nested2 should not share variables
self.assertIsNot(v1, v2)
# Variables created by nested1 should be isolated from variables
# created by nested2.
self.assertEqual(1, len(nested1.variables))
self.assertEqual(1, len(nested2.variables))
self.assertIs(nested1.variables[0], v1)
self.assertIs(nested2.variables[0], v2)
self.assertEqual(1, len(nested1.trainable_variables))
self.assertEqual(1, len(nested2.trainable_variables))
self.assertIs(nested1.trainable_variables[0], v1)
self.assertIs(nested2.trainable_variables[0], v2)
self.assertEqual(len(nested1.non_trainable_variables), 0)
self.assertEqual(len(nested2.non_trainable_variables), 0)
return v1, v2
tmpl1 = template.make_template("s1", nested_template)
tmpl2 = template.make_template("s1", nested_template)
v1, v2 = tmpl1()
v3, v4 = tmpl1()
v5, v6 = tmpl2()
# The second invocation of tmpl1 should reuse the variables
# created in the first invocation.
self.assertIs(v1, v3)
self.assertIs(v2, v4)
for v, w in zip(tmpl1.variables, [v1, v2]):
self.assertIs(v, w)
for v, w in zip(tmpl1.trainable_variables, [v1, v2]):
self.assertIs(v, w)
self.assertEqual(len(tmpl1.non_trainable_variables), 0)
# tmpl1 and tmpl2 should not share variables.
self.assertIsNot(v1, v5)
self.assertIsNot(v2, v6)
for v, w in zip(tmpl2.variables, [v5, v6]):
self.assertIs(v, w)
for v, w in zip(tmpl2.trainable_variables, [v5, v6]):
self.assertIs(v, w)
self.assertEqual(len(tmpl2.non_trainable_variables), 0)
self.assertEqual("s1/nested/dummy:0", v1.name)
self.assertEqual("s1/nested_1/dummy:0", v2.name)
self.assertEqual("s1_1/nested/dummy:0", v5.name)
self.assertEqual("s1_1/nested_1/dummy:0", v6.name)
self.assertEqual(2, len(tmpl1._checkpoint_dependencies))
self.assertEqual("nested", tmpl1._checkpoint_dependencies[0].name)
self.assertEqual("nested_1", tmpl1._checkpoint_dependencies[1].name)
@test_util.run_in_graph_and_eager_modes
def test_nested_templates_with_defun(self):
def variable_scoped_function_no_return_value(trainable=True):
# defun cannot compile functions that return non-Tensor objects
_ = variable_scope.get_variable(
"dummy",
shape=[1],
trainable=trainable,
initializer=init_ops.zeros_initializer())
def nested_template():
nested1 = template.make_template_internal(
"nested",
variable_scoped_function_no_return_value,
create_graph_function_=True)
nested2 = template.make_template_internal(
"nested",
variable_scoped_function_no_return_value,
create_graph_function_=True)
nested1()
nested2()
v1 = nested1.variables
v2 = nested2.variables
self.assertEqual(len(v1), 1)
self.assertEqual(len(v2), 1)
# nested1 and nested2 should not share variables
self.assertIsNot(v1[0], v2[0])
self.assertIs(nested1.trainable_variables[0], v1[0])
self.assertIs(nested2.trainable_variables[0], v2[0])
self.assertEqual(len(nested1.non_trainable_variables), 0)
self.assertEqual(len(nested2.non_trainable_variables), 0)
tmpl1 = template.make_template("s1", nested_template)
tmpl2 = template.make_template("s1", nested_template)
tmpl1()
v1 = tmpl1.variables
tmpl1()
v2 = tmpl1.variables
tmpl2()
v3 = tmpl2.variables
# The second invocation of tmpl1 should reuse the variables
# created in the first invocation.
for v, w in zip(v1, v2):
self.assertIs(v, w)
# tmpl1 and tmpl2 should not share variables.
for v, w in zip(v1, v3):
self.assertIsNot(v, w)
self.assertEqual("s1/nested/dummy:0", v1[0].name)
self.assertEqual("s1/nested_1/dummy:0", v1[1].name)
self.assertEqual("s1_1/nested/dummy:0", v3[0].name)
self.assertEqual("s1_1/nested_1/dummy:0", v3[1].name)
def test_graph_function_no_name(self):
with context.eager_mode():
def f(_, y):
return y + 1
partial = functools.partial(f, 1.0)
tmpl = template.make_template_internal(
"a", partial, create_graph_function_=True)
self.assertAllEqual(tmpl(ops.convert_to_tensor(1.0)), 2.0)
@test_util.run_in_graph_and_eager_modes
def test_immediate_scope_creation(self):
# Create templates in scope a then call in scope b. make_template should
# capture the scope the first time it is called, and make_immediate_template
# should capture the scope at construction time.
with variable_scope.variable_scope("ctor_scope"):
# Create scope here:
tmpl_immed = template.make_template("a", variable_scoped_function,
True)
# default: create scope at __call__
tmpl_defer = template.make_template(
"b", variable_scoped_function, False)
with variable_scope.variable_scope("call_scope"):
inner_imm_var = tmpl_immed()
inner_defer_var = tmpl_defer()
outer_imm_var = tmpl_immed()
outer_defer_var = tmpl_defer()
self.assertIsNot(inner_imm_var, inner_defer_var)
self.assertIs(outer_imm_var, inner_imm_var)
self.assertIs(outer_defer_var, inner_defer_var)
self.assertEqual("ctor_scope/a/dummy:0", inner_imm_var.name)
self.assertEqual("call_scope/b/dummy:0", inner_defer_var.name)
@test_util.run_in_graph_and_eager_modes
def test_scope_access(self):
# Ensure that we can access the scope inside the template, because the name
# of that scope may be different from the name we pass to make_template, due
# to having been made unique by variable_scope.
with variable_scope.variable_scope("foo"):
# Create two templates with the same name, ensure scopes are made unique.
ta = template.make_template("bar", variable_scoped_function, True)
tb = template.make_template("bar", variable_scoped_function, True)
# Ensure we can get the scopes before either template is actually called.
self.assertEqual(ta.variable_scope.name, "foo/bar")
self.assertEqual(tb.variable_scope.name, "foo/bar_1")
with variable_scope.variable_scope("foo_2"):
# Create a template which defers scope creation.
tc = template.make_template("blah", variable_scoped_function, False)
# Before we call the template, the scope property will be set to None.
self.assertEqual(tc.variable_scope, None)
tc()
# Template is called at the top level, so there is no preceding "foo_2".
self.assertEqual(tc.variable_scope.name, "blah")
@test_util.run_in_graph_and_eager_modes
def test_custom_getter(self):
# Custom getter that maintains call count and forwards to true getter
custom_getter_count = [0]
def custom_getter(getter, name, *args, **kwargs):
custom_getter_count[0] += 1
return getter(name, *args, **kwargs)
# Test that custom getter is called both when variables are created and
# subsequently accessed
tmpl1 = template.make_template(
"s1", variable_scoped_function, custom_getter_=custom_getter)
self.assertEqual(custom_getter_count[0], 0)
tmpl1()
self.assertEqual(custom_getter_count[0], 1)
tmpl1()
self.assertEqual(custom_getter_count[0], 2)
# Test that custom getter is called when the variable scope is created
# during construction
custom_getter_count[0] = 0
tmpl2 = template.make_template(
"s2",
variable_scoped_function,
custom_getter_=custom_getter,
create_scope_now_=True)
self.assertEqual(custom_getter_count[0], 0)
tmpl2()
self.assertEqual(custom_getter_count[0], 1)
tmpl2()
self.assertEqual(custom_getter_count[0], 2)
@test_util.run_in_graph_and_eager_modes
def test_fails_gracefully(self):
for create_scope_now in [True, False]:
def module_function_with_one_arg(inputs):
w = variable_scope.get_variable(
"w", shape=[1], initializer=init_ops.zeros_initializer())
return inputs * w
templatized_function = template.make_template(
"f1", module_function_with_one_arg,
create_scope_now_=create_scope_now)
data = array_ops.zeros([1])
try:
# Try to connect with a kwarg which is unsupported.
templatized_function(data, is_training=True)
except TypeError:
pass
# The failed __call__ hasn't modified the inner state.
self.assertFalse(templatized_function._variables_created)
templatized_function(data)
self.assertTrue(templatized_function._variables_created)
@test_util.run_in_graph_and_eager_modes
def test_name_scopes_for_variable_scopes(self):
# Test that name scopes are not unnecessarily uniquified (but are
# still uniquified when necessary).
def linear_module(x, output_size):
w = variable_scope.get_variable(
"w", shape=[x.get_shape()[1], output_size],
initializer=init_ops.zeros_initializer())
b = variable_scope.get_variable(
"b", shape=[output_size],
initializer=init_ops.zeros_initializer())
return (math_ops.matmul(x, w) + b), w
def make_linear_module(output_size, name):
return template.make_template(
name,
linear_module,
output_size=output_size,
create_scope_now_=True)
inputs = array_ops.ones((3, 4))
linear1 = make_linear_module(output_size=2, name="foo")
outputs_a, w1 = linear1(inputs)
outputs_b, _ = linear1(inputs)
self.assertEqual("foo", linear1.variable_scope.name)
self.assertEqual("foo/w:0", w1.name)
if not context.executing_eagerly():
self.assertEqual(
"foo/add:0", outputs_a.name,
"First application of template should get "
"same name scope as variables.")
self.assertEqual(
"foo_1/add:0", outputs_b.name,
"Second application of template should get "
"a freshly uniquified name scope.")
linear2 = make_linear_module(output_size=2, name="foo")
outputs_c, w2 = linear2(inputs)
outputs_d, _ = linear2(inputs)
self.assertEqual(
"foo_1", linear2.variable_scope.name,
"New template gets a freshly uniquified variable scope "
"because 'foo' is already taken.")
self.assertEqual("foo_1/w:0", w2.name)
if not context.executing_eagerly():
self.assertEqual(
"foo_1_1/add:0", outputs_c.name,
"First application of template would get "
"same name scope as variables, but 'foo_1' is already "
"a name scope.")
self.assertEqual(
"foo_1_2/add:0", outputs_d.name,
"Second application of template should also get "
"a freshly uniquified name scope.")
@test_util.run_in_graph_and_eager_modes
def test_global_variables(self):
# Make sure global_variables are created.
with variable_scope.variable_scope("foo"):
# Create two templates with the same name, ensure scopes are made unique.
ta = template.make_template("bar", variable_scoped_function, True)
if context.executing_eagerly():
tb = template.make_template("s", function_with_side_create,
trainable=False)
else:
tb = template.make_template("s", function_with_create, trainable=False)
      # Initially, no variables have been created.
self.assertEqual([], list(ta.global_variables))
self.assertEqual([], list(tb.global_variables))
# After calling there are variables created.
ta()
tb()
      # After the calls, the expected numbers of global variables exist.
self.assertEqual(1, len(ta.global_variables))
self.assertEqual(2, len(tb.global_variables))
@test_util.run_in_graph_and_eager_modes
def test_trainable_variables(self):
# Make sure trainable_variables are created.
with variable_scope.variable_scope("foo2"):
# Create two templates with the same name, ensure scopes are made unique.
ta = template.make_template("bar", variable_scoped_function, True)
tb = template.make_template("bar", variable_scoped_function, True)
      # Initially, no variables have been created.
self.assertEqual([], list(ta.trainable_variables))
self.assertEqual([], list(tb.trainable_variables))
# After calling there are variables created.
ta()
tb()
      # After the calls, each template has created one trainable variable.
self.assertEqual(1, len(ta.trainable_variables))
self.assertEqual(1, len(tb.trainable_variables))
      # No non-trainable variables were created.
self.assertEqual([], list(ta.non_trainable_variables))
self.assertEqual([], list(tb.non_trainable_variables))
# Ensure variables returns all the variables.
self.assertEqual(1, len(ta.variables))
self.assertEqual(1, len(tb.variables))
@test_util.run_in_graph_and_eager_modes
def test_non_trainable_variables(self):
# Make sure non_trainable_variables are created.
with variable_scope.variable_scope("foo2"):
ta = template.make_template("a", variable_scoped_function,
trainable=True)
tb = template.make_template("b", variable_scoped_function,
trainable=False)
      # Initially, no variables have been created.
self.assertEqual([], list(ta.variables))
self.assertEqual([], list(tb.variables))
# After calling there are variables created.
ta()
tb()
# Check the trainable and non_trainable variables.
self.assertEqual(1, len(ta.trainable_variables))
self.assertEqual([], list(ta.non_trainable_variables))
self.assertEqual([], list(tb.trainable_variables))
self.assertEqual(1, len(tb.non_trainable_variables))
# Ensure variables returns all the variables.
self.assertEqual(1, len(ta.variables))
self.assertEqual(1, len(tb.variables))
# TODO(apassos) handle local variables in Eager
@test_util.run_deprecated_v1
def test_local_variables(self):
# Make sure trainable_variables are created.
with variable_scope.variable_scope("foo3"):
# Create two templates with the same name, ensure scopes are made unique.
ta = template.make_template("bar", variable_scoped_function, True)
tb = template.make_template("bar",
variable_scoped_function_with_local_variable)
      # Initially, no variables have been created.
self.assertEqual([], list(ta.local_variables))
self.assertEqual([], list(tb.local_variables))
# After calling there are variables created.
ta()
tb()
      # After the calls, only tb has created a local variable.
self.assertEqual(0, len(ta.local_variables))
self.assertEqual(1, len(tb.local_variables))
@test_util.run_in_graph_and_eager_modes
def test_make_template_with_defun(self):
def variable_scoped_function_no_return_value(scope_name):
# defun cannot compile functions that return non-Tensor objects
with variable_scope.variable_scope(scope_name):
_ = variable_scope.get_variable(
"dummy", shape=[1], initializer=init_ops.zeros_initializer())
tmpl = template.make_template_internal(
"s1",
variable_scoped_function_no_return_value,
create_graph_function_=True,
scope_name="test")
    # The first invocation of tmpl creates variables; the second should be
    # executed as a graph function.
tmpl()
v1 = tmpl.variables
tmpl()
v2 = tmpl.variables
self.assertEqual(len(v1), len(v2))
for v, w in zip(v1, v2):
self.assertIs(v, w)
self.assertEqual("s1/test/dummy:0", v1[0].name)
if __name__ == "__main__":
test.main()
|
{
"content_hash": "015f3a472e18d155e7151afbbd3cdcd0",
"timestamp": "",
"source": "github",
"line_count": 790,
"max_line_length": 80,
"avg_line_length": 37.72278481012658,
"alnum_prop": 0.6655481359685916,
"repo_name": "frreiss/tensorflow-fred",
"id": "45a1d423fb7a6a403210ca336202e8470277647f",
"size": "30490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/template_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
try:
import unittest2 as unittest
except ImportError:
import unittest
import os
from utils import setup_syspath
setup_syspath()
from elftools.elf.elffile import ELFFile
from elftools.common.exceptions import ELFError
from elftools.elf.dynamic import DynamicTag
class TestDynamicTag(unittest.TestCase):
"""Tests for the DynamicTag class."""
def test_requires_stringtable(self):
with self.assertRaises(ELFError):
dt = DynamicTag('', None)
class TestDynamic(unittest.TestCase):
"""Tests for the Dynamic class."""
def test_missing_sections(self):
"""Verify we can get dynamic strings w/out section headers"""
libs = []
with open(os.path.join('test', 'testfiles_for_unittests',
'aarch64_super_stripped.elf'), 'rb') as f:
elf = ELFFile(f)
for segment in elf.iter_segments():
if segment.header.p_type != 'PT_DYNAMIC':
continue
for t in segment.iter_tags():
if t.entry.d_tag == 'DT_NEEDED':
libs.append(t.needed.decode('utf-8'))
exp = ['libc.so.6']
self.assertEqual(libs, exp)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "3df8f34dec010e8d7046746e6269c53f",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 73,
"avg_line_length": 28.066666666666666,
"alnum_prop": 0.5977830562153602,
"repo_name": "endlessm/chromium-browser",
"id": "f25febafff33b5c7ab59da1a15238f0f513121c8",
"size": "1515",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/pyelftools/test/test_dynamic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from threading import Thread
import requests
from django.contrib.auth import get_user_model
User = get_user_model()
class OPENDOTA:
PLAYERS = "https://api.opendota.com/api/players/{account_id}"
RANK_TIERS = [
'Uncalibrated',
'Herald',
'Guardian',
'Crusader',
'Archon',
'Legend',
'Ancient',
'Divine',
]
STEAM32_MODIFIER = 76561197960265728
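# Fixed offset between a 64-bit SteamID and the 32-bit account id that the
# OpenDota API expects: steamid32 = steamid64 - STEAM32_MODIFIER.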
def rank_tier_to_string(rank_tier, leaderboard_tier):
rank_tier_int = int(rank_tier)
    tier = rank_tier_int // 10
tier_name = RANK_TIERS[tier]
if rank_tier_int > 9:
tier_name = "{} [{}]".format(tier_name, rank_tier_int % 10)
return tier_name
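# Illustrative mapping with the table above: rank_tier 54 -> "Legend [4]",
# rank_tier 0 -> "Uncalibrated" (tens digit = medal, units digit = stars).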
def _update_player_rank(user_id):
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
return
social_auth = user.social_auth.filter(provider='steam').first()
if not social_auth:
return
steamid = social_auth.extra_data['player'].get('steamid')
if not steamid:
return
steamid32 = int(steamid) - STEAM32_MODIFIER
url = OPENDOTA.PLAYERS.format(account_id=steamid32)
response = requests.get(url)
json = response.json()
rank_tier = int(json['rank_tier'])
leaderboard_rank = json['leaderboard_rank']
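    # NOTE: the OpenDota API can return a null rank_tier for uncalibrated
    # players; a hardened version would likely guard the int() conversion above.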
rank_data = {
'rank_int': rank_tier,
'rank_leaderboard': leaderboard_rank,
'rank': rank_tier_to_string(rank_tier, leaderboard_rank)
}
social_auth.extra_data['rank_data'] = rank_data
social_auth.save()
def update_player_rank(user_id, run_async=True):
    # 'async' became a reserved keyword in Python 3.7, so the flag is named
    # run_async to keep this module importable on current interpreters.
    if run_async:
        Thread(target=_update_player_rank, args=(user_id, )).start()
else:
_update_player_rank(user_id)
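# Illustrative (hypothetical) call sites: a post-login hook might fire this in
# the background with update_player_rank(user.pk), while tests could call
# update_player_rank(user.pk, run_async=False) to run it synchronously.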
|
{
"content_hash": "096620fbb80f1533395b4f55b268a38b",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 68,
"avg_line_length": 22.186666666666667,
"alnum_prop": 0.6316105769230769,
"repo_name": "prattl/wepickheroes",
"id": "08b00c937e81cac745dc399fb732900502219c68",
"size": "1664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/nucleus/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2768"
},
{
"name": "JavaScript",
"bytes": "16586"
},
{
"name": "Python",
"bytes": "11233"
},
{
"name": "Shell",
"bytes": "4079"
}
],
"symlink_target": ""
}
|
# coding: utf-8
from __future__ import absolute_import
import pytest
import os.path
import responses
from mock import patch
from django.conf import settings
from sentry.models import Event, File, Release, ReleaseFile
from sentry.testutils import TestCase
BASE64_SOURCEMAP = 'data:application/json;base64,' + (
'{"version":3,"file":"generated.js","sources":["/test.js"],"names":[],"mappings":"AAAA","sourcesContent":["console.log(\\"hello, World!\\")"]}'.
encode('base64').replace('\n', '')
)
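# The data: URI above embeds a complete v3 source map whose only source,
# /test.js, carries inline sourcesContent; str.encode('base64') keeps this
# fixture Python 2 specific.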
def get_fixture_path(name):
return os.path.join(os.path.dirname(__file__), 'fixtures', name)
def load_fixture(name):
with open(get_fixture_path(name), 'rb') as fp:
return fp.read()
class JavascriptIntegrationTest(TestCase):
@pytest.mark.skipif(
settings.SENTRY_TAGSTORE == 'sentry.tagstore.v2.V2TagStorage',
        reason='Queries are completely different when using tagstore'
)
def test_adds_contexts_without_device(self):
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Http': {
'url':
'http://example.com',
'headers': [
[
'User-Agent',
'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36'
],
],
}
}
        # We do a preflight post because many unrelated queries (like auth_user)
        # would otherwise pollute the write-query counts before the actual
        # "processing" happens.
self._postWithHeader(data)
with self.assertWriteQueries({
'nodestore_node': 2,
'sentry_environmentproject': 1,
'sentry_eventtag': 1,
'sentry_eventuser': 1,
'sentry_filtervalue': 6,
'sentry_groupedmessage': 1,
'sentry_message': 1,
'sentry_messagefiltervalue': 6,
'sentry_userreport': 1
}, debug=True): # debug=True is for coverage
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.first()
contexts = event.interfaces['contexts'].to_json()
assert contexts.get('os') == {
'name': 'Windows 8',
'type': 'os',
}
assert contexts.get('browser') == {
'name': 'Chrome',
'type': 'browser',
'version': '28.0.1500',
}
assert contexts.get('device') is None
def test_adds_contexts_with_device(self):
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Http': {
'url':
'http://example.com',
'headers': [
[
'User-Agent',
'Mozilla/5.0 (Linux; U; Android 4.3; en-us; SCH-R530U Build/JSS15J) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30 USCC-R530U'
],
],
}
}
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.get()
contexts = event.interfaces['contexts'].to_json()
assert contexts.get('os') == {
'name': 'Android',
'type': 'os',
'version': '4.3',
}
assert contexts.get('browser') == {
'name': 'Android',
'type': 'browser',
'version': '4.3',
}
assert contexts.get('device') == {
'family': 'Samsung SCH-R530U',
'type': 'device',
'model': 'SCH-R530U',
'brand': 'Samsung',
}
def test_adds_contexts_with_ps4_device(self):
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Http': {
'url':
'http://example.com',
'headers': [
[
'User-Agent',
'Mozilla/5.0 (PlayStation 4 3.55) AppleWebKit/537.78 (KHTML, like Gecko)'
],
],
}
}
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.get()
contexts = event.interfaces['contexts'].to_json()
assert contexts.get('os') is None
assert contexts.get('browser') is None
assert contexts.get('device') == {
'family': 'PlayStation 4',
'type': 'device',
'model': 'PlayStation 4',
'brand': 'Sony',
}
@patch('sentry.lang.javascript.processor.fetch_file')
def test_source_expansion(self, mock_fetch_file):
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'http://example.com/foo.js',
'filename': 'foo.js',
'lineno': 4,
'colno': 0,
},
{
'abs_path': 'http://example.com/foo.js',
'filename': 'foo.js',
'lineno': 1,
'colno': 0,
},
],
},
}
],
}
}
mock_fetch_file.return_value.body = '\n'.join('hello world')
mock_fetch_file.return_value.encoding = None
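        # '\n'.join('hello world') puts each character of the string on its own
        # line, which is why the context assertions below expect windows of
        # single-character lines around each frame's lineno.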
resp = self._postWithHeader(data)
        assert resp.status_code == 200
mock_fetch_file.assert_called_once_with(
'http://example.com/foo.js',
project=self.project,
release=None,
dist=None,
allow_scraping=True,
)
event = Event.objects.get()
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert frame.pre_context == ['h', 'e', 'l']
assert frame.context_line == 'l'
assert frame.post_context == ['o', ' ', 'w', 'o', 'r']
frame = frame_list[1]
assert not frame.pre_context
assert frame.context_line == 'h'
assert frame.post_context == ['e', 'l', 'l', 'o', ' ']
# no source map means no raw_stacktrace
assert exception.values[0].raw_stacktrace is None
@patch('sentry.lang.javascript.processor.fetch_file')
@patch('sentry.lang.javascript.processor.discover_sourcemap')
def test_inlined_sources(self, mock_discover_sourcemap, mock_fetch_file):
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'http://example.com/test.min.js',
'filename': 'test.js',
'lineno': 1,
'colno': 1,
},
],
},
}
],
}
}
mock_discover_sourcemap.return_value = BASE64_SOURCEMAP
mock_fetch_file.return_value.url = 'http://example.com/test.min.js'
mock_fetch_file.return_value.body = '\n'.join('<generated source>')
mock_fetch_file.return_value.encoding = None
resp = self._postWithHeader(data)
        assert resp.status_code == 200
mock_fetch_file.assert_called_once_with(
'http://example.com/test.min.js',
project=self.project,
release=None,
dist=None,
allow_scraping=True,
)
event = Event.objects.get()
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert not frame.pre_context
assert frame.context_line == 'console.log("hello, World!")'
assert not frame.post_context
assert frame.data['sourcemap'] == 'http://example.com/test.min.js'
@responses.activate
def test_error_message_translations(self):
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Message': {
'message': u'ReferenceError: Impossible de d\xe9finir une propri\xe9t\xe9 \xab foo \xbb : objet non extensible'
},
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'value': u'P\u0159\xedli\u0161 mnoho soubor\u016f'
},
{
'type': 'Error',
'value': u'foo: wyst\u0105pi\u0142 nieoczekiwany b\u0142\u0105d podczas pr\xf3by uzyskania informacji o metadanych'
}
],
}
}
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.get()
message = event.interfaces['sentry.interfaces.Message']
assert message.message == 'ReferenceError: Cannot define property \'foo\': object is not extensible'
exception = event.interfaces['sentry.interfaces.Exception']
assert exception.values[0].value == 'Too many files'
assert exception.values[1].value == 'foo: an unexpected failure occurred while trying to obtain metadata information'
@responses.activate
def test_sourcemap_source_expansion(self):
responses.add(
responses.GET,
'http://example.com/file.min.js',
body=load_fixture('file.min.js'),
content_type='application/javascript; charset=utf-8'
)
responses.add(
responses.GET,
'http://example.com/file1.js',
body=load_fixture('file1.js'),
content_type='application/javascript; charset=utf-8'
)
responses.add(
responses.GET,
'http://example.com/file2.js',
body=load_fixture('file2.js'),
content_type='application/javascript; charset=utf-8'
)
responses.add(
responses.GET,
'http://example.com/file.sourcemap.js',
body=load_fixture('file.sourcemap.js'),
content_type='application/javascript; charset=utf-8'
)
responses.add(responses.GET, 'http://example.com/index.html', body='Not Found', status=404)
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'http://example.com/file.min.js',
'filename': 'file.min.js',
'lineno': 1,
'colno': 39,
},
# NOTE: Intentionally source is not retrieved from this HTML file
{
'function': 'function: "HTMLDocument.<anonymous>"',
'abs_path': "http//example.com/index.html",
'filename': 'index.html',
'lineno': 283,
'colno': 17,
'in_app': False,
}
],
},
}
],
}
}
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.get()
assert event.data['errors'] == [
{
'type': 'js_no_source',
'url': 'http//example.com/index.html'
}
]
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert frame.pre_context == [
'function add(a, b) {',
'\t"use strict";',
]
expected = u'\treturn a + b; // fôo'
assert frame.context_line == expected
assert frame.post_context == ['}', '']
raw_frame_list = exception.values[0].raw_stacktrace.frames
raw_frame = raw_frame_list[0]
assert not raw_frame.pre_context
assert raw_frame.context_line == 'function add(a,b){"use strict";return a+b}function multiply(a,b){"use strict";return a*b}function divide(a,b){"use strict";try{return multip {snip}'
assert raw_frame.post_context == ['//@ sourceMappingURL=file.sourcemap.js']
assert raw_frame.lineno == 1
# Since we couldn't expand source for the 2nd frame, both
# its raw and original form should be identical
assert raw_frame_list[1] == frame_list[1]
@responses.activate
def test_sourcemap_embedded_source_expansion(self):
responses.add(
responses.GET,
'http://example.com/embedded.js',
body=load_fixture('embedded.js'),
content_type='application/javascript; charset=utf-8'
)
responses.add(
responses.GET,
'http://example.com/embedded.js.map',
body=load_fixture('embedded.js.map'),
content_type='application/json; charset=utf-8'
)
responses.add(responses.GET, 'http://example.com/index.html', body='Not Found', status=404)
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'http://example.com/embedded.js',
'filename': 'file.min.js',
'lineno': 1,
'colno': 39,
},
# NOTE: Intentionally source is not retrieved from this HTML file
{
'function': 'function: "HTMLDocument.<anonymous>"',
'abs_path': "http//example.com/index.html",
'filename': 'index.html',
'lineno': 283,
'colno': 17,
'in_app': False,
}
],
},
}
],
}
}
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.get()
assert event.data['errors'] == [
{
'type': 'js_no_source',
'url': 'http//example.com/index.html'
}
]
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert frame.pre_context == [
'function add(a, b) {',
'\t"use strict";',
]
expected = u'\treturn a + b; // fôo'
assert frame.context_line == expected
assert frame.post_context == ['}', '']
@responses.activate
def test_sourcemap_nofiles_source_expansion(self):
project = self.project
release = Release.objects.create(
organization_id=project.organization_id,
version='abc',
)
release.add_project(project)
f_minified = File.objects.create(
name='nofiles.js',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f_minified.putfile(open(get_fixture_path('nofiles.js'), 'rb'))
ReleaseFile.objects.create(
name='~/{}'.format(f_minified.name),
release=release,
organization_id=project.organization_id,
file=f_minified,
)
f_sourcemap = File.objects.create(
name='nofiles.js.map',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f_sourcemap.putfile(open(get_fixture_path('nofiles.js.map'), 'rb'))
ReleaseFile.objects.create(
name='app:///{}'.format(f_sourcemap.name),
release=release,
organization_id=project.organization_id,
file=f_sourcemap,
)
data = {
'message': 'hello',
'platform': 'javascript',
'release': 'abc',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'app:///nofiles.js',
'lineno': 1,
'colno': 39,
}
],
},
}
],
}
}
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.get()
assert not event.data['errors']
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
assert len(frame_list) == 1
frame = frame_list[0]
assert frame.pre_context == [
'function multiply(a, b) {',
'\t"use strict";',
]
assert frame.context_line == u'\treturn a * b;'
assert frame.post_context == [
'}',
'function divide(a, b) {',
'\t"use strict";',
'\ttry {',
'\t\treturn multiply(add(a, b), a, b) / c;'
]
@responses.activate
def test_indexed_sourcemap_source_expansion(self):
responses.add(
responses.GET,
'http://example.com/indexed.min.js',
body=load_fixture('indexed.min.js'),
content_type='application/javascript; charset=utf-8'
)
responses.add(
responses.GET,
'http://example.com/file1.js',
body=load_fixture('file1.js'),
content_type='application/javascript; charset=utf-8'
)
responses.add(
responses.GET,
'http://example.com/file2.js',
body=load_fixture('file2.js'),
content_type='application/javascript; charset=utf-8'
)
responses.add(
responses.GET,
'http://example.com/indexed.sourcemap.js',
body=load_fixture('indexed.sourcemap.js'),
content_type='application/json; charset=utf-8'
)
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'http://example.com/indexed.min.js',
'filename': 'indexed.min.js',
'lineno': 1,
'colno': 39,
},
{
'abs_path': 'http://example.com/indexed.min.js',
'filename': 'indexed.min.js',
'lineno': 2,
'colno': 44,
},
],
},
}
],
}
}
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.get()
assert not event.data['errors']
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert frame.pre_context == [
'function add(a, b) {',
'\t"use strict";',
]
expected = u'\treturn a + b; // fôo'
assert frame.context_line == expected
assert frame.post_context == ['}', '']
raw_frame_list = exception.values[0].raw_stacktrace.frames
raw_frame = raw_frame_list[0]
assert not raw_frame.pre_context
assert raw_frame.context_line == 'function add(a,b){"use strict";return a+b}'
assert raw_frame.post_context == [
'function multiply(a,b){"use strict";return a*b}function divide(a,b){"use strict";try{return multiply(add(a,b),a,b)/c}catch(e){Raven.captureE {snip}',
'//# sourceMappingURL=indexed.sourcemap.js', ''
]
assert raw_frame.lineno == 1
frame = frame_list[1]
assert frame.pre_context == [
'function multiply(a, b) {',
'\t"use strict";',
]
assert frame.context_line == '\treturn a * b;'
assert frame.post_context == [
'}',
'function divide(a, b) {',
'\t"use strict";',
'\ttry {',
'\t\treturn multiply(add(a, b), a, b) / c;',
]
raw_frame = raw_frame_list[1]
assert raw_frame.pre_context == ['function add(a,b){"use strict";return a+b}']
assert raw_frame.context_line == 'function multiply(a,b){"use strict";return a*b}function divide(a,b){"use strict";try{return multiply(add(a,b),a,b)/c}catch(e){Raven.captureE {snip}'
assert raw_frame.post_context == ['//# sourceMappingURL=indexed.sourcemap.js', '']
assert raw_frame.lineno == 2
@responses.activate
def test_expansion_via_release_artifacts(self):
project = self.project
release = Release.objects.create(
organization_id=project.organization_id,
version='abc',
)
release.add_project(project)
# file.min.js
# ------------
f_minified = File.objects.create(
name='file.min.js',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f_minified.putfile(open(get_fixture_path('file.min.js'), 'rb'))
# Intentionally omit hostname - use alternate artifact path lookup instead
# /file1.js vs http://example.com/file1.js
ReleaseFile.objects.create(
name='~/{}?foo=bar'.format(f_minified.name),
release=release,
organization_id=project.organization_id,
file=f_minified,
)
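        # The "~/" prefix marks a host-relative artifact name, so this entry can
        # match the frame's absolute URL regardless of protocol and hostname.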
# file1.js
# ---------
f1 = File.objects.create(
name='file1.js',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f1.putfile(open(get_fixture_path('file1.js'), 'rb'))
ReleaseFile.objects.create(
name='http://example.com/{}'.format(f1.name),
release=release,
organization_id=project.organization_id,
file=f1,
)
# file2.js
# ----------
f2 = File.objects.create(
name='file2.js',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f2.putfile(open(get_fixture_path('file2.js'), 'rb'))
ReleaseFile.objects.create(
name='http://example.com/{}'.format(f2.name),
release=release,
organization_id=project.organization_id,
file=f2,
)
        # To verify that the full url has priority over the relative url,
        # we also add a second ReleaseFile alias for file2.js (f2_empty) without
        # a hostname that points to an empty file. If the processor chooses
        # this empty file over the correct file2.js, it will not locate
        # context for the 2nd frame.
f2_empty = File.objects.create(
name='empty.js',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f2_empty.putfile(open(get_fixture_path('empty.js'), 'rb'))
ReleaseFile.objects.create(
name='~/{}'.format(f2.name), # intentionally using f2.name ("file2.js")
release=release,
organization_id=project.organization_id,
file=f2_empty,
)
# sourcemap
# ----------
f_sourcemap = File.objects.create(
name='file.sourcemap.js',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f_sourcemap.putfile(open(get_fixture_path('file.sourcemap.js'), 'rb'))
ReleaseFile.objects.create(
name='http://example.com/{}'.format(f_sourcemap.name),
release=release,
organization_id=project.organization_id,
file=f_sourcemap,
)
data = {
'message': 'hello',
'platform': 'javascript',
'release': 'abc',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'http://example.com/file.min.js?foo=bar',
'filename': 'file.min.js',
'lineno': 1,
'colno': 39,
}, {
'abs_path': 'http://example.com/file.min.js?foo=bar',
'filename': 'file.min.js',
'lineno': 1,
'colno': 79,
}
],
},
}
],
}
}
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.get()
assert not event.data['errors']
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert frame.pre_context == [
'function add(a, b) {',
'\t"use strict";',
]
assert frame.context_line == u'\treturn a + b; // fôo'
assert frame.post_context == ['}', '']
frame = frame_list[1]
assert frame.pre_context == [
'function multiply(a, b) {',
'\t"use strict";',
]
assert frame.context_line == '\treturn a * b;'
assert frame.post_context == [
'}', 'function divide(a, b) {', '\t"use strict";', u'\ttry {',
'\t\treturn multiply(add(a, b), a, b) / c;'
]
@responses.activate
def test_expansion_via_distribution_release_artifacts(self):
project = self.project
release = Release.objects.create(
organization_id=project.organization_id,
version='abc',
)
release.add_project(project)
dist = release.add_dist('foo')
# file.min.js
# ------------
f_minified = File.objects.create(
name='file.min.js',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f_minified.putfile(open(get_fixture_path('file.min.js'), 'rb'))
# Intentionally omit hostname - use alternate artifact path lookup instead
# /file1.js vs http://example.com/file1.js
ReleaseFile.objects.create(
name='~/{}?foo=bar'.format(f_minified.name),
release=release,
dist=dist,
organization_id=project.organization_id,
file=f_minified,
)
# file1.js
# ---------
f1 = File.objects.create(
name='file1.js',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f1.putfile(open(get_fixture_path('file1.js'), 'rb'))
ReleaseFile.objects.create(
name='http://example.com/{}'.format(f1.name),
release=release,
dist=dist,
organization_id=project.organization_id,
file=f1,
)
# file2.js
# ----------
f2 = File.objects.create(
name='file2.js',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f2.putfile(open(get_fixture_path('file2.js'), 'rb'))
ReleaseFile.objects.create(
name='http://example.com/{}'.format(f2.name),
release=release,
dist=dist,
organization_id=project.organization_id,
file=f2,
)
        # To verify that the full url has priority over the relative url,
        # we also add a second ReleaseFile alias for file2.js (f2_empty) without
        # a hostname that points to an empty file. If the processor chooses
        # this empty file over the correct file2.js, it will not locate
        # context for the 2nd frame.
f2_empty = File.objects.create(
name='empty.js',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f2_empty.putfile(open(get_fixture_path('empty.js'), 'rb'))
ReleaseFile.objects.create(
name='~/{}'.format(f2.name), # intentionally using f2.name ("file2.js")
release=release,
dist=dist,
organization_id=project.organization_id,
file=f2_empty,
)
# sourcemap
# ----------
f_sourcemap = File.objects.create(
name='file.sourcemap.js',
type='release.file',
headers={'Content-Type': 'application/json'},
)
f_sourcemap.putfile(open(get_fixture_path('file.sourcemap.js'), 'rb'))
ReleaseFile.objects.create(
name='http://example.com/{}'.format(f_sourcemap.name),
release=release,
dist=dist,
organization_id=project.organization_id,
file=f_sourcemap,
)
data = {
'message': 'hello',
'platform': 'javascript',
'release': 'abc',
'dist': 'foo',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'http://example.com/file.min.js?foo=bar',
'filename': 'file.min.js',
'lineno': 1,
'colno': 39,
}, {
'abs_path': 'http://example.com/file.min.js?foo=bar',
'filename': 'file.min.js',
'lineno': 1,
'colno': 79,
}
],
},
}
],
}
}
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.get()
assert not event.data['errors']
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
assert frame.pre_context == [
'function add(a, b) {',
'\t"use strict";',
]
assert frame.context_line == u'\treturn a + b; // fôo'
assert frame.post_context == ['}', '']
frame = frame_list[1]
assert frame.pre_context == [
'function multiply(a, b) {',
'\t"use strict";',
]
assert frame.context_line == '\treturn a * b;'
assert frame.post_context == [
'}', 'function divide(a, b) {', '\t"use strict";', u'\ttry {',
'\t\treturn multiply(add(a, b), a, b) / c;'
]
@responses.activate
def test_sourcemap_expansion_with_missing_source(self):
"""
Tests a successful sourcemap expansion that points to source files
that are not found.
"""
responses.add(
responses.GET,
'http://example.com/file.min.js',
body=load_fixture('file.min.js'),
content_type='application/javascript; charset=utf-8'
)
responses.add(
responses.GET,
'http://example.com/file.sourcemap.js',
body=load_fixture('file.sourcemap.js'),
content_type='application/json; charset=utf-8'
)
responses.add(responses.GET, 'http://example.com/file1.js', body='Not Found', status=404)
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
# Add two frames. We only want to see the
# error once though.
'frames': [
{
'abs_path': 'http://example.com/file.min.js',
'filename': 'file.min.js',
'lineno': 1,
'colno': 39,
},
{
'abs_path': 'http://example.com/file.min.js',
'filename': 'file.min.js',
'lineno': 1,
'colno': 39,
},
],
},
}
],
}
}
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.get()
assert event.data['errors'] == [
{
'url': u'http://example.com/file1.js',
'type': 'fetch_invalid_http_code',
'value': 404
}
]
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
frame = frame_list[0]
# no context information ...
assert not frame.pre_context
assert not frame.context_line
assert not frame.post_context
# ... but line, column numbers are still correctly mapped
assert frame.lineno == 3
assert frame.colno == 9
@responses.activate
def test_failed_sourcemap_expansion(self):
"""
Tests attempting to parse an indexed source map where each section has a "url"
property - this is unsupported and should fail.
"""
responses.add(
responses.GET,
'http://example.com/unsupported.min.js',
body=load_fixture('unsupported.min.js'),
content_type='application/javascript; charset=utf-8'
)
responses.add(
responses.GET,
'http://example.com/unsupported.sourcemap.js',
body=load_fixture('unsupported.sourcemap.js'),
content_type='application/json; charset=utf-8'
)
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'http://example.com/unsupported.min.js',
'filename': 'indexed.min.js',
'lineno': 1,
'colno': 39,
},
],
},
}
],
}
}
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.get()
assert event.data['errors'] == [
{
'url': u'http://example.com/unsupported.sourcemap.js',
'type': 'js_invalid_source'
}
]
def test_failed_sourcemap_expansion_data_url(self):
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'data:application/javascript,base46,asfasf',
'filename': 'indexed.min.js',
'lineno': 1,
'colno': 39,
},
],
},
}
],
}
}
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.get()
assert event.data['errors'] == [{'url': u'<data url>', 'type': 'js_no_source'}]
@responses.activate
def test_failed_sourcemap_expansion_missing_location_entirely(self):
responses.add(
responses.GET,
'http://example.com/indexed.min.js',
body='//# sourceMappingURL=indexed.sourcemap.js',
)
responses.add(
responses.GET,
'http://example.com/indexed.sourcemap.js',
body='{}'
)
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'http://example.com/indexed.min.js',
'filename': 'indexed.min.js',
'lineno': 1,
'colno': 1,
},
{
'abs_path': 'http://example.com/indexed.min.js',
'filename': 'indexed.min.js',
},
],
},
}
],
}
}
resp = self._postWithHeader(data)
assert resp.status_code == 200
event = Event.objects.get()
assert event.data['errors'] == []
@responses.activate
def test_html_response_for_js(self):
responses.add(
responses.GET,
'http://example.com/file1.js',
body=' <!DOCTYPE html><html><head></head><body></body></html>'
)
responses.add(
responses.GET,
'http://example.com/file2.js',
body='<!doctype html><html><head></head><body></body></html>'
)
responses.add(
responses.GET,
'http://example.com/file.html',
body=(
'<!doctype html><html><head></head><body><script>/*legit case*/</script></body></html>'
)
)
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'http://example.com/file1.js',
'filename': 'file.min.js',
'lineno': 1,
'colno': 39,
},
{
'abs_path': 'http://example.com/file2.js',
'filename': 'file.min.js',
'lineno': 1,
'colno': 39,
},
{
'abs_path': 'http://example.com/file.html',
'filename': 'file.html',
'lineno': 1,
'colno': 1,
},
],
},
}
],
}
}
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.get()
assert event.data['errors'] == [
{
'url': u'http://example.com/file1.js',
'type': 'js_invalid_content'
}, {
'url': u'http://example.com/file2.js',
'type': 'js_invalid_content'
}
]
def test_node_processing(self):
project = self.project
release = Release.objects.create(
organization_id=project.organization_id,
version='nodeabc123',
)
release.add_project(project)
f_minified = File.objects.create(
name='dist.bundle.js',
type='release.file',
headers={'Content-Type': 'application/javascript'},
)
f_minified.putfile(open(get_fixture_path('dist.bundle.js'), 'rb'))
ReleaseFile.objects.create(
name='~/{}'.format(f_minified.name),
release=release,
organization_id=project.organization_id,
file=f_minified,
)
f_sourcemap = File.objects.create(
name='dist.bundle.js.map',
type='release.file',
headers={'Content-Type': 'application/javascript'},
)
f_sourcemap.putfile(open(get_fixture_path('dist.bundle.js.map'), 'rb'))
ReleaseFile.objects.create(
name='~/{}'.format(f_sourcemap.name),
release=release,
organization_id=project.organization_id,
file=f_sourcemap,
)
data = {
'message': 'hello',
'platform': 'node',
'release': 'nodeabc123',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': [
{
'filename': 'app:///dist.bundle.js',
'function': 'bar',
'lineno': 9,
'colno': 2321,
},
{
'filename': 'app:///dist.bundle.js',
'function': 'foo',
'lineno': 3,
'colno': 2308,
},
{
'filename': 'app:///dist.bundle.js',
'function': 'App',
'lineno': 3,
'colno': 1011,
},
{
'filename': 'app:///dist.bundle.js',
'function': 'Object.<anonymous>',
'lineno': 1,
'colno': 1014,
},
{
'filename': 'app:///dist.bundle.js',
'function': '__webpack_require__',
'lineno': 20,
'colno': 30,
},
{
'filename': 'app:///dist.bundle.js',
'function': '<unknown>',
'lineno': 18,
'colno': 63,
}
],
},
}
],
}
}
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.get()
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
assert len(frame_list) == 6
import pprint
pprint.pprint(frame_list[0].__dict__)
pprint.pprint(frame_list[1].__dict__)
pprint.pprint(frame_list[2].__dict__)
pprint.pprint(frame_list[3].__dict__)
pprint.pprint(frame_list[4].__dict__)
pprint.pprint(frame_list[5].__dict__)
assert frame_list[0].abs_path == 'webpack:///webpack/bootstrap d9a5a31d9276b73873d3'
assert frame_list[0].function == 'bar'
assert frame_list[0].lineno == 8
assert frame_list[1].abs_path == 'webpack:///webpack/bootstrap d9a5a31d9276b73873d3'
assert frame_list[1].function == 'foo'
assert frame_list[1].lineno == 2
assert frame_list[2].abs_path == 'webpack:///webpack/bootstrap d9a5a31d9276b73873d3'
assert frame_list[2].function == 'App'
assert frame_list[2].lineno == 2
assert frame_list[3].abs_path == 'app:///dist.bundle.js'
assert frame_list[3].function == 'Object.<anonymous>'
assert frame_list[3].lineno == 1
assert frame_list[4].abs_path == 'webpack:///webpack/bootstrap d9a5a31d9276b73873d3'
assert frame_list[4].function == '__webpack_require__'
assert frame_list[4].lineno == 19
assert frame_list[5].abs_path == 'webpack:///webpack/bootstrap d9a5a31d9276b73873d3'
assert frame_list[5].function == '<unknown>'
assert frame_list[5].lineno == 16
@responses.activate
def test_no_fetch_from_http(self):
responses.add(
responses.GET,
'http://example.com/node_app.min.js',
body=load_fixture('node_app.min.js'),
content_type='application/javascript; charset=utf-8'
)
responses.add(
responses.GET,
'http://example.com/node_app.min.js.map',
body=load_fixture('node_app.min.js.map'),
content_type='application/javascript; charset=utf-8'
)
data = {
'message': 'hello',
'platform': 'node',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': [
{
'abs_path': 'node_bootstrap.js',
'filename': 'node_bootstrap.js',
'lineno': 1,
'colno': 38,
},
{
'abs_path': 'timers.js',
'filename': 'timers.js',
'lineno': 1,
'colno': 39,
},
{
'abs_path': 'webpack:///internal',
'filename': 'internal',
'lineno': 1,
'colno': 43,
},
{
'abs_path': 'webpack:///~/some_dep/file.js',
'filename': 'file.js',
'lineno': 1,
'colno': 41,
},
{
'abs_path': 'webpack:///./node_modules/file.js',
'filename': 'file.js',
'lineno': 1,
'colno': 42,
},
{
'abs_path': 'http://example.com/node_app.min.js',
'filename': 'node_app.min.js',
'lineno': 1,
'colno': 40,
},
],
},
}
],
}
}
resp = self._postWithHeader(data)
        assert resp.status_code == 200
event = Event.objects.get()
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
        # This event should not be processed, so raw_stacktrace should be None.
assert exception.values[0].raw_stacktrace is None
        # None of the frames should have had in_app updated.
for x in range(6):
assert not frame_list[x].in_app
|
{
"content_hash": "5617e9459689332882ace56c6bf1b441",
"timestamp": "",
"source": "github",
"line_count": 1434,
"max_line_length": 190,
"avg_line_length": 35.95536959553696,
"alnum_prop": 0.4299069045771916,
"repo_name": "looker/sentry",
"id": "3adad1e3de5c54d81778dcedc137bbfbb79e727e",
"size": "51565",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/sentry/lang/javascript/test_plugin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "289931"
},
{
"name": "HTML",
"bytes": "241322"
},
{
"name": "JavaScript",
"bytes": "3112298"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "36341504"
},
{
"name": "Ruby",
"bytes": "204"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import copy
import six
from keystone.common import kvs
from keystone import config
from keystone import exception
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import log
from keystone.openstack.common import timeutils
from keystone import token
CONF = config.CONF
LOG = log.getLogger(__name__)
class Token(token.Driver):
"""KeyValueStore backend for tokens.
This is the base implementation for any/all key-value-stores (e.g.
memcached) for the Token backend. It is recommended to only use the base
in-memory implementation for testing purposes.
"""
revocation_key = 'revocation-list'
kvs_backend = 'openstack.kvs.Memory'
def __init__(self, backing_store=None, **kwargs):
super(Token, self).__init__()
self._store = kvs.get_key_value_store('token-driver')
if backing_store is not None:
self.kvs_backend = backing_store
self._store.configure(backing_store=self.kvs_backend, **kwargs)
if self.__class__ == Token:
# NOTE(morganfainberg): Only warn if the base KVS implementation
# is instantiated.
LOG.warn(_('It is recommended to only use the base '
'key-value-store implementation for the token driver '
'for testing purposes. '
'Please use keystone.token.backends.memcache.Token '
'or keystone.token.backends.sql.Token instead.'))
def _prefix_token_id(self, token_id):
return 'token-%s' % token_id.encode('utf-8')
def _prefix_user_id(self, user_id):
return 'usertokens-%s' % user_id.encode('utf-8')
def _get_key_or_default(self, key, default=None):
try:
return self._store.get(key)
except exception.NotFound:
return default
def _get_key(self, key):
return self._store.get(key)
def _set_key(self, key, value, lock=None):
self._store.set(key, value, lock)
def _delete_key(self, key):
return self._store.delete(key)
def get_token(self, token_id):
ptk = self._prefix_token_id(token_id)
try:
token_ref = self._get_key(ptk)
except exception.NotFound:
raise exception.TokenNotFound(token_id=token_id)
return token_ref
def create_token(self, token_id, data):
"""Create a token by id and data.
It is assumed the caller has performed data validation on the "data"
parameter.
"""
data_copy = copy.deepcopy(data)
ptk = self._prefix_token_id(token_id)
if not data_copy.get('expires'):
data_copy['expires'] = token.default_expire_time()
if not data_copy.get('user_id'):
data_copy['user_id'] = data_copy['user']['id']
# NOTE(morganfainberg): for ease of manipulating the data without
# concern about the backend, always store the value(s) in the
# index as the isotime (string) version so this is where the string is
# built.
expires_str = timeutils.isotime(data_copy['expires'], subsecond=True)
self._set_key(ptk, data_copy)
user_id = data['user']['id']
user_key = self._prefix_user_id(user_id)
self._update_user_token_list(user_key, token_id, expires_str)
if CONF.trust.enabled and data.get('trust_id'):
# NOTE(morganfainberg): If trusts are enabled and this is a trust
# scoped token, we add the token to the trustee list as well. This
# allows password changes of the trustee to also expire the token.
# There is no harm in placing the token in multiple lists, as
# _list_tokens is smart enough to handle almost any case of
# valid/invalid/expired for a given token.
token_data = data_copy['token_data']
if data_copy['token_version'] == token.provider.V2:
trustee_user_id = token_data['access']['trust'][
'trustee_user_id']
elif data_copy['token_version'] == token.provider.V3:
trustee_user_id = token_data['OS-TRUST:trust'][
'trustee_user_id']
else:
raise token.provider.UnsupportedTokenVersionException(
_('Unknown token version %s') %
data_copy.get('token_version'))
trustee_key = self._prefix_user_id(trustee_user_id)
self._update_user_token_list(trustee_key, token_id, expires_str)
return data_copy
def _get_user_token_list_with_expiry(self, user_key):
"""Return a list of tuples in the format (token_id, token_expiry) for
the user_key.
"""
return self._get_key_or_default(user_key, default=[])
def _get_user_token_list(self, user_key):
"""Return a list of token_ids for the user_key."""
token_list = self._get_user_token_list_with_expiry(user_key)
        # Each element is a tuple of (token_id, token_expiry). Most code does
        # not care about the expiry; it is stripped out and only a list of
        # token_ids is returned.
return [t[0] for t in token_list]
def _update_user_token_list(self, user_key, token_id, expires_isotime_str):
current_time = self._get_current_time()
revoked_token_list = set([t['id'] for t in
self.list_revoked_tokens()])
with self._store.get_lock(user_key) as lock:
filtered_list = []
token_list = self._get_user_token_list_with_expiry(user_key)
for item in token_list:
try:
item_id, expires = self._format_token_index_item(item)
except (ValueError, TypeError):
# NOTE(morganfainberg): Skip on expected errors
# possibilities from the `_format_token_index_item` method.
continue
if expires < current_time:
LOG.debug(_('Token `%(token_id)s` is expired, removing '
'from `%(user_key)s`.'),
{'token_id': item_id, 'user_key': user_key})
continue
if item_id in revoked_token_list:
# NOTE(morganfainberg): If the token has been revoked, it
# can safely be removed from this list. This helps to keep
# the user_token_list as reasonably small as possible.
LOG.debug(_('Token `%(token_id)s` is revoked, removing '
'from `%(user_key)s`.'),
{'token_id': item_id, 'user_key': user_key})
continue
filtered_list.append(item)
filtered_list.append((token_id, expires_isotime_str))
self._set_key(user_key, filtered_list, lock)
return filtered_list
def _get_current_time(self):
return timeutils.normalize_time(timeutils.utcnow())
def _add_to_revocation_list(self, data, lock):
filtered_list = []
revoked_token_data = {}
current_time = self._get_current_time()
expires = data['expires']
if isinstance(expires, six.string_types):
expires = timeutils.parse_isotime(expires)
expires = timeutils.normalize_time(expires)
if expires < current_time:
LOG.warning(_('Token `%s` is expired, not adding to the '
'revocation list.'), data['id'])
return
revoked_token_data['expires'] = timeutils.isotime(expires,
subsecond=True)
revoked_token_data['id'] = data['id']
token_list = self._get_key_or_default(self.revocation_key, default=[])
if not isinstance(token_list, list):
# NOTE(morganfainberg): In the case that the revocation list is not
# in a format we understand, reinitialize it. This is an attempt to
# not allow the revocation list to be completely broken if
# somehow the key is changed outside of keystone (e.g. memcache
# that is shared by multiple applications). Logging occurs at error
# level so that the cloud administrators have some awareness that
# the revocation_list needed to be cleared out. In all, this should
# be recoverable. Keystone cannot control external applications
# from changing a key in some backends, however, it is possible to
# gracefully handle and notify of this event.
LOG.error(_('Reinitializing revocation list due to error '
'in loading revocation list from backend. '
'Expected `list` type got `%(type)s`. Old '
'revocation list data: %(list)r'),
{'type': type(token_list), 'list': token_list})
token_list = []
        # NOTE(morganfainberg): on revocation, clean up the expired entries to
        # keep the list of revoked tokens to a minimum.
for token_data in token_list:
try:
expires_at = timeutils.normalize_time(
timeutils.parse_isotime(token_data['expires']))
except ValueError:
LOG.warning(_('Removing `%s` from revocation list due to '
'invalid expires data in revocation list.'),
token_data.get('id', 'INVALID_TOKEN_DATA'))
continue
if expires_at > current_time:
filtered_list.append(token_data)
filtered_list.append(revoked_token_data)
self._set_key(self.revocation_key, filtered_list, lock)
def delete_token(self, token_id):
# Test for existence
with self._store.get_lock(self.revocation_key) as lock:
data = self.get_token(token_id)
ptk = self._prefix_token_id(token_id)
result = self._delete_key(ptk)
self._add_to_revocation_list(data, lock)
return result
def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
return super(Token, self).delete_tokens(
user_id=user_id,
tenant_id=tenant_id,
trust_id=trust_id,
consumer_id=consumer_id,
)
def _format_token_index_item(self, item):
try:
token_id, expires = item
except (TypeError, ValueError):
LOG.debug(_('Invalid token entry expected tuple of '
'`(<token_id>, <expires>)` got: `%(item)r`'),
dict(item=item))
raise
try:
expires = timeutils.normalize_time(
timeutils.parse_isotime(expires))
except ValueError:
LOG.debug(_('Invalid expires time on token `%(token_id)s`:'
' %(expires)r'),
dict(token_id=token_id, expires=expires))
raise
return token_id, expires
def _token_match_tenant(self, token_ref, tenant_id):
if token_ref.get('tenant'):
return token_ref['tenant'].get('id') == tenant_id
return False
def _token_match_trust(self, token_ref, trust_id):
if not token_ref.get('trust_id'):
return False
return token_ref['trust_id'] == trust_id
def _token_match_consumer(self, token_ref, consumer_id):
try:
oauth = token_ref['token_data']['token']['OS-OAUTH1']
return oauth.get('consumer_id') == consumer_id
except KeyError:
return False
def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
# This function is used to generate the list of tokens that should be
# revoked when revoking by token identifiers. This approach will be
# deprecated soon, probably in the Juno release. Setting revoke_by_id
# to False indicates that this kind of recording should not be
# performed. In order to test the revocation events, tokens shouldn't
# be deleted from the backends. This check ensures that tokens are
# still recorded.
if not CONF.token.revoke_by_id:
return []
tokens = []
user_key = self._prefix_user_id(user_id)
token_list = self._get_user_token_list_with_expiry(user_key)
current_time = self._get_current_time()
for item in token_list:
try:
token_id, expires = self._format_token_index_item(item)
except (TypeError, ValueError):
# NOTE(morganfainberg): Skip on expected error possibilities
# from the `_format_token_index_item` method.
continue
if expires < current_time:
continue
try:
token_ref = self.get_token(token_id)
except exception.TokenNotFound:
# NOTE(morganfainberg): Token doesn't exist, skip it.
continue
if token_ref:
if tenant_id is not None:
if not self._token_match_tenant(token_ref, tenant_id):
continue
if trust_id is not None:
if not self._token_match_trust(token_ref, trust_id):
continue
if consumer_id is not None:
if not self._token_match_consumer(token_ref, consumer_id):
continue
tokens.append(token_id)
return tokens
def list_revoked_tokens(self):
revoked_token_list = self._get_key_or_default(self.revocation_key,
default=[])
if isinstance(revoked_token_list, list):
return revoked_token_list
return []
def flush_expired_tokens(self):
"""Archive or delete tokens that have expired."""
raise exception.NotImplemented()
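# Illustrative sketch (not part of the original module): how the in-memory KVS
# Token driver above might be exercised in a test. The call pattern is an
# assumption based on the methods defined in this class, not a documented API.
#
#     driver = Token()  # default backend is the in-memory 'openstack.kvs.Memory'
#     data = {'id': 'abc123', 'user': {'id': 'u1'}}
#     token_ref = driver.create_token('abc123', data)
#     assert driver.get_token('abc123') == token_ref
#     driver.delete_token('abc123')  # deletes the token and records a revocation
#     assert any(t['id'] == 'abc123' for t in driver.list_revoked_tokens())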
|
{
"content_hash": "5fa7ab956841969c7845d6cae939fd59",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 79,
"avg_line_length": 42.11538461538461,
"alnum_prop": 0.5681067790656832,
"repo_name": "sanket4373/keystone",
"id": "395f5ab0e6472d4b99e7aa7d936d4111ffdb91c5",
"size": "14879",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "keystone/token/backends/kvs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3009738"
},
{
"name": "Shell",
"bytes": "4619"
}
],
"symlink_target": ""
}
|
import os, sys, getopt, traceback, json, re
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.storagelevel import StorageLevel
from pyspark.accumulators import Accumulator, AccumulatorParam
from pyspark.broadcast import Broadcast
from pyspark.serializers import MarshalSerializer, PickleSerializer
import warnings
import ast
import traceback
import warnings
# for backward compatibility
from pyspark.sql import SQLContext, HiveContext, Row
class Logger(object):
def __init__(self):
pass
def write(self, message):
intp.appendOutput(message)
def reset(self):
pass
def flush(self):
pass
class PyZeppelinContext(dict):
def __init__(self, zc):
self.z = zc
self._displayhook = lambda *args: None
def show(self, obj):
from pyspark.sql import DataFrame
if isinstance(obj, DataFrame):
print(gateway.jvm.org.apache.zeppelin.spark.ZeppelinContext.showDF(self.z, obj._jdf))
else:
print(str(obj))
    # Implementing these special methods makes operating on the context more Pythonic
def __setitem__(self, key, item):
self.z.put(key, item)
def __getitem__(self, key):
return self.z.get(key)
def __delitem__(self, key):
self.z.remove(key)
def __contains__(self, item):
return self.z.containsKey(item)
def add(self, key, value):
self.__setitem__(key, value)
def put(self, key, value):
self.__setitem__(key, value)
def get(self, key):
return self.__getitem__(key)
def getInterpreterContext(self):
return self.z.getInterpreterContext()
def input(self, name, defaultValue=""):
return self.z.input(name, defaultValue)
def select(self, name, options, defaultValue=""):
# auto_convert to ArrayList doesn't match the method signature on JVM side
tuples = list(map(lambda items: self.__tupleToScalaTuple2(items), options))
iterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(tuples)
return self.z.select(name, defaultValue, iterables)
def checkbox(self, name, options, defaultChecked=None):
if defaultChecked is None:
defaultChecked = []
optionTuples = list(map(lambda items: self.__tupleToScalaTuple2(items), options))
optionIterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(optionTuples)
defaultCheckedIterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(defaultChecked)
checkedItems = gateway.jvm.scala.collection.JavaConversions.seqAsJavaList(self.z.checkbox(name, defaultCheckedIterables, optionIterables))
result = []
for checkedItem in checkedItems:
result.append(checkedItem)
        return result
def registerHook(self, event, cmd, replName=None):
if replName is None:
self.z.registerHook(event, cmd)
else:
self.z.registerHook(event, cmd, replName)
def unregisterHook(self, event, replName=None):
if replName is None:
self.z.unregisterHook(event)
else:
self.z.unregisterHook(event, replName)
def getHook(self, event, replName=None):
if replName is None:
return self.z.getHook(event)
return self.z.getHook(event, replName)
def _setup_matplotlib(self):
# If we don't have matplotlib installed don't bother continuing
try:
import matplotlib
except ImportError:
return
# Make sure custom backends are available in the PYTHONPATH
rootdir = os.environ.get('ZEPPELIN_HOME', os.getcwd())
mpl_path = os.path.join(rootdir, 'interpreter', 'lib', 'python')
if mpl_path not in sys.path:
sys.path.append(mpl_path)
# Finally check if backend exists, and if so configure as appropriate
try:
matplotlib.use('module://backend_zinline')
import backend_zinline
# Everything looks good so make config assuming that we are using
# an inline backend
self._displayhook = backend_zinline.displayhook
self.configure_mpl(width=600, height=400, dpi=72, fontsize=10,
interactive=True, format='png', context=self.z)
except ImportError:
# Fall back to Agg if no custom backend installed
matplotlib.use('Agg')
warnings.warn("Unable to load inline matplotlib backend, "
"falling back to Agg")
def configure_mpl(self, **kwargs):
import mpl_config
mpl_config.configure(**kwargs)
def __tupleToScalaTuple2(self, tuple):
if (len(tuple) == 2):
return gateway.jvm.scala.Tuple2(tuple[0], tuple[1])
else:
raise IndexError("options must be a list of tuple of 2")
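# Illustrative sketch (not part of the original script): thanks to the special
# methods above, the PyZeppelinContext instance (bound to `z` further below) can
# be used like a plain dict inside a notebook paragraph.
#
#     z['shared_value'] = 42        # same as z.put('shared_value', 42)
#     if 'shared_value' in z:
#         print(z['shared_value'])  # same as z.get('shared_value')
#     z.show(some_dataframe)        # renders a Spark DataFrame as a Zeppelin table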
class SparkVersion(object):
SPARK_1_4_0 = 10400
SPARK_1_3_0 = 10300
SPARK_2_0_0 = 20000
def __init__(self, versionNumber):
self.version = versionNumber
def isAutoConvertEnabled(self):
return self.version >= self.SPARK_1_4_0
def isImportAllPackageUnderSparkSql(self):
return self.version >= self.SPARK_1_3_0
def isSpark2(self):
return self.version >= self.SPARK_2_0_0
class PySparkCompletion:
def __init__(self, interpreterObject):
self.interpreterObject = interpreterObject
def getGlobalCompletion(self):
objectDefList = []
try:
for completionItem in list(globals().keys()):
objectDefList.append(completionItem)
except:
return None
else:
return objectDefList
def getMethodCompletion(self, text_value):
execResult = locals()
if text_value == None:
return None
completion_target = text_value
try:
if len(completion_target) <= 0:
return None
if text_value[-1] == ".":
completion_target = text_value[:-1]
exec("{} = dir({})".format("objectDefList", completion_target), globals(), execResult)
except:
return None
else:
return list(execResult['objectDefList'])
def getCompletion(self, text_value):
completionList = set()
globalCompletionList = self.getGlobalCompletion()
if globalCompletionList != None:
for completionItem in list(globalCompletionList):
completionList.add(completionItem)
if text_value != None:
objectCompletionList = self.getMethodCompletion(text_value)
if objectCompletionList != None:
for completionItem in list(objectCompletionList):
completionList.add(completionItem)
if len(completionList) <= 0:
self.interpreterObject.setStatementsFinished("", False)
else:
result = json.dumps(list(filter(lambda x : not re.match("^__.*", x), list(completionList))))
self.interpreterObject.setStatementsFinished(result, False)
output = Logger()
sys.stdout = output
sys.stderr = output
client = GatewayClient(port=int(sys.argv[1]))
sparkVersion = SparkVersion(int(sys.argv[2]))
if sparkVersion.isSpark2():
from pyspark.sql import SparkSession
else:
from pyspark.sql import SchemaRDD
if sparkVersion.isAutoConvertEnabled():
gateway = JavaGateway(client, auto_convert = True)
else:
gateway = JavaGateway(client)
java_import(gateway.jvm, "org.apache.spark.SparkEnv")
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
intp = gateway.entry_point
intp.onPythonScriptInitialized(os.getpid())
jsc = intp.getJavaSparkContext()
if sparkVersion.isImportAllPackageUnderSparkSql():
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
else:
java_import(gateway.jvm, "org.apache.spark.sql.SQLContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.HiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.LocalHiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.TestHiveContext")
java_import(gateway.jvm, "scala.Tuple2")
_zcUserQueryNameSpace = {}
jconf = intp.getSparkConf()
conf = SparkConf(_jvm = gateway.jvm, _jconf = jconf)
sc = _zsc_ = SparkContext(jsc=jsc, gateway=gateway, conf=conf)
_zcUserQueryNameSpace["_zsc_"] = _zsc_
_zcUserQueryNameSpace["sc"] = sc
if sparkVersion.isSpark2():
spark = __zSpark__ = SparkSession(sc, intp.getSparkSession())
sqlc = __zSqlc__ = __zSpark__._wrapped
_zcUserQueryNameSpace["sqlc"] = sqlc
_zcUserQueryNameSpace["__zSqlc__"] = __zSqlc__
_zcUserQueryNameSpace["spark"] = spark
_zcUserQueryNameSpace["__zSpark__"] = __zSpark__
else:
sqlc = __zSqlc__ = SQLContext(sparkContext=sc, sqlContext=intp.getSQLContext())
_zcUserQueryNameSpace["sqlc"] = sqlc
_zcUserQueryNameSpace["__zSqlc__"] = sqlc
sqlContext = __zSqlc__
_zcUserQueryNameSpace["sqlContext"] = sqlContext
completion = __zeppelin_completion__ = PySparkCompletion(intp)
_zcUserQueryNameSpace["completion"] = completion
_zcUserQueryNameSpace["__zeppelin_completion__"] = __zeppelin_completion__
z = __zeppelin__ = PyZeppelinContext(intp.getZeppelinContext())
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["z"] = z
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
while True :
req = intp.getStatements()
try:
stmts = req.statements().split("\n")
jobGroup = req.jobGroup()
final_code = []
# Get post-execute hooks
try:
global_hook = intp.getHook('post_exec_dev')
except:
global_hook = None
try:
user_hook = __zeppelin__.getHook('post_exec')
except:
user_hook = None
nhooks = 0
for hook in (global_hook, user_hook):
if hook:
nhooks += 1
for s in stmts:
if s == None:
continue
# skip comment
s_stripped = s.strip()
if len(s_stripped) == 0 or s_stripped.startswith("#"):
continue
final_code.append(s)
if final_code:
# use exec mode to compile the statements except the last statement,
# so that the last statement's evaluation will be printed to stdout
sc.setJobGroup(jobGroup, "Zeppelin")
code = compile('\n'.join(final_code), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1)
to_run_hooks = []
if (nhooks > 0):
to_run_hooks = code.body[-nhooks:]
to_run_exec, to_run_single = (code.body[:-(nhooks + 1)],
[code.body[-(nhooks + 1)]])
try:
for node in to_run_exec:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
for node in to_run_single:
mod = ast.Interactive([node])
code = compile(mod, '<stdin>', 'single')
exec(code, _zcUserQueryNameSpace)
for node in to_run_hooks:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
except:
raise Exception(traceback.format_exc())
intp.setStatementsFinished("", False)
except Py4JJavaError:
excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
innerErrorStart = excInnerError.find("Py4JJavaError:")
if innerErrorStart > -1:
excInnerError = excInnerError[innerErrorStart:]
intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
except:
intp.setStatementsFinished(traceback.format_exc(), True)
output.reset()
|
{
"content_hash": "dd2d0ed161be1f10d256d00d2167fb34",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 142,
"avg_line_length": 31.447802197802197,
"alnum_prop": 0.6832357823010395,
"repo_name": "Nova-Boy/zeppelin",
"id": "da4d7943e7c88b9e062308ab83c79f2b37bb3d8e",
"size": "12232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spark/src/main/resources/python/zeppelin_pyspark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11970"
},
{
"name": "CSS",
"bytes": "80589"
},
{
"name": "Groovy",
"bytes": "4891"
},
{
"name": "HTML",
"bytes": "289818"
},
{
"name": "Java",
"bytes": "3958681"
},
{
"name": "JavaScript",
"bytes": "509877"
},
{
"name": "Python",
"bytes": "74543"
},
{
"name": "R",
"bytes": "21301"
},
{
"name": "Roff",
"bytes": "60995"
},
{
"name": "Ruby",
"bytes": "3101"
},
{
"name": "Scala",
"bytes": "344366"
},
{
"name": "Shell",
"bytes": "76968"
},
{
"name": "Thrift",
"bytes": "5084"
},
{
"name": "XSLT",
"bytes": "1326"
}
],
"symlink_target": ""
}
|
from common import *
def test_create_out_of_session(m):
n = TNode()
randval = n.randval = n.randval
assert m.session.count == 1
ret = create_out_of_session(m, n)
assert m.session.count == 0
assert isinstance(ret, int)
n = TNode.get(ret)
assert m.session.count == 1
assert n.randval == randval
def test_update_out_of_session(m):
n = TNode()
n.string = 'value1'
m.session.commit()
assert n in m.session
n.properties.reload()
assert n.string == 'value1'
ret = update_out_of_session(m, n, {'string': 'value2'})
assert ret == True
assert n in m.session
assert n.string == 'value1'
n.properties.reload()
assert n.string == 'value2'
def test_delete_out_of_session(m):
n = TNode()
m.session.commit()
assert not n.is_phantom()
n_id = n.id
ret = delete_out_of_session(m, n)
assert ret == True
assert m.session.count == 1
assert n in m.session
assert not n.is_deleted()
m.session.clear()
with raises(EntityNotFoundException):
TNode.get(n_id)
m.session.clear()
n1 = TNode()
n2 = TNode()
rel = n1.rel_out.append(n2)
m.session.commit()
assert not rel.is_phantom()
rel_id = rel.id
ret = delete_out_of_session(m, rel)
assert ret == True
assert m.session.count == 3
assert rel in m.session
assert not rel.is_deleted()
m.session.clear()
with raises(EntityNotFoundException):
Relationship.get(rel_id)
def test_append_out_of_session(m):
n1 = TNode()
n2 = TNode()
m.session.commit()
assert n2 not in n1.rel_out
ret = append_out_of_session(m, n1, 'rel_out', n2)
assert isinstance(ret, int)
assert m.session.count == 2
assert n2 not in n1.rel_out
n1.rel_out.load()
assert n2 in n1.rel_out
assert n1.rel_out.rel(n2).id == ret
def test_remove_out_of_session(m):
n1 = TNode()
n2 = TNode()
rel = n1.rel_out.append(n2)
m.session.commit()
assert n2 in n1.rel_out
n1_id = n1.id
n2_id = n2.id
rel_id = rel.id
ret = remove_out_of_session(m, n1, 'rel_out', n2)
assert ret == True
assert m.session.count == 3
assert n2 in n1.rel_out
m.session.clear()
n1 = TNode.get(n1_id)
n2 = TNode.get(n2_id)
assert n2 not in n1.rel_out
with raises(EntityNotFoundException):
Relationship.get(rel_id)
|
{
"content_hash": "2a7091a648171d731c6381f569e3d06c",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 59,
"avg_line_length": 23.223300970873787,
"alnum_prop": 0.6082775919732442,
"repo_name": "ivotkv/neolixir",
"id": "576343875a704900bc998d12b1fbf52fb378b7b5",
"size": "3520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "188198"
},
{
"name": "Shell",
"bytes": "644"
}
],
"symlink_target": ""
}
|
from .time import hour
from .length import nautical_mile
#
# Definitions of common speed units
# Data taken from Appendix F of Halliday, Resnick, Walker, "Fundamentals of Physics",
# fourth edition, John Wiley and Sons, 1993
knot = nautical_mile/hour
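# Illustrative conversion (not part of the original module), assuming the sibling
# modules define SI-based values (nautical_mile = 1852 m, hour = 3600 s):
#
#     speed = 12.5 * knot   # roughly 6.43 m/s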
|
{
"content_hash": "8ca1d027fc80a7767ad7b68ad44d2c03",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 85,
"avg_line_length": 25.9,
"alnum_prop": 0.752895752895753,
"repo_name": "Heathckliff/cantera",
"id": "93374c83abef6de9e9a40a051dedda7313970ac4",
"size": "259",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "interfaces/cython/cantera/mixmaster/Units/speed.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1935303"
},
{
"name": "C++",
"bytes": "6751664"
},
{
"name": "CSS",
"bytes": "2167"
},
{
"name": "FORTRAN",
"bytes": "1175454"
},
{
"name": "Groff",
"bytes": "2843"
},
{
"name": "HTML",
"bytes": "17002"
},
{
"name": "M",
"bytes": "980"
},
{
"name": "Matlab",
"bytes": "284988"
},
{
"name": "Python",
"bytes": "1055361"
},
{
"name": "Shell",
"bytes": "2662"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='SQLApiUser',
fields=[
('id', models.CharField(max_length=255, primary_key=True, serialize=False)),
('password', models.CharField(max_length=255, null=True)),
('permissions', django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(blank=True, max_length=126, null=True), null=True, size=None)),
],
options={
'db_table': 'api_apiuser',
},
),
]
|
{
"content_hash": "6edf459703d2768013fecf451002028b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 111,
"avg_line_length": 28.88888888888889,
"alnum_prop": 0.5717948717948718,
"repo_name": "dimagi/commcare-hq",
"id": "fde12eeba66c30ba7a1c7bffa19affe6bfd3e37a",
"size": "854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/api/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
import json
# read json file into python dictionary
def read_json(json_file):
with open(json_file, 'rb') as data_file:
data = json.loads(data_file.read())
return data
# read csv file into python list of lists
def read_csv(csv_file, delimiter, strip):
with open(csv_file, 'rb') as data_file:
data_list = list()
for line in data_file:
line = line.strip(strip)
tokens = line.split(delimiter)
data_list.append(tokens)
return data_list
|
{
"content_hash": "1ce5f81f0be1a1cf31d4e0c445ba1c14",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 44,
"avg_line_length": 30.41176470588235,
"alnum_prop": 0.6170212765957447,
"repo_name": "baldwmic/portfolio-website",
"id": "02370934935703a83bcccb31c771ca5f26549170",
"size": "517",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/python/stats/serialize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "14945"
},
{
"name": "HTML",
"bytes": "257"
},
{
"name": "JavaScript",
"bytes": "1165"
},
{
"name": "Python",
"bytes": "4666"
},
{
"name": "Shell",
"bytes": "1209"
}
],
"symlink_target": ""
}
|
import time
import asyncio
import aioprocessing as aioproc
import multiprocessing as mp
from collections import defaultdict
from .common import Configuration, MethodRegistry
from .enums import AnswerErrorCode, ProcCmdType
from .server import Server as ConnectionServer
from .transport import ProcCmd
_min_iter_delay = 0.25
class ProgressProxy:
update_interval = 0.1
__send_method = None
__total = 0
__step_index = 0
__step_percent = 0
__last_time_update = 0
def __init__(self, send_method):
assert callable(send_method)
self.__send_method = send_method
def _send(self, percent):
if callable(self.__send_method):
self.__send_method(int(percent * 100))
def clear(self):
self.__total = self.__step_index = 0
self.__last_time_update = self.__step_percent = 0
def total(self, value):
self.clear()
try:
self.__total = int(value)
except (ValueError, TypeError):
pass
else:
self.__step_percent = self.__total / 100.0
def step(self, delta_step=1):
self.__step_index += delta_step
now_time = time.time()
if now_time - self.__last_time_update >= self.update_interval:
self.__last_time_update = now_time
percent = self.__step_index / (self.__step_percent or 1)
if percent > 99.9:
percent = 100.0
self._send(percent)
def done(self):
self._send(100)
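# Illustrative sketch (not part of the original module): how a registered worker
# method could report progress through ProgressProxy. The `progress` argument is
# injected by _worker_handler below when the method is registered with the
# progress option; `handle` and `rows` are hypothetical.
#
#     def import_rows(rows, progress, **kwargs):
#         progress.total(len(rows))   # sets the 100% mark
#         for row in rows:
#             handle(row)             # per-item work
#             progress.step()         # throttled to one update per 0.1 s
#         progress.done()             # forces a final 100% notification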
def _call_method(logger, task, method, params):
try:
return {'result': method(**params)}
except Exception as err:
logger.error((
'Execute method "{}" from task {}'
' error: {}').format(
method, task, err))
return {
'error': {
'code': AnswerErrorCode.ExecError.value,
'message': str(err),
}
}
def _worker_handler(index, options, methods, manage_queue, processing_queue):
conf = Configuration(env_var=None, **options)
logger = conf.get_logger()
logger.info('Start worker {}'.format(index))
active = True
answer = None
current_task_data = {'index': index}
def progress_send(percent_value):
progress_cmd = {'result': {'value': percent_value}}
progress_cmd.update(current_task_data)
        manage_queue.put_nowait(ProcCmd(ProcCmdType.Progress, progress_cmd))
while active:
answer = None
try:
cmd = processing_queue.get_nowait()
except:
cmd = None
else:
if not isinstance(cmd, ProcCmd):
logger.error(
'Unexpected type cmd: {}'.format(cmd.__class__))
cmd = None
if cmd:
if cmd.has_type(ProcCmdType.Exit):
active = False
manage_queue.put_nowait(
ProcCmd(ProcCmdType.Complete, {'index': index}))
elif cmd.has_type(ProcCmdType.Exec):
method_name = cmd.data.get('method')
                task_id = cmd.data.get('task')
params = cmd.data.get('params') or {}
                method, default_options = (
                    methods.get(method_name) or (None, {}))
current_task_data.update(task=task_id)
result = {
'index': index,
'task': task_id,
}
if default_options.get('logger'):
params['logger'] = logger
if default_options.get('progress'):
params['progress'] = ProgressProxy(progress_send)
timeout = default_options.get('timeout')
if timeout:
params['timeout'] = timeout
if isinstance(params, dict):
if callable(method):
logger.info(
'Run task: {} at worker: {} method: {}'.format(
task_id, index, method_name))
result.update(_call_method(
logger=logger,
method=method,
params=params,
task=task_id))
else:
result['error'] = {
'code': AnswerErrorCode.NoMethod.value,
'message': (
'Not found method: '
'"{}"').format(method_name),
}
else:
result['error'] = {
'code': AnswerErrorCode.FormatError.value,
'message': 'Params format error',
}
answer = ProcCmd(ProcCmdType.Result, result)
if active:
if answer is None:
answer = ProcCmd(ProcCmdType.Wait, {'index': index})
manage_queue.put_nowait(answer)
time.sleep(_min_iter_delay)
logger.info('Stop worker {}'.format(index))
@asyncio.coroutine
def _process_worker(index, pool):
worker = aioproc.AioProcess(
target=_worker_handler,
kwargs={
'options': pool._conf.as_dict(),
'index': index,
'manage_queue': pool._manage_queue,
'processing_queue': pool._processing_queue,
'methods': pool._methods,
})
worker.start()
pid = int(worker.pid)
pool._workers[index] = pid
pool.logger.info('\tworker {}: {}'.format(index, pid))
yield from worker.coro_join()
@asyncio.coroutine
def _manage_workers(pool, state, to_connection, from_connection):
"""
Connection <=> workers
"""
assert isinstance(pool, WorkerPool)
workers_pid = pool.get_workers_pid()
while state['active']:
try:
cmd = from_connection.get_nowait()
except asyncio.QueueEmpty:
cmd = None
if cmd:
yield from pool.send_cmd(cmd)
cmd = yield from pool.get_answer()
if cmd:
if cmd.has_type(ProcCmdType.Complete):
index = cmd.index
workers_pid[index] = 0
elif cmd.has_type(ProcCmdType.Wait):
                # worker is idle, waiting for a command from the connection
                pool.logger.debug((
                    'process {index} is waiting for a command from '
                    'the connection').format(**cmd.data))
else:
to_connection.put_nowait(cmd)
state['active'] = any(pid > 0 for __, pid in workers_pid.items())
    pool.logger.info('All workers completed.')
class WorkerPool:
Q_SIZE = 1024
logger = None
_conf = None
_workers = defaultdict(int)
_manage_queue = _processing_queue = None
__done_setup = False
_methods = None
def __init__(self, conf, methods):
assert isinstance(conf, Configuration)
assert methods and isinstance(methods, dict)
self._conf = conf
self._methods = methods
worker_count = conf.get('workers') or 0
assert 1 <= worker_count <= 1024
self.logger = conf.get_logger()
def setup(self, state, to_connection, from_connection, coroutines):
assert not self.__done_setup
self.__done_setup = True
worker_count = int(self._conf.get('workers'))
mq = aioproc.AioQueue(self.Q_SIZE)
pq = aioproc.AioQueue(int(self.Q_SIZE / worker_count))
self._manage_queue, self._processing_queue = mq, pq
for idx in range(1, worker_count + 1):
            coroutines.append(
                asyncio.ensure_future(_process_worker(idx, self)))
        coroutines.append(
            asyncio.ensure_future(
                _manage_workers(
                    self, state, to_connection, from_connection)))
def get_answer(self):
return self._manage_queue.coro_get()
def send_cmd(self, cmd):
return self._processing_queue.coro_put(cmd)
def stop(self):
worker_count = int(self._conf.get('workers'))
for _ in range(worker_count):
self.send_cmd(ProcCmd(ProcCmdType.Exit))
def get_workers_pid(self):
return dict(self._workers)
def launch(conf=None):
configuration = Configuration()
workers = WorkerPool(
conf=configuration,
methods=MethodRegistry().as_dict())
server = ConnectionServer(
conf=configuration,
buffer_size=workers.Q_SIZE)
coroutines = []
server.setup(coroutines)
conn_in_q, conn_out_q = server.get_connection_queue()
workers.setup(server.state, conn_in_q, conn_out_q, coroutines)
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(coroutines))
loop.close()
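# Illustrative sketch (not part of the original module): launch() wires the
# connection server and the worker pool together, so a minimal entry point only
# needs registered methods before calling it. The MethodRegistry registration
# call shown here is an assumption, not a documented API.
#
#     if __name__ == '__main__':
#         registry = MethodRegistry()
#         registry.add('ping', lambda **kwargs: 'pong')  # hypothetical registration
#         launch()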
|
{
"content_hash": "446aeff99d3619db2e1f71768a15f79c",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 77,
"avg_line_length": 30.037542662116042,
"alnum_prop": 0.5305078968299057,
"repo_name": "unaxfromsibiria/roolet",
"id": "4dacd8edfd238e764d970c89d4110612aea12379",
"size": "8891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clients/python3/libroolet/processing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "141906"
},
{
"name": "Python",
"bytes": "82170"
}
],
"symlink_target": ""
}
|
import ast
import builtins
import collections
import decimal
import fractions
import io
import locale
import os
import pickle
import platform
import random
import re
import sys
import traceback
import types
import unittest
import warnings
from operator import neg
from test.support import TESTFN, unlink, run_unittest, check_warnings
from test.support.script_helper import assert_python_ok
try:
import pty, signal
except ImportError:
pty = signal = None
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
class StrSquares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max:
raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(str(n*n))
n += 1
return self.sofar[i]
class BitBucket:
def write(self, line):
pass
test_conv_no_sign = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', 314),
('314 ', 314),
(' \t\t 314 \t\t ', 314),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', 1),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
(str(br'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
(chr(0x200), ValueError),
]
test_conv_sign = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', ValueError),
('314 ', 314),
(' \t\t 314 \t\t ', ValueError),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', ValueError),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
(str(br'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
(chr(0x200), ValueError),
]
class TestFailingBool:
def __bool__(self):
raise RuntimeError
class TestFailingIter:
def __iter__(self):
raise RuntimeError
def filter_char(arg):
return ord(arg) > ord("d")
def map_char(arg):
return chr(ord(arg)+1)
class BuiltinTest(unittest.TestCase):
# Helper to check picklability
def check_iter_pickle(self, it, seq, proto):
itorg = it
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(type(itorg), type(it))
self.assertEqual(list(it), seq)
        # test the iterator after dropping one from it
it = pickle.loads(d)
try:
next(it)
except StopIteration:
return
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), seq[1:])
def test_import(self):
__import__('sys')
__import__('time')
__import__('string')
__import__(name='sys')
__import__(name='time', level=0)
self.assertRaises(ImportError, __import__, 'spamspam')
self.assertRaises(TypeError, __import__, 1, 2, 3, 4)
self.assertRaises(ValueError, __import__, '')
self.assertRaises(TypeError, __import__, 'sys', name='sys')
def test_abs(self):
# int
self.assertEqual(abs(0), 0)
self.assertEqual(abs(1234), 1234)
self.assertEqual(abs(-1234), 1234)
self.assertTrue(abs(-sys.maxsize-1) > 0)
# float
self.assertEqual(abs(0.0), 0.0)
self.assertEqual(abs(3.14), 3.14)
self.assertEqual(abs(-3.14), 3.14)
# str
self.assertRaises(TypeError, abs, 'a')
# bool
self.assertEqual(abs(True), 1)
self.assertEqual(abs(False), 0)
# other
self.assertRaises(TypeError, abs)
self.assertRaises(TypeError, abs, None)
class AbsClass(object):
def __abs__(self):
return -5
self.assertEqual(abs(AbsClass()), -5)
def test_all(self):
self.assertEqual(all([2, 4, 6]), True)
self.assertEqual(all([2, None, 6]), False)
self.assertRaises(RuntimeError, all, [2, TestFailingBool(), 6])
self.assertRaises(RuntimeError, all, TestFailingIter())
self.assertRaises(TypeError, all, 10) # Non-iterable
self.assertRaises(TypeError, all) # No args
self.assertRaises(TypeError, all, [2, 4, 6], []) # Too many args
self.assertEqual(all([]), True) # Empty iterator
self.assertEqual(all([0, TestFailingBool()]), False)# Short-circuit
S = [50, 60]
self.assertEqual(all(x > 42 for x in S), True)
S = [50, 40, 60]
self.assertEqual(all(x > 42 for x in S), False)
def test_any(self):
self.assertEqual(any([None, None, None]), False)
self.assertEqual(any([None, 4, None]), True)
self.assertRaises(RuntimeError, any, [None, TestFailingBool(), 6])
self.assertRaises(RuntimeError, any, TestFailingIter())
self.assertRaises(TypeError, any, 10) # Non-iterable
self.assertRaises(TypeError, any) # No args
self.assertRaises(TypeError, any, [2, 4, 6], []) # Too many args
self.assertEqual(any([]), False) # Empty iterator
self.assertEqual(any([1, TestFailingBool()]), True) # Short-circuit
S = [40, 60, 30]
self.assertEqual(any(x > 42 for x in S), True)
S = [10, 20, 30]
self.assertEqual(any(x > 42 for x in S), False)
def test_ascii(self):
self.assertEqual(ascii(''), '\'\'')
self.assertEqual(ascii(0), '0')
self.assertEqual(ascii(()), '()')
self.assertEqual(ascii([]), '[]')
self.assertEqual(ascii({}), '{}')
a = []
a.append(a)
self.assertEqual(ascii(a), '[[...]]')
a = {}
a[0] = a
self.assertEqual(ascii(a), '{0: {...}}')
# Advanced checks for unicode strings
def _check_uni(s):
self.assertEqual(ascii(s), repr(s))
_check_uni("'")
_check_uni('"')
_check_uni('"\'')
_check_uni('\0')
_check_uni('\r\n\t .')
# Unprintable non-ASCII characters
_check_uni('\x85')
_check_uni('\u1fff')
_check_uni('\U00012fff')
# Lone surrogates
_check_uni('\ud800')
_check_uni('\udfff')
# Issue #9804: surrogates should be joined even for printable
# wide characters (UCS-2 builds).
self.assertEqual(ascii('\U0001d121'), "'\\U0001d121'")
# All together
s = "'\0\"\n\r\t abcd\x85é\U00012fff\uD800\U0001D121xxx."
self.assertEqual(ascii(s),
r"""'\'\x00"\n\r\t abcd\x85\xe9\U00012fff\ud800\U0001d121xxx.'""")
def test_neg(self):
x = -sys.maxsize-1
self.assertTrue(isinstance(x, int))
self.assertEqual(-x, sys.maxsize+1)
def test_callable(self):
self.assertTrue(callable(len))
self.assertFalse(callable("a"))
self.assertTrue(callable(callable))
self.assertTrue(callable(lambda x, y: x + y))
self.assertFalse(callable(__builtins__))
def f(): pass
self.assertTrue(callable(f))
class C1:
def meth(self): pass
self.assertTrue(callable(C1))
c = C1()
self.assertTrue(callable(c.meth))
self.assertFalse(callable(c))
# __call__ is looked up on the class, not the instance
c.__call__ = None
self.assertFalse(callable(c))
c.__call__ = lambda self: 0
self.assertFalse(callable(c))
del c.__call__
self.assertFalse(callable(c))
class C2(object):
def __call__(self): pass
c2 = C2()
self.assertTrue(callable(c2))
c2.__call__ = None
self.assertTrue(callable(c2))
class C3(C2): pass
c3 = C3()
self.assertTrue(callable(c3))
def test_chr(self):
self.assertEqual(chr(32), ' ')
self.assertEqual(chr(65), 'A')
self.assertEqual(chr(97), 'a')
self.assertEqual(chr(0xff), '\xff')
self.assertRaises(ValueError, chr, 1<<24)
self.assertEqual(chr(sys.maxunicode),
str('\\U0010ffff'.encode("ascii"), 'unicode-escape'))
self.assertRaises(TypeError, chr)
self.assertEqual(chr(0x0000FFFF), "\U0000FFFF")
self.assertEqual(chr(0x00010000), "\U00010000")
self.assertEqual(chr(0x00010001), "\U00010001")
self.assertEqual(chr(0x000FFFFE), "\U000FFFFE")
self.assertEqual(chr(0x000FFFFF), "\U000FFFFF")
self.assertEqual(chr(0x00100000), "\U00100000")
self.assertEqual(chr(0x00100001), "\U00100001")
self.assertEqual(chr(0x0010FFFE), "\U0010FFFE")
self.assertEqual(chr(0x0010FFFF), "\U0010FFFF")
self.assertRaises(ValueError, chr, -1)
self.assertRaises(ValueError, chr, 0x00110000)
self.assertRaises((OverflowError, ValueError), chr, 2**32)
def test_cmp(self):
self.assertTrue(not hasattr(builtins, "cmp"))
def test_compile(self):
compile('print(1)\n', '', 'exec')
bom = b'\xef\xbb\xbf'
compile(bom + b'print(1)\n', '', 'exec')
compile(source='pass', filename='?', mode='exec')
compile(dont_inherit=0, filename='tmp', source='0', mode='eval')
compile('pass', '?', dont_inherit=1, mode='exec')
compile(memoryview(b"text"), "name", "exec")
self.assertRaises(TypeError, compile)
self.assertRaises(ValueError, compile, 'print(42)\n', '<string>', 'badmode')
self.assertRaises(ValueError, compile, 'print(42)\n', '<string>', 'single', 0xff)
self.assertRaises(ValueError, compile, chr(0), 'f', 'exec')
self.assertRaises(TypeError, compile, 'pass', '?', 'exec',
mode='eval', source='0', filename='tmp')
compile('print("\xe5")\n', '', 'exec')
self.assertRaises(ValueError, compile, chr(0), 'f', 'exec')
self.assertRaises(ValueError, compile, str('a = 1'), 'f', 'bad')
# test the optimize argument
codestr = '''def f():
"""doc"""
try:
assert False
except AssertionError:
return (True, f.__doc__)
else:
return (False, f.__doc__)
'''
def f(): """doc"""
values = [(-1, __debug__, f.__doc__),
(0, True, 'doc'),
(1, False, 'doc'),
(2, False, None)]
for optval, debugval, docstring in values:
# test both direct compilation and compilation via AST
codeobjs = []
codeobjs.append(compile(codestr, "<test>", "exec", optimize=optval))
tree = ast.parse(codestr)
codeobjs.append(compile(tree, "<test>", "exec", optimize=optval))
for code in codeobjs:
ns = {}
exec(code, ns)
rv = ns['f']()
self.assertEqual(rv, (debugval, docstring))
def test_delattr(self):
sys.spam = 1
delattr(sys, 'spam')
self.assertRaises(TypeError, delattr)
def test_dir(self):
# dir(wrong number of arguments)
self.assertRaises(TypeError, dir, 42, 42)
# dir() - local scope
local_var = 1
self.assertIn('local_var', dir())
# dir(module)
self.assertIn('exit', dir(sys))
# dir(module_with_invalid__dict__)
class Foo(types.ModuleType):
__dict__ = 8
f = Foo("foo")
self.assertRaises(TypeError, dir, f)
# dir(type)
self.assertIn("strip", dir(str))
self.assertNotIn("__mro__", dir(str))
# dir(obj)
class Foo(object):
def __init__(self):
self.x = 7
self.y = 8
self.z = 9
f = Foo()
self.assertIn("y", dir(f))
# dir(obj_no__dict__)
class Foo(object):
__slots__ = []
f = Foo()
self.assertIn("__repr__", dir(f))
# dir(obj_no__class__with__dict__)
# (an ugly trick to cause getattr(f, "__class__") to fail)
class Foo(object):
__slots__ = ["__class__", "__dict__"]
def __init__(self):
self.bar = "wow"
f = Foo()
self.assertNotIn("__repr__", dir(f))
self.assertIn("bar", dir(f))
# dir(obj_using __dir__)
class Foo(object):
def __dir__(self):
return ["kan", "ga", "roo"]
f = Foo()
self.assertTrue(dir(f) == ["ga", "kan", "roo"])
# dir(obj__dir__tuple)
class Foo(object):
def __dir__(self):
return ("b", "c", "a")
res = dir(Foo())
self.assertIsInstance(res, list)
self.assertTrue(res == ["a", "b", "c"])
# dir(obj__dir__not_sequence)
class Foo(object):
def __dir__(self):
return 7
f = Foo()
self.assertRaises(TypeError, dir, f)
# dir(traceback)
try:
raise IndexError
except:
self.assertEqual(len(dir(sys.exc_info()[2])), 4)
# test that object has a __dir__()
self.assertEqual(sorted([].__dir__()), dir([]))
def test_divmod(self):
self.assertEqual(divmod(12, 7), (1, 5))
self.assertEqual(divmod(-12, 7), (-2, 2))
self.assertEqual(divmod(12, -7), (-2, -2))
self.assertEqual(divmod(-12, -7), (1, -5))
self.assertEqual(divmod(-sys.maxsize-1, -1), (sys.maxsize+1, 0))
for num, denom, exp_result in [ (3.25, 1.0, (3.0, 0.25)),
(-3.25, 1.0, (-4.0, 0.75)),
(3.25, -1.0, (-4.0, -0.75)),
(-3.25, -1.0, (3.0, -0.25))]:
result = divmod(num, denom)
self.assertAlmostEqual(result[0], exp_result[0])
self.assertAlmostEqual(result[1], exp_result[1])
self.assertRaises(TypeError, divmod)
def test_eval(self):
self.assertEqual(eval('1+1'), 2)
self.assertEqual(eval(' 1+1\n'), 2)
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
self.assertEqual(eval('a', globals) , 1)
self.assertEqual(eval('a', globals, locals), 1)
self.assertEqual(eval('b', globals, locals), 200)
self.assertEqual(eval('c', globals, locals), 300)
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
bom = b'\xef\xbb\xbf'
self.assertEqual(eval(bom + b'a', globals, locals), 1)
self.assertEqual(eval('"\xe5"', globals), "\xe5")
self.assertRaises(TypeError, eval)
self.assertRaises(TypeError, eval, ())
self.assertRaises(SyntaxError, eval, bom[:2] + b'a')
class X:
def __getitem__(self, key):
raise ValueError
self.assertRaises(ValueError, eval, "foo", {}, X())
def test_general_eval(self):
# Tests that general mappings can be used for the locals argument
class M:
"Test mapping interface versus possible calls from eval()."
def __getitem__(self, key):
if key == 'a':
return 12
raise KeyError
def keys(self):
return list('xyz')
m = M()
g = globals()
self.assertEqual(eval('a', g, m), 12)
self.assertRaises(NameError, eval, 'b', g, m)
self.assertEqual(eval('dir()', g, m), list('xyz'))
self.assertEqual(eval('globals()', g, m), g)
self.assertEqual(eval('locals()', g, m), m)
self.assertRaises(TypeError, eval, 'a', m)
class A:
"Non-mapping"
pass
m = A()
self.assertRaises(TypeError, eval, 'a', g, m)
# Verify that dict subclasses work as well
class D(dict):
def __getitem__(self, key):
if key == 'a':
return 12
return dict.__getitem__(self, key)
def keys(self):
return list('xyz')
d = D()
self.assertEqual(eval('a', g, d), 12)
self.assertRaises(NameError, eval, 'b', g, d)
self.assertEqual(eval('dir()', g, d), list('xyz'))
self.assertEqual(eval('globals()', g, d), g)
self.assertEqual(eval('locals()', g, d), d)
# Verify locals stores (used by list comps)
eval('[locals() for i in (2,3)]', g, d)
eval('[locals() for i in (2,3)]', g, collections.UserDict())
class SpreadSheet:
"Sample application showing nested, calculated lookups."
_cells = {}
def __setitem__(self, key, formula):
self._cells[key] = formula
def __getitem__(self, key):
return eval(self._cells[key], globals(), self)
ss = SpreadSheet()
ss['a1'] = '5'
ss['a2'] = 'a1*6'
ss['a3'] = 'a2*7'
self.assertEqual(ss['a3'], 210)
# Verify that dir() catches a non-list returned by eval
# SF bug #1004669
class C:
def __getitem__(self, item):
raise KeyError(item)
def keys(self):
return 1 # used to be 'a' but that's no longer an error
self.assertRaises(TypeError, eval, 'dir()', globals(), C())
def test_exec(self):
g = {}
exec('z = 1', g)
if '__builtins__' in g:
del g['__builtins__']
self.assertEqual(g, {'z': 1})
exec('z = 1+1', g)
if '__builtins__' in g:
del g['__builtins__']
self.assertEqual(g, {'z': 2})
g = {}
l = {}
with check_warnings():
warnings.filterwarnings("ignore", "global statement",
module="<string>")
exec('global a; a = 1; b = 2', g, l)
if '__builtins__' in g:
del g['__builtins__']
if '__builtins__' in l:
del l['__builtins__']
self.assertEqual((g, l), ({'a': 1}, {'b': 2}))
def test_exec_globals(self):
code = compile("print('Hello World!')", "", "exec")
# no builtin function
self.assertRaisesRegex(NameError, "name 'print' is not defined",
exec, code, {'__builtins__': {}})
# __builtins__ must be a mapping type
self.assertRaises(TypeError,
exec, code, {'__builtins__': 123})
# no __build_class__ function
code = compile("class A: pass", "", "exec")
self.assertRaisesRegex(NameError, "__build_class__ not found",
exec, code, {'__builtins__': {}})
class frozendict_error(Exception):
pass
class frozendict(dict):
def __setitem__(self, key, value):
raise frozendict_error("frozendict is readonly")
# read-only builtins
if isinstance(__builtins__, types.ModuleType):
frozen_builtins = frozendict(__builtins__.__dict__)
else:
frozen_builtins = frozendict(__builtins__)
code = compile("__builtins__['superglobal']=2; print(superglobal)", "test", "exec")
self.assertRaises(frozendict_error,
exec, code, {'__builtins__': frozen_builtins})
# read-only globals
namespace = frozendict({})
code = compile("x=1", "test", "exec")
self.assertRaises(frozendict_error,
exec, code, namespace)
def test_exec_redirected(self):
savestdout = sys.stdout
        sys.stdout = None  # anything that cannot flush()
try:
# Used to raise SystemError('error return without exception set')
exec('a')
except NameError:
pass
finally:
sys.stdout = savestdout
def test_filter(self):
self.assertEqual(list(filter(lambda c: 'a' <= c <= 'z', 'Hello World')), list('elloorld'))
self.assertEqual(list(filter(None, [1, 'hello', [], [3], '', None, 9, 0])), [1, 'hello', [3], 9])
self.assertEqual(list(filter(lambda x: x > 0, [1, -3, 9, 0, 2])), [1, 9, 2])
self.assertEqual(list(filter(None, Squares(10))), [1, 4, 9, 16, 25, 36, 49, 64, 81])
self.assertEqual(list(filter(lambda x: x%2, Squares(10))), [1, 9, 25, 49, 81])
def identity(item):
return 1
filter(identity, Squares(5))
self.assertRaises(TypeError, filter)
class BadSeq(object):
def __getitem__(self, index):
if index<4:
return 42
raise ValueError
self.assertRaises(ValueError, list, filter(lambda x: x, BadSeq()))
def badfunc():
pass
self.assertRaises(TypeError, list, filter(badfunc, range(5)))
# test bltinmodule.c::filtertuple()
self.assertEqual(list(filter(None, (1, 2))), [1, 2])
self.assertEqual(list(filter(lambda x: x>=3, (1, 2, 3, 4))), [3, 4])
self.assertRaises(TypeError, list, filter(42, (1, 2)))
def test_filter_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f1 = filter(filter_char, "abcdeabcde")
f2 = filter(filter_char, "abcdeabcde")
self.check_iter_pickle(f1, list(f2), proto)
def test_getattr(self):
self.assertTrue(getattr(sys, 'stdout') is sys.stdout)
self.assertRaises(TypeError, getattr, sys, 1)
self.assertRaises(TypeError, getattr, sys, 1, "foo")
self.assertRaises(TypeError, getattr)
self.assertRaises(AttributeError, getattr, sys, chr(sys.maxunicode))
# unicode surrogates are not encodable to the default encoding (utf8)
self.assertRaises(AttributeError, getattr, 1, "\uDAD1\uD51E")
def test_hasattr(self):
self.assertTrue(hasattr(sys, 'stdout'))
self.assertRaises(TypeError, hasattr, sys, 1)
self.assertRaises(TypeError, hasattr)
self.assertEqual(False, hasattr(sys, chr(sys.maxunicode)))
# Check that hasattr propagates all exceptions outside of
# AttributeError.
class A:
def __getattr__(self, what):
raise SystemExit
self.assertRaises(SystemExit, hasattr, A(), "b")
class B:
def __getattr__(self, what):
raise ValueError
self.assertRaises(ValueError, hasattr, B(), "b")
def test_hash(self):
hash(None)
self.assertEqual(hash(1), hash(1))
self.assertEqual(hash(1), hash(1.0))
hash('spam')
self.assertEqual(hash('spam'), hash(b'spam'))
hash((0,1,2,3))
def f(): pass
self.assertRaises(TypeError, hash, [])
self.assertRaises(TypeError, hash, {})
# Bug 1536021: Allow hash to return long objects
class X:
def __hash__(self):
return 2**100
self.assertEqual(type(hash(X())), int)
class Z(int):
def __hash__(self):
return self
self.assertEqual(hash(Z(42)), hash(42))
def test_hex(self):
self.assertEqual(hex(16), '0x10')
self.assertEqual(hex(-16), '-0x10')
self.assertRaises(TypeError, hex, {})
def test_id(self):
id(None)
id(1)
id(1.0)
id('spam')
id((0,1,2,3))
id([0,1,2,3])
id({'spam': 1, 'eggs': 2, 'ham': 3})
# Test input() later, alphabetized as if it were raw_input
def test_iter(self):
self.assertRaises(TypeError, iter)
self.assertRaises(TypeError, iter, 42, 42)
lists = [("1", "2"), ["1", "2"], "12"]
for l in lists:
i = iter(l)
self.assertEqual(next(i), '1')
self.assertEqual(next(i), '2')
self.assertRaises(StopIteration, next, i)
def test_isinstance(self):
class C:
pass
class D(C):
pass
class E:
pass
c = C()
d = D()
e = E()
self.assertTrue(isinstance(c, C))
self.assertTrue(isinstance(d, C))
self.assertTrue(not isinstance(e, C))
self.assertTrue(not isinstance(c, D))
self.assertTrue(not isinstance('foo', E))
self.assertRaises(TypeError, isinstance, E, 'foo')
self.assertRaises(TypeError, isinstance)
def test_issubclass(self):
class C:
pass
class D(C):
pass
class E:
pass
c = C()
d = D()
e = E()
self.assertTrue(issubclass(D, C))
self.assertTrue(issubclass(C, C))
self.assertTrue(not issubclass(C, D))
self.assertRaises(TypeError, issubclass, 'foo', E)
self.assertRaises(TypeError, issubclass, E, 'foo')
self.assertRaises(TypeError, issubclass)
def test_len(self):
self.assertEqual(len('123'), 3)
self.assertEqual(len(()), 0)
self.assertEqual(len((1, 2, 3, 4)), 4)
self.assertEqual(len([1, 2, 3, 4]), 4)
self.assertEqual(len({}), 0)
self.assertEqual(len({'a':1, 'b': 2}), 2)
class BadSeq:
def __len__(self):
raise ValueError
self.assertRaises(ValueError, len, BadSeq())
class InvalidLen:
def __len__(self):
return None
self.assertRaises(TypeError, len, InvalidLen())
class FloatLen:
def __len__(self):
return 4.5
self.assertRaises(TypeError, len, FloatLen())
class HugeLen:
def __len__(self):
return sys.maxsize + 1
self.assertRaises(OverflowError, len, HugeLen())
class NoLenMethod(object): pass
self.assertRaises(TypeError, len, NoLenMethod())
def test_map(self):
self.assertEqual(
list(map(lambda x: x*x, range(1,4))),
[1, 4, 9]
)
try:
from math import sqrt
except ImportError:
def sqrt(x):
return pow(x, 0.5)
self.assertEqual(
list(map(lambda x: list(map(sqrt, x)), [[16, 4], [81, 9]])),
[[4.0, 2.0], [9.0, 3.0]]
)
self.assertEqual(
list(map(lambda x, y: x+y, [1,3,2], [9,1,4])),
[10, 4, 6]
)
def plus(*v):
accu = 0
for i in v: accu = accu + i
return accu
self.assertEqual(
list(map(plus, [1, 3, 7])),
[1, 3, 7]
)
self.assertEqual(
list(map(plus, [1, 3, 7], [4, 9, 2])),
[1+4, 3+9, 7+2]
)
self.assertEqual(
list(map(plus, [1, 3, 7], [4, 9, 2], [1, 1, 0])),
[1+4+1, 3+9+1, 7+2+0]
)
self.assertEqual(
list(map(int, Squares(10))),
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
)
def Max(a, b):
if a is None:
return b
if b is None:
return a
return max(a, b)
self.assertEqual(
list(map(Max, Squares(3), Squares(2))),
[0, 1]
)
self.assertRaises(TypeError, map)
self.assertRaises(TypeError, map, lambda x: x, 42)
class BadSeq:
def __iter__(self):
raise ValueError
yield None
self.assertRaises(ValueError, list, map(lambda x: x, BadSeq()))
def badfunc(x):
raise RuntimeError
self.assertRaises(RuntimeError, list, map(badfunc, range(5)))
def test_map_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
m1 = map(map_char, "Is this the real life?")
m2 = map(map_char, "Is this the real life?")
self.check_iter_pickle(m1, list(m2), proto)
def test_max(self):
self.assertEqual(max('123123'), '3')
self.assertEqual(max(1, 2, 3), 3)
self.assertEqual(max((1, 2, 3, 1, 2, 3)), 3)
self.assertEqual(max([1, 2, 3, 1, 2, 3]), 3)
self.assertEqual(max(1, 2, 3.0), 3.0)
self.assertEqual(max(1, 2.0, 3), 3)
self.assertEqual(max(1.0, 2, 3), 3)
self.assertRaises(TypeError, max)
self.assertRaises(TypeError, max, 42)
self.assertRaises(ValueError, max, ())
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, max, BadSeq())
for stmt in (
"max(key=int)", # no args
"max(default=None)",
"max(1, 2, default=None)", # require container for default
"max(default=None, key=int)",
"max(1, key=int)", # single arg not iterable
"max(1, 2, keystone=int)", # wrong keyword
"max(1, 2, key=int, abc=int)", # two many keywords
"max(1, 2, key=1)", # keyfunc is not callable
):
try:
exec(stmt, globals())
except TypeError:
pass
else:
self.fail(stmt)
self.assertEqual(max((1,), key=neg), 1) # one elem iterable
self.assertEqual(max((1,2), key=neg), 1) # two elem iterable
self.assertEqual(max(1, 2, key=neg), 1) # two elems
self.assertEqual(max((), default=None), None) # zero elem iterable
self.assertEqual(max((1,), default=None), 1) # one elem iterable
self.assertEqual(max((1,2), default=None), 2) # two elem iterable
self.assertEqual(max((), default=1, key=neg), 1)
self.assertEqual(max((1, 2), default=3, key=neg), 1)
data = [random.randrange(200) for i in range(100)]
keys = dict((elem, random.randrange(50)) for elem in data)
f = keys.__getitem__
self.assertEqual(max(data, key=f),
sorted(reversed(data), key=f)[-1])
def test_min(self):
self.assertEqual(min('123123'), '1')
self.assertEqual(min(1, 2, 3), 1)
self.assertEqual(min((1, 2, 3, 1, 2, 3)), 1)
self.assertEqual(min([1, 2, 3, 1, 2, 3]), 1)
self.assertEqual(min(1, 2, 3.0), 1)
self.assertEqual(min(1, 2.0, 3), 1)
self.assertEqual(min(1.0, 2, 3), 1.0)
self.assertRaises(TypeError, min)
self.assertRaises(TypeError, min, 42)
self.assertRaises(ValueError, min, ())
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, min, BadSeq())
for stmt in (
"min(key=int)", # no args
"min(default=None)",
"min(1, 2, default=None)", # require container for default
"min(default=None, key=int)",
"min(1, key=int)", # single arg not iterable
"min(1, 2, keystone=int)", # wrong keyword
"min(1, 2, key=int, abc=int)", # two many keywords
"min(1, 2, key=1)", # keyfunc is not callable
):
try:
exec(stmt, globals())
except TypeError:
pass
else:
self.fail(stmt)
self.assertEqual(min((1,), key=neg), 1) # one elem iterable
self.assertEqual(min((1,2), key=neg), 2) # two elem iterable
self.assertEqual(min(1, 2, key=neg), 2) # two elems
self.assertEqual(min((), default=None), None) # zero elem iterable
self.assertEqual(min((1,), default=None), 1) # one elem iterable
self.assertEqual(min((1,2), default=None), 1) # two elem iterable
self.assertEqual(min((), default=1, key=neg), 1)
self.assertEqual(min((1, 2), default=1, key=neg), 2)
data = [random.randrange(200) for i in range(100)]
keys = dict((elem, random.randrange(50)) for elem in data)
f = keys.__getitem__
self.assertEqual(min(data, key=f),
sorted(data, key=f)[0])
def test_next(self):
it = iter(range(2))
self.assertEqual(next(it), 0)
self.assertEqual(next(it), 1)
self.assertRaises(StopIteration, next, it)
self.assertRaises(StopIteration, next, it)
self.assertEqual(next(it, 42), 42)
class Iter(object):
def __iter__(self):
return self
def __next__(self):
raise StopIteration
it = iter(Iter())
self.assertEqual(next(it, 42), 42)
self.assertRaises(StopIteration, next, it)
def gen():
yield 1
return
it = gen()
self.assertEqual(next(it), 1)
self.assertRaises(StopIteration, next, it)
self.assertEqual(next(it, 42), 42)
def test_oct(self):
self.assertEqual(oct(100), '0o144')
self.assertEqual(oct(-100), '-0o144')
self.assertRaises(TypeError, oct, ())
def write_testfile(self):
# NB the first 4 lines are also used to test input, below
fp = open(TESTFN, 'w')
self.addCleanup(unlink, TESTFN)
with fp:
fp.write('1+1\n')
fp.write('The quick brown fox jumps over the lazy dog')
fp.write('.\n')
fp.write('Dear John\n')
fp.write('XXX'*100)
fp.write('YYY'*100)
def test_open(self):
self.write_testfile()
fp = open(TESTFN, 'r')
with fp:
self.assertEqual(fp.readline(4), '1+1\n')
self.assertEqual(fp.readline(), 'The quick brown fox jumps over the lazy dog.\n')
self.assertEqual(fp.readline(4), 'Dear')
self.assertEqual(fp.readline(100), ' John\n')
self.assertEqual(fp.read(300), 'XXX'*100)
self.assertEqual(fp.read(1000), 'YYY'*100)
def test_open_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different than the current
# locale encoding to check that open() uses the current locale
# encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
self.write_testfile()
current_locale_encoding = locale.getpreferredencoding(False)
fp = open(TESTFN, 'w')
with fp:
self.assertEqual(fp.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
def test_open_non_inheritable(self):
fileobj = open(__file__)
with fileobj:
self.assertFalse(os.get_inheritable(fileobj.fileno()))
def test_ord(self):
self.assertEqual(ord(' '), 32)
self.assertEqual(ord('A'), 65)
self.assertEqual(ord('a'), 97)
self.assertEqual(ord('\x80'), 128)
self.assertEqual(ord('\xff'), 255)
self.assertEqual(ord(b' '), 32)
self.assertEqual(ord(b'A'), 65)
self.assertEqual(ord(b'a'), 97)
self.assertEqual(ord(b'\x80'), 128)
self.assertEqual(ord(b'\xff'), 255)
self.assertEqual(ord(chr(sys.maxunicode)), sys.maxunicode)
self.assertRaises(TypeError, ord, 42)
self.assertEqual(ord(chr(0x10FFFF)), 0x10FFFF)
self.assertEqual(ord("\U0000FFFF"), 0x0000FFFF)
self.assertEqual(ord("\U00010000"), 0x00010000)
self.assertEqual(ord("\U00010001"), 0x00010001)
self.assertEqual(ord("\U000FFFFE"), 0x000FFFFE)
self.assertEqual(ord("\U000FFFFF"), 0x000FFFFF)
self.assertEqual(ord("\U00100000"), 0x00100000)
self.assertEqual(ord("\U00100001"), 0x00100001)
self.assertEqual(ord("\U0010FFFE"), 0x0010FFFE)
self.assertEqual(ord("\U0010FFFF"), 0x0010FFFF)
def test_pow(self):
self.assertEqual(pow(0,0), 1)
self.assertEqual(pow(0,1), 0)
self.assertEqual(pow(1,0), 1)
self.assertEqual(pow(1,1), 1)
self.assertEqual(pow(2,0), 1)
self.assertEqual(pow(2,10), 1024)
self.assertEqual(pow(2,20), 1024*1024)
self.assertEqual(pow(2,30), 1024*1024*1024)
self.assertEqual(pow(-2,0), 1)
self.assertEqual(pow(-2,1), -2)
self.assertEqual(pow(-2,2), 4)
self.assertEqual(pow(-2,3), -8)
self.assertAlmostEqual(pow(0.,0), 1.)
self.assertAlmostEqual(pow(0.,1), 0.)
self.assertAlmostEqual(pow(1.,0), 1.)
self.assertAlmostEqual(pow(1.,1), 1.)
self.assertAlmostEqual(pow(2.,0), 1.)
self.assertAlmostEqual(pow(2.,10), 1024.)
self.assertAlmostEqual(pow(2.,20), 1024.*1024.)
self.assertAlmostEqual(pow(2.,30), 1024.*1024.*1024.)
self.assertAlmostEqual(pow(-2.,0), 1.)
self.assertAlmostEqual(pow(-2.,1), -2.)
self.assertAlmostEqual(pow(-2.,2), 4.)
self.assertAlmostEqual(pow(-2.,3), -8.)
for x in 2, 2.0:
for y in 10, 10.0:
for z in 1000, 1000.0:
if isinstance(x, float) or \
isinstance(y, float) or \
isinstance(z, float):
self.assertRaises(TypeError, pow, x, y, z)
else:
self.assertAlmostEqual(pow(x, y, z), 24.0)
self.assertAlmostEqual(pow(-1, 0.5), 1j)
self.assertAlmostEqual(pow(-1, 1/3), 0.5 + 0.8660254037844386j)
self.assertRaises(ValueError, pow, -1, -2, 3)
self.assertRaises(ValueError, pow, 1, 2, 0)
self.assertRaises(TypeError, pow)
def test_input(self):
self.write_testfile()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
savestdout = sys.stdout # Eats the echo
try:
sys.stdin = fp
sys.stdout = BitBucket()
self.assertEqual(input(), "1+1")
self.assertEqual(input(), 'The quick brown fox jumps over the lazy dog.')
self.assertEqual(input('testing\n'), 'Dear John')
# SF 1535165: don't segfault on closed stdin
# sys.stdout must be a regular file for triggering
sys.stdout = savestdout
sys.stdin.close()
self.assertRaises(ValueError, input)
sys.stdout = BitBucket()
sys.stdin = io.StringIO("NULL\0")
self.assertRaises(TypeError, input, 42, 42)
sys.stdin = io.StringIO(" 'whitespace'")
self.assertEqual(input(), " 'whitespace'")
sys.stdin = io.StringIO()
self.assertRaises(EOFError, input)
del sys.stdout
self.assertRaises(RuntimeError, input, 'prompt')
del sys.stdin
self.assertRaises(RuntimeError, input, 'prompt')
finally:
sys.stdin = savestdin
sys.stdout = savestdout
fp.close()
# test_int(): see test_int.py for tests of built-in function int().
def test_repr(self):
self.assertEqual(repr(''), '\'\'')
self.assertEqual(repr(0), '0')
self.assertEqual(repr(()), '()')
self.assertEqual(repr([]), '[]')
self.assertEqual(repr({}), '{}')
a = []
a.append(a)
self.assertEqual(repr(a), '[[...]]')
a = {}
a[0] = a
self.assertEqual(repr(a), '{0: {...}}')
def test_round(self):
self.assertEqual(round(0.0), 0.0)
self.assertEqual(type(round(0.0)), int)
self.assertEqual(round(1.0), 1.0)
self.assertEqual(round(10.0), 10.0)
self.assertEqual(round(1000000000.0), 1000000000.0)
self.assertEqual(round(1e20), 1e20)
self.assertEqual(round(-1.0), -1.0)
self.assertEqual(round(-10.0), -10.0)
self.assertEqual(round(-1000000000.0), -1000000000.0)
self.assertEqual(round(-1e20), -1e20)
self.assertEqual(round(0.1), 0.0)
self.assertEqual(round(1.1), 1.0)
self.assertEqual(round(10.1), 10.0)
self.assertEqual(round(1000000000.1), 1000000000.0)
self.assertEqual(round(-1.1), -1.0)
self.assertEqual(round(-10.1), -10.0)
self.assertEqual(round(-1000000000.1), -1000000000.0)
self.assertEqual(round(0.9), 1.0)
self.assertEqual(round(9.9), 10.0)
self.assertEqual(round(999999999.9), 1000000000.0)
self.assertEqual(round(-0.9), -1.0)
self.assertEqual(round(-9.9), -10.0)
self.assertEqual(round(-999999999.9), -1000000000.0)
self.assertEqual(round(-8.0, -1), -10.0)
self.assertEqual(type(round(-8.0, -1)), float)
self.assertEqual(type(round(-8.0, 0)), float)
self.assertEqual(type(round(-8.0, 1)), float)
# Check even / odd rounding behaviour
self.assertEqual(round(5.5), 6)
self.assertEqual(round(6.5), 6)
self.assertEqual(round(-5.5), -6)
self.assertEqual(round(-6.5), -6)
# Check behavior on ints
self.assertEqual(round(0), 0)
self.assertEqual(round(8), 8)
self.assertEqual(round(-8), -8)
self.assertEqual(type(round(0)), int)
self.assertEqual(type(round(-8, -1)), int)
self.assertEqual(type(round(-8, 0)), int)
self.assertEqual(type(round(-8, 1)), int)
# test new kwargs
self.assertEqual(round(number=-8.0, ndigits=-1), -10.0)
self.assertRaises(TypeError, round)
# test generic rounding delegation for reals
class TestRound:
def __round__(self):
return 23
class TestNoRound:
pass
self.assertEqual(round(TestRound()), 23)
self.assertRaises(TypeError, round, 1, 2, 3)
self.assertRaises(TypeError, round, TestNoRound())
t = TestNoRound()
t.__round__ = lambda *args: args
self.assertRaises(TypeError, round, t)
self.assertRaises(TypeError, round, t, 0)
# Some versions of glibc for alpha have a bug that affects
# float -> integer rounding (floor, ceil, rint, round) for
# values in the range [2**52, 2**53). See:
#
# http://sources.redhat.com/bugzilla/show_bug.cgi?id=5350
#
# We skip this test on Linux/alpha if it would fail.
linux_alpha = (platform.system().startswith('Linux') and
platform.machine().startswith('alpha'))
system_round_bug = round(5e15+1) != 5e15+1
@unittest.skipIf(linux_alpha and system_round_bug,
"test will fail; failure is probably due to a "
"buggy system round function")
def test_round_large(self):
# Issue #1869: integral floats should remain unchanged
self.assertEqual(round(5e15-1), 5e15-1)
self.assertEqual(round(5e15), 5e15)
self.assertEqual(round(5e15+1), 5e15+1)
self.assertEqual(round(5e15+2), 5e15+2)
self.assertEqual(round(5e15+3), 5e15+3)
def test_bug_27936(self):
# Verify that ndigits=None means the same as passing in no argument
for x in [1234,
1234.56,
decimal.Decimal('1234.56'),
fractions.Fraction(123456, 100)]:
self.assertEqual(round(x, None), round(x))
self.assertEqual(type(round(x, None)), type(round(x)))
def test_setattr(self):
setattr(sys, 'spam', 1)
self.assertEqual(sys.spam, 1)
self.assertRaises(TypeError, setattr, sys, 1, 'spam')
self.assertRaises(TypeError, setattr)
# test_str(): see test_unicode.py and test_bytes.py for str() tests.
def test_sum(self):
self.assertEqual(sum([]), 0)
self.assertEqual(sum(list(range(2,8))), 27)
self.assertEqual(sum(iter(list(range(2,8)))), 27)
self.assertEqual(sum(Squares(10)), 285)
self.assertEqual(sum(iter(Squares(10))), 285)
self.assertEqual(sum([[1], [2], [3]], []), [1, 2, 3])
self.assertRaises(TypeError, sum)
self.assertRaises(TypeError, sum, 42)
self.assertRaises(TypeError, sum, ['a', 'b', 'c'])
self.assertRaises(TypeError, sum, ['a', 'b', 'c'], '')
self.assertRaises(TypeError, sum, [b'a', b'c'], b'')
values = [bytearray(b'a'), bytearray(b'b')]
self.assertRaises(TypeError, sum, values, bytearray(b''))
self.assertRaises(TypeError, sum, [[1], [2], [3]])
self.assertRaises(TypeError, sum, [{2:3}])
self.assertRaises(TypeError, sum, [{2:3}]*2, {2:3})
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, sum, BadSeq())
empty = []
sum(([x] for x in range(10)), empty)
self.assertEqual(empty, [])
def test_type(self):
self.assertEqual(type(''), type('123'))
self.assertNotEqual(type(''), type(()))
# We don't want self in vars(), so these are static methods
@staticmethod
def get_vars_f0():
return vars()
@staticmethod
def get_vars_f2():
BuiltinTest.get_vars_f0()
a = 1
b = 2
return vars()
class C_get_vars(object):
def getDict(self):
return {'a':2}
__dict__ = property(fget=getDict)
def test_vars(self):
self.assertEqual(set(vars()), set(dir()))
self.assertEqual(set(vars(sys)), set(dir(sys)))
self.assertEqual(self.get_vars_f0(), {})
self.assertEqual(self.get_vars_f2(), {'a': 1, 'b': 2})
self.assertRaises(TypeError, vars, 42, 42)
self.assertRaises(TypeError, vars, 42)
self.assertEqual(vars(self.C_get_vars()), {'a':2})
def test_zip(self):
a = (1, 2, 3)
b = (4, 5, 6)
t = [(1, 4), (2, 5), (3, 6)]
self.assertEqual(list(zip(a, b)), t)
b = [4, 5, 6]
self.assertEqual(list(zip(a, b)), t)
b = (4, 5, 6, 7)
self.assertEqual(list(zip(a, b)), t)
class I:
def __getitem__(self, i):
if i < 0 or i > 2: raise IndexError
return i + 4
self.assertEqual(list(zip(a, I())), t)
self.assertEqual(list(zip()), [])
self.assertEqual(list(zip(*[])), [])
self.assertRaises(TypeError, zip, None)
class G:
pass
self.assertRaises(TypeError, zip, a, G())
self.assertRaises(RuntimeError, zip, a, TestFailingIter())
# Make sure zip doesn't try to allocate a billion elements for the
# result list when one of its arguments doesn't say how long it is.
# A MemoryError is the most likely failure mode.
class SequenceWithoutALength:
def __getitem__(self, i):
if i == 5:
raise IndexError
else:
return i
self.assertEqual(
list(zip(SequenceWithoutALength(), range(2**30))),
list(enumerate(range(5)))
)
class BadSeq:
def __getitem__(self, i):
if i == 5:
raise ValueError
else:
return i
self.assertRaises(ValueError, list, zip(BadSeq(), BadSeq()))
def test_zip_pickle(self):
a = (1, 2, 3)
b = (4, 5, 6)
t = [(1, 4), (2, 5), (3, 6)]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
z1 = zip(a, b)
self.check_iter_pickle(z1, t, proto)
def test_format(self):
# Test the basic machinery of the format() builtin. Don't test
# the specifics of the various formatters
self.assertEqual(format(3, ''), '3')
# Returns some classes to use for various tests. There's
# an old-style version, and a new-style version
def classes_new():
class A(object):
def __init__(self, x):
self.x = x
def __format__(self, format_spec):
return str(self.x) + format_spec
class DerivedFromA(A):
pass
class Simple(object): pass
class DerivedFromSimple(Simple):
def __init__(self, x):
self.x = x
def __format__(self, format_spec):
return str(self.x) + format_spec
class DerivedFromSimple2(DerivedFromSimple): pass
return A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2
def class_test(A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2):
self.assertEqual(format(A(3), 'spec'), '3spec')
self.assertEqual(format(DerivedFromA(4), 'spec'), '4spec')
self.assertEqual(format(DerivedFromSimple(5), 'abc'), '5abc')
self.assertEqual(format(DerivedFromSimple2(10), 'abcdef'),
'10abcdef')
class_test(*classes_new())
def empty_format_spec(value):
# test that:
# format(x, '') == str(x)
# format(x) == str(x)
self.assertEqual(format(value, ""), str(value))
self.assertEqual(format(value), str(value))
# for builtin types, format(x, "") == str(x)
empty_format_spec(17**13)
empty_format_spec(1.0)
empty_format_spec(3.1415e104)
empty_format_spec(-3.1415e104)
empty_format_spec(3.1415e-104)
empty_format_spec(-3.1415e-104)
empty_format_spec(object)
empty_format_spec(None)
# TypeError because self.__format__ returns the wrong type
class BadFormatResult:
def __format__(self, format_spec):
return 1.0
self.assertRaises(TypeError, format, BadFormatResult(), "")
# TypeError because format_spec is not unicode or str
self.assertRaises(TypeError, format, object(), 4)
self.assertRaises(TypeError, format, object(), object())
# tests for object.__format__ really belong elsewhere, but
# there's no good place to put them
x = object().__format__('')
self.assertTrue(x.startswith('<object object at'))
# first argument to object.__format__ must be string
self.assertRaises(TypeError, object().__format__, 3)
self.assertRaises(TypeError, object().__format__, object())
self.assertRaises(TypeError, object().__format__, None)
# --------------------------------------------------------------------
# Issue #7994: object.__format__ with a non-empty format string is
# disallowed
class A:
def __format__(self, fmt_str):
return format('', fmt_str)
self.assertEqual(format(A()), '')
self.assertEqual(format(A(), ''), '')
self.assertEqual(format(A(), 's'), '')
class B:
pass
class C(object):
pass
for cls in [object, B, C]:
obj = cls()
self.assertEqual(format(obj), str(obj))
self.assertEqual(format(obj, ''), str(obj))
with self.assertRaisesRegex(TypeError,
r'\b%s\b' % re.escape(cls.__name__)):
format(obj, 's')
# --------------------------------------------------------------------
# make sure we can take a subclass of str as a format spec
class DerivedFromStr(str): pass
self.assertEqual(format(0, DerivedFromStr('10')), ' 0')
def test_bin(self):
self.assertEqual(bin(0), '0b0')
self.assertEqual(bin(1), '0b1')
self.assertEqual(bin(-1), '-0b1')
self.assertEqual(bin(2**65), '0b1' + '0' * 65)
self.assertEqual(bin(2**65-1), '0b' + '1' * 65)
self.assertEqual(bin(-(2**65)), '-0b1' + '0' * 65)
self.assertEqual(bin(-(2**65-1)), '-0b' + '1' * 65)
def test_bytearray_translate(self):
x = bytearray(b"abc")
self.assertRaises(ValueError, x.translate, b"1", 1)
self.assertRaises(TypeError, x.translate, b"1"*256, 1)
def test_construct_singletons(self):
for const in None, Ellipsis, NotImplemented:
tp = type(const)
self.assertIs(tp(), const)
self.assertRaises(TypeError, tp, 1, 2)
self.assertRaises(TypeError, tp, a=1, b=2)
@unittest.skipUnless(pty, "the pty and signal modules must be available")
class PtyTests(unittest.TestCase):
"""Tests that use a pseudo terminal to guarantee stdin and stdout are
terminals in the test environment"""
def run_child(self, child, terminal_input):
r, w = os.pipe() # Pipe test results from child back to parent
try:
pid, fd = pty.fork()
except (OSError, AttributeError) as e:
os.close(r)
os.close(w)
self.skipTest("pty.fork() raised {}".format(e))
raise
if pid == 0:
# Child
try:
# Make sure we don't get stuck if there's a problem
signal.alarm(2)
os.close(r)
with open(w, "w") as wpipe:
child(wpipe)
except:
traceback.print_exc()
finally:
# We don't want to return to unittest...
os._exit(0)
# Parent
os.close(w)
os.write(fd, terminal_input)
# Get results from the pipe
with open(r, "r") as rpipe:
lines = []
while True:
line = rpipe.readline().strip()
if line == "":
# The other end was closed => the child exited
break
lines.append(line)
        # Check that a result came back and corresponds to the user's terminal input
if len(lines) != 2:
# Something went wrong, try to get at stderr
# Beware of Linux raising EIO when the slave is closed
child_output = bytearray()
while True:
try:
chunk = os.read(fd, 3000)
except OSError: # Assume EIO
break
if not chunk:
break
child_output.extend(chunk)
os.close(fd)
child_output = child_output.decode("ascii", "ignore")
self.fail("got %d lines in pipe but expected 2, child output was:\n%s"
% (len(lines), child_output))
os.close(fd)
return lines
def check_input_tty(self, prompt, terminal_input, stdio_encoding=None):
if not sys.stdin.isatty() or not sys.stdout.isatty():
self.skipTest("stdin and stdout must be ttys")
def child(wpipe):
# Check the error handlers are accounted for
if stdio_encoding:
sys.stdin = io.TextIOWrapper(sys.stdin.detach(),
encoding=stdio_encoding,
errors='surrogateescape')
sys.stdout = io.TextIOWrapper(sys.stdout.detach(),
encoding=stdio_encoding,
errors='replace')
print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe)
print(ascii(input(prompt)), file=wpipe)
lines = self.run_child(child, terminal_input + b"\r\n")
# Check we did exercise the GNU readline path
self.assertIn(lines[0], {'tty = True', 'tty = False'})
if lines[0] != 'tty = True':
self.skipTest("standard IO in should have been a tty")
input_result = eval(lines[1]) # ascii() -> eval() roundtrip
if stdio_encoding:
expected = terminal_input.decode(stdio_encoding, 'surrogateescape')
else:
expected = terminal_input.decode(sys.stdin.encoding) # what else?
self.assertEqual(input_result, expected)
def test_input_tty(self):
# Test input() functionality when wired to a tty (the code path
# is different and invokes GNU readline if available).
self.check_input_tty("prompt", b"quux")
def test_input_tty_non_ascii(self):
# Check stdin/stdout encoding is used when invoking GNU readline
self.check_input_tty("prompté", b"quux\xe9", "utf-8")
def test_input_tty_non_ascii_unicode_errors(self):
# Check stdin/stdout error handler is used when invoking GNU readline
self.check_input_tty("prompté", b"quux\xe9", "ascii")
def test_input_no_stdout_fileno(self):
# Issue #24402: If stdin is the original terminal but stdout.fileno()
# fails, do not use the original stdout file descriptor
def child(wpipe):
print("stdin.isatty():", sys.stdin.isatty(), file=wpipe)
sys.stdout = io.StringIO() # Does not support fileno()
input("prompt")
print("captured:", ascii(sys.stdout.getvalue()), file=wpipe)
lines = self.run_child(child, b"quux\r")
expected = (
"stdin.isatty(): True",
"captured: 'prompt'",
)
self.assertSequenceEqual(lines, expected)
class TestSorted(unittest.TestCase):
def test_basic(self):
data = list(range(100))
copy = data[:]
random.shuffle(copy)
self.assertEqual(data, sorted(copy))
self.assertNotEqual(data, copy)
data.reverse()
random.shuffle(copy)
self.assertEqual(data, sorted(copy, key=lambda x: -x))
self.assertNotEqual(data, copy)
random.shuffle(copy)
self.assertEqual(data, sorted(copy, reverse=1))
self.assertNotEqual(data, copy)
def test_inputtypes(self):
s = 'abracadabra'
types = [list, tuple, str]
for T in types:
self.assertEqual(sorted(s), sorted(T(s)))
s = ''.join(set(s)) # unique letters only
types = [str, set, frozenset, list, tuple, dict.fromkeys]
for T in types:
self.assertEqual(sorted(s), sorted(T(s)))
def test_baddecorator(self):
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
self.assertRaises(TypeError, sorted, data, None, lambda x,y: 0)
class ShutdownTest(unittest.TestCase):
def test_cleanup(self):
# Issue #19255: builtins are still available at shutdown
code = """if 1:
import builtins
import sys
class C:
def __del__(self):
print("before")
# Check that builtins still exist
len(())
print("after")
c = C()
# Make this module survive until builtins and sys are cleaned
builtins.here = sys.modules[__name__]
sys.here = sys.modules[__name__]
# Create a reference loop so that this module needs to go
# through a GC phase.
here = sys.modules[__name__]
"""
# Issue #20599: Force ASCII encoding to get a codec implemented in C,
# otherwise the codec may be unloaded before C.__del__() is called, and
# so print("before") fails because the codec cannot be used to encode
# "before" to sys.stdout.encoding. For example, on Windows,
# sys.stdout.encoding is the OEM code page and these code pages are
# implemented in Python
rc, out, err = assert_python_ok("-c", code,
PYTHONIOENCODING="ascii")
self.assertEqual(["before", "after"], out.decode().splitlines())
class TestType(unittest.TestCase):
def test_new_type(self):
A = type('A', (), {})
self.assertEqual(A.__name__, 'A')
self.assertEqual(A.__qualname__, 'A')
self.assertEqual(A.__module__, __name__)
self.assertEqual(A.__bases__, (object,))
self.assertIs(A.__base__, object)
x = A()
self.assertIs(type(x), A)
self.assertIs(x.__class__, A)
class B:
def ham(self):
return 'ham%d' % self
C = type('C', (B, int), {'spam': lambda self: 'spam%s' % self})
self.assertEqual(C.__name__, 'C')
self.assertEqual(C.__qualname__, 'C')
self.assertEqual(C.__module__, __name__)
self.assertEqual(C.__bases__, (B, int))
self.assertIs(C.__base__, int)
self.assertIn('spam', C.__dict__)
self.assertNotIn('ham', C.__dict__)
x = C(42)
self.assertEqual(x, 42)
self.assertIs(type(x), C)
self.assertIs(x.__class__, C)
self.assertEqual(x.ham(), 'ham42')
self.assertEqual(x.spam(), 'spam42')
self.assertEqual(x.to_bytes(2, 'little'), b'\x2a\x00')
def test_type_nokwargs(self):
with self.assertRaises(TypeError):
type('a', (), {}, x=5)
with self.assertRaises(TypeError):
type('a', (), dict={})
def test_type_name(self):
for name in 'A', '\xc4', '\U0001f40d', 'B.A', '42', '':
with self.subTest(name=name):
A = type(name, (), {})
self.assertEqual(A.__name__, name)
self.assertEqual(A.__qualname__, name)
self.assertEqual(A.__module__, __name__)
with self.assertRaises(ValueError):
type('A\x00B', (), {})
with self.assertRaises(ValueError):
type('A\udcdcB', (), {})
with self.assertRaises(TypeError):
type(b'A', (), {})
C = type('C', (), {})
for name in 'A', '\xc4', '\U0001f40d', 'B.A', '42', '':
with self.subTest(name=name):
C.__name__ = name
self.assertEqual(C.__name__, name)
self.assertEqual(C.__qualname__, 'C')
self.assertEqual(C.__module__, __name__)
A = type('C', (), {})
with self.assertRaises(ValueError):
A.__name__ = 'A\x00B'
self.assertEqual(A.__name__, 'C')
with self.assertRaises(ValueError):
A.__name__ = 'A\udcdcB'
self.assertEqual(A.__name__, 'C')
with self.assertRaises(TypeError):
A.__name__ = b'A'
self.assertEqual(A.__name__, 'C')
def test_type_qualname(self):
A = type('A', (), {'__qualname__': 'B.C'})
self.assertEqual(A.__name__, 'A')
self.assertEqual(A.__qualname__, 'B.C')
self.assertEqual(A.__module__, __name__)
with self.assertRaises(TypeError):
type('A', (), {'__qualname__': b'B'})
self.assertEqual(A.__qualname__, 'B.C')
A.__qualname__ = 'D.E'
self.assertEqual(A.__name__, 'A')
self.assertEqual(A.__qualname__, 'D.E')
with self.assertRaises(TypeError):
A.__qualname__ = b'B'
self.assertEqual(A.__qualname__, 'D.E')
def test_type_doc(self):
for doc in 'x', '\xc4', '\U0001f40d', 'x\x00y', b'x', 42, None:
A = type('A', (), {'__doc__': doc})
self.assertEqual(A.__doc__, doc)
with self.assertRaises(UnicodeEncodeError):
type('A', (), {'__doc__': 'x\udcdcy'})
A = type('A', (), {})
self.assertEqual(A.__doc__, None)
for doc in 'x', '\xc4', '\U0001f40d', 'x\x00y', 'x\udcdcy', b'x', 42, None:
A.__doc__ = doc
self.assertEqual(A.__doc__, doc)
def test_bad_args(self):
with self.assertRaises(TypeError):
type()
with self.assertRaises(TypeError):
type('A', ())
with self.assertRaises(TypeError):
type('A', (), {}, ())
with self.assertRaises(TypeError):
type('A', (), dict={})
with self.assertRaises(TypeError):
type('A', [], {})
with self.assertRaises(TypeError):
type('A', (), types.MappingProxyType({}))
with self.assertRaises(TypeError):
type('A', (None,), {})
with self.assertRaises(TypeError):
type('A', (bool,), {})
with self.assertRaises(TypeError):
type('A', (int, str), {})
def test_bad_slots(self):
with self.assertRaises(TypeError):
type('A', (), {'__slots__': b'x'})
with self.assertRaises(TypeError):
type('A', (int,), {'__slots__': 'x'})
with self.assertRaises(TypeError):
type('A', (), {'__slots__': ''})
with self.assertRaises(TypeError):
type('A', (), {'__slots__': '42'})
with self.assertRaises(TypeError):
type('A', (), {'__slots__': 'x\x00y'})
with self.assertRaises(ValueError):
type('A', (), {'__slots__': 'x', 'x': 0})
with self.assertRaises(TypeError):
type('A', (), {'__slots__': ('__dict__', '__dict__')})
with self.assertRaises(TypeError):
type('A', (), {'__slots__': ('__weakref__', '__weakref__')})
class B:
pass
with self.assertRaises(TypeError):
type('A', (B,), {'__slots__': '__dict__'})
with self.assertRaises(TypeError):
type('A', (B,), {'__slots__': '__weakref__'})
def load_tests(loader, tests, pattern):
from doctest import DocTestSuite
tests.addTest(DocTestSuite(builtins))
return tests
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "2424e5aca743611cb4e1761bf5587dde",
"timestamp": "",
"source": "github",
"line_count": 1829,
"max_line_length": 105,
"avg_line_length": 36.09021323127392,
"alnum_prop": 0.5253071550849127,
"repo_name": "anbangleo/NlsdeWeb",
"id": "a792099f10a48ee79e38e7ec15de3446791453f5",
"size": "66053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python-3.6.0/Lib/test/test_builtin.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "593890"
},
{
"name": "Batchfile",
"bytes": "44928"
},
{
"name": "C",
"bytes": "16572509"
},
{
"name": "C++",
"bytes": "442285"
},
{
"name": "CSS",
"bytes": "8574"
},
{
"name": "CoffeeScript",
"bytes": "45748"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "DTrace",
"bytes": "2053"
},
{
"name": "HTML",
"bytes": "259587"
},
{
"name": "JavaScript",
"bytes": "87380"
},
{
"name": "M4",
"bytes": "231072"
},
{
"name": "Makefile",
"bytes": "278201"
},
{
"name": "Objective-C",
"bytes": "26739"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Python",
"bytes": "26396189"
},
{
"name": "Roff",
"bytes": "254982"
},
{
"name": "Shell",
"bytes": "495563"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "70"
}
],
"symlink_target": ""
}
|
import typing as t
from .scope_definition import MutableScope
ScopeBuilderScopes = t.Union[
None, str, t.Tuple[str, str], t.List[t.Union[str, t.Tuple[str, str]]]
]
class ScopeBuilder:
"""
Utility class for creating scope strings for a specified resource server.
:param resource_server: The identifier, usually a domain name or a UUID, for the
resource server to return scopes for.
:type resource_server: str
:param known_scopes: A list of scope names to pre-populate on this instance. This
will set attributes on the instance using the URN scope format.
:type known_scopes: list of str, optional
:param known_url_scopes: A list of scope names to pre-populate on this instance.
This will set attributes on the instance using the URL scope format.
:type known_url_scopes: list of str, optional
"""
_classattr_scope_names: t.List[str] = []
def __init__(
self,
resource_server: str,
*,
known_scopes: ScopeBuilderScopes = None,
known_url_scopes: ScopeBuilderScopes = None,
) -> None:
self.resource_server = resource_server
self._registered_scope_names: t.List[str] = []
self._register_scopes(known_scopes, self.urn_scope_string)
self._register_scopes(known_url_scopes, self.url_scope_string)
def _register_scopes(
self, scopes: ScopeBuilderScopes, transform_func: t.Callable[[str], str]
) -> None:
scopes_dict = self._scopes_input_to_dict(scopes)
for scope_name, scope_val in scopes_dict.items():
self._registered_scope_names.append(scope_name)
setattr(self, scope_name, transform_func(scope_val))
def _scopes_input_to_dict(self, items: ScopeBuilderScopes) -> t.Dict[str, str]:
"""
ScopeBuilders accepts many collection-style types of scopes. This function
normalizes all of those types into a standard {scope_name: scope_val} dict
Translation Map:
None => {}
"my-str" => {"my-str": "my-str"}
["my-list"] => {"my-list": "my-list"}
("my-tuple-key", "my-tuple-val") => {"my-tuple-key": "my-tuple-val"}
"""
if items is None:
return {}
elif isinstance(items, str):
return {items: items}
elif isinstance(items, tuple):
return {items[0]: items[1]}
else:
items_dict = {}
for item in items:
if isinstance(item, str):
items_dict[item] = item
else:
items_dict[item[0]] = item[1]
return items_dict
@property
def scope_names(self) -> t.List[str]:
return self._classattr_scope_names + self._registered_scope_names
# custom __getattr__ instructs `mypy` that unknown attributes of a ScopeBuilder are
# of type `str`, allowing for dynamic attribute names
# to test, try creating a module with
#
# from globus_sdk.scopes import TransferScopes
# x = TransferScopes.all
#
# without this method, the assignment to `x` would fail type checking
# because `all` is unknown to mypy
#
# note that the implementation just raises AttributeError; this is okay because
# __getattr__ is only called as a last resort, when __getattribute__ has failed
# normal attribute access will not be disrupted
def __getattr__(self, name: str) -> str:
raise AttributeError(f"Unrecognized Attribute '{name}'")
def urn_scope_string(self, scope_name: str) -> str:
"""
Return a complete string representing the scope with a given name for this
client, in the Globus Auth URN format.
Note that this module already provides many such scope strings for use with
Globus services.
**Examples**
>>> sb = ScopeBuilder("transfer.api.globus.org")
        >>> sb.urn_scope_string("all")
"urn:globus:auth:scope:transfer.api.globus.org:all"
:param scope_name: The short name for the scope involved.
:type scope_name: str
"""
return f"urn:globus:auth:scope:{self.resource_server}:{scope_name}"
def url_scope_string(self, scope_name: str) -> str:
"""
Return a complete string representing the scope with a given name for this
client, in URL format.
**Examples**
>>> sb = ScopeBuilder("actions.globus.org")
        >>> sb.url_scope_string("hello_world")
"https://auth.globus.org/scopes/actions.globus.org/hello_world"
:param scope_name: The short name for the scope involved.
:type scope_name: str
"""
return f"https://auth.globus.org/scopes/{self.resource_server}/{scope_name}"
def make_mutable(self, scope: str, *, optional: bool = False) -> MutableScope:
"""
For a given scope, create a MutableScope object.
The ``scope`` name given refers to the name of a scope attached to the
ScopeBuilder. It is given by attribute name, not by the full scope string.
**Examples**
Using the ``TransferScopes`` object, one could reference ``all`` as follows:
>>> TransferScopes.all
'urn:globus:auth:scope:transfer.api.globus.org:all'
>>> TransferScopes.make_mutable("all")
        MutableScope('urn:globus:auth:scope:transfer.api.globus.org:all')
        This is equivalent to constructing a MutableScope object from the resolved
        scope string, as in
        >>> MutableScope(TransferScopes.all)
        MutableScope('urn:globus:auth:scope:transfer.api.globus.org:all')
:param scope: The name of the scope to convert to a MutableScope
:type scope: str
:param optional: If true, the created MutableScope object will be marked
optional
:type optional: bool
"""
return MutableScope(getattr(self, scope), optional=optional)
def __str__(self) -> str:
return f"ScopeBuilder[{self.resource_server}]\n" + "\n".join(
f" {name}:\n {getattr(self, name)}" for name in self.scope_names
)
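# Hedged usage sketch (not part of the original module): demonstrates the
# known_scopes input forms accepted by _scopes_input_to_dict and the URN/URL
# attribute formats. The resource server and scope names below are
# illustrative placeholders, not real registered Globus scopes.
if __name__ == "__main__":
    sb = ScopeBuilder(
        "example.api.globus.org",
        known_scopes=["all", ("manage", "manage_collections")],
        known_url_scopes="view",
    )
    print(sb.all)     # urn:globus:auth:scope:example.api.globus.org:all
    print(sb.manage)  # urn:globus:auth:scope:example.api.globus.org:manage_collections
    print(sb.view)    # https://auth.globus.org/scopes/example.api.globus.org/view
    print(sb.make_mutable("all", optional=True))  # MutableScope marked optional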
|
{
"content_hash": "f19e838146176b3549b88987120ccee9",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 87,
"avg_line_length": 38.03086419753087,
"alnum_prop": 0.6231131309852297,
"repo_name": "globus/globus-sdk-python",
"id": "359dbc124f759d5051796c18349c37b727f02448",
"size": "6161",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/globus_sdk/scopes/builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "303"
},
{
"name": "Makefile",
"bytes": "810"
},
{
"name": "Python",
"bytes": "896256"
},
{
"name": "Shell",
"bytes": "125"
}
],
"symlink_target": ""
}
|
import json, hmac, hashlib, time, requests, base64
from requests.auth import AuthBase
from abc import ABCMeta, abstractmethod
class exchange(object):
__metaclass__ = ABCMeta
def __init__(self):
pass
@abstractmethod
def place_order(self):
pass
class cb_exchange_sim(exchange):
"""Class used for backtesting"""
def __init__(self, start_btc, start_usd):
self.start_usd = start_usd
self.start_btc = start_btc
self.btc_bal = start_btc
self.usd_bal = start_usd
self.fee_percent = 0.25
self.times_bought = 0
self.times_sold = 0
self.api_url = 'https://api.exchange.coinbase.com/'
def get_historic_rates(self, start_time, end_time, granularity, product_id='BTC-USD'):
"""Get a historic rates for a product. Rates are returned in grouped
buckets based on requested granularity"""
params= {
'start': start_time,
'end': end_time,
'granularity': granularity
}
r = requests.get(self.api_url + 'products/' + product_id + '/candles', params=params)
rDict = r.json()
return rDict
def get_last_trade(self, product_id):
"""Get snapshot information about the last trade (tick)"""
r = requests.get(self.api_url + 'products/' + product_id + '/ticker')
rDict = r.json()
return rDict
def place_order(self, price, size, side, product_id, historic_timeslice=None):
"""Place an order
For buy orders, we will hold price x size x (1 + fee-percent) USD. For sell orders,
we will hold the number of Bitcoin you wish to sell. Actual fees are assessed at time of trade.
If you cancel a partially filled or unfilled order, any remaining funds will be released from hold.
"""
order = {
'size': size,
'price': price,
'side': side,
'product_id': product_id,
}
size = float(size)
currprice = float(self.get_last_trade(product_id).get("price"))
if historic_timeslice is not None:
currprice = float(historic_timeslice[4])
if side == "buy":
if self.usd_bal >= (size*price):
if currprice <= price:
self.usd_bal -= (size*currprice)
self.btc_bal += size
self.times_bought += 1
print("Buy of: "+str(size)+" BTC made at price: "+str(currprice))
else:
# TODO: put buy in holds
pass
else:
print("Insufficient funds")
elif side == "sell":
if self.btc_bal >= size:
if currprice >= price:
self.usd_bal += (size*currprice)
self.btc_bal -= size
self.times_sold += 1
print("Sell of: "+str(size)+" BTC made at price: "+str(currprice))
else:
# TODO: put sell in holds
pass
else:
print("Insufficient funds")
return
# Create custom authentication for Exchange
class CoinbaseExchangeAuth(AuthBase):
def __init__(self, api_key, secret_key, passphrase):
self.api_key = api_key
self.secret_key = secret_key
self.passphrase = passphrase
def __call__(self, request):
timestamp = str(time.time())
message = timestamp + request.method + request.path_url + (request.body or '')
message = message.encode('ascii')
hmac_key = base64.b64decode(self.secret_key)
signature = hmac.new(hmac_key, message, hashlib.sha256)
signature_b64 = base64.b64encode(signature.digest())
request.headers.update({
'CB-ACCESS-SIGN': signature_b64,
'CB-ACCESS-TIMESTAMP': timestamp,
'CB-ACCESS-KEY': self.api_key,
'CB-ACCESS-PASSPHRASE': self.passphrase,
})
return request
class cb_exchange(exchange):
"""CoinbaseExchange API Wrapper"""
def __init__(self, key=None, secret=None, password=None):
self.key=key
self.secret=secret
self.password=password
self.auth = None
if password is not None:
self.auth = CoinbaseExchangeAuth(key, secret, password)
self.api_url = 'https://api.exchange.coinbase.com/'
def list_accounts(self):
"""Get a list of trading accounts"""
r = requests.get(self.api_url + 'accounts', auth=self.auth)
rDict = r.json()
return rDict
def get_account(self, account_id):
"""Get information for a single account"""
r = requests.get(self.api_url + 'accounts/' + account_id, auth=self.auth)
rDict = r.json()
return rDict
def get_account_history(self, account_id):
"""List account activity. Items are paginated and sorted latest first."""
r = requests.get(self.api_url + 'accounts/' + account_id + '/ledger', auth=self.auth)
rDict = r.json()
return rDict
def get_holds(self, account_id):
"""Holds are placed on an account for any active orders. As an order
        is filled, the hold amount is updated. If an order is canceled, any
remaining hold is removed."""
r = requests.get(self.api_url + 'accounts/' + account_id + '/holds', auth=self.auth)
rDict = r.json()
return rDict
def place_order(self, price, size, side, product_id):
"""Place an order"""
order = {
'size': size,
'price': price,
'side': side,
'product_id': product_id,
}
r = requests.post(self.api_url + 'orders', params=order, auth=self.auth)
rDict = r.json()
return rDict
def cancel_order(self, order_id):
"""Cancel an order"""
r = requests.delete(self.api_url + 'orders/' + order_id, auth=self.auth)
rDict = r.json()
return rDict
def list_open_orders(self):
"""List currently open orders"""
r = requests.get(self.api_url + 'orders', auth=self.auth)
rDict = r.json()
return rDict
def get_order(self, order_id):
"""Get a single order"""
r = requests.get(self.api_url + 'orders/' + order_id, auth=self.auth)
        rDict = r.json()
return rDict
def list_fills(self):
"""Get a list of recent fills"""
r = requests.get(self.api_url + 'fills', auth=self.auth)
rDict = r.json()
return rDict
def transfer(self):
"""Move funds to/from Coinbase Exchange and Coinbase account"""
r = requests.post(self.api_url + 'transfers', auth=self.auth)
rDict = r.json()
return rDict
# Unauthenticated endpoints below
def get_products(self):
"""Get a list of available currency pairs for trading"""
r = requests.get(self.api_url + 'products')
rDict = r.json()
return rDict
def get_order_book(self, product_id, level=1):
"""Get a list of open orders for a product. Amount of detail
shown can be customized with the level parameter."""
params= {
'level': level,
}
r = requests.get(self.api_url + 'products/' + product_id + '/book', params=params)
rDict = r.json()
return rDict
def get_last_trade(self, product_id):
"""Get snapshot information about the last trade (tick)"""
r = requests.get(self.api_url + 'products/' + product_id + '/ticker')
rDict = r.json()
return rDict
def get_trades(self, product_id):
"""Get a paginated list of latest trades for a product"""
r = requests.get(self.api_url + 'products/' + product_id + '/trades')
rDict = r.json()
return rDict
def get_historic_rates(self, start_time, end_time, granularity, product_id='BTC-USD'):
"""Get a historic rates for a product. Rates are returned in grouped
buckets based on requested granularity"""
params= {
'start': start_time,
'end': end_time,
'granularity': granularity
}
r = requests.get(self.api_url + 'products/' + product_id + '/candles', params=params)
rDict = r.json()
return rDict
def get_24_hour_stats(self, product_id):
"""Get 24 hour stats for the product"""
r = requests.get(self.api_url + 'products/' + product_id + '/stats')
rDict = r.json()
return rDict
def get_currencies(self):
"""Get a list of known currencies"""
r = requests.get(self.api_url + 'currencies')
rDict = r.json()
return rDict
def get_time(self):
"""Get the API server time"""
r = requests.get(self.api_url + 'time')
rDict = r.json()
return rDict
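# Hedged usage sketch (not part of the original module): exercises only the
# unauthenticated endpoints plus the backtesting simulator, so no API
# credentials are required. The prices and sizes below are illustrative.
if __name__ == "__main__":
    ex = cb_exchange()                       # auth stays None without a passphrase
    print("server time:", ex.get_time())
    tick = ex.get_last_trade('BTC-USD')
    print("last BTC-USD trade price:", tick.get("price"))
    sim = cb_exchange_sim(start_btc=1.0, start_usd=1000.0)
    # attempts to buy 0.1 BTC if the market price is at or below the limit price
    sim.place_order(price=250.0, size=0.1, side="buy", product_id='BTC-USD')
    print("balances after order:", sim.btc_bal, "BTC /", sim.usd_bal, "USD")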
|
{
"content_hash": "71ba065f259eeb986a5ca6b93eb2e421",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 107,
"avg_line_length": 35.42857142857143,
"alnum_prop": 0.5647401433691757,
"repo_name": "ehickox/bitraider",
"id": "111bb9113261b64bedffee0cb7d9ce7c95272dcf",
"size": "8928",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bitraider/exchange.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7244"
},
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "51319"
}
],
"symlink_target": ""
}
|
default_app_config = 'sample.config.SiteConfig'
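# Hedged sketch (an assumption, not code from this repo): default_app_config
# tells Django which AppConfig subclass to load for this package. The module
# it names, sample/config.py, would look roughly like:
#
#     from django.apps import AppConfig
#
#     class SiteConfig(AppConfig):
#         name = 'sample'
#         verbose_name = 'Sample Site'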
|
{
"content_hash": "66d7ab5b21c1ba3ef391fd3de58cd942",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 47,
"avg_line_length": 48,
"alnum_prop": 0.7916666666666666,
"repo_name": "altio/foundation",
"id": "1f8edd2834109d73df0d5c8587b1ee15d8a15c21",
"size": "48",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "sample/sample/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1700"
},
{
"name": "HTML",
"bytes": "60043"
},
{
"name": "JavaScript",
"bytes": "6017"
},
{
"name": "Python",
"bytes": "223531"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('trees', '0010_treephoto_license'),
]
operations = [
migrations.AddField(
model_name='supplementalcontent',
name='author',
field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
{
"content_hash": "1436641efb00688cb6950f93d72a993e",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 26.095238095238095,
"alnum_prop": 0.6405109489051095,
"repo_name": "mattblair/pdx-trees-django",
"id": "eb7104abf3196a44376e214092221021afcc79b1",
"size": "572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trees/migrations/0011_supplementalcontent_author.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "162775"
},
{
"name": "HTML",
"bytes": "19273"
},
{
"name": "Python",
"bytes": "75257"
}
],
"symlink_target": ""
}
|
import asyncio
import unittest
from unittest import SkipTest
from couchbase.exceptions import CouchbaseError
from couchbase.experimental import enable; enable()
from fixtures import asynct, AioTestCase
from couchbase.n1ql import N1QLQuery
class CouchbaseBeerTest(AioTestCase):
def make_connection(self):
try:
return super().make_connection(bucket='beer-sample')
except CouchbaseError:
raise SkipTest("Need 'beer-sample' bucket for this")
@asynct
@asyncio.coroutine
def test_get_data(self):
beer_bucket = self.cb
yield from (beer_bucket.connect() or asyncio.sleep(0.01))
data = yield from beer_bucket.get('21st_amendment_brewery_cafe')
self.assertEqual("21st Amendment Brewery Cafe", data.value["name"])
@asynct
@asyncio.coroutine
def test_query(self):
beer_bucket = self.cb
yield from (beer_bucket.connect() or asyncio.sleep(0.01))
viewiter = beer_bucket.query("beer", "brewery_beers", limit=10)
yield from viewiter.future
count = len(list(viewiter))
self.assertEqual(count, 10)
class CouchbaseDefaultTest(AioTestCase):
@asynct
@asyncio.coroutine
def test_upsert(self):
import uuid
expected = str(uuid.uuid4())
default_bucket = self.cb
yield from (default_bucket.connect() or asyncio.sleep(0.01))
yield from default_bucket.upsert('hello', {"key": expected})
obtained = yield from default_bucket.get('hello')
self.assertEqual({"key": expected}, obtained.value)
@asynct
@asyncio.coroutine
def test_n1ql(self):
default_bucket = self.cb
yield from (default_bucket.connect() or asyncio.sleep(0.01))
q = N1QLQuery("SELECT mockrow")
it = default_bucket.n1ql_query(q)
yield from it.future
data = list(it)
self.assertEqual('value', data[0]['row'])
|
{
"content_hash": "782f666cd2a4b313b190d4fd2d36b416",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 75,
"avg_line_length": 25.958333333333332,
"alnum_prop": 0.6420545746388443,
"repo_name": "mnunberg/couchbase-python-client",
"id": "fc688672ca71cf73c3e305378b352e473a1e4a13",
"size": "1869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acouchbase/tests/py34only.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "261401"
},
{
"name": "Python",
"bytes": "428174"
}
],
"symlink_target": ""
}
|
import os
import sys
import periodicals
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = periodicals.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='django-periodicals',
version=version,
    description='Django app to create periodical/magazine websites',
long_description=readme + '\n\n' + history,
author='Steve Schwarz',
author_email='steve@agilitynerd.com',
url='https://github.com/saschwarz/django-periodicals',
packages=[
'periodicals',
],
include_package_data=True,
install_requires=[
'django-autoslug',
'django-haystack'
],
license="BSD",
zip_safe=False,
keywords='django-periodicals',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.3',
],
)
|
{
"content_hash": "563312cd263efb712dad8d035a9bb356",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 69,
"avg_line_length": 28.555555555555557,
"alnum_prop": 0.6160830090791181,
"repo_name": "saschwarz/django-periodicals",
"id": "3966d2e06163905fcfeca2ac6a09cdfafa4a1d2d",
"size": "1589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3125"
},
{
"name": "HTML",
"bytes": "39069"
},
{
"name": "Python",
"bytes": "83195"
}
],
"symlink_target": ""
}
|
"""Converts MNIST data to TFRecords file format with Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import mnist
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz' # MNIST filenames
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
tf.app.flags.DEFINE_string('directory', '/tmp/data',
'Directory to download data files and write the '
'converted result')
tf.app.flags.DEFINE_integer('validation_size', 5000,
'Number of examples to separate from the training '
'data for the validation set.')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def convert_to(data_set, name):
images = data_set.images
labels = data_set.labels
num_examples = data_set.num_examples
if images.shape[0] != num_examples:
raise ValueError('Images size %d does not match label size %d.' %
(images.shape[0], num_examples))
rows = images.shape[1]
cols = images.shape[2]
depth = images.shape[3]
filename = os.path.join(FLAGS.directory, name + '.tfrecords')
print('Writing', filename)
writer = tf.python_io.TFRecordWriter(filename)
for index in range(num_examples):
image_raw = images[index].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'height': _int64_feature(rows),
'width': _int64_feature(cols),
'depth': _int64_feature(depth),
'label': _int64_feature(int(labels[index])),
'image_raw': _bytes_feature(image_raw)}))
writer.write(example.SerializeToString())
writer.close()
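# Hedged companion sketch (not part of the original script): shows how the
# records written above could be parsed back. The feature keys and dtypes
# mirror convert_to(); the function expects a filename queue such as one
# produced by tf.train.string_input_producer, per the TF 1.x-era API used here.
def read_and_decode(filename_queue):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      features={
          'height': tf.FixedLenFeature([], tf.int64),
          'width': tf.FixedLenFeature([], tf.int64),
          'depth': tf.FixedLenFeature([], tf.int64),
          'label': tf.FixedLenFeature([], tf.int64),
          'image_raw': tf.FixedLenFeature([], tf.string),
      })
  # decode_raw recovers the uint8 pixel buffer written by tostring() above
  image = tf.decode_raw(features['image_raw'], tf.uint8)
  label = tf.cast(features['label'], tf.int32)
  return image, label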
def main(argv):
# Get the data.
data_sets = mnist.read_data_sets(FLAGS.directory,
dtype=tf.uint8,
reshape=False)
# Convert to Examples and write the result to TFRecords.
convert_to(data_sets.train, 'train')
convert_to(data_sets.validation, 'validation')
convert_to(data_sets.test, 'test')
if __name__ == '__main__':
tf.app.run()
|
{
"content_hash": "d6613e9828358f9dc31befe2ac181fae",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 32.4025974025974,
"alnum_prop": 0.6509018036072144,
"repo_name": "HaebinShin/tensorflow",
"id": "2e3035731ad435fcb11d08adb423b73f5fbd52cc",
"size": "3185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/examples/how_tos/reading_data/convert_to_records.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "176349"
},
{
"name": "C++",
"bytes": "10558866"
},
{
"name": "CMake",
"bytes": "34638"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "GCC Machine Description",
"bytes": "2"
},
{
"name": "HTML",
"bytes": "865714"
},
{
"name": "Java",
"bytes": "41615"
},
{
"name": "JavaScript",
"bytes": "10609"
},
{
"name": "Jupyter Notebook",
"bytes": "1773504"
},
{
"name": "Makefile",
"bytes": "20930"
},
{
"name": "Objective-C",
"bytes": "5332"
},
{
"name": "Objective-C++",
"bytes": "45677"
},
{
"name": "Protocol Buffer",
"bytes": "118214"
},
{
"name": "Python",
"bytes": "8858431"
},
{
"name": "Shell",
"bytes": "234426"
},
{
"name": "TypeScript",
"bytes": "428153"
}
],
"symlink_target": ""
}
|
try:
    from setuptools import setup  # needed for install_requires to take effect
except ImportError:
    from distutils.core import setup
setup(
name='BatchNotebook',
version='0.0.3',
packages=['batch_notebook'],
license='BSD-new license',
description='Tools for running an IPython notebook in batch mode.',
long_description=open('README.rst').read(),
author='John Bjorn Nelson',
author_email='jbn@pathdependent.com',
url='https://github.com/jbn/BatchNotebook',
install_requires=[
'ipython >= 1.1.0'
],
scripts=['bin/run_ipython_script.py'],
)
|
{
"content_hash": "5094b7d954c5d2dac3cafce32ffc3a0c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 71,
"avg_line_length": 29.176470588235293,
"alnum_prop": 0.6532258064516129,
"repo_name": "jbn/BatchNotebook",
"id": "b7d98efbcd66f36f9fe1c2a9c6d2cd9f36814db0",
"size": "496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6974"
}
],
"symlink_target": ""
}
|
"""
kombu.exceptions
================
Exceptions.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
class NotBoundError(Exception):
"""Trying to call channel dependent method on unbound entity."""
pass
class MessageStateError(Exception):
"""The message has already been acknowledged."""
pass
class TimeoutError(Exception):
"""Operation timed out."""
pass
class LimitExceeded(Exception):
"""Limit exceeded."""
pass
class ConnectionLimitExceeded(LimitExceeded):
"""Maximum number of simultaneous connections exceeded."""
pass
class ChannelLimitExceeded(LimitExceeded):
"""Maximum number of simultaenous channels exceeded."""
pass
class StdChannelError(Exception):
pass
class VersionMismatch(Exception):
pass
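# Hedged usage sketch (not part of kombu): because ConnectionLimitExceeded and
# ChannelLimitExceeded both derive from LimitExceeded, a single handler can
# cover either resource cap. _acquire_channel is a hypothetical stand-in.
def _acquire_channel():
    raise ChannelLimitExceeded("maximum number of channels reached")
if __name__ == "__main__":
    try:
        _acquire_channel()
    except LimitExceeded as exc:
        print("resource limit hit: %r" % exc)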
|
{
"content_hash": "e2f51600a8f22df7121e6ab113080d98",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 68,
"avg_line_length": 17.125,
"alnum_prop": 0.694647201946472,
"repo_name": "WoLpH/kombu",
"id": "bac4ac8688fa93853d338834021d026b16001e3c",
"size": "822",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kombu/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "365468"
},
{
"name": "Shell",
"bytes": "1487"
}
],
"symlink_target": ""
}
|
import sys
from pyspark import since, keyword_only
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper
from pyspark.ml.param.shared import *
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
'KMeans', 'KMeansModel',
'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering']
class ClusteringSummary(JavaWrapper):
"""
.. note:: Experimental
Clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def predictionCol(self):
"""
Name for column of predicted clusters in `predictions`.
"""
return self._call_java("predictionCol")
@property
@since("2.1.0")
def predictions(self):
"""
DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.1.0")
def featuresCol(self):
"""
Name for column of features in `predictions`.
"""
return self._call_java("featuresCol")
@property
@since("2.1.0")
def k(self):
"""
The number of clusters the model was trained with.
"""
return self._call_java("k")
@property
@since("2.1.0")
def cluster(self):
"""
DataFrame of predicted cluster centers for each training data point.
"""
return self._call_java("cluster")
@property
@since("2.1.0")
def clusterSizes(self):
"""
Size of (number of data points in) each cluster.
"""
return self._call_java("clusterSizes")
class GaussianMixtureModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by GaussianMixture.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def weights(self):
"""
Weight for each Gaussian distribution in the mixture.
This is a multinomial probability distribution over the k Gaussians,
where weights[i] is the weight for Gaussian i, and weights sum to 1.
"""
return self._call_java("weights")
@property
@since("2.0.0")
def gaussiansDF(self):
"""
Retrieve Gaussian distributions as a DataFrame.
Each row represents a Gaussian Distribution.
The DataFrame has two columns: mean (Vector) and cov (Matrix).
"""
return self._call_java("gaussiansDF")
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model
instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class GaussianMixture(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
HasProbabilityCol, JavaMLWritable, JavaMLReadable):
"""
GaussianMixture clustering.
This class performs expectation maximization for multivariate Gaussian
Mixture Models (GMMs). A GMM represents a composite distribution of
independent Gaussian distributions with associated "mixing" weights
specifying each's contribution to the composite.
Given a set of sample points, this class will maximize the log-likelihood
for a mixture of k Gaussians, iterating until the log-likelihood changes by
less than convergenceTol, or until it has reached the max number of iterations.
While this process is generally guaranteed to converge, it is not guaranteed
to find a global optimum.
.. note:: For high-dimensional data (with many features), this algorithm may perform poorly.
This is due to high-dimensional data (a) making it difficult to cluster at all
(based on statistical/theoretical arguments) and (b) numerical issues with
Gaussian distributions.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([-0.1, -0.05 ]),),
... (Vectors.dense([-0.01, -0.1]),),
... (Vectors.dense([0.9, 0.8]),),
... (Vectors.dense([0.75, 0.935]),),
... (Vectors.dense([-0.83, -0.68]),),
... (Vectors.dense([-0.91, -0.76]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> gm = GaussianMixture(k=3, tol=0.0001,
... maxIter=10, seed=10)
>>> model = gm.fit(df)
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
3
>>> summary.clusterSizes
[2, 2, 2]
>>> summary.logLikelihood
8.14636...
>>> weights = model.weights
>>> len(weights)
3
>>> model.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[4].prediction == rows[5].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> gmm_path = temp_path + "/gmm"
>>> gm.save(gmm_path)
>>> gm2 = GaussianMixture.load(gmm_path)
>>> gm2.getK()
3
>>> model_path = temp_path + "/gmm_model"
>>> model.save(model_path)
>>> model2 = GaussianMixtureModel.load(model_path)
>>> model2.hasSummary
False
>>> model2.weights == model.weights
True
>>> model2.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model2.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
"""
super(GaussianMixture, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
self.uid)
self._setDefault(k=2, tol=0.01, maxIter=100)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return GaussianMixtureModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
Sets params for GaussianMixture.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
class GaussianMixtureSummary(ClusteringSummary):
"""
.. note:: Experimental
Gaussian mixture clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def probabilityCol(self):
"""
Name for column of predicted probability of each cluster in `predictions`.
"""
return self._call_java("probabilityCol")
@property
@since("2.1.0")
def probability(self):
"""
DataFrame of probabilities of each cluster for each training data point.
"""
return self._call_java("probability")
@property
@since("2.2.0")
def logLikelihood(self):
"""
Total log-likelihood for this model on the given data.
"""
return self._call_java("logLikelihood")
class KMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Summary of KMeans.
.. versionadded:: 2.1.0
"""
pass
class KMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by KMeans.
.. versionadded:: 1.5.0
"""
@since("1.5.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Return the K-means cost (sum of squared distances of points to their nearest center)
for this model on the given data.
"""
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return KMeansSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class KMeans(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
JavaMLWritable, JavaMLReadable):
"""
K-means clustering with a k-means++ like initialization mode
(the k-means|| algorithm by Bahmani et al).
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> kmeans = KMeans(k=2, seed=1)
>>> model = kmeans.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> kmeans_path = temp_path + "/kmeans"
>>> kmeans.save(kmeans_path)
>>> kmeans2 = KMeans.load(kmeans_path)
>>> kmeans2.getK()
2
>>> model_path = temp_path + "/kmeans_model"
>>> model.save(model_path)
>>> model2 = KMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 1.5.0
"""
k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++",
typeConverter=TypeConverters.toString)
initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
"initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)
distanceMeasure = Param(Params._dummy(), "distanceMeasure", "The distance measure. " +
"Supported options: 'euclidean' and 'cosine'.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean"):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean")
"""
super(KMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20,
distanceMeasure="euclidean")
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return KMeansModel(java_model)
@keyword_only
@since("1.5.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean"):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean")
Sets params for KMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
@since("1.5.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("1.5.0")
def getInitMode(self):
"""
Gets the value of `initMode`
"""
return self.getOrDefault(self.initMode)
@since("1.5.0")
def setInitSteps(self, value):
"""
Sets the value of :py:attr:`initSteps`.
"""
return self._set(initSteps=value)
@since("1.5.0")
def getInitSteps(self):
"""
Gets the value of `initSteps`
"""
return self.getOrDefault(self.initSteps)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.4.0")
def getDistanceMeasure(self):
"""
Gets the value of `distanceMeasure`
"""
return self.getOrDefault(self.distanceMeasure)
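# Editor's illustrative sketch (not part of the original module): the
# ``distanceMeasure`` parameter added in 2.4.0 is not exercised by the
# doctest above.  Assuming the same ``df`` of dense feature vectors and an
# active SparkSession, cosine distance could be selected like this:
#
#     kmeans = KMeans(k=2, seed=1, distanceMeasure="cosine")
#     kmeans.getDistanceMeasure()   # 'cosine'
#     model = kmeans.fit(df)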
class BisectingKMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by BisectingKMeans.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Computes the sum of squared distances between the input points
and their corresponding cluster centers.
"""
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class BisectingKMeans(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasSeed,
JavaMLWritable, JavaMLReadable):
"""
A bisecting k-means algorithm based on the paper "A comparison of document clustering
techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and bisects each of them using
k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped together to increase parallelism.
    If bisecting all divisible clusters on the bottom level would result in more than `k` leaf
clusters, larger clusters get higher priority.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
>>> model = bkm.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> bkm_path = temp_path + "/bkm"
>>> bkm.save(bkm_path)
>>> bkm2 = BisectingKMeans.load(bkm_path)
>>> bkm2.getK()
2
>>> model_path = temp_path + "/bkm_model"
>>> model.save(model_path)
>>> model2 = BisectingKMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
typeConverter=TypeConverters.toInt)
minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
"The minimum number of points (if >= 1.0) or the minimum " +
"proportion of points (if < 1.0) of a divisible cluster.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0):
"""
__init__(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0)
"""
super(BisectingKMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
self.uid)
self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0):
"""
setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0)
Sets params for BisectingKMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setMinDivisibleClusterSize(self, value):
"""
Sets the value of :py:attr:`minDivisibleClusterSize`.
"""
return self._set(minDivisibleClusterSize=value)
@since("2.0.0")
def getMinDivisibleClusterSize(self):
"""
Gets the value of `minDivisibleClusterSize` or its default value.
"""
return self.getOrDefault(self.minDivisibleClusterSize)
def _create_model(self, java_model):
return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Bisecting KMeans clustering results for a given model.
.. versionadded:: 2.1.0
"""
pass
@inherit_doc
class LDAModel(JavaModel):
"""
Latent Dirichlet Allocation (LDA) model.
    This abstraction allows for different underlying representations,
including local and distributed data structures.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def isDistributed(self):
"""
Indicates whether this instance is of type DistributedLDAModel
"""
return self._call_java("isDistributed")
@since("2.0.0")
def vocabSize(self):
"""Vocabulary size (number of terms or words in the vocabulary)"""
return self._call_java("vocabSize")
@since("2.0.0")
def topicsMatrix(self):
"""
Inferred topics, where each topic is represented by a distribution over terms.
This is a matrix of size vocabSize x k, where each column is a topic.
No guarantees are given about the ordering of the topics.
WARNING: If this model is actually a :py:class:`DistributedLDAModel` instance produced by
the Expectation-Maximization ("em") `optimizer`, then this method could involve
collecting a large amount of data to the driver (on the order of vocabSize x k).
"""
return self._call_java("topicsMatrix")
@since("2.0.0")
def logLikelihood(self, dataset):
"""
Calculates a lower bound on the log likelihood of the entire corpus.
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logLikelihood", dataset)
@since("2.0.0")
def logPerplexity(self, dataset):
"""
Calculate an upper bound on perplexity. (Lower is better.)
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logPerplexity", dataset)
@since("2.0.0")
def describeTopics(self, maxTermsPerTopic=10):
"""
Return the topics described by their top-weighted terms.
"""
return self._call_java("describeTopics", maxTermsPerTopic)
@since("2.0.0")
def estimatedDocConcentration(self):
"""
Value for :py:attr:`LDA.docConcentration` estimated from data.
If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
"""
return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Distributed model fitted by :py:class:`LDA`.
This type of model is currently only produced by Expectation-Maximization (EM).
This model stores the inferred topics, the full training dataset, and the topic distribution
for each training document.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def toLocal(self):
"""
Convert this distributed model to a local representation. This discards info about the
training dataset.
WARNING: This involves collecting a large :py:func:`topicsMatrix` to the driver.
"""
model = LocalLDAModel(self._call_java("toLocal"))
# SPARK-10931: Temporary fix to be removed once LDAModel defines Params
model._create_params_from_java()
model._transfer_params_from_java()
return model
@since("2.0.0")
def trainingLogLikelihood(self):
"""
Log likelihood of the observed tokens in the training set,
given the current parameter estimates:
log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
Notes:
- This excludes the prior; for that, use :py:func:`logPrior`.
- Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
the hyperparameters.
- This is computed from the topic distributions computed during training. If you call
:py:func:`logLikelihood` on the same training dataset, the topic distributions
will be computed again, possibly giving different results.
"""
return self._call_java("trainingLogLikelihood")
@since("2.0.0")
def logPrior(self):
"""
Log probability of the current parameter estimate:
log P(topics, topic distributions for docs | alpha, eta)
"""
return self._call_java("logPrior")
@since("2.0.0")
def getCheckpointFiles(self):
"""
If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
be saved checkpoint files. This method is provided so that users can manage those files.
.. note:: Removing the checkpoints can cause failures if a partition is lost and is needed
by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
the checkpoints when this model and derivative data go out of scope.
        :return: List of checkpoint files from training
"""
return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Local (non-distributed) model fitted by :py:class:`LDA`.
This model stores the inferred topics only; it does not store info about the training dataset.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class LDA(JavaEstimator, HasFeaturesCol, HasMaxIter, HasSeed, HasCheckpointInterval,
JavaMLReadable, JavaMLWritable):
"""
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology:
- "term" = "word": an element of the vocabulary
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over terms representing some concept
- "document": one piece of text, corresponding to one row in the input data
Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
Input data (featuresCol):
LDA is given a collection of documents as input data, via the featuresCol parameter.
Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
count for the corresponding term (word) in the document. Feature transformers such as
:py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
can be useful for converting text to word count vectors.
>>> from pyspark.ml.linalg import Vectors, SparseVector
>>> from pyspark.ml.clustering import LDA
>>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
>>> lda = LDA(k=2, seed=1, optimizer="em")
>>> model = lda.fit(df)
>>> model.isDistributed()
True
>>> localModel = model.toLocal()
>>> localModel.isDistributed()
False
>>> model.vocabSize()
2
>>> model.describeTopics().show()
+-----+-----------+--------------------+
|topic|termIndices| termWeights|
+-----+-----------+--------------------+
| 0| [1, 0]|[0.50401530077160...|
| 1| [0, 1]|[0.50401530077160...|
+-----+-----------+--------------------+
...
>>> model.topicsMatrix()
DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
>>> lda_path = temp_path + "/lda"
>>> lda.save(lda_path)
>>> sameLDA = LDA.load(lda_path)
>>> distributed_model_path = temp_path + "/lda_distributed_model"
>>> model.save(distributed_model_path)
>>> sameModel = DistributedLDAModel.load(distributed_model_path)
>>> local_model_path = temp_path + "/lda_local_model"
>>> localModel.save(local_model_path)
>>> sameLocalModel = LocalLDAModel.load(local_model_path)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
typeConverter=TypeConverters.toInt)
optimizer = Param(Params._dummy(), "optimizer",
"Optimizer or inference algorithm used to estimate the LDA model. "
"Supported: online, em", typeConverter=TypeConverters.toString)
learningOffset = Param(Params._dummy(), "learningOffset",
"A (positive) learning parameter that downweights early iterations."
" Larger values make early iterations count less",
typeConverter=TypeConverters.toFloat)
    learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an "
"exponential decay rate. This should be between (0.5, 1.0] to "
"guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
subsamplingRate = Param(Params._dummy(), "subsamplingRate",
"Fraction of the corpus to be sampled and used in each iteration "
"of mini-batch gradient descent, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
"Indicates whether the docConcentration (Dirichlet parameter "
"for document-topic distribution) will be optimized during "
"training.", typeConverter=TypeConverters.toBoolean)
docConcentration = Param(Params._dummy(), "docConcentration",
"Concentration parameter (commonly named \"alpha\") for the "
"prior placed on documents' distributions over topics (\"theta\").",
typeConverter=TypeConverters.toListFloat)
topicConcentration = Param(Params._dummy(), "topicConcentration",
"Concentration parameter (commonly named \"beta\" or \"eta\") for "
"the prior placed on topic' distributions over terms.",
typeConverter=TypeConverters.toFloat)
topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
"Output column with estimates of the topic mixture distribution "
"for each document (often called \"theta\" in the literature). "
"Returns a vector of zeros for an empty document.",
typeConverter=TypeConverters.toString)
keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
"(For EM optimizer) If using checkpointing, this indicates whether"
" to keep the last checkpoint. If false, then the checkpoint will be"
" deleted. Deleting the checkpoint can cause failures if a data"
" partition is lost, so set this bit with care.",
TypeConverters.toBoolean)
@keyword_only
def __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
__init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
"""
super(LDA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
self._setDefault(maxIter=20, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
if self.getOptimizer() == "em":
return DistributedLDAModel(java_model)
else:
return LocalLDAModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
Sets params for LDA.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
>>> algo = LDA().setK(10)
>>> algo.getK()
10
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setOptimizer(self, value):
"""
Sets the value of :py:attr:`optimizer`.
        Currently only supports 'em' and 'online'.
>>> algo = LDA().setOptimizer("em")
>>> algo.getOptimizer()
'em'
"""
return self._set(optimizer=value)
@since("2.0.0")
def getOptimizer(self):
"""
Gets the value of :py:attr:`optimizer` or its default value.
"""
return self.getOrDefault(self.optimizer)
@since("2.0.0")
def setLearningOffset(self, value):
"""
Sets the value of :py:attr:`learningOffset`.
>>> algo = LDA().setLearningOffset(100)
>>> algo.getLearningOffset()
100.0
"""
return self._set(learningOffset=value)
@since("2.0.0")
def getLearningOffset(self):
"""
Gets the value of :py:attr:`learningOffset` or its default value.
"""
return self.getOrDefault(self.learningOffset)
@since("2.0.0")
def setLearningDecay(self, value):
"""
Sets the value of :py:attr:`learningDecay`.
>>> algo = LDA().setLearningDecay(0.1)
>>> algo.getLearningDecay()
0.1...
"""
return self._set(learningDecay=value)
@since("2.0.0")
def getLearningDecay(self):
"""
Gets the value of :py:attr:`learningDecay` or its default value.
"""
return self.getOrDefault(self.learningDecay)
@since("2.0.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
>>> algo = LDA().setSubsamplingRate(0.1)
>>> algo.getSubsamplingRate()
0.1...
"""
return self._set(subsamplingRate=value)
@since("2.0.0")
def getSubsamplingRate(self):
"""
Gets the value of :py:attr:`subsamplingRate` or its default value.
"""
return self.getOrDefault(self.subsamplingRate)
@since("2.0.0")
def setOptimizeDocConcentration(self, value):
"""
Sets the value of :py:attr:`optimizeDocConcentration`.
>>> algo = LDA().setOptimizeDocConcentration(True)
>>> algo.getOptimizeDocConcentration()
True
"""
return self._set(optimizeDocConcentration=value)
@since("2.0.0")
def getOptimizeDocConcentration(self):
"""
Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
"""
return self.getOrDefault(self.optimizeDocConcentration)
@since("2.0.0")
def setDocConcentration(self, value):
"""
Sets the value of :py:attr:`docConcentration`.
>>> algo = LDA().setDocConcentration([0.1, 0.2])
>>> algo.getDocConcentration()
[0.1..., 0.2...]
"""
return self._set(docConcentration=value)
@since("2.0.0")
def getDocConcentration(self):
"""
Gets the value of :py:attr:`docConcentration` or its default value.
"""
return self.getOrDefault(self.docConcentration)
@since("2.0.0")
def setTopicConcentration(self, value):
"""
Sets the value of :py:attr:`topicConcentration`.
>>> algo = LDA().setTopicConcentration(0.5)
>>> algo.getTopicConcentration()
0.5...
"""
return self._set(topicConcentration=value)
@since("2.0.0")
def getTopicConcentration(self):
"""
Gets the value of :py:attr:`topicConcentration` or its default value.
"""
return self.getOrDefault(self.topicConcentration)
@since("2.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
>>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
>>> algo.getTopicDistributionCol()
'topicDistributionCol'
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def getTopicDistributionCol(self):
"""
Gets the value of :py:attr:`topicDistributionCol` or its default value.
"""
return self.getOrDefault(self.topicDistributionCol)
@since("2.0.0")
def setKeepLastCheckpoint(self, value):
"""
Sets the value of :py:attr:`keepLastCheckpoint`.
>>> algo = LDA().setKeepLastCheckpoint(False)
>>> algo.getKeepLastCheckpoint()
False
"""
return self._set(keepLastCheckpoint=value)
@since("2.0.0")
def getKeepLastCheckpoint(self):
"""
Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
"""
return self.getOrDefault(self.keepLastCheckpoint)
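# Editor's illustrative sketch (not part of the original module): the LDA
# docstring above notes that ``featuresCol`` expects term-count vectors.  A
# hypothetical preprocessing pipeline that produces such vectors from raw
# text might look like this (assuming an active SparkSession ``spark``):
#
#     from pyspark.ml import Pipeline
#     from pyspark.ml.feature import Tokenizer, CountVectorizer
#     docs = spark.createDataFrame([(0, "spark lda topic model"),
#                                   (1, "lda infers topics from counts")],
#                                  ["id", "text"])
#     pipeline = Pipeline(stages=[
#         Tokenizer(inputCol="text", outputCol="words"),
#         CountVectorizer(inputCol="words", outputCol="features"),
#         LDA(k=2, maxIter=10)])
#     model = pipeline.fit(docs)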
@inherit_doc
class PowerIterationClustering(HasMaxIter, HasWeightCol, JavaParams, JavaMLReadable,
JavaMLWritable):
"""
.. note:: Experimental
Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by
    `Lin and Cohen <http://www.icml2010.org/papers/387.pdf>`_. From the abstract:
PIC finds a very low-dimensional embedding of a dataset using truncated power
iteration on a normalized pair-wise similarity matrix of the data.
    This class is not yet an Estimator/Transformer; use the :py:func:`assignClusters` method
to run the PowerIterationClustering algorithm.
.. seealso:: `Wikipedia on Spectral clustering \
<http://en.wikipedia.org/wiki/Spectral_clustering>`_
>>> data = [(1, 0, 0.5), \
(2, 0, 0.5), (2, 1, 0.7), \
(3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9), \
(4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1), \
(5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]
>>> df = spark.createDataFrame(data).toDF("src", "dst", "weight")
>>> pic = PowerIterationClustering(k=2, maxIter=40, weightCol="weight")
>>> assignments = pic.assignClusters(df)
>>> assignments.sort(assignments.id).show(truncate=False)
+---+-------+
|id |cluster|
+---+-------+
|0 |1 |
|1 |1 |
|2 |1 |
|3 |1 |
|4 |1 |
|5 |0 |
+---+-------+
...
>>> pic_path = temp_path + "/pic"
>>> pic.save(pic_path)
>>> pic2 = PowerIterationClustering.load(pic_path)
>>> pic2.getK()
2
>>> pic2.getMaxIter()
40
.. versionadded:: 2.4.0
"""
k = Param(Params._dummy(), "k",
"The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either " +
"'random' to use a random vector as vertex properties, or 'degree' to use " +
"a normalized sum of similarities with other vertices. Supported options: " +
"'random' and 'degree'.",
typeConverter=TypeConverters.toString)
srcCol = Param(Params._dummy(), "srcCol",
"Name of the input column for source vertex IDs.",
typeConverter=TypeConverters.toString)
dstCol = Param(Params._dummy(), "dstCol",
"Name of the input column for destination vertex IDs.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
__init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
"""
super(PowerIterationClustering, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.clustering.PowerIterationClustering", self.uid)
self._setDefault(k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.4.0")
def setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
Sets params for PowerIterationClustering.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.4.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.4.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.4.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("2.4.0")
def getInitMode(self):
"""
Gets the value of :py:attr:`initMode` or its default value.
"""
return self.getOrDefault(self.initMode)
@since("2.4.0")
def setSrcCol(self, value):
"""
Sets the value of :py:attr:`srcCol`.
"""
return self._set(srcCol=value)
@since("2.4.0")
def getSrcCol(self):
"""
Gets the value of :py:attr:`srcCol` or its default value.
"""
return self.getOrDefault(self.srcCol)
@since("2.4.0")
def setDstCol(self, value):
"""
Sets the value of :py:attr:`dstCol`.
"""
return self._set(dstCol=value)
@since("2.4.0")
def getDstCol(self):
"""
Gets the value of :py:attr:`dstCol` or its default value.
"""
return self.getOrDefault(self.dstCol)
@since("2.4.0")
def assignClusters(self, dataset):
"""
Run the PIC algorithm and returns a cluster assignment for each input vertex.
:param dataset:
A dataset with columns src, dst, weight representing the affinity matrix,
which is the matrix A in the PIC paper. Suppose the src column value is i,
            the dst column value is j, the weight column value is similarity s_ij
            which must be nonnegative. This is a symmetric matrix and hence
            s_ij = s_ji. For any (i, j) with nonzero similarity, there should be
            either (i, j, s_ij) or (j, i, s_ji) in the input. Rows with i = j are
            ignored, because we assume s_ij = 0.0.
:return:
A dataset that contains columns of vertex id and the corresponding cluster for
the id. The schema of it will be:
- id: Long
- cluster: Int
.. versionadded:: 2.4.0
"""
self._transfer_params_to_java()
jdf = self._java_obj.assignClusters(dataset._jdf)
return DataFrame(jdf, dataset.sql_ctx)
if __name__ == "__main__":
import doctest
import pyspark.ml.clustering
from pyspark.sql import SparkSession
globs = pyspark.ml.clustering.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.clustering tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
|
{
"content_hash": "98c485787a50271aaf8f61dbd8513a86",
"timestamp": "",
"source": "github",
"line_count": 1343,
"max_line_length": 100,
"avg_line_length": 35.79523454951601,
"alnum_prop": 0.5943877020364862,
"repo_name": "bravo-zhang/spark",
"id": "4aa1cf84b5824cb193164c24e032945c19562b1c",
"size": "48858",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/pyspark/ml/clustering.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "34022"
},
{
"name": "Batchfile",
"bytes": "30285"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "23956"
},
{
"name": "HTML",
"bytes": "10056"
},
{
"name": "HiveQL",
"bytes": "1823425"
},
{
"name": "Java",
"bytes": "3373672"
},
{
"name": "JavaScript",
"bytes": "144886"
},
{
"name": "Makefile",
"bytes": "7774"
},
{
"name": "PLpgSQL",
"bytes": "163419"
},
{
"name": "PowerShell",
"bytes": "3756"
},
{
"name": "Python",
"bytes": "2688843"
},
{
"name": "R",
"bytes": "1125677"
},
{
"name": "Roff",
"bytes": "20699"
},
{
"name": "SQLPL",
"bytes": "30039"
},
{
"name": "Scala",
"bytes": "26381840"
},
{
"name": "Shell",
"bytes": "188619"
},
{
"name": "Thrift",
"bytes": "33605"
},
{
"name": "q",
"bytes": "146878"
}
],
"symlink_target": ""
}
|
"""Distributed MNIST training and validation, with model replicas.
A simple softmax model with one hidden layer is defined. The parameters
(weights and biases) are located on two parameter servers (ps), while the
ops are defined on a worker node. The TF sessions also run on the worker
node.
Multiple invocations of this script can be done in parallel, with different
values for --worker_index. There should be exactly one invocation with
--worker_index, which will create a master session that carries out variable
initialization. The other, non-master, sessions will wait for the master
session to finish the initialization before proceeding to the training stage.
The coordination between the multiple worker invocations occurs due to
the definition of the parameters on the same ps devices. The parameter updates
from one worker are visible to all other workers. As such, the workers can
perform forward computation and gradient calculation in parallel, which
should lead to increased training speed for the simple model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
import tempfile
import time
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
flags = tf.app.flags
flags.DEFINE_string("data_dir", "/tmp/mnist-data",
"Directory for storing mnist data")
flags.DEFINE_boolean("download_only", False,
"Only perform downloading of data; Do not proceed to "
"session preparation, model definition or training")
flags.DEFINE_integer("worker_index", 0,
"Worker task index, should be >= 0. worker_index=0 is "
"the master worker task the performs the variable "
"initialization ")
flags.DEFINE_string("ps_hosts", "",
"Comma-separated list of hostname:port pairs")
flags.DEFINE_string("worker_hosts", "",
"Comma-separated list of hostname:port pairs")
flags.DEFINE_string("job_name", "", "One of 'ps', 'worker'")
flags.DEFINE_integer("replicas_to_aggregate", None,
"Number of replicas to aggregate before parameter update"
"is applied (For sync_replicas mode only; default: "
"num_workers)")
flags.DEFINE_integer("hidden_units", 100,
"Number of units in the hidden layer of the NN")
flags.DEFINE_integer("train_steps", 200,
"Number of (global) training steps to perform")
flags.DEFINE_integer("batch_size", 100, "Training batch size")
flags.DEFINE_float("learning_rate", 0.01, "Learning rate")
flags.DEFINE_boolean("sync_replicas", False,
"Use the sync_replicas (synchronized replicas) mode, "
"wherein the parameter updates from workers are aggregated "
"before applied to avoid stale gradients")
FLAGS = flags.FLAGS
IMAGE_PIXELS = 28
def main(unused_argv):
ps_hosts = FLAGS.ps_hosts.split(",")
worker_hosts = FLAGS.worker_hosts.split(",")
cluster = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})
server = tf.train.Server(cluster,
job_name=FLAGS.job_name,
task_index=FLAGS.worker_index)
if FLAGS.job_name == "ps":
server.join()
sys.exit(0)
mnist = read_data_sets(FLAGS.data_dir, one_hot=True)
if FLAGS.download_only:
sys.exit(0)
num_workers = len(worker_hosts)
worker_grpc_url = 'grpc://' + worker_hosts[0]
print("Worker GRPC URL: %s" % worker_grpc_url)
print("Worker index = %d" % FLAGS.worker_index)
print("Number of workers = %d" % num_workers)
is_chief = (FLAGS.worker_index == 0)
if FLAGS.sync_replicas:
if FLAGS.replicas_to_aggregate is None:
replicas_to_aggregate = num_workers
else:
replicas_to_aggregate = FLAGS.replicas_to_aggregate
# Construct device setter object
device_setter = tf.train.replica_device_setter(cluster=cluster)
# The device setter will automatically place Variables ops on separate
# parameter servers (ps). The non-Variable ops will be placed on the workers.
with tf.device(device_setter):
global_step = tf.Variable(0, name="global_step", trainable=False)
# Variables of the hidden layer
hid_w = tf.Variable(
tf.truncated_normal([IMAGE_PIXELS * IMAGE_PIXELS, FLAGS.hidden_units],
stddev=1.0 / IMAGE_PIXELS), name="hid_w")
hid_b = tf.Variable(tf.zeros([FLAGS.hidden_units]), name="hid_b")
# Variables of the softmax layer
sm_w = tf.Variable(
tf.truncated_normal([FLAGS.hidden_units, 10],
stddev=1.0 / math.sqrt(FLAGS.hidden_units)),
name="sm_w")
sm_b = tf.Variable(tf.zeros([10]), name="sm_b")
# Ops: located on the worker specified with FLAGS.worker_index
x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS * IMAGE_PIXELS])
y_ = tf.placeholder(tf.float32, [None, 10])
hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b)
hid = tf.nn.relu(hid_lin)
y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))
cross_entropy = -tf.reduce_sum(y_ *
tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
opt = tf.train.AdamOptimizer(FLAGS.learning_rate)
if FLAGS.sync_replicas:
opt = tf.train.SyncReplicasOptimizer(
opt,
replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=num_workers,
replica_id=FLAGS.worker_index,
name="mnist_sync_replicas")
train_step = opt.minimize(cross_entropy,
global_step=global_step)
if FLAGS.sync_replicas and is_chief:
# Initial token and chief queue runners required by the sync_replicas mode
chief_queue_runner = opt.get_chief_queue_runner()
init_tokens_op = opt.get_init_tokens_op()
init_op = tf.initialize_all_variables()
train_dir = tempfile.mkdtemp()
sv = tf.train.Supervisor(is_chief=is_chief,
logdir=train_dir,
init_op=init_op,
recovery_wait_secs=1,
global_step=global_step)
sess_config = tf.ConfigProto(
device_filters=["/job:ps", "/job:worker/task:%d" % FLAGS.worker_index])
  # The chief worker (worker_index==0) will prepare the session,
# while the remaining workers will wait for the preparation to complete.
if is_chief:
print("Worker %d: Initializing session..." % FLAGS.worker_index)
else:
print("Worker %d: Waiting for session to be initialized..." %
FLAGS.worker_index)
with sv.prepare_or_wait_for_session(worker_grpc_url, config=sess_config) as sess:
print("Worker %d: Session initialization complete." % FLAGS.worker_index)
if FLAGS.sync_replicas and is_chief:
# Chief worker will start the chief queue runner and call the init op
print("Starting chief queue runner and running init_tokens_op")
sv.start_queue_runners(sess, [chief_queue_runner])
sess.run(init_tokens_op)
# Perform training
time_begin = time.time()
print("Training begins @ %f" % time_begin)
local_step = 0
step = 0
while not sv.should_stop() and step < FLAGS.train_steps:
# Training feed
batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
train_feed = {x: batch_xs,
y_: batch_ys}
_, step = sess.run([train_step, global_step], feed_dict=train_feed)
local_step += 1
now = time.time()
if is_chief:
print("%f: Worker %d: training step %d done (global step: %d)" %
(now, FLAGS.worker_index, local_step, step))
sv.stop()
if is_chief:
time_end = time.time()
print("Training ends @ %f" % time_end)
training_time = time_end - time_begin
print("Training elapsed time: %f s" % training_time)
# Validation feed
val_feed = {x: mnist.validation.images,
y_: mnist.validation.labels}
val_xent = sess.run(cross_entropy, feed_dict=val_feed)
print("After %d training step(s), validation cross entropy = %g" %
(FLAGS.train_steps, val_xent))
if __name__ == "__main__":
tf.app.run()
|
{
"content_hash": "39901eb3d4e60e3cbdc94653e2df9f8e",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 85,
"avg_line_length": 39.63720930232558,
"alnum_prop": 0.6284909645623094,
"repo_name": "douban/tfmesos",
"id": "a984d24ff8d9da4ed322a8b7b36f415e664b2d8e",
"size": "9212",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/mnist/mnist_replica.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "855"
},
{
"name": "Python",
"bytes": "26790"
},
{
"name": "Shell",
"bytes": "2689"
}
],
"symlink_target": ""
}
|
import os
import six
import tempfile
import nova.privsep.path
from nova import test
class LastBytesTestCase(test.NoDBTestCase):
"""Test the last_bytes() utility method."""
def setUp(self):
super(LastBytesTestCase, self).setUp()
self.f = six.BytesIO(b'1234567890')
def test_truncated(self):
self.f.seek(0, os.SEEK_SET)
out, remaining = nova.privsep.path._last_bytes_inner(self.f, 5)
self.assertEqual(out, b'67890')
self.assertGreater(remaining, 0)
def test_read_all(self):
self.f.seek(0, os.SEEK_SET)
out, remaining = nova.privsep.path._last_bytes_inner(self.f, 1000)
self.assertEqual(out, b'1234567890')
self.assertFalse(remaining > 0)
def test_seek_too_far_real_file(self):
        # StringIO doesn't raise IOError if you seek past the start of the file.
with tempfile.TemporaryFile() as flo:
content = b'1234567890'
flo.write(content)
self.assertEqual(
(content, 0),
nova.privsep.path._last_bytes_inner(flo, 1000))
|
{
"content_hash": "4b158d7f7a0701587e341b534154ead7",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 79,
"avg_line_length": 31.4,
"alnum_prop": 0.6278434940855323,
"repo_name": "gooddata/openstack-nova",
"id": "cacbebd0e9f939e7043c8df84749bed65da99ab5",
"size": "1741",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/tests/unit/privsep/test_path.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3858"
},
{
"name": "HTML",
"bytes": "1386"
},
{
"name": "PHP",
"bytes": "43584"
},
{
"name": "Python",
"bytes": "23012372"
},
{
"name": "Shell",
"bytes": "32567"
},
{
"name": "Smarty",
"bytes": "429290"
}
],
"symlink_target": ""
}
|
import types
import warnings
import pickle
import re
from copy import deepcopy
from functools import partial, wraps
from inspect import signature
from numbers import Real
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
import joblib
from . import IS_PYPY
from .. import config_context
from ._param_validation import Interval
from ._testing import _get_args
from ._testing import assert_raise_message
from ._testing import assert_array_equal
from ._testing import assert_array_almost_equal
from ._testing import assert_allclose
from ._testing import assert_allclose_dense_sparse
from ._testing import assert_array_less
from ._testing import set_random_state
from ._testing import SkipTest
from ._testing import ignore_warnings
from ._testing import create_memmap_backed_data
from ._testing import raises
from . import is_scalar_nan
from ..linear_model import LinearRegression
from ..linear_model import LogisticRegression
from ..linear_model import RANSACRegressor
from ..linear_model import Ridge
from ..linear_model import SGDRegressor
from ..base import (
clone,
ClusterMixin,
is_classifier,
is_regressor,
is_outlier_detector,
RegressorMixin,
)
from ..metrics import accuracy_score, adjusted_rand_score, f1_score
from ..random_projection import BaseRandomProjection
from ..feature_selection import SelectKBest
from ..feature_selection import SelectFromModel
from ..pipeline import make_pipeline
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..exceptions import SkipTestWarning
from ..model_selection import train_test_split
from ..model_selection import ShuffleSplit
from ..model_selection._validation import _safe_split
from ..metrics.pairwise import rbf_kernel, linear_kernel, pairwise_distances
from ..utils.fixes import sp_version
from ..utils.fixes import parse_version
from ..utils.validation import check_is_fitted
from ..utils._param_validation import make_constraint
from ..utils._param_validation import generate_invalid_param_val
from . import shuffle
from ._tags import (
_DEFAULT_TAGS,
_safe_tags,
)
from .validation import has_fit_parameter, _num_samples
from ..preprocessing import StandardScaler
from ..preprocessing import scale
from ..datasets import (
load_iris,
make_blobs,
make_multilabel_classification,
make_regression,
)
REGRESSION_DATASET = None
CROSS_DECOMPOSITION = ["PLSCanonical", "PLSRegression", "CCA", "PLSSVD"]
def _yield_checks(estimator):
name = estimator.__class__.__name__
tags = _safe_tags(estimator)
yield check_no_attributes_set_in_init
yield check_estimators_dtypes
yield check_fit_score_takes_y
if has_fit_parameter(estimator, "sample_weight"):
yield check_sample_weights_pandas_series
yield check_sample_weights_not_an_array
yield check_sample_weights_list
if not tags["pairwise"]:
# We skip pairwise because the data is not pairwise
yield check_sample_weights_shape
yield check_sample_weights_not_overwritten
yield partial(check_sample_weights_invariance, kind="ones")
yield partial(check_sample_weights_invariance, kind="zeros")
yield check_estimators_fit_returns_self
yield partial(check_estimators_fit_returns_self, readonly_memmap=True)
# Check that all estimator yield informative messages when
# trained on empty datasets
if not tags["no_validation"]:
yield check_complex_data
yield check_dtype_object
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION:
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if not tags["allow_nan"] and not tags["no_validation"]:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if tags["pairwise"]:
# Check that pairwise estimator throws error on non-square input
yield check_nonsquare_error
yield check_estimators_overwrite_params
if hasattr(estimator, "sparsify"):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
yield check_estimator_get_tags_default_keys
def _yield_classifier_checks(classifier):
tags = _safe_tags(classifier)
# test classifiers can handle non-array data and pandas objects
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
if tags["multioutput"]:
yield check_classifier_multioutput
# basic consistency testing
yield check_classifiers_train
yield partial(check_classifiers_train, readonly_memmap=True)
yield partial(check_classifiers_train, readonly_memmap=True, X_dtype="float32")
yield check_classifiers_regression_target
if tags["multilabel"]:
yield check_classifiers_multilabel_representation_invariance
yield check_classifiers_multilabel_output_format_predict
yield check_classifiers_multilabel_output_format_predict_proba
yield check_classifiers_multilabel_output_format_decision_function
if not tags["no_validation"]:
yield check_supervised_y_no_nan
if not tags["multioutput_only"]:
yield check_supervised_y_2d
if tags["requires_fit"]:
yield check_estimators_unfitted
if "class_weight" in classifier.get_params().keys():
yield check_class_weight_classifiers
yield check_non_transformer_estimators_n_iter
# test if predict_proba is a monotonic transformation of decision_function
yield check_decision_proba_consistency
@ignore_warnings(category=FutureWarning)
def check_supervised_y_no_nan(name, estimator_orig):
# Checks that the Estimator targets are not NaN.
estimator = clone(estimator_orig)
rng = np.random.RandomState(888)
X = rng.standard_normal(size=(10, 5))
for value in [np.nan, np.inf]:
y = np.full(10, value)
y = _enforce_estimator_tags_y(estimator, y)
module_name = estimator.__module__
if module_name.startswith("sklearn.") and not (
"test_" in module_name or module_name.endswith("_testing")
):
# In scikit-learn we want the error message to mention the input
# name and be specific about the kind of unexpected value.
if np.isinf(value):
match = (
r"Input (y|Y) contains infinity or a value too large for"
r" dtype\('float64'\)."
)
else:
match = r"Input (y|Y) contains NaN."
else:
# Do not impose a particular error message to third-party libraries.
match = None
err_msg = (
f"Estimator {name} should have raised error on fitting array y with inf"
" value."
)
with raises(ValueError, match=match, err_msg=err_msg):
estimator.fit(X, y)
def _yield_regressor_checks(regressor):
tags = _safe_tags(regressor)
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield partial(check_regressors_train, readonly_memmap=True)
yield partial(check_regressors_train, readonly_memmap=True, X_dtype="float32")
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
if tags["multioutput"]:
yield check_regressor_multioutput
yield check_regressors_no_decision_function
if not tags["no_validation"] and not tags["multioutput_only"]:
yield check_supervised_y_2d
yield check_supervised_y_no_nan
name = regressor.__class__.__name__
if name != "CCA":
# check that the regressor handles int input
yield check_regressors_int
if tags["requires_fit"]:
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(transformer):
tags = _safe_tags(transformer)
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if not tags["no_validation"]:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
yield check_transformer_general
if tags["preserves_dtype"]:
yield check_transformer_preserve_dtypes
yield partial(check_transformer_general, readonly_memmap=True)
if not _safe_tags(transformer, key="stateless"):
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = [
"Isomap",
"KernelPCA",
"LocallyLinearEmbedding",
"RandomizedLasso",
"LogisticRegressionCV",
"BisectingKMeans",
]
name = transformer.__class__.__name__
if name not in external_solver:
yield check_transformer_n_iter
def _yield_clustering_checks(clusterer):
yield check_clusterer_compute_labels_predict
name = clusterer.__class__.__name__
if name not in ("WardAgglomeration", "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield partial(check_clustering, readonly_memmap=True)
yield check_estimators_partial_fit_n_features
if not hasattr(clusterer, "transform"):
yield check_non_transformer_estimators_n_iter
def _yield_outliers_checks(estimator):
# checks for the contamination parameter
if hasattr(estimator, "contamination"):
yield check_outlier_contamination
# checks for outlier detectors that have a fit_predict method
if hasattr(estimator, "fit_predict"):
yield check_outliers_fit_predict
# checks for estimators that can be used on a test set
if hasattr(estimator, "predict"):
yield check_outliers_train
yield partial(check_outliers_train, readonly_memmap=True)
# test outlier detectors can handle non-array data
yield check_classifier_data_not_an_array
# test if NotFittedError is raised
if _safe_tags(estimator, key="requires_fit"):
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_all_checks(estimator):
name = estimator.__class__.__name__
tags = _safe_tags(estimator)
if "2darray" not in tags["X_types"]:
warnings.warn(
"Can't test estimator {} which requires input of type {}".format(
name, tags["X_types"]
),
SkipTestWarning,
)
return
if tags["_skip_test"]:
warnings.warn(
"Explicit SKIP via _skip_test tag for estimator {}.".format(name),
SkipTestWarning,
)
return
for check in _yield_checks(estimator):
yield check
if is_classifier(estimator):
for check in _yield_classifier_checks(estimator):
yield check
if is_regressor(estimator):
for check in _yield_regressor_checks(estimator):
yield check
if hasattr(estimator, "transform"):
for check in _yield_transformer_checks(estimator):
yield check
if isinstance(estimator, ClusterMixin):
for check in _yield_clustering_checks(estimator):
yield check
if is_outlier_detector(estimator):
for check in _yield_outliers_checks(estimator):
yield check
yield check_parameters_default_constructible
if not tags["non_deterministic"]:
yield check_methods_sample_order_invariance
yield check_methods_subset_invariance
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_get_params_invariance
yield check_set_params
yield check_dict_unchanged
yield check_dont_overwrite_parameters
yield check_fit_idempotent
yield check_fit_check_is_fitted
if not tags["no_validation"]:
yield check_n_features_in
yield check_fit1d
yield check_fit2d_predict1d
if tags["requires_y"]:
yield check_requires_y_none
if tags["requires_positive_X"]:
yield check_fit_non_negative
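# A minimal usage sketch (not part of the check suite itself): enumerate the
# checks generated for an estimator. LogisticRegression is only an
# illustrative choice here.
#
# >>> from sklearn.linear_model import LogisticRegression
# >>> checks = list(_yield_all_checks(LogisticRegression()))
# >>> all(callable(check) for check in checks)
# True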
def _get_check_estimator_ids(obj):
"""Create pytest ids for checks.
When `obj` is an estimator, this returns the pprint version of the
estimator (with `print_changed_only=True`). When `obj` is a function, the
name of the function is returned with its keyword arguments.
`_get_check_estimator_ids` is designed to be used as the `id` in
`pytest.mark.parametrize` where `check_estimator(..., generate_only=True)`
is yielding estimators and checks.
Parameters
----------
obj : estimator or function
Items generated by `check_estimator`.
Returns
-------
id : str or None
See Also
--------
check_estimator
"""
if callable(obj):
if not isinstance(obj, partial):
return obj.__name__
if not obj.keywords:
return obj.func.__name__
kwstring = ",".join(["{}={}".format(k, v) for k, v in obj.keywords.items()])
return "{}({})".format(obj.func.__name__, kwstring)
if hasattr(obj, "get_params"):
with config_context(print_changed_only=True):
return re.sub(r"\s", "", str(obj))
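# Hedged illustration of the ids produced above; `check_classifiers_train`
# is defined later in this module and LogisticRegression is illustrative.
#
# >>> from functools import partial
# >>> _get_check_estimator_ids(partial(check_classifiers_train, readonly_memmap=True))
# 'check_classifiers_train(readonly_memmap=True)'
# >>> from sklearn.linear_model import LogisticRegression
# >>> _get_check_estimator_ids(LogisticRegression(C=10))
# 'LogisticRegression(C=10)'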
def _construct_instance(Estimator):
"""Construct Estimator instance if possible."""
required_parameters = getattr(Estimator, "_required_parameters", [])
if len(required_parameters):
if required_parameters in (["estimator"], ["base_estimator"]):
# `RANSACRegressor` will raise an error with any model other
# than `LinearRegression` if we don't fix `min_samples` parameter.
# For the common tests we can enforce using `LinearRegression`, which
# is the default estimator in `RANSACRegressor`, instead of `Ridge`.
if issubclass(Estimator, RANSACRegressor):
estimator = Estimator(LinearRegression())
elif issubclass(Estimator, RegressorMixin):
estimator = Estimator(Ridge())
elif issubclass(Estimator, SelectFromModel):
# Increases coverage because SGDRegressor has partial_fit
estimator = Estimator(SGDRegressor(random_state=0))
else:
estimator = Estimator(LogisticRegression(C=1))
elif required_parameters in (["estimators"],):
# Heterogeneous ensemble classes (i.e. stacking, voting)
if issubclass(Estimator, RegressorMixin):
estimator = Estimator(
estimators=[("est1", Ridge(alpha=0.1)), ("est2", Ridge(alpha=1))]
)
else:
estimator = Estimator(
estimators=[
("est1", LogisticRegression(C=0.1)),
("est2", LogisticRegression(C=1)),
]
)
else:
msg = (
f"Can't instantiate estimator {Estimator.__name__} "
f"parameters {required_parameters}"
)
# raise additional warning to be shown by pytest
warnings.warn(msg, SkipTestWarning)
raise SkipTest(msg)
else:
estimator = Estimator()
return estimator
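# Sketch of the intended behaviour (StackingClassifier is an illustrative
# class with a required `estimators` parameter that gets filled in above):
#
# >>> from sklearn.ensemble import StackingClassifier
# >>> type(_construct_instance(StackingClassifier)).__name__
# 'StackingClassifier'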
def _maybe_mark_xfail(estimator, check, pytest):
# Mark (estimator, check) pairs as XFAIL if needed (see conditions in
# _should_be_skipped_or_marked())
# This is similar to _maybe_skip(), but this one is used by
# @parametrize_with_checks() instead of check_estimator()
should_be_marked, reason = _should_be_skipped_or_marked(estimator, check)
if not should_be_marked:
return estimator, check
else:
return pytest.param(estimator, check, marks=pytest.mark.xfail(reason=reason))
def _maybe_skip(estimator, check):
# Wrap a check so that it's skipped if needed (see conditions in
# _should_be_skipped_or_marked())
# This is similar to _maybe_mark_xfail(), but this one is used by
# check_estimator() instead of @parametrize_with_checks which requires
# pytest
should_be_skipped, reason = _should_be_skipped_or_marked(estimator, check)
if not should_be_skipped:
return check
check_name = check.func.__name__ if isinstance(check, partial) else check.__name__
@wraps(check)
def wrapped(*args, **kwargs):
raise SkipTest(
f"Skipping {check_name} for {estimator.__class__.__name__}: {reason}"
)
return wrapped
def _should_be_skipped_or_marked(estimator, check):
# Return whether a check should be skipped (when using check_estimator())
# or marked as XFAIL (when using @parametrize_with_checks()), along with a
# reason.
# Currently, a check should be skipped or marked if
# the check is in the _xfail_checks tag of the estimator
check_name = check.func.__name__ if isinstance(check, partial) else check.__name__
xfail_checks = _safe_tags(estimator, key="_xfail_checks") or {}
if check_name in xfail_checks:
return True, xfail_checks[check_name]
return False, "placeholder reason that will never be used"
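# Hedged example, assuming LogisticRegression defines no _xfail_checks tag
# (check_fit2d_1sample is defined later in this module):
#
# >>> from sklearn.linear_model import LogisticRegression
# >>> _should_be_skipped_or_marked(LogisticRegression(), check_fit2d_1sample)
# (False, 'placeholder reason that will never be used')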
def parametrize_with_checks(estimators):
"""Pytest specific decorator for parametrizing estimator checks.
The `id` of each check is set to be a pprint version of the estimator
and the name of the check with its keyword arguments.
This allows the use of `pytest -k` to specify which tests to run::
pytest test_check_estimators.py -k check_estimators_fit_returns_self
Parameters
----------
estimators : list of estimators instances
Estimators to generate checks for.
.. versionchanged:: 0.24
Passing a class was deprecated in version 0.23, and support for
classes was removed in 0.24. Pass an instance instead.
.. versionadded:: 0.24
Returns
-------
decorator : `pytest.mark.parametrize`
See Also
--------
check_estimator : Check if estimator adheres to scikit-learn conventions.
Examples
--------
>>> from sklearn.utils.estimator_checks import parametrize_with_checks
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.tree import DecisionTreeRegressor
>>> @parametrize_with_checks([LogisticRegression(),
... DecisionTreeRegressor()])
... def test_sklearn_compatible_estimator(estimator, check):
... check(estimator)
"""
import pytest
if any(isinstance(est, type) for est in estimators):
msg = (
"Passing a class was deprecated in version 0.23 "
"and isn't supported anymore from 0.24."
"Please pass an instance instead."
)
raise TypeError(msg)
def checks_generator():
for estimator in estimators:
name = type(estimator).__name__
for check in _yield_all_checks(estimator):
check = partial(check, name)
yield _maybe_mark_xfail(estimator, check, pytest)
return pytest.mark.parametrize(
"estimator, check", checks_generator(), ids=_get_check_estimator_ids
)
def check_estimator(estimator=None, generate_only=False, Estimator="deprecated"):
"""Check if estimator adheres to scikit-learn conventions.
This function will run an extensive test-suite for input validation,
shapes, etc, making sure that the estimator complies with `scikit-learn`
conventions as detailed in :ref:`rolling_your_own_estimator`.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Setting `generate_only=True` returns a generator that yields (estimator,
check) tuples where each check can be called independently from the
others, i.e. `check(estimator)`. This allows all checks to be run
independently and report the checks that are failing.
scikit-learn provides a pytest specific decorator,
:func:`~sklearn.utils.parametrize_with_checks`, making it easier to test
multiple estimators.
Parameters
----------
estimator : estimator object
Estimator instance to check.
.. versionadded:: 1.1
Passing a class was deprecated in version 0.23, and support for
classes was removed in 0.24.
generate_only : bool, default=False
When `False`, checks are evaluated when `check_estimator` is called.
When `True`, `check_estimator` returns a generator that yields
(estimator, check) tuples. The check is run by calling
`check(estimator)`.
.. versionadded:: 0.22
Estimator : estimator object
Estimator instance to check.
.. deprecated:: 1.1
``Estimator`` was deprecated in favor of ``estimator`` in version 1.1
and will be removed in version 1.3.
Returns
-------
checks_generator : generator
Generator that yields (estimator, check) tuples. Returned when
`generate_only=True`.
See Also
--------
parametrize_with_checks : Pytest specific decorator for parametrizing estimator
checks.
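Examples
--------
A minimal example; `LogisticRegression` is used purely for illustration:
>>> from sklearn.utils.estimator_checks import check_estimator
>>> from sklearn.linear_model import LogisticRegression
>>> check_estimator(LogisticRegression())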
"""
if estimator is None and Estimator == "deprecated":
msg = "Either estimator or Estimator should be passed to check_estimator."
raise ValueError(msg)
if Estimator != "deprecated":
msg = (
"'Estimator' was deprecated in favor of 'estimator' in version 1.1 "
"and will be removed in version 1.3."
)
warnings.warn(msg, FutureWarning)
estimator = Estimator
if isinstance(estimator, type):
msg = (
"Passing a class was deprecated in version 0.23 "
"and isn't supported anymore from 0.24."
"Please pass an instance instead."
)
raise TypeError(msg)
name = type(estimator).__name__
def checks_generator():
for check in _yield_all_checks(estimator):
check = _maybe_skip(estimator, check)
yield estimator, partial(check, name)
if generate_only:
return checks_generator()
for estimator, check in checks_generator():
try:
check(estimator)
except SkipTest as exception:
# SkipTest is thrown when pandas can't be imported, or by checks
# that are in the xfail_checks tag
warnings.warn(str(exception), SkipTestWarning)
def _regression_dataset():
global REGRESSION_DATASET
if REGRESSION_DATASET is None:
X, y = make_regression(
n_samples=200,
n_features=10,
n_informative=1,
bias=5.0,
noise=20,
random_state=42,
)
X = StandardScaler().fit_transform(X)
REGRESSION_DATASET = X, y
return REGRESSION_DATASET
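# Illustrative use of the cached dataset helper above (shapes follow from
# the make_regression call):
#
# >>> X, y = _regression_dataset()
# >>> X.shape, y.shape
# ((200, 10), (200,))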
def _set_checking_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
name = estimator.__class__.__name__
if name == "TSNE":
estimator.set_params(perplexity=2)
if "n_iter" in params and name != "TSNE":
estimator.set_params(n_iter=5)
if "max_iter" in params:
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR, LinearSVC
if name in ["LinearSVR", "LinearSVC"]:
estimator.set_params(max_iter=20)
# NMF
if name == "NMF":
estimator.set_params(max_iter=500)
# MiniBatchNMF
if name == "MiniBatchNMF":
estimator.set_params(max_iter=20, fresh_restarts=True)
# MLP
if name in ["MLPClassifier", "MLPRegressor"]:
estimator.set_params(max_iter=100)
# MiniBatchDictionaryLearning
if name == "MiniBatchDictionaryLearning":
estimator.set_params(max_iter=5)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "batch_size" in params and not name.startswith("MLP"):
estimator.set_params(batch_size=10)
if name == "MeanShift":
# In the case of check_fit2d_1sample, bandwidth is set to None and
# is thus estimated. De facto it is 0.0 as a single sample is provided
# and this makes the test fail. Hence we give it a placeholder value.
estimator.set_params(bandwidth=1.0)
if name == "TruncatedSVD":
# TruncatedSVD doesn't run with n_components = n_features
# This is ugly :-/
estimator.n_components = 1
if name == "LassoLarsIC":
# Noise variance estimation does not work when `n_samples < n_features`.
# We need to provide the noise variance explicitly.
estimator.set_params(noise_variance=1.0)
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = min(estimator.n_clusters, 2)
if hasattr(estimator, "n_best"):
estimator.n_best = 1
if name == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=0.5)
if name == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=2)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more features than we have in most cases.
estimator.set_params(k=1)
if name in ("HistGradientBoostingClassifier", "HistGradientBoostingRegressor"):
# The default min_samples_leaf (20) isn't appropriate for small
# datasets (only very shallow trees are built) that the checks use.
estimator.set_params(min_samples_leaf=5)
if name == "DummyClassifier":
# the default strategy prior would output constant predictions and fail
# for check_classifiers_predictions
estimator.set_params(strategy="stratified")
# Speed-up by reducing the number of CV or splits for CV estimators
loo_cv = ["RidgeCV", "RidgeClassifierCV"]
if name not in loo_cv and hasattr(estimator, "cv"):
estimator.set_params(cv=3)
if hasattr(estimator, "n_splits"):
estimator.set_params(n_splits=3)
if name == "OneHotEncoder":
estimator.set_params(handle_unknown="ignore")
if name == "QuantileRegressor":
# Avoid warning due to Scipy deprecating interior-point solver
solver = "highs" if sp_version >= parse_version("1.6.0") else "interior-point"
estimator.set_params(solver=solver)
if name in CROSS_DECOMPOSITION:
estimator.set_params(n_components=1)
# Default "auto" parameter can lead to different ordering of eigenvalues on
# windows: #24105
if name == "SpectralEmbedding":
estimator.set_params(eigen_tol=1e-5)
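# A quick sketch of the effect of the helper above (LogisticRegression is
# illustrative; its max_iter is capped at 5 by the "max_iter" rule):
#
# >>> from sklearn.linear_model import LogisticRegression
# >>> est = LogisticRegression(max_iter=1000)
# >>> _set_checking_parameters(est)
# >>> est.max_iter
# 5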
class _NotAnArray:
"""An object that is convertible to an array.
Parameters
----------
data : array-like
The data.
"""
def __init__(self, data):
self.data = np.asarray(data)
def __array__(self, dtype=None):
return self.data
def __array_function__(self, func, types, args, kwargs):
if func.__name__ == "may_share_memory":
return True
raise TypeError("Don't want to call array_function {}!".format(func.__name__))
def _is_pairwise_metric(estimator):
"""Returns True if estimator accepts pairwise metric.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if the estimator's `metric` parameter is set to "precomputed",
i.e. it expects a pairwise (precomputed) metric, False otherwise.
"""
metric = getattr(estimator, "metric", None)
return bool(metric == "precomputed")
def _generate_sparse_matrix(X_csr):
"""Generate sparse matrices with {32,64}bit indices of diverse format.
Parameters
----------
X_csr: CSR Matrix
Input matrix in CSR format.
Returns
-------
out: iterator of (str, matrix) tuples
Matrices in the formats 'dok', 'lil', 'dia', 'bsr', 'csr', 'csc',
'coo', 'coo_64', 'csc_64' and 'csr_64', paired with the format name.
"""
assert X_csr.format == "csr"
yield "csr", X_csr.copy()
for sparse_format in ["dok", "lil", "dia", "bsr", "csc", "coo"]:
yield sparse_format, X_csr.asformat(sparse_format)
# Generate matrices with large (64-bit) indices
X_coo = X_csr.asformat("coo")
X_coo.row = X_coo.row.astype("int64")
X_coo.col = X_coo.col.astype("int64")
yield "coo_64", X_coo
for sparse_format in ["csc", "csr"]:
X = X_csr.asformat(sparse_format)
X.indices = X.indices.astype("int64")
X.indptr = X.indptr.astype("int64")
yield sparse_format + "_64", X
def check_estimator_sparse_data(name, estimator_orig):
rng = np.random.RandomState(0)
X = rng.uniform(size=(40, 3))
X[X < 0.8] = 0
X = _enforce_estimator_tags_X(estimator_orig, X)
X_csr = sparse.csr_matrix(X)
y = (4 * rng.uniform(size=40)).astype(int)
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
tags = _safe_tags(estimator_orig)
for matrix_format, X in _generate_sparse_matrix(X_csr):
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
estimator = clone(estimator_orig)
if name in ["Scaler", "StandardScaler"]:
estimator.set_params(with_mean=False)
# fit and predict
if "64" in matrix_format:
err_msg = (
f"Estimator {name} doesn't seem to support {matrix_format} "
"matrix, and is not failing gracefully, e.g. by using "
"check_array(X, accept_large_sparse=False)"
)
else:
err_msg = (
f"Estimator {name} doesn't seem to fail gracefully on sparse "
"data: error message should state explicitly that sparse "
"input is not supported if this is not the case."
)
with raises(
(TypeError, ValueError),
match=["sparse", "Sparse"],
may_pass=True,
err_msg=err_msg,
):
with ignore_warnings(category=FutureWarning):
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
if tags["multioutput_only"]:
assert pred.shape == (X.shape[0], 1)
else:
assert pred.shape == (X.shape[0],)
if hasattr(estimator, "predict_proba"):
probs = estimator.predict_proba(X)
if tags["binary_only"]:
expected_probs_shape = (X.shape[0], 2)
else:
expected_probs_shape = (X.shape[0], 4)
assert probs.shape == expected_probs_shape
@ignore_warnings(category=FutureWarning)
def check_sample_weights_pandas_series(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type pandas.Series in the 'fit' function.
estimator = clone(estimator_orig)
try:
import pandas as pd
X = np.array(
[
[1, 1],
[1, 2],
[1, 3],
[1, 4],
[2, 1],
[2, 2],
[2, 3],
[2, 4],
[3, 1],
[3, 2],
[3, 3],
[3, 4],
]
)
X = pd.DataFrame(_enforce_estimator_tags_X(estimator_orig, X))
y = pd.Series([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])
weights = pd.Series([1] * 12)
if _safe_tags(estimator, key="multioutput_only"):
y = pd.DataFrame(y)
try:
estimator.fit(X, y, sample_weight=weights)
except ValueError:
raise ValueError(
"Estimator {0} raises error if "
"'sample_weight' parameter is of "
"type pandas.Series".format(name)
)
except ImportError:
raise SkipTest(
"pandas is not installed: not testing for "
"input of type pandas.Series to class weight."
)
@ignore_warnings(category=(FutureWarning))
def check_sample_weights_not_an_array(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type _NotAnArray in the 'fit' function.
estimator = clone(estimator_orig)
X = np.array(
[
[1, 1],
[1, 2],
[1, 3],
[1, 4],
[2, 1],
[2, 2],
[2, 3],
[2, 4],
[3, 1],
[3, 2],
[3, 3],
[3, 4],
]
)
X = _NotAnArray(_enforce_estimator_tags_X(estimator_orig, X))
y = _NotAnArray([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])
weights = _NotAnArray([1] * 12)
if _safe_tags(estimator, key="multioutput_only"):
y = _NotAnArray(y.data.reshape(-1, 1))
estimator.fit(X, y, sample_weight=weights)
@ignore_warnings(category=(FutureWarning))
def check_sample_weights_list(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type list in the 'fit' function.
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
n_samples = 30
X = _enforce_estimator_tags_X(estimator_orig, rnd.uniform(size=(n_samples, 3)))
y = np.arange(n_samples) % 3
y = _enforce_estimator_tags_y(estimator, y)
sample_weight = [3] * n_samples
# Test that estimators don't raise any exception
estimator.fit(X, y, sample_weight=sample_weight)
@ignore_warnings(category=FutureWarning)
def check_sample_weights_shape(name, estimator_orig):
# check that estimators raise an error if sample_weight
# shape mismatches the input
estimator = clone(estimator_orig)
X = np.array(
[
[1, 3],
[1, 3],
[1, 3],
[1, 3],
[2, 1],
[2, 1],
[2, 1],
[2, 1],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[4, 1],
[4, 1],
[4, 1],
[4, 1],
]
)
y = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2])
y = _enforce_estimator_tags_y(estimator, y)
estimator.fit(X, y, sample_weight=np.ones(len(y)))
with raises(ValueError):
estimator.fit(X, y, sample_weight=np.ones(2 * len(y)))
with raises(ValueError):
estimator.fit(X, y, sample_weight=np.ones((len(y), 2)))
@ignore_warnings(category=FutureWarning)
def check_sample_weights_invariance(name, estimator_orig, kind="ones"):
# For kind="ones" check that the estimators yield same results for
# unit weights and no weights
# For kind="zeros" check that setting sample_weight to 0 is equivalent
# to removing corresponding samples.
estimator1 = clone(estimator_orig)
estimator2 = clone(estimator_orig)
set_random_state(estimator1, random_state=0)
set_random_state(estimator2, random_state=0)
X1 = np.array(
[
[1, 3],
[1, 3],
[1, 3],
[1, 3],
[2, 1],
[2, 1],
[2, 1],
[2, 1],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[4, 1],
[4, 1],
[4, 1],
[4, 1],
],
dtype=np.float64,
)
y1 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int)
if kind == "ones":
X2 = X1
y2 = y1
sw2 = np.ones(shape=len(y1))
err_msg = (
f"For {name} sample_weight=None is not equivalent to sample_weight=ones"
)
elif kind == "zeros":
# Construct a dataset that is very different from (X, y) if weights
# are disregarded, but identical to (X, y) given weights.
X2 = np.vstack([X1, X1 + 1])
y2 = np.hstack([y1, 3 - y1])
sw2 = np.ones(shape=len(y1) * 2)
sw2[len(y1) :] = 0
X2, y2, sw2 = shuffle(X2, y2, sw2, random_state=0)
err_msg = (
f"For {name}, a zero sample_weight is not equivalent to removing the sample"
)
else: # pragma: no cover
raise ValueError
y1 = _enforce_estimator_tags_y(estimator1, y1)
y2 = _enforce_estimator_tags_y(estimator2, y2)
estimator1.fit(X1, y=y1, sample_weight=None)
estimator2.fit(X2, y=y2, sample_weight=sw2)
for method in ["predict", "predict_proba", "decision_function", "transform"]:
if hasattr(estimator_orig, method):
X_pred1 = getattr(estimator1, method)(X1)
X_pred2 = getattr(estimator2, method)(X1)
assert_allclose_dense_sparse(X_pred1, X_pred2, err_msg=err_msg)
def check_sample_weights_not_overwritten(name, estimator_orig):
# check that estimators don't override the passed sample_weight parameter
estimator = clone(estimator_orig)
set_random_state(estimator, random_state=0)
X = np.array(
[
[1, 3],
[1, 3],
[1, 3],
[1, 3],
[2, 1],
[2, 1],
[2, 1],
[2, 1],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[4, 1],
[4, 1],
[4, 1],
[4, 1],
],
dtype=np.float64,
)
y = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int)
y = _enforce_estimator_tags_y(estimator, y)
sample_weight_original = np.ones(y.shape[0])
sample_weight_original[0] = 10.0
sample_weight_fit = sample_weight_original.copy()
estimator.fit(X, y, sample_weight=sample_weight_fit)
err_msg = f"{name} overwrote the original `sample_weight` given during fit"
assert_allclose(sample_weight_fit, sample_weight_original, err_msg=err_msg)
@ignore_warnings(category=(FutureWarning, UserWarning))
def check_dtype_object(name, estimator_orig):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = _enforce_estimator_tags_X(estimator_orig, rng.uniform(size=(40, 10)))
X = X.astype(object)
tags = _safe_tags(estimator_orig)
y = (X[:, 0] * 4).astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
with raises(Exception, match="Unknown label type", may_pass=True):
estimator.fit(X, y.astype(object))
if "string" not in tags["X_types"]:
X[0, 0] = {"foo": "bar"}
msg = "argument must be a string.* number"
with raises(TypeError, match=msg):
estimator.fit(X, y)
else:
# Estimators supporting string will not call np.asarray to convert the
# data to numeric and therefore, the error will not be raised.
# Checking for each element dtype in the input array will be costly.
# Refer to #11401 for full discussion.
estimator.fit(X, y)
def check_complex_data(name, estimator_orig):
rng = np.random.RandomState(42)
# check that estimators raise an exception on providing complex data
X = rng.uniform(size=10) + 1j * rng.uniform(size=10)
X = X.reshape(-1, 1)
# Something both valid for classification and regression
y = rng.randint(low=0, high=2, size=10) + 1j
estimator = clone(estimator_orig)
set_random_state(estimator, random_state=0)
with raises(ValueError, match="Complex data not supported"):
estimator.fit(X, y)
@ignore_warnings
def check_dict_unchanged(name, estimator_orig):
# SpectralCoclustering raises
# "ValueError: Found array with 0 feature(s) (shape=(23, 0))
# while a minimum of 1 is required."
# so it is skipped below.
if name in ["SpectralCoclustering"]:
return
rnd = np.random.RandomState(0)
if name in ["RANSACRegressor"]:
X = 3 * rnd.uniform(size=(20, 3))
else:
X = 2 * rnd.uniform(size=(20, 3))
X = _enforce_estimator_tags_X(estimator_orig, X)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
if hasattr(estimator, "n_best"):
estimator.n_best = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function", "predict_proba"]:
if hasattr(estimator, method):
dict_before = estimator.__dict__.copy()
getattr(estimator, method)(X)
assert estimator.__dict__ == dict_before, (
"Estimator changes __dict__ during %s" % method
)
def _is_public_parameter(attr):
return not (attr.startswith("_") or attr.endswith("_"))
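# Illustration: attributes with a leading or trailing underscore are
# considered private by the helper above.
#
# >>> _is_public_parameter("C")
# True
# >>> _is_public_parameter("coef_")
# False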
@ignore_warnings(category=FutureWarning)
def check_dont_overwrite_parameters(name, estimator_orig):
# check that fit method only changes or sets private attributes
if hasattr(estimator_orig.__init__, "deprecated_original"):
# to not check deprecated classes
return
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _enforce_estimator_tags_X(estimator_orig, X)
y = X[:, 0].astype(int)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [
key for key in dict_after_fit.keys() if _is_public_parameter(key)
]
attrs_added_by_fit = [
key for key in public_keys_after_fit if key not in dict_before_fit.keys()
]
# check that fit doesn't add any public attribute
assert not attrs_added_by_fit, (
"Estimator adds public attribute(s) during"
" the fit method."
" Estimators are only allowed to add private attributes"
" either started with _ or ended"
" with _ but %s added"
% ", ".join(attrs_added_by_fit)
)
# check that fit doesn't change any public attribute
attrs_changed_by_fit = [
key
for key in public_keys_after_fit
if (dict_before_fit[key] is not dict_after_fit[key])
]
assert not attrs_changed_by_fit, (
"Estimator changes public attribute(s) during"
" the fit method. Estimators are only allowed"
" to change attributes started"
" or ended with _, but"
" %s changed"
% ", ".join(attrs_changed_by_fit)
)
@ignore_warnings(category=FutureWarning)
def check_fit2d_predict1d(name, estimator_orig):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _enforce_estimator_tags_X(estimator_orig, X)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function", "predict_proba"]:
if hasattr(estimator, method):
assert_raise_message(
ValueError, "Reshape your data", getattr(estimator, method), X[0]
)
def _apply_on_subsets(func, X):
# apply function on the whole set and on mini batches
result_full = func(X)
n_features = X.shape[1]
result_by_batch = [func(batch.reshape(1, n_features)) for batch in X]
# func can output tuple (e.g. score_samples)
if isinstance(result_full, tuple):
result_full = result_full[0]
result_by_batch = list(map(lambda x: x[0], result_by_batch))
if sparse.issparse(result_full):
result_full = result_full.A
result_by_batch = [x.A for x in result_by_batch]
return np.ravel(result_full), np.ravel(result_by_batch)
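# Minimal sketch of the subset helper above (the lambda stands in for an
# estimator method applied to the full set and row by row):
#
# >>> import numpy as np
# >>> full, batched = _apply_on_subsets(lambda X: X.sum(axis=1),
# ...                                   np.arange(6.0).reshape(3, 2))
# >>> np.allclose(full, batched)
# True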
@ignore_warnings(category=FutureWarning)
def check_methods_subset_invariance(name, estimator_orig):
# check that method gives invariant results if applied
# on mini batches or the whole set
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _enforce_estimator_tags_X(estimator_orig, X)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in [
"predict",
"transform",
"decision_function",
"score_samples",
"predict_proba",
]:
msg = ("{method} of {name} is not invariant when applied to a subset.").format(
method=method, name=name
)
if hasattr(estimator, method):
result_full, result_by_batch = _apply_on_subsets(
getattr(estimator, method), X
)
assert_allclose(result_full, result_by_batch, atol=1e-7, err_msg=msg)
@ignore_warnings(category=FutureWarning)
def check_methods_sample_order_invariance(name, estimator_orig):
# check that method gives invariant results if applied
# on a subset with different sample order
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _enforce_estimator_tags_X(estimator_orig, X)
y = X[:, 0].astype(np.int64)
if _safe_tags(estimator_orig, key="binary_only"):
y[y == 2] = 1
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 2
set_random_state(estimator, 1)
estimator.fit(X, y)
idx = rnd.permutation(X.shape[0])  # use the seeded RNG for determinism
for method in [
"predict",
"transform",
"decision_function",
"score_samples",
"predict_proba",
]:
msg = (
"{method} of {name} is not invariant when applied to a dataset"
"with different sample order."
).format(method=method, name=name)
if hasattr(estimator, method):
assert_allclose_dense_sparse(
getattr(estimator, method)(X)[idx],
getattr(estimator, method)(X[idx]),
atol=1e-9,
err_msg=msg,
)
@ignore_warnings
def check_fit2d_1sample(name, estimator_orig):
# Check that fitting a 2d array with only one sample either works or
# returns an informative message. The error message should either mention
# the number of samples or the number of classes.
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
X = _enforce_estimator_tags_X(estimator_orig, X)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
# min_cluster_size cannot be less than the data size for OPTICS.
if name == "OPTICS":
estimator.set_params(min_samples=1)
# perplexity cannot be more than the number of samples for TSNE.
if name == "TSNE":
estimator.set_params(perplexity=0.5)
msgs = [
"1 sample",
"n_samples = 1",
"n_samples=1",
"one sample",
"1 class",
"one class",
]
with raises(ValueError, match=msgs, may_pass=True):
estimator.fit(X, y)
@ignore_warnings
def check_fit2d_1feature(name, estimator_orig):
# check fitting a 2d array with only 1 feature either works or returns
# informative message
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
X = _enforce_estimator_tags_X(estimator_orig, X)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
# ensure two labels in subsample for RandomizedLogisticRegression
if name == "RandomizedLogisticRegression":
estimator.sample_fraction = 1
# ensure non skipped trials for RANSACRegressor
if name == "RANSACRegressor":
estimator.residual_threshold = 0.5
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator, 1)
msgs = [r"1 feature\(s\)", "n_features = 1", "n_features=1"]
with raises(ValueError, match=msgs, may_pass=True):
estimator.fit(X, y)
@ignore_warnings
def check_fit1d(name, estimator_orig):
# check fitting 1d X array raises a ValueError
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=20)
y = X.astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
with raises(ValueError):
estimator.fit(X, y)
@ignore_warnings(category=FutureWarning)
def check_transformer_general(name, transformer, readonly_memmap=False):
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
X = StandardScaler().fit_transform(X)
X = _enforce_estimator_tags_X(transformer, X)
if readonly_memmap:
X, y = create_memmap_backed_data([X, y])
_check_transformer(name, transformer, X, y)
@ignore_warnings(category=FutureWarning)
def check_transformer_data_not_an_array(name, transformer):
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
X = StandardScaler().fit_transform(X)
X = _enforce_estimator_tags_X(transformer, X)
this_X = _NotAnArray(X)
this_y = _NotAnArray(np.asarray(y))
_check_transformer(name, transformer, this_X, this_y)
# try the same with some list
_check_transformer(name, transformer, X.tolist(), y.tolist())
@ignore_warnings(category=FutureWarning)
def check_transformers_unfitted(name, transformer):
X, y = _regression_dataset()
transformer = clone(transformer)
with raises(
(AttributeError, ValueError),
err_msg=(
"The unfitted "
f"transformer {name} does not raise an error when "
"transform is called. Perhaps use "
"check_is_fitted in transform."
),
):
transformer.transform(X)
def _check_transformer(name, transformer_orig, X, y):
n_samples, n_features = np.asarray(X).shape
transformer = clone(transformer_orig)
set_random_state(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[np.asarray(y), np.asarray(y)]
y_[::2, 1] *= 2
if isinstance(X, _NotAnArray):
y_ = _NotAnArray(y_)
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on a non-fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert x_pred.shape[0] == n_samples
else:
# check for consistent n_samples
assert X_pred.shape[0] == n_samples
if hasattr(transformer, "transform"):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if _safe_tags(transformer_orig, key="non_deterministic"):
msg = name + " is non deterministic"
raise SkipTest(msg)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_allclose_dense_sparse(
x_pred,
x_pred2,
atol=1e-2,
err_msg="fit_transform and transform outcomes not consistent in %s"
% transformer,
)
assert_allclose_dense_sparse(
x_pred,
x_pred3,
atol=1e-2,
err_msg="consecutive fit_transform outcomes not consistent in %s"
% transformer,
)
else:
assert_allclose_dense_sparse(
X_pred,
X_pred2,
err_msg="fit_transform and transform outcomes not consistent in %s"
% transformer,
atol=1e-2,
)
assert_allclose_dense_sparse(
X_pred,
X_pred3,
atol=1e-2,
err_msg="consecutive fit_transform outcomes not consistent in %s"
% transformer,
)
assert _num_samples(X_pred2) == n_samples
assert _num_samples(X_pred3) == n_samples
# raises error on malformed input for transform
if (
hasattr(X, "shape")
and not _safe_tags(transformer, key="stateless")
and X.ndim == 2
and X.shape[1] > 1
):
# If it's not an array, it does not have a 'T' property
with raises(
ValueError,
err_msg=(
f"The transformer {name} does not raise an error "
"when the number of features in transform is different from "
"the number of features in fit."
),
):
transformer.transform(X[:, :-1])
@ignore_warnings
def check_pipeline_consistency(name, estimator_orig):
if _safe_tags(estimator_orig, key="non_deterministic"):
msg = name + " is non deterministic"
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
X = _enforce_estimator_tags_X(estimator_orig, X, kernel=rbf_kernel)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_allclose_dense_sparse(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, estimator_orig):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
n_samples = 30
X = rnd.uniform(size=(n_samples, 3))
X = _enforce_estimator_tags_X(estimator_orig, X)
y = np.arange(n_samples) % 3
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
if args[0] == "self":
# if_delegate_has_method makes methods into functions
# with an explicit "self", so need to shift arguments
args = args[1:]
assert args[1] in ["y", "Y"], (
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, type(estimator).__name__, args)
)
@ignore_warnings
def check_estimators_dtypes(name, estimator_orig):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_32 = _enforce_estimator_tags_X(estimator_orig, X_train_32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = _enforce_estimator_tags_y(estimator_orig, y)
methods = ["predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_transformer_preserve_dtypes(name, transformer_orig):
# check that dtypes are preserved: if the input X is of a given dtype,
# X_transformed should have the same dtype.
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
cluster_std=0.1,
)
X = StandardScaler().fit_transform(X)
X = _enforce_estimator_tags_X(transformer_orig, X)
for dtype in _safe_tags(transformer_orig, key="preserves_dtype"):
X_cast = X.astype(dtype)
transformer = clone(transformer_orig)
set_random_state(transformer)
X_trans = transformer.fit_transform(X_cast, y)
if isinstance(X_trans, tuple):
# cross-decomposition returns a tuple of (x_scores, y_scores)
# when given y with fit_transform; only check the first element
X_trans = X_trans[0]
# check that the output dtype is preserved
assert X_trans.dtype == dtype, (
f"Estimator transform dtype: {X_trans.dtype} - "
f"original/expected dtype: {dtype.__name__}"
)
@ignore_warnings(category=FutureWarning)
def check_estimators_empty_data_messages(name, estimator_orig):
e = clone(estimator_orig)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
err_msg = (
f"The estimator {name} does not raise a ValueError when an "
"empty data is used to train. Perhaps use check_array in train."
)
with raises(ValueError, err_msg=err_msg):
e.fit(X_zero_samples, [])
X_zero_features = np.empty(0).reshape(12, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = _enforce_estimator_tags_y(e, np.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]))
msg = r"0 feature\(s\) \(shape=\(\d*, 0\)\) while a minimum of \d* " "is required."
with raises(ValueError, match=msg):
e.fit(X_zero_features, y)
@ignore_warnings(category=FutureWarning)
def check_estimators_nan_inf(name, estimator_orig):
# Checks that the estimator raises an error when X contains NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = _enforce_estimator_tags_X(
estimator_orig, rnd.uniform(size=(10, 3))
)
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = _enforce_estimator_tags_y(estimator_orig, y)
error_string_fit = f"Estimator {name} doesn't check for NaN and inf in fit."
error_string_predict = f"Estimator {name} doesn't check for NaN and inf in predict."
error_string_transform = (
f"Estimator {name} doesn't check for NaN and inf in transform."
)
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
# try to fit
with raises(ValueError, match=["inf", "NaN"], err_msg=error_string_fit):
estimator.fit(X_train, y)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
with raises(
ValueError,
match=["inf", "NaN"],
err_msg=error_string_predict,
):
estimator.predict(X_train)
# transform
if hasattr(estimator, "transform"):
with raises(
ValueError,
match=["inf", "NaN"],
err_msg=error_string_transform,
):
estimator.transform(X_train)
@ignore_warnings
def check_nonsquare_error(name, estimator_orig):
"""Test that error is thrown when non-square data provided."""
X, y = make_blobs(n_samples=20, n_features=10)
estimator = clone(estimator_orig)
with raises(
ValueError,
err_msg=(
f"The pairwise estimator {name} does not raise an error on non-square data"
),
):
estimator.fit(X, y)
@ignore_warnings
def check_estimators_pickle(name, estimator_orig):
"""Test that we can pickle all estimators."""
check_methods = ["predict", "transform", "decision_function", "predict_proba"]
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
X = _enforce_estimator_tags_X(estimator_orig, X, kernel=rbf_kernel)
tags = _safe_tags(estimator_orig)
# include NaN values when the estimator should deal with them
if tags["allow_nan"]:
# set randomly 10 elements to np.nan
rng = np.random.RandomState(42)
mask = rng.choice(X.size, 10, replace=False)
X.reshape(-1)[mask] = np.nan
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
estimator.fit(X, y)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
module_name = estimator.__module__
if module_name.startswith("sklearn.") and not (
"test_" in module_name or module_name.endswith("_testing")
):
# strict check for sklearn estimators that are not implemented in test
# modules.
assert b"version" in pickled_estimator
unpickled_estimator = pickle.loads(pickled_estimator)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_allclose_dense_sparse(result[method], unpickled_result)
@ignore_warnings(category=FutureWarning)
def check_estimators_partial_fit_n_features(name, estimator_orig):
# check if number of features changes between calls to partial_fit.
if not hasattr(estimator_orig, "partial_fit"):
return
estimator = clone(estimator_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X = _enforce_estimator_tags_X(estimator_orig, X)
y = _enforce_estimator_tags_y(estimator_orig, y)
try:
if is_classifier(estimator):
classes = np.unique(y)
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
except NotImplementedError:
return
with raises(
ValueError,
err_msg=(
f"The estimator {name} does not raise an error when the "
"number of features changes between calls to partial_fit."
),
):
estimator.partial_fit(X[:, :-1], y)
@ignore_warnings(category=FutureWarning)
def check_classifier_multioutput(name, estimator):
n_samples, n_labels, n_classes = 42, 5, 3
tags = _safe_tags(estimator)
estimator = clone(estimator)
X, y = make_multilabel_classification(
random_state=42, n_samples=n_samples, n_labels=n_labels, n_classes=n_classes
)
estimator.fit(X, y)
y_pred = estimator.predict(X)
assert y_pred.shape == (n_samples, n_classes), (
"The shape of the prediction for multioutput data is "
"incorrect. Expected {}, got {}.".format((n_samples, n_labels), y_pred.shape)
)
assert y_pred.dtype.kind == "i"
if hasattr(estimator, "decision_function"):
decision = estimator.decision_function(X)
assert isinstance(decision, np.ndarray)
assert decision.shape == (n_samples, n_classes), (
"The shape of the decision function output for "
"multioutput data is incorrect. Expected {}, got {}.".format(
(n_samples, n_classes), decision.shape
)
)
dec_pred = (decision > 0).astype(int)
dec_exp = estimator.classes_[dec_pred]
assert_array_equal(dec_exp, y_pred)
if hasattr(estimator, "predict_proba"):
y_prob = estimator.predict_proba(X)
if isinstance(y_prob, list) and not tags["poor_score"]:
for i in range(n_classes):
assert y_prob[i].shape == (n_samples, 2), (
"The shape of the probability for multioutput data is"
" incorrect. Expected {}, got {}.".format(
(n_samples, 2), y_prob[i].shape
)
)
assert_array_equal(
np.argmax(y_prob[i], axis=1).astype(int), y_pred[:, i]
)
elif not tags["poor_score"]:
assert y_prob.shape == (n_samples, n_classes), (
"The shape of the probability for multioutput data is"
" incorrect. Expected {}, got {}.".format(
(n_samples, n_classes), y_prob.shape
)
)
assert_array_equal(y_prob.round().astype(int), y_pred)
if hasattr(estimator, "decision_function") and hasattr(estimator, "predict_proba"):
for i in range(n_classes):
y_proba = estimator.predict_proba(X)[:, i]
y_decision = estimator.decision_function(X)
assert_array_equal(rankdata(y_proba), rankdata(y_decision[:, i]))
@ignore_warnings(category=FutureWarning)
def check_regressor_multioutput(name, estimator):
estimator = clone(estimator)
n_samples = n_features = 10
if not _is_pairwise_metric(estimator):
n_samples = n_samples + 1
X, y = make_regression(
random_state=42, n_targets=5, n_samples=n_samples, n_features=n_features
)
X = _enforce_estimator_tags_X(estimator, X)
estimator.fit(X, y)
y_pred = estimator.predict(X)
assert y_pred.dtype == np.dtype("float64"), (
"Multioutput predictions by a regressor are expected to be"
" floating-point precision. Got {} instead".format(y_pred.dtype)
)
assert y_pred.shape == y.shape, (
"The shape of the prediction for multioutput data is incorrect."
" Expected {}, got {}."
)
@ignore_warnings(category=FutureWarning)
def check_clustering(name, clusterer_orig, readonly_memmap=False):
clusterer = clone(clusterer_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
rng = np.random.RandomState(7)
X_noise = np.concatenate([X, rng.uniform(low=-3, high=3, size=(5, 2))])
if readonly_memmap:
X, y, X_noise = create_memmap_backed_data([X, y, X_noise])
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
if hasattr(clusterer, "n_clusters"):
clusterer.set_params(n_clusters=3)
set_random_state(clusterer)
if name == "AffinityPropagation":
clusterer.set_params(preference=-100)
clusterer.set_params(max_iter=100)
# fit
clusterer.fit(X)
# with lists
clusterer.fit(X.tolist())
pred = clusterer.labels_
assert pred.shape == (n_samples,)
assert adjusted_rand_score(pred, y) > 0.4
if _safe_tags(clusterer, key="non_deterministic"):
return
set_random_state(clusterer)
with warnings.catch_warnings(record=True):
pred2 = clusterer.fit_predict(X)
assert_array_equal(pred, pred2)
# fit_predict(X) and labels_ should be of type int
assert pred.dtype in [np.dtype("int32"), np.dtype("int64")]
assert pred2.dtype in [np.dtype("int32"), np.dtype("int64")]
# Add noise to X to test the possible values of the labels
labels = clusterer.fit_predict(X_noise)
# There should be at least one sample in every cluster. Equivalently
# labels_ should contain all the consecutive values between its
# min and its max.
labels_sorted = np.unique(labels)
assert_array_equal(
labels_sorted, np.arange(labels_sorted[0], labels_sorted[-1] + 1)
)
# Labels are expected to start at 0 (no noise) or -1 (if noise)
assert labels_sorted[0] in [0, -1]
# Labels should be no greater than n_clusters - 1
if hasattr(clusterer, "n_clusters"):
n_clusters = getattr(clusterer, "n_clusters")
assert n_clusters - 1 >= labels_sorted[-1]
# else labels should be less than max(labels_) which is necessarily true
@ignore_warnings(category=FutureWarning)
def check_clusterer_compute_labels_predict(name, clusterer_orig):
"""Check that predict is invariant of compute_labels."""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = clone(clusterer_orig)
set_random_state(clusterer)
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
@ignore_warnings(category=FutureWarning)
def check_classifiers_one_label(name, classifier_orig):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = "Classifier can't predict when only one class is present."
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
classifier = clone(classifier_orig)
with raises(
ValueError, match="class", may_pass=True, err_msg=error_string_fit
) as cm:
classifier.fit(X_train, y)
if cm.raised_and_matched:
# ValueError was raised with proper error message
return
assert_array_equal(classifier.predict(X_test), y, err_msg=error_string_predict)
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(
name, classifier_orig, readonly_memmap=False, X_dtype="float64"
):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m = X_m.astype(X_dtype)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
if name in ["BernoulliNB", "MultinomialNB", "ComplementNB", "CategoricalNB"]:
X_m -= X_m.min()
X_b -= X_b.min()
if readonly_memmap:
X_m, y_m, X_b, y_b = create_memmap_backed_data([X_m, y_m, X_b, y_b])
problems = [(X_b, y_b)]
tags = _safe_tags(classifier_orig)
if not tags["binary_only"]:
problems.append((X_m, y_m))
for X, y in problems:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
classifier = clone(classifier_orig)
X = _enforce_estimator_tags_X(classifier, X)
y = _enforce_estimator_tags_y(classifier, y)
set_random_state(classifier)
# raises error on malformed input for fit
if not tags["no_validation"]:
with raises(
ValueError,
err_msg=(
f"The classifier {name} does not raise an error when "
"incorrect/malformed input data for fit is passed. The number "
"of training examples is not the same as the number of "
"labels. Perhaps use check_X_y in fit."
),
):
classifier.fit(X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert hasattr(classifier, "classes_")
y_pred = classifier.predict(X)
assert y_pred.shape == (n_samples,)
# training set performance
if not tags["poor_score"]:
assert accuracy_score(y, y_pred) > 0.83
# raises error on malformed input for predict
msg_pairwise = (
"The classifier {} does not raise an error when shape of X in "
" {} is not equal to (n_test_samples, n_training_samples)"
)
msg = (
"The classifier {} does not raise an error when the number of "
"features in {} is different from the number of features in "
"fit."
)
if not tags["no_validation"]:
if tags["pairwise"]:
with raises(
ValueError,
err_msg=msg_pairwise.format(name, "predict"),
):
classifier.predict(X.reshape(-1, 1))
else:
with raises(ValueError, err_msg=msg.format(name, "predict")):
classifier.predict(X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
if not tags["multioutput_only"]:
assert decision.shape == (n_samples,)
else:
assert decision.shape == (n_samples, 1)
dec_pred = (decision.ravel() > 0).astype(int)
assert_array_equal(dec_pred, y_pred)
else:
assert decision.shape == (n_samples, n_classes)
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input for decision_function
if not tags["no_validation"]:
if tags["pairwise"]:
with raises(
ValueError,
err_msg=msg_pairwise.format(name, "decision_function"),
):
classifier.decision_function(X.reshape(-1, 1))
else:
with raises(
ValueError,
err_msg=msg.format(name, "decision_function"),
):
classifier.decision_function(X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert y_prob.shape == (n_samples, n_classes)
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1), np.ones(n_samples))
if not tags["no_validation"]:
# raises error on malformed input for predict_proba
if tags["pairwise"]:
with raises(
ValueError,
err_msg=msg_pairwise.format(name, "predict_proba"),
):
classifier.predict_proba(X.reshape(-1, 1))
else:
with raises(
ValueError,
err_msg=msg.format(name, "predict_proba"),
):
classifier.predict_proba(X.T)
if hasattr(classifier, "predict_log_proba"):
# predict_log_proba is a transformation of predict_proba
y_log_prob = classifier.predict_log_proba(X)
assert_allclose(y_log_prob, np.log(y_prob), 8, atol=1e-9)
assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
def check_outlier_corruption(num_outliers, expected_outliers, decision):
# Check for deviation from the precise given contamination level that may
# be due to ties in the anomaly scores.
if num_outliers < expected_outliers:
start = num_outliers
end = expected_outliers + 1
else:
start = expected_outliers
end = num_outliers + 1
# ensure that all values in the 'critical area' are tied,
# leading to the observed discrepancy between provided
# and actual contamination levels.
sorted_decision = np.sort(decision)
msg = (
"The number of predicted outliers is not equal to the expected "
"number of outliers and this difference is not explained by the "
"number of ties in the decision_function values"
)
assert len(np.unique(sorted_decision[start:end])) == 1, msg
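# Hedged sketch: with one predicted outlier but three expected, the check
# above passes only because the scores in the critical area are tied.
#
# >>> import numpy as np
# >>> check_outlier_corruption(num_outliers=1, expected_outliers=3,
# ...                          decision=np.array([0.0, 1.0, 1.0, 1.0, 2.0]))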
def check_outliers_train(name, estimator_orig, readonly_memmap=True):
n_samples = 300
X, _ = make_blobs(n_samples=n_samples, random_state=0)
X = shuffle(X, random_state=7)
if readonly_memmap:
X = create_memmap_backed_data(X)
n_samples, n_features = X.shape
estimator = clone(estimator_orig)
set_random_state(estimator)
# fit
estimator.fit(X)
# with lists
estimator.fit(X.tolist())
y_pred = estimator.predict(X)
assert y_pred.shape == (n_samples,)
assert y_pred.dtype.kind == "i"
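    # outlier detectors are expected to label inliers as 1 and outliers as -1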
assert_array_equal(np.unique(y_pred), np.array([-1, 1]))
decision = estimator.decision_function(X)
scores = estimator.score_samples(X)
for output in [decision, scores]:
assert output.dtype == np.dtype("float")
assert output.shape == (n_samples,)
# raises error on malformed input for predict
with raises(ValueError):
estimator.predict(X.T)
# decision_function agrees with predict
dec_pred = (decision >= 0).astype(int)
dec_pred[dec_pred == 0] = -1
assert_array_equal(dec_pred, y_pred)
# raises error on malformed input for decision_function
with raises(ValueError):
estimator.decision_function(X.T)
# decision_function is a translation of score_samples
y_dec = scores - estimator.offset_
assert_allclose(y_dec, decision)
# raises error on malformed input for score_samples
with raises(ValueError):
estimator.score_samples(X.T)
# contamination parameter (not for OneClassSVM which has the nu parameter)
if hasattr(estimator, "contamination") and not hasattr(estimator, "novelty"):
# proportion of outliers equal to contamination parameter when not
        # set to 'auto'. This is true for the training set and thus cannot be
# checked as follows for estimators with a novelty parameter such as
# LocalOutlierFactor (tested in check_outliers_fit_predict)
expected_outliers = 30
contamination = expected_outliers / n_samples
estimator.set_params(contamination=contamination)
estimator.fit(X)
y_pred = estimator.predict(X)
num_outliers = np.sum(y_pred != 1)
# num_outliers should be equal to expected_outliers unless
# there are ties in the decision_function values. this can
# only be tested for estimators with a decision_function
# method, i.e. all estimators except LOF which is already
# excluded from this if branch.
if num_outliers != expected_outliers:
decision = estimator.decision_function(X)
check_outlier_corruption(num_outliers, expected_outliers, decision)
def check_outlier_contamination(name, estimator_orig):
# Check that the contamination parameter is in (0.0, 0.5] when it is an
# interval constraint.
if not hasattr(estimator_orig, "_parameter_constraints"):
# Only estimator implementing parameter constraints will be checked
return
if "contamination" not in estimator_orig._parameter_constraints:
return
contamination_constraints = estimator_orig._parameter_constraints["contamination"]
    if not any(isinstance(c, Interval) for c in contamination_constraints):
raise AssertionError(
"contamination constraints should contain a Real Interval constraint."
)
for constraint in contamination_constraints:
if isinstance(constraint, Interval):
assert (
constraint.type == Real
and constraint.left >= 0.0
and constraint.right <= 0.5
and (constraint.left > 0 or constraint.closed in {"right", "neither"})
), "contamination constraint should be an interval in (0, 0.5]"
@ignore_warnings(category=FutureWarning)
def check_classifiers_multilabel_representation_invariance(name, classifier_orig):
X, y = make_multilabel_classification(
n_samples=100,
n_features=2,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0,
)
X = scale(X)
X_train, y_train = X[:80], y[:80]
X_test = X[80:]
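    # the same multilabel targets, represented as a 2d array, a list of lists,
    # and a list of 1d arrays, should all yield identical predictions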
y_train_list_of_lists = y_train.tolist()
y_train_list_of_arrays = list(y_train)
classifier = clone(classifier_orig)
set_random_state(classifier)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
y_pred_list_of_lists = classifier.fit(X_train, y_train_list_of_lists).predict(
X_test
)
y_pred_list_of_arrays = classifier.fit(X_train, y_train_list_of_arrays).predict(
X_test
)
assert_array_equal(y_pred, y_pred_list_of_arrays)
assert_array_equal(y_pred, y_pred_list_of_lists)
assert y_pred.dtype == y_pred_list_of_arrays.dtype
assert y_pred.dtype == y_pred_list_of_lists.dtype
assert type(y_pred) == type(y_pred_list_of_arrays)
assert type(y_pred) == type(y_pred_list_of_lists)
@ignore_warnings(category=FutureWarning)
def check_classifiers_multilabel_output_format_predict(name, classifier_orig):
"""Check the output of the `predict` method for classifiers supporting
multilabel-indicator targets."""
classifier = clone(classifier_orig)
set_random_state(classifier)
n_samples, test_size, n_outputs = 100, 25, 5
X, y = make_multilabel_classification(
n_samples=n_samples,
n_features=2,
n_classes=n_outputs,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0,
)
X = scale(X)
X_train, X_test = X[:-test_size], X[-test_size:]
y_train, y_test = y[:-test_size], y[-test_size:]
classifier.fit(X_train, y_train)
response_method_name = "predict"
predict_method = getattr(classifier, response_method_name, None)
if predict_method is None:
raise SkipTest(f"{name} does not have a {response_method_name} method.")
y_pred = predict_method(X_test)
# y_pred.shape -> y_test.shape with the same dtype
assert isinstance(y_pred, np.ndarray), (
f"{name}.predict is expected to output a NumPy array. Got "
f"{type(y_pred)} instead."
)
assert y_pred.shape == y_test.shape, (
f"{name}.predict outputs a NumPy array of shape {y_pred.shape} "
f"instead of {y_test.shape}."
)
assert y_pred.dtype == y_test.dtype, (
f"{name}.predict does not output the same dtype than the targets. "
f"Got {y_pred.dtype} instead of {y_test.dtype}."
)
@ignore_warnings(category=FutureWarning)
def check_classifiers_multilabel_output_format_predict_proba(name, classifier_orig):
"""Check the output of the `predict_proba` method for classifiers supporting
multilabel-indicator targets."""
classifier = clone(classifier_orig)
set_random_state(classifier)
n_samples, test_size, n_outputs = 100, 25, 5
X, y = make_multilabel_classification(
n_samples=n_samples,
n_features=2,
n_classes=n_outputs,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0,
)
X = scale(X)
X_train, X_test = X[:-test_size], X[-test_size:]
y_train = y[:-test_size]
classifier.fit(X_train, y_train)
response_method_name = "predict_proba"
predict_proba_method = getattr(classifier, response_method_name, None)
if predict_proba_method is None:
raise SkipTest(f"{name} does not have a {response_method_name} method.")
y_pred = predict_proba_method(X_test)
# y_pred.shape -> 2 possibilities:
# - list of length n_outputs of shape (n_samples, 2);
# - ndarray of shape (n_samples, n_outputs).
# dtype should be floating
if isinstance(y_pred, list):
assert len(y_pred) == n_outputs, (
f"When {name}.predict_proba returns a list, the list should "
"be of length n_outputs and contain NumPy arrays. Got length "
f"of {len(y_pred)} instead of {n_outputs}."
)
for pred in y_pred:
assert pred.shape == (test_size, 2), (
f"When {name}.predict_proba returns a list, this list "
"should contain NumPy arrays of shape (n_samples, 2). Got "
f"NumPy arrays of shape {pred.shape} instead of "
f"{(test_size, 2)}."
)
assert pred.dtype.kind == "f", (
f"When {name}.predict_proba returns a list, it should "
"contain NumPy arrays with floating dtype. Got "
f"{pred.dtype} instead."
)
# check that we have the correct probabilities
err_msg = (
f"When {name}.predict_proba returns a list, each NumPy "
"array should contain probabilities for each class and "
"thus each row should sum to 1 (or close to 1 due to "
"numerical errors)."
)
assert_allclose(pred.sum(axis=1), 1, err_msg=err_msg)
elif isinstance(y_pred, np.ndarray):
assert y_pred.shape == (test_size, n_outputs), (
f"When {name}.predict_proba returns a NumPy array, the "
f"expected shape is (n_samples, n_outputs). Got {y_pred.shape}"
f" instead of {(test_size, n_outputs)}."
)
assert y_pred.dtype.kind == "f", (
f"When {name}.predict_proba returns a NumPy array, the "
f"expected data type is floating. Got {y_pred.dtype} instead."
)
err_msg = (
f"When {name}.predict_proba returns a NumPy array, this array "
"is expected to provide probabilities of the positive class "
"and should therefore contain values between 0 and 1."
)
assert_array_less(0, y_pred, err_msg=err_msg)
assert_array_less(y_pred, 1, err_msg=err_msg)
else:
raise ValueError(
f"Unknown returned type {type(y_pred)} by {name}."
"predict_proba. A list or a Numpy array is expected."
)
@ignore_warnings(category=FutureWarning)
def check_classifiers_multilabel_output_format_decision_function(name, classifier_orig):
"""Check the output of the `decision_function` method for classifiers supporting
multilabel-indicator targets."""
classifier = clone(classifier_orig)
set_random_state(classifier)
n_samples, test_size, n_outputs = 100, 25, 5
X, y = make_multilabel_classification(
n_samples=n_samples,
n_features=2,
n_classes=n_outputs,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0,
)
X = scale(X)
X_train, X_test = X[:-test_size], X[-test_size:]
y_train = y[:-test_size]
classifier.fit(X_train, y_train)
response_method_name = "decision_function"
decision_function_method = getattr(classifier, response_method_name, None)
if decision_function_method is None:
raise SkipTest(f"{name} does not have a {response_method_name} method.")
y_pred = decision_function_method(X_test)
# y_pred.shape -> y_test.shape with floating dtype
assert isinstance(y_pred, np.ndarray), (
f"{name}.decision_function is expected to output a NumPy array."
f" Got {type(y_pred)} instead."
)
assert y_pred.shape == (test_size, n_outputs), (
f"{name}.decision_function is expected to provide a NumPy array "
f"of shape (n_samples, n_outputs). Got {y_pred.shape} instead of "
f"{(test_size, n_outputs)}."
)
assert y_pred.dtype.kind == "f", (
f"{name}.decision_function is expected to output a floating dtype."
f" Got {y_pred.dtype} instead."
)
@ignore_warnings(category=FutureWarning)
def check_estimators_fit_returns_self(name, estimator_orig, readonly_memmap=False):
"""Check if self is returned when calling fit."""
X, y = make_blobs(random_state=0, n_samples=21)
X = _enforce_estimator_tags_X(estimator_orig, X)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if readonly_memmap:
X, y = create_memmap_backed_data([X, y])
set_random_state(estimator)
assert estimator.fit(X, y) is estimator
@ignore_warnings
def check_estimators_unfitted(name, estimator_orig):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise a NotFittedError.
"""
# Common test for Regressors, Classifiers and Outlier detection estimators
X, y = _regression_dataset()
estimator = clone(estimator_orig)
for method in (
"decision_function",
"predict",
"predict_proba",
"predict_log_proba",
):
if hasattr(estimator, method):
with raises(NotFittedError):
getattr(estimator, method)(X)
@ignore_warnings(category=FutureWarning)
def check_supervised_y_2d(name, estimator_orig):
tags = _safe_tags(estimator_orig)
rnd = np.random.RandomState(0)
n_samples = 30
X = _enforce_estimator_tags_X(estimator_orig, rnd.uniform(size=(n_samples, 3)))
y = np.arange(n_samples) % 3
y = _enforce_estimator_tags_y(estimator_orig, y)
estimator = clone(estimator_orig)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % ", ".join(
[str(w_x) for w_x in w]
)
if not tags["multioutput"]:
# check that we warned if we don't support multi-output
assert len(w) > 0, msg
assert (
"DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected"
in msg
)
assert_allclose(y_pred.ravel(), y_pred_2d.ravel())
@ignore_warnings
def check_classifiers_predictions(X, y, name, classifier_orig):
classes = np.unique(y)
classifier = clone(classifier_orig)
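    # BernoulliNB expects binary/boolean features, so binarize X around its mean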
if name == "BernoulliNB":
X = X > X.mean()
set_random_state(classifier)
classifier.fit(X, y)
y_pred = classifier.predict(X)
if hasattr(classifier, "decision_function"):
decision = classifier.decision_function(X)
assert isinstance(decision, np.ndarray)
if len(classes) == 2:
dec_pred = (decision.ravel() > 0).astype(int)
dec_exp = classifier.classes_[dec_pred]
assert_array_equal(
dec_exp,
y_pred,
err_msg=(
"decision_function does not match "
"classifier for %r: expected '%s', got '%s'"
)
% (
classifier,
", ".join(map(str, dec_exp)),
", ".join(map(str, y_pred)),
),
)
elif getattr(classifier, "decision_function_shape", "ovr") == "ovr":
decision_y = np.argmax(decision, axis=1).astype(int)
y_exp = classifier.classes_[decision_y]
assert_array_equal(
y_exp,
y_pred,
err_msg=(
"decision_function does not match "
"classifier for %r: expected '%s', got '%s'"
)
% (classifier, ", ".join(map(str, y_exp)), ", ".join(map(str, y_pred))),
)
# training set performance
if name != "ComplementNB":
# This is a pathological data set for ComplementNB.
        # For some specific cases 'ComplementNB' predicts fewer classes
# than expected
assert_array_equal(np.unique(y), np.unique(y_pred))
assert_array_equal(
classes,
classifier.classes_,
err_msg="Unexpected classes_ attribute for %r: expected '%s', got '%s'"
% (
classifier,
", ".join(map(str, classes)),
", ".join(map(str, classifier.classes_)),
),
)
def _choose_check_classifiers_labels(name, y, y_names):
# Semisupervised classifiers use -1 as the indicator for an unlabeled
# sample.
return (
y
if name in ["LabelPropagation", "LabelSpreading", "SelfTrainingClassifier"]
else y_names
)
def check_classifiers_classes(name, classifier_orig):
X_multiclass, y_multiclass = make_blobs(
n_samples=30, random_state=0, cluster_std=0.1
)
X_multiclass, y_multiclass = shuffle(X_multiclass, y_multiclass, random_state=7)
X_multiclass = StandardScaler().fit_transform(X_multiclass)
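    # derive a binary problem from the multiclass one by dropping the third blob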
X_binary = X_multiclass[y_multiclass != 2]
y_binary = y_multiclass[y_multiclass != 2]
X_multiclass = _enforce_estimator_tags_X(classifier_orig, X_multiclass)
X_binary = _enforce_estimator_tags_X(classifier_orig, X_binary)
labels_multiclass = ["one", "two", "three"]
labels_binary = ["one", "two"]
y_names_multiclass = np.take(labels_multiclass, y_multiclass)
y_names_binary = np.take(labels_binary, y_binary)
problems = [(X_binary, y_binary, y_names_binary)]
if not _safe_tags(classifier_orig, key="binary_only"):
problems.append((X_multiclass, y_multiclass, y_names_multiclass))
for X, y, y_names in problems:
for y_names_i in [y_names, y_names.astype("O")]:
y_ = _choose_check_classifiers_labels(name, y, y_names_i)
check_classifiers_predictions(X, y_, name, classifier_orig)
labels_binary = [-1, 1]
y_names_binary = np.take(labels_binary, y_binary)
y_binary = _choose_check_classifiers_labels(name, y_binary, y_names_binary)
check_classifiers_predictions(X_binary, y_binary, name, classifier_orig)
@ignore_warnings(category=FutureWarning)
def check_regressors_int(name, regressor_orig):
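    # Fitting on integer targets should give the same predictions as fitting
    # on the same targets cast to float.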
X, _ = _regression_dataset()
X = _enforce_estimator_tags_X(regressor_orig, X[:50])
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = _enforce_estimator_tags_y(regressor_orig, y)
rnd = np.random.RandomState(0)
# separate estimators to control random seeds
regressor_1 = clone(regressor_orig)
regressor_2 = clone(regressor_orig)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(float))
pred2 = regressor_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
@ignore_warnings(category=FutureWarning)
def check_regressors_train(
name, regressor_orig, readonly_memmap=False, X_dtype=np.float64
):
X, y = _regression_dataset()
X = X.astype(X_dtype)
y = scale(y) # X is already scaled
regressor = clone(regressor_orig)
X = _enforce_estimator_tags_X(regressor, X)
y = _enforce_estimator_tags_y(regressor, y)
if name in CROSS_DECOMPOSITION:
rnd = np.random.RandomState(0)
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
if readonly_memmap:
X, y, y_ = create_memmap_backed_data([X, y, y_])
if not hasattr(regressor, "alphas") and hasattr(regressor, "alpha"):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == "PassiveAggressiveRegressor":
regressor.C = 0.01
# raises error on malformed input for fit
with raises(
ValueError,
err_msg=(
f"The classifier {name} does not raise an error when "
"incorrect/malformed input data for fit is passed. The number of "
"training examples is not the same as the number of labels. Perhaps "
"use check_X_y in fit."
),
):
regressor.fit(X, y[:-1])
# fit
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert y_pred.shape == y_.shape
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if not _safe_tags(regressor, key="poor_score"):
assert regressor.score(X, y_) > 0.5
@ignore_warnings
def check_regressors_no_decision_function(name, regressor_orig):
# check that regressors don't have a decision_function, predict_proba, or
# predict_log_proba method.
rng = np.random.RandomState(0)
regressor = clone(regressor_orig)
X = rng.normal(size=(10, 4))
X = _enforce_estimator_tags_X(regressor_orig, X)
y = _enforce_estimator_tags_y(regressor, X[:, 0])
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
assert not hasattr(regressor, func_name)
@ignore_warnings(category=FutureWarning)
def check_class_weight_classifiers(name, classifier_orig):
if _safe_tags(classifier_orig, key="binary_only"):
problems = [2]
else:
problems = [2, 3]
for n_centers in problems:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0
)
# can't use gram_if_pairwise() here, setting up gram matrix manually
if _safe_tags(classifier_orig, key="pairwise"):
X_test = rbf_kernel(X_test, X_train)
X_train = rbf_kernel(X_train, X_train)
n_centers = len(np.unique(y_train))
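        # put (almost) all the weight on class 0 so that the classifier is
        # expected to predict class 0 for nearly every sample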
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
classifier = clone(classifier_orig).set_params(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
if hasattr(classifier, "n_iter_no_change"):
classifier.set_params(n_iter_no_change=20)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# XXX: Generally can use 0.89 here. On Windows, LinearSVC gets
# 0.88 (Issue #9111)
if not _safe_tags(classifier_orig, key="poor_score"):
assert np.mean(y_pred == 0) > 0.87
@ignore_warnings(category=FutureWarning)
def check_class_weight_balanced_classifiers(
name, classifier_orig, X_train, y_train, X_test, y_test, weights
):
classifier = clone(classifier_orig)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight="balanced")
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert f1_score(y_test, y_pred_balanced, average="weighted") > f1_score(
y_test, y_pred, average="weighted"
)
@ignore_warnings(category=FutureWarning)
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
# this is run on classes, not instances, though this should be changed
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset, default n_iter are likely to prevent
# convergence
classifier.set_params(n_iter=1000)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
if hasattr(classifier, "cv"):
classifier.set_params(cv=3)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight="balanced")
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {
1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes),
}
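    # these weights reproduce the "balanced" heuristic:
    # n_samples / (n_classes * np.bincount(y))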
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_allclose(
coef_balanced,
coef_manual,
err_msg="Classifier %s is not computing class_weight=balanced properly." % name,
)
@ignore_warnings(category=FutureWarning)
def check_estimators_overwrite_params(name, estimator_orig):
X, y = make_blobs(random_state=0, n_samples=21)
X = _enforce_estimator_tags_X(estimator_orig, X, kernel=rbf_kernel)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert joblib.hash(new_value) == joblib.hash(original_value), (
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value)
)
@ignore_warnings(category=FutureWarning)
def check_no_attributes_set_in_init(name, estimator_orig):
"""Check setting during init."""
try:
# Clone fails if the estimator does not store
# all parameters as an attribute during init
estimator = clone(estimator_orig)
except AttributeError:
raise AttributeError(
f"Estimator {name} should store all parameters as an attribute during init."
)
if hasattr(type(estimator).__init__, "deprecated_original"):
return
init_params = _get_args(type(estimator).__init__)
if IS_PYPY:
# __init__ signature has additional objects in PyPy
for key in ["obj"]:
if key in init_params:
init_params.remove(key)
parents_init_params = [
param
for params_parent in (_get_args(parent) for parent in type(estimator).__mro__)
for param in params_parent
]
# Test for no setting apart from parameters during init
invalid_attr = set(vars(estimator)) - set(init_params) - set(parents_init_params)
assert not invalid_attr, (
"Estimator %s should not set any attribute apart"
" from parameters during init. Found attributes %s."
% (name, sorted(invalid_attr))
)
@ignore_warnings(category=FutureWarning)
def check_sparsify_coefficients(name, estimator_orig):
X = np.array(
[
[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-1, -2],
[2, 2],
[-2, -2],
]
)
y = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
y = _enforce_estimator_tags_y(estimator_orig, y)
est = clone(estimator_orig)
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert sparse.issparse(est.coef_)
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert sparse.issparse(est.coef_)
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
@ignore_warnings(category=FutureWarning)
def check_classifier_data_not_an_array(name, estimator_orig):
X = np.array(
[
[3, 0],
[0, 1],
[0, 2],
[1, 1],
[1, 2],
[2, 1],
[0, 3],
[1, 0],
[2, 0],
[4, 4],
[2, 3],
[3, 2],
]
)
X = _enforce_estimator_tags_X(estimator_orig, X)
y = np.array([1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2])
y = _enforce_estimator_tags_y(estimator_orig, y)
for obj_type in ["NotAnArray", "PandasDataframe"]:
check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type)
@ignore_warnings(category=FutureWarning)
def check_regressor_data_not_an_array(name, estimator_orig):
X, y = _regression_dataset()
X = _enforce_estimator_tags_X(estimator_orig, X)
y = _enforce_estimator_tags_y(estimator_orig, y)
for obj_type in ["NotAnArray", "PandasDataframe"]:
check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type)
@ignore_warnings(category=FutureWarning)
def check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type):
if name in CROSS_DECOMPOSITION:
raise SkipTest(
"Skipping check_estimators_data_not_an_array "
"for cross decomposition module as estimators "
"are not deterministic."
)
# separate estimators to control random seeds
estimator_1 = clone(estimator_orig)
estimator_2 = clone(estimator_orig)
set_random_state(estimator_1)
set_random_state(estimator_2)
if obj_type not in ["NotAnArray", "PandasDataframe"]:
raise ValueError("Data type {0} not supported".format(obj_type))
if obj_type == "NotAnArray":
y_ = _NotAnArray(np.asarray(y))
X_ = _NotAnArray(np.asarray(X))
else:
# Here pandas objects (Series and DataFrame) are tested explicitly
# because some estimators may handle them (especially their indexing)
# specially.
try:
import pandas as pd
y_ = np.asarray(y)
if y_.ndim == 1:
y_ = pd.Series(y_)
else:
y_ = pd.DataFrame(y_)
X_ = pd.DataFrame(np.asarray(X))
except ImportError:
raise SkipTest(
"pandas is not installed: not checking estimators for pandas objects."
)
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
def check_parameters_default_constructible(name, Estimator):
# test default-constructibility
# get rid of deprecation warnings
Estimator = Estimator.__class__
with ignore_warnings(category=FutureWarning):
estimator = _construct_instance(Estimator)
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert estimator.set_params() is estimator
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, "deprecated_original", estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator."""
return (
p.name != "self"
and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL
)
init_params = [
p for p in signature(init).parameters.values() if param_filter(p)
]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
    # required parameters may legitimately have no default value, so skip them
init_params = init_params[len(getattr(estimator, "_required_parameters", [])) :]
for init_param in init_params:
assert (
init_param.default != init_param.empty
), "parameter %s for %s has no default value" % (
init_param.name,
type(estimator).__name__,
)
allowed_types = {
str,
int,
float,
bool,
tuple,
type(None),
type,
types.FunctionType,
joblib.Memory,
}
# Any numpy numeric such as np.int32.
allowed_types.update(np.core.numerictypes.allTypes.values())
assert type(init_param.default) in allowed_types, (
f"Parameter '{init_param.name}' of estimator "
f"'{Estimator.__name__}' is of type "
f"{type(init_param.default).__name__} which is not "
"allowed. All init parameters have to be immutable to "
"make cloning possible. Therefore we restrict the set of "
"legal types to "
f"{set(type.__name__ for type in allowed_types)}."
)
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert init_param.default is None, (
f"Estimator parameter '{init_param.name}' of estimator "
f"'{Estimator.__name__}' is not returned by get_params. "
"If it is deprecated, set its default value to None."
)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
failure_text = (
f"Parameter {init_param.name} was mutated on init. All "
"parameters must be stored unchanged."
)
if is_scalar_nan(param_value):
# Allows to set default parameters to np.nan
assert param_value is init_param.default, failure_text
else:
assert param_value == init_param.default, failure_text
def _enforce_estimator_tags_y(estimator, y):
# Estimators with a `requires_positive_y` tag only accept strictly positive
# data
if _safe_tags(estimator, key="requires_positive_y"):
# Create strictly positive y. The minimal increment above 0 is 1, as
# y could be of integer dtype.
y += 1 + abs(y.min())
# Estimators with a `binary_only` tag only accept up to two unique y values
if _safe_tags(estimator, key="binary_only") and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
    # Estimators with the `multioutput_only` tag raise ValueError if y is 1-D.
    # Convert y to 2-D for those estimators.
if _safe_tags(estimator, key="multioutput_only"):
return np.reshape(y, (-1, 1))
return y
def _enforce_estimator_tags_X(estimator, X, kernel=linear_kernel):
# Estimators with `1darray` in `X_types` tag only accept
# X of shape (`n_samples`,)
if "1darray" in _safe_tags(estimator, key="X_types"):
X = X[:, 0]
# Estimators with a `requires_positive_X` tag only accept
# strictly positive data
if _safe_tags(estimator, key="requires_positive_X"):
X = X - X.min()
if "categorical" in _safe_tags(estimator, key="X_types"):
X = (X - X.min()).astype(np.int32)
if estimator.__class__.__name__ == "SkewedChi2Sampler":
# SkewedChi2Sampler requires X > -skewdness in transform
X = X - X.min()
# Pairwise estimators only accept
# X of shape (`n_samples`, `n_samples`)
if _is_pairwise_metric(estimator):
X = pairwise_distances(X, metric="euclidean")
elif _safe_tags(estimator, key="pairwise"):
X = kernel(X, X)
return X
@ignore_warnings(category=FutureWarning)
def check_non_transformer_estimators_n_iter(name, estimator_orig):
    # Test that non-transformer estimators with a max_iter parameter
    # report an n_iter_ attribute of at least 1.
    # The models excluded below depend on external solvers such as
    # libsvm, where accessing the iteration count is non-trivial.
# SelfTrainingClassifier does not perform an iteration if all samples are
# labeled, hence n_iter_ = 0 is valid.
not_run_check_n_iter = [
"Ridge",
"RidgeClassifier",
"RandomizedLasso",
"LogisticRegressionCV",
"LinearSVC",
"LogisticRegression",
"SelfTrainingClassifier",
]
# Tested in test_transformer_n_iter
not_run_check_n_iter += CROSS_DECOMPOSITION
if name in not_run_check_n_iter:
return
    # LassoLars stops early for the default alpha=1.0 on the iris dataset.
if name == "LassoLars":
estimator = clone(estimator_orig).set_params(alpha=0.0)
else:
estimator = clone(estimator_orig)
if hasattr(estimator, "max_iter"):
iris = load_iris()
X, y_ = iris.data, iris.target
y_ = _enforce_estimator_tags_y(estimator, y_)
set_random_state(estimator, 0)
X = _enforce_estimator_tags_X(estimator_orig, X)
estimator.fit(X, y_)
assert np.all(estimator.n_iter_ >= 1)
@ignore_warnings(category=FutureWarning)
def check_transformer_n_iter(name, estimator_orig):
    # Test that transformers with a max_iter parameter report an
    # n_iter_ attribute of at least 1.
estimator = clone(estimator_orig)
if hasattr(estimator, "max_iter"):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [2.0, 5.0, 4.0]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
X = _enforce_estimator_tags_X(estimator_orig, X)
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert iter_ >= 1
else:
assert estimator.n_iter_ >= 1
@ignore_warnings(category=FutureWarning)
def check_get_params_invariance(name, estimator_orig):
# Checks if get_params(deep=False) is a subset of get_params(deep=True)
e = clone(estimator_orig)
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert all(item in deep_params.items() for item in shallow_params.items())
@ignore_warnings(category=FutureWarning)
def check_set_params(name, estimator_orig):
# Check that get_params() returns the same thing
# before and after set_params() with some fuzz
estimator = clone(estimator_orig)
orig_params = estimator.get_params(deep=False)
msg = "get_params result does not match what was passed to set_params"
estimator.set_params(**orig_params)
curr_params = estimator.get_params(deep=False)
assert set(orig_params.keys()) == set(curr_params.keys()), msg
for k, v in curr_params.items():
assert orig_params[k] is v, msg
# some fuzz values
test_values = [-np.inf, np.inf, None]
test_params = deepcopy(orig_params)
for param_name in orig_params.keys():
default_value = orig_params[param_name]
for value in test_values:
test_params[param_name] = value
try:
estimator.set_params(**test_params)
except (TypeError, ValueError) as e:
e_type = e.__class__.__name__
# Exception occurred, possibly parameter validation
warnings.warn(
"{0} occurred during set_params of param {1} on "
"{2}. It is recommended to delay parameter "
"validation until fit.".format(e_type, param_name, name)
)
change_warning_msg = (
"Estimator's parameters changed after set_params raised {}".format(
e_type
)
)
params_before_exception = curr_params
curr_params = estimator.get_params(deep=False)
try:
assert set(params_before_exception.keys()) == set(
curr_params.keys()
)
for k, v in curr_params.items():
assert params_before_exception[k] is v
except AssertionError:
warnings.warn(change_warning_msg)
else:
curr_params = estimator.get_params(deep=False)
assert set(test_params.keys()) == set(curr_params.keys()), msg
for k, v in curr_params.items():
assert test_params[k] is v, msg
test_params[param_name] = default_value
@ignore_warnings(category=FutureWarning)
def check_classifiers_regression_target(name, estimator_orig):
# Check if classifier throws an exception when fed regression targets
X, y = _regression_dataset()
X = _enforce_estimator_tags_X(estimator_orig, X)
e = clone(estimator_orig)
msg = "Unknown label type: "
if not _safe_tags(e, key="no_validation"):
with raises(ValueError, match=msg):
e.fit(X, y)
@ignore_warnings(category=FutureWarning)
def check_decision_proba_consistency(name, estimator_orig):
# Check whether an estimator having both decision_function and
# predict_proba methods has outputs with perfect rank correlation.
centers = [(2, 2), (4, 4)]
X, y = make_blobs(
n_samples=100,
random_state=0,
n_features=4,
centers=centers,
cluster_std=1.0,
shuffle=True,
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=0
)
estimator = clone(estimator_orig)
if hasattr(estimator, "decision_function") and hasattr(estimator, "predict_proba"):
estimator.fit(X_train, y_train)
# Since the link function from decision_function() to predict_proba()
# is sometimes not precise enough (typically expit), we round to the
# 10th decimal to avoid numerical issues: we compare the rank
# with deterministic ties rather than get platform specific rank
# inversions in case of machine level differences.
a = estimator.predict_proba(X_test)[:, 1].round(decimals=10)
b = estimator.decision_function(X_test).round(decimals=10)
rank_proba, rank_score = rankdata(a), rankdata(b)
try:
assert_array_almost_equal(rank_proba, rank_score)
except AssertionError:
# Sometimes, the rounding applied on the probabilities will have
# ties that are not present in the scores because it is
# numerically more precise. In this case, we relax the test by
# grouping the decision function scores based on the probability
# rank and check that the score is monotonically increasing.
grouped_y_score = np.array(
[b[rank_proba == group].mean() for group in np.unique(rank_proba)]
)
sorted_idx = np.argsort(grouped_y_score)
assert_array_equal(sorted_idx, np.arange(len(sorted_idx)))
def check_outliers_fit_predict(name, estimator_orig):
# Check fit_predict for outlier detectors.
n_samples = 300
X, _ = make_blobs(n_samples=n_samples, random_state=0)
X = shuffle(X, random_state=7)
n_samples, n_features = X.shape
estimator = clone(estimator_orig)
set_random_state(estimator)
y_pred = estimator.fit_predict(X)
assert y_pred.shape == (n_samples,)
assert y_pred.dtype.kind == "i"
assert_array_equal(np.unique(y_pred), np.array([-1, 1]))
# check fit_predict = fit.predict when the estimator has both a predict and
# a fit_predict method. recall that it is already assumed here that the
# estimator has a fit_predict method
if hasattr(estimator, "predict"):
y_pred_2 = estimator.fit(X).predict(X)
assert_array_equal(y_pred, y_pred_2)
if hasattr(estimator, "contamination"):
# proportion of outliers equal to contamination parameter when not
# set to 'auto'
expected_outliers = 30
contamination = float(expected_outliers) / n_samples
estimator.set_params(contamination=contamination)
y_pred = estimator.fit_predict(X)
num_outliers = np.sum(y_pred != 1)
# num_outliers should be equal to expected_outliers unless
# there are ties in the decision_function values. this can
# only be tested for estimators with a decision_function
# method
if num_outliers != expected_outliers and hasattr(
estimator, "decision_function"
):
decision = estimator.decision_function(X)
check_outlier_corruption(num_outliers, expected_outliers, decision)
def check_fit_non_negative(name, estimator_orig):
    # Check that a proper error is raised when X contains negative values
    # and the requires_positive_X tag is present
X = np.array([[-1.0, 1], [-1.0, 1]])
y = np.array([1, 2])
estimator = clone(estimator_orig)
with raises(ValueError):
estimator.fit(X, y)
def check_fit_idempotent(name, estimator_orig):
# Check that est.fit(X) is the same as est.fit(X).fit(X). Ideally we would
# check that the estimated parameters during training (e.g. coefs_) are
# the same, but having a universal comparison function for those
# attributes is difficult and full of edge cases. So instead we check that
# predict(), predict_proba(), decision_function() and transform() return
# the same results.
check_methods = ["predict", "transform", "decision_function", "predict_proba"]
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
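    # disable warm_start so that the second fit starts from scratch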
if "warm_start" in estimator.get_params().keys():
estimator.set_params(warm_start=False)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
X = _enforce_estimator_tags_X(estimator, X)
if is_regressor(estimator_orig):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
y = _enforce_estimator_tags_y(estimator, y)
train, test = next(ShuffleSplit(test_size=0.2, random_state=rng).split(X))
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
# Fit for the first time
estimator.fit(X_train, y_train)
result = {
method: getattr(estimator, method)(X_test)
for method in check_methods
if hasattr(estimator, method)
}
# Fit again
set_random_state(estimator)
estimator.fit(X_train, y_train)
for method in check_methods:
if hasattr(estimator, method):
new_result = getattr(estimator, method)(X_test)
if np.issubdtype(new_result.dtype, np.floating):
tol = 2 * np.finfo(new_result.dtype).eps
else:
tol = 2 * np.finfo(np.float64).eps
assert_allclose_dense_sparse(
result[method],
new_result,
atol=max(tol, 1e-9),
rtol=max(tol, 1e-7),
err_msg="Idempotency check failed for method {}".format(method),
)
def check_fit_check_is_fitted(name, estimator_orig):
# Make sure that estimator doesn't pass check_is_fitted before calling fit
    # and that it passes check_is_fitted once it is fit.
rng = np.random.RandomState(42)
estimator = clone(estimator_orig)
set_random_state(estimator)
if "warm_start" in estimator.get_params():
estimator.set_params(warm_start=False)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
X = _enforce_estimator_tags_X(estimator, X)
if is_regressor(estimator_orig):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
y = _enforce_estimator_tags_y(estimator, y)
if not _safe_tags(estimator).get("stateless", False):
# stateless estimators (such as FunctionTransformer) are always "fit"!
try:
check_is_fitted(estimator)
raise AssertionError(
f"{estimator.__class__.__name__} passes check_is_fitted before being"
" fit!"
)
except NotFittedError:
pass
estimator.fit(X, y)
try:
check_is_fitted(estimator)
except NotFittedError as e:
raise NotFittedError(
"Estimator fails to pass `check_is_fitted` even though it has been fit."
) from e
def check_n_features_in(name, estimator_orig):
# Make sure that n_features_in_ attribute doesn't exist until fit is
# called, and that its value is correct.
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
if "warm_start" in estimator.get_params():
estimator.set_params(warm_start=False)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
X = _enforce_estimator_tags_X(estimator, X)
if is_regressor(estimator_orig):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
y = _enforce_estimator_tags_y(estimator, y)
assert not hasattr(estimator, "n_features_in_")
estimator.fit(X, y)
assert hasattr(estimator, "n_features_in_")
assert estimator.n_features_in_ == X.shape[1]
def check_requires_y_none(name, estimator_orig):
# Make sure that an estimator with requires_y=True fails gracefully when
# given y=None
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
X = _enforce_estimator_tags_X(estimator, X)
expected_err_msgs = (
"requires y to be passed, but the target y is None",
"Expected array-like (array or non-string sequence), got None",
"y should be a 1d array",
)
try:
estimator.fit(X, None)
except ValueError as ve:
if not any(msg in str(ve) for msg in expected_err_msgs):
raise ve
@ignore_warnings(category=FutureWarning)
def check_n_features_in_after_fitting(name, estimator_orig):
    # Make sure that n_features_in_ is checked after fitting
tags = _safe_tags(estimator_orig)
is_supported_X_types = (
"2darray" in tags["X_types"] or "categorical" in tags["X_types"]
)
if not is_supported_X_types or tags["no_validation"]:
return
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
if "warm_start" in estimator.get_params():
estimator.set_params(warm_start=False)
n_samples = 150
X = rng.normal(size=(n_samples, 8))
X = _enforce_estimator_tags_X(estimator, X)
if is_regressor(estimator):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
y = _enforce_estimator_tags_y(estimator, y)
estimator.fit(X, y)
assert estimator.n_features_in_ == X.shape[1]
# check methods will check n_features_in_
check_methods = [
"predict",
"transform",
"decision_function",
"predict_proba",
"score",
]
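    # keep a single column so the feature count no longer matches n_features_in_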
X_bad = X[:, [1]]
msg = f"X has 1 features, but \\w+ is expecting {X.shape[1]} features as input"
for method in check_methods:
if not hasattr(estimator, method):
continue
callable_method = getattr(estimator, method)
if method == "score":
callable_method = partial(callable_method, y=y)
with raises(ValueError, match=msg):
callable_method(X_bad)
# partial_fit will check in the second call
if not hasattr(estimator, "partial_fit"):
return
estimator = clone(estimator_orig)
if is_classifier(estimator):
estimator.partial_fit(X, y, classes=np.unique(y))
else:
estimator.partial_fit(X, y)
assert estimator.n_features_in_ == X.shape[1]
with raises(ValueError, match=msg):
estimator.partial_fit(X_bad, y)
def check_estimator_get_tags_default_keys(name, estimator_orig):
# check that if _get_tags is implemented, it contains all keys from
# _DEFAULT_KEYS
estimator = clone(estimator_orig)
if not hasattr(estimator, "_get_tags"):
return
tags_keys = set(estimator._get_tags().keys())
default_tags_keys = set(_DEFAULT_TAGS.keys())
assert tags_keys.intersection(default_tags_keys) == default_tags_keys, (
f"{name}._get_tags() is missing entries for the following default tags"
f": {default_tags_keys - tags_keys.intersection(default_tags_keys)}"
)
def check_dataframe_column_names_consistency(name, estimator_orig):
try:
import pandas as pd
except ImportError:
raise SkipTest(
"pandas is not installed: not checking column name consistency for pandas"
)
tags = _safe_tags(estimator_orig)
is_supported_X_types = (
"2darray" in tags["X_types"] or "categorical" in tags["X_types"]
)
if not is_supported_X_types or tags["no_validation"]:
return
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
X_orig = rng.normal(size=(150, 8))
X_orig = _enforce_estimator_tags_X(estimator, X_orig)
n_samples, n_features = X_orig.shape
names = np.array([f"col_{i}" for i in range(n_features)])
X = pd.DataFrame(X_orig, columns=names)
if is_regressor(estimator):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
y = _enforce_estimator_tags_y(estimator, y)
# Check that calling `fit` does not raise any warnings about feature names.
with warnings.catch_warnings():
warnings.filterwarnings(
"error",
message="X does not have valid feature names",
category=UserWarning,
module="sklearn",
)
estimator.fit(X, y)
if not hasattr(estimator, "feature_names_in_"):
raise ValueError(
"Estimator does not have a feature_names_in_ "
"attribute after fitting with a dataframe"
)
assert isinstance(estimator.feature_names_in_, np.ndarray)
assert estimator.feature_names_in_.dtype == object
assert_array_equal(estimator.feature_names_in_, names)
# Only check sklearn estimators for feature_names_in_ in docstring
module_name = estimator_orig.__module__
if (
module_name.startswith("sklearn.")
and not ("test_" in module_name or module_name.endswith("_testing"))
and ("feature_names_in_" not in (estimator_orig.__doc__))
):
raise ValueError(
f"Estimator {name} does not document its feature_names_in_ attribute"
)
check_methods = []
for method in (
"predict",
"transform",
"decision_function",
"predict_proba",
"score",
"score_samples",
"predict_log_proba",
):
if not hasattr(estimator, method):
continue
callable_method = getattr(estimator, method)
if method == "score":
callable_method = partial(callable_method, y=y)
check_methods.append((method, callable_method))
for _, method in check_methods:
with warnings.catch_warnings():
warnings.filterwarnings(
"error",
message="X does not have valid feature names",
category=UserWarning,
module="sklearn",
)
method(X) # works without UserWarning for valid features
invalid_names = [
(names[::-1], "Feature names must be in the same order as they were in fit."),
(
[f"another_prefix_{i}" for i in range(n_features)],
"Feature names unseen at fit time:\n- another_prefix_0\n-"
" another_prefix_1\n",
),
(
names[:3],
f"Feature names seen at fit time, yet now missing:\n- {min(names[3:])}\n",
),
]
params = {
key: value
for key, value in estimator.get_params().items()
if "early_stopping" in key
}
early_stopping_enabled = any(value is True for value in params.values())
for invalid_name, additional_message in invalid_names:
X_bad = pd.DataFrame(X, columns=invalid_name)
expected_msg = re.escape(
"The feature names should match those that were passed during fit.\n"
f"{additional_message}"
)
for name, method in check_methods:
with raises(
ValueError, match=expected_msg, err_msg=f"{name} did not raise"
):
method(X_bad)
# partial_fit checks on second call
# Do not call partial fit if early_stopping is on
if not hasattr(estimator, "partial_fit") or early_stopping_enabled:
continue
estimator = clone(estimator_orig)
if is_classifier(estimator):
classes = np.unique(y)
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
with raises(ValueError, match=expected_msg):
estimator.partial_fit(X_bad, y)
def check_transformer_get_feature_names_out(name, transformer_orig):
tags = transformer_orig._get_tags()
if "2darray" not in tags["X_types"] or tags["no_validation"]:
return
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
X = StandardScaler().fit_transform(X)
transformer = clone(transformer_orig)
X = _enforce_estimator_tags_X(transformer, X)
n_features = X.shape[1]
set_random_state(transformer)
y_ = y
if name in CROSS_DECOMPOSITION:
y_ = np.c_[np.asarray(y), np.asarray(y)]
y_[::2, 1] *= 2
X_transform = transformer.fit_transform(X, y=y_)
input_features = [f"feature{i}" for i in range(n_features)]
# input_features names is not the same length as n_features_in_
with raises(ValueError, match="input_features should have length equal"):
transformer.get_feature_names_out(input_features[::2])
feature_names_out = transformer.get_feature_names_out(input_features)
assert feature_names_out is not None
assert isinstance(feature_names_out, np.ndarray)
assert feature_names_out.dtype == object
assert all(isinstance(name, str) for name in feature_names_out)
if isinstance(X_transform, tuple):
n_features_out = X_transform[0].shape[1]
else:
n_features_out = X_transform.shape[1]
assert (
len(feature_names_out) == n_features_out
), f"Expected {n_features_out} feature names, got {len(feature_names_out)}"
def check_transformer_get_feature_names_out_pandas(name, transformer_orig):
try:
import pandas as pd
except ImportError:
raise SkipTest(
"pandas is not installed: not checking column name consistency for pandas"
)
tags = transformer_orig._get_tags()
if "2darray" not in tags["X_types"] or tags["no_validation"]:
return
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
X = StandardScaler().fit_transform(X)
transformer = clone(transformer_orig)
X = _enforce_estimator_tags_X(transformer, X)
n_features = X.shape[1]
set_random_state(transformer)
y_ = y
if name in CROSS_DECOMPOSITION:
y_ = np.c_[np.asarray(y), np.asarray(y)]
y_[::2, 1] *= 2
feature_names_in = [f"col{i}" for i in range(n_features)]
df = pd.DataFrame(X, columns=feature_names_in)
X_transform = transformer.fit_transform(df, y=y_)
# error is raised when `input_features` do not match feature_names_in
invalid_feature_names = [f"bad{i}" for i in range(n_features)]
with raises(ValueError, match="input_features is not equal to feature_names_in_"):
transformer.get_feature_names_out(invalid_feature_names)
feature_names_out_default = transformer.get_feature_names_out()
feature_names_in_explicit_names = transformer.get_feature_names_out(
feature_names_in
)
assert_array_equal(feature_names_out_default, feature_names_in_explicit_names)
if isinstance(X_transform, tuple):
n_features_out = X_transform[0].shape[1]
else:
n_features_out = X_transform.shape[1]
assert (
len(feature_names_out_default) == n_features_out
), f"Expected {n_features_out} feature names, got {len(feature_names_out_default)}"
def check_param_validation(name, estimator_orig):
# Check that an informative error is raised when the value of a constructor
# parameter does not have an appropriate type or value.
rng = np.random.RandomState(0)
X = rng.uniform(size=(20, 5))
y = rng.randint(0, 2, size=20)
y = _enforce_estimator_tags_y(estimator_orig, y)
estimator_params = estimator_orig.get_params(deep=False).keys()
# check that there is a constraint for each parameter
if estimator_params:
validation_params = estimator_orig._parameter_constraints.keys()
unexpected_params = set(validation_params) - set(estimator_params)
missing_params = set(estimator_params) - set(validation_params)
err_msg = (
f"Mismatch between _parameter_constraints and the parameters of {name}."
f"\nConsider the unexpected parameters {unexpected_params} and expected but"
f" missing parameters {missing_params}"
)
assert validation_params == estimator_params, err_msg
# this object does not have a valid type for sure for all params
param_with_bad_type = type("BadType", (), {})()
fit_methods = ["fit", "partial_fit", "fit_transform", "fit_predict"]
for param_name in estimator_params:
constraints = estimator_orig._parameter_constraints[param_name]
if constraints == "no_validation":
# This parameter is not validated
continue
match = rf"The '{param_name}' parameter of {name} must be .* Got .* instead."
err_msg = (
f"{name} does not raise an informative error message when the "
f"parameter {param_name} does not have a valid type or value."
)
estimator = clone(estimator_orig)
# First, check that the error is raised if param doesn't match any valid type.
estimator.set_params(**{param_name: param_with_bad_type})
for method in fit_methods:
if not hasattr(estimator, method):
# the method is not accessible with the current set of parameters
continue
with raises(ValueError, match=match, err_msg=err_msg):
if any(
isinstance(X_type, str) and X_type.endswith("labels")
for X_type in _safe_tags(estimator, key="X_types")
):
                    # The estimator is a label transformer and takes only `y`
getattr(estimator, method)(y)
else:
getattr(estimator, method)(X, y)
# Then, for constraints that are more than a type constraint, check that the
# error is raised if param does match a valid type but does not match any valid
# value for this type.
constraints = [make_constraint(constraint) for constraint in constraints]
for constraint in constraints:
try:
bad_value = generate_invalid_param_val(constraint, constraints)
except NotImplementedError:
continue
estimator.set_params(**{param_name: bad_value})
for method in fit_methods:
if not hasattr(estimator, method):
# the method is not accessible with the current set of parameters
continue
with raises(ValueError, match=match, err_msg=err_msg):
if any(
X_type.endswith("labels")
for X_type in _safe_tags(estimator, key="X_types")
):
                        # The estimator is a label transformer and takes only `y`
getattr(estimator, method)(y)
else:
getattr(estimator, method)(X, y)
def check_set_output_transform(name, transformer_orig):
# Check transformer.set_output with the default configuration does not
# change the transform output.
tags = transformer_orig._get_tags()
if "2darray" not in tags["X_types"] or tags["no_validation"]:
return
rng = np.random.RandomState(0)
transformer = clone(transformer_orig)
X = rng.uniform(size=(20, 5))
X = _enforce_estimator_tags_X(transformer_orig, X)
y = rng.randint(0, 2, size=20)
y = _enforce_estimator_tags_y(transformer_orig, y)
set_random_state(transformer)
def fit_then_transform(est):
if name in CROSS_DECOMPOSITION:
return est.fit(X, y).transform(X, y)
return est.fit(X, y).transform(X)
def fit_transform(est):
return est.fit_transform(X, y)
transform_methods = [fit_then_transform, fit_transform]
for transform_method in transform_methods:
transformer = clone(transformer)
X_trans_no_setting = transform_method(transformer)
# Auto wrapping only wraps the first array
if name in CROSS_DECOMPOSITION:
X_trans_no_setting = X_trans_no_setting[0]
transformer.set_output(transform="default")
X_trans_default = transform_method(transformer)
if name in CROSS_DECOMPOSITION:
X_trans_default = X_trans_default[0]
# Default and no setting -> returns the same transformation
assert_allclose_dense_sparse(X_trans_no_setting, X_trans_default)
def check_set_output_transform_pandas(name, transformer_orig):
# Check transformer.set_output configures the output of transform="pandas".
try:
import pandas as pd
except ImportError:
raise SkipTest(
"pandas is not installed: not checking column name consistency for pandas"
)
tags = transformer_orig._get_tags()
if "2darray" not in tags["X_types"] or tags["no_validation"]:
return
rng = np.random.RandomState(0)
transformer = clone(transformer_orig)
X = rng.uniform(size=(20, 5))
X = _enforce_estimator_tags_X(transformer_orig, X)
y = rng.randint(0, 2, size=20)
y = _enforce_estimator_tags_y(transformer_orig, y)
set_random_state(transformer)
feature_names_in = [f"col{i}" for i in range(X.shape[1])]
df = pd.DataFrame(X, columns=feature_names_in)
def fit_then_transform(est):
if name in CROSS_DECOMPOSITION:
return est.fit(df, y).transform(df, y)
return est.fit(df, y).transform(df)
def fit_transform(est):
return est.fit_transform(df, y)
transform_methods = [fit_then_transform, fit_transform]
for transform_method in transform_methods:
transformer = clone(transformer).set_output(transform="default")
X_trans_no_setting = transform_method(transformer)
# Auto wrapping only wraps the first array
if name in CROSS_DECOMPOSITION:
X_trans_no_setting = X_trans_no_setting[0]
transformer.set_output(transform="pandas")
try:
X_trans_pandas = transform_method(transformer)
except ValueError as e:
# transformer does not support sparse data
assert str(e) == "Pandas output does not support sparse data.", e
return
if name in CROSS_DECOMPOSITION:
X_trans_pandas = X_trans_pandas[0]
assert isinstance(X_trans_pandas, pd.DataFrame)
expected_dataframe = pd.DataFrame(
X_trans_no_setting, columns=transformer.get_feature_names_out()
)
pd.testing.assert_frame_equal(X_trans_pandas, expected_dataframe)
|
{
"content_hash": "f6305c933755074ec62d37c2a9241089",
"timestamp": "",
"source": "github",
"line_count": 4223,
"max_line_length": 88,
"avg_line_length": 35.31304759649538,
"alnum_prop": 0.6093396903310601,
"repo_name": "vinayak-mehta/scikit-learn",
"id": "7026159f162879b84961d78337f1f33812de510f",
"size": "149127",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/utils/estimator_checks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "668672"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10468304"
},
{
"name": "Shell",
"bytes": "41758"
}
],
"symlink_target": ""
}
|
import datetime
import uuid
from keystoneauth1 import _utils
from keystoneauth1.fixture import exception
class _Service(dict):
"""One of the services that exist in the catalog.
    You use this by adding a service to a token, which returns an instance of
    this object, and then you can add endpoints to the service.
"""
def add_endpoint(self, interface, url, region=None, id=None):
data = {'id': id or uuid.uuid4().hex,
'interface': interface,
'url': url,
'region': region,
'region_id': region}
self.setdefault('endpoints', []).append(data)
return data
def add_standard_endpoints(self, public=None, admin=None, internal=None,
region=None):
ret = []
if public:
ret.append(self.add_endpoint('public', public, region=region))
if admin:
ret.append(self.add_endpoint('admin', admin, region=region))
if internal:
ret.append(self.add_endpoint('internal', internal, region=region))
return ret
class Token(dict):
"""A V3 Keystone token that can be used for testing.
This object is designed to allow clients to generate a correct V3 token for
    use in their test code. It should prevent clients from having to know the
correct token format and allow them to test the portions of token handling
    that matter to them without copying and pasting sample tokens.
"""
def __init__(self, expires=None, issued=None, user_id=None, user_name=None,
user_domain_id=None, user_domain_name=None, methods=None,
project_id=None, project_name=None, project_domain_id=None,
project_domain_name=None, domain_id=None, domain_name=None,
trust_id=None, trust_impersonation=None, trustee_user_id=None,
trustor_user_id=None, oauth_access_token_id=None,
oauth_consumer_id=None, audit_id=None, audit_chain_id=None):
super(Token, self).__init__()
self.user_id = user_id or uuid.uuid4().hex
self.user_name = user_name or uuid.uuid4().hex
self.user_domain_id = user_domain_id or uuid.uuid4().hex
self.user_domain_name = user_domain_name or uuid.uuid4().hex
self.audit_id = audit_id or uuid.uuid4().hex
if not methods:
methods = ['password']
self.methods.extend(methods)
if not issued:
issued = _utils.before_utcnow(minutes=2)
try:
self.issued = issued
except (TypeError, AttributeError):
            # issued may be passed as a string, so ignore the error and store it directly
self.issued_str = issued
if not expires:
expires = self.issued + datetime.timedelta(hours=1)
try:
self.expires = expires
except (TypeError, AttributeError):
            # expires may be passed as a string, so ignore the error and store it directly
self.expires_str = expires
if (project_id or project_name or
project_domain_id or project_domain_name):
self.set_project_scope(id=project_id,
name=project_name,
domain_id=project_domain_id,
domain_name=project_domain_name)
if domain_id or domain_name:
self.set_domain_scope(id=domain_id, name=domain_name)
if (trust_id or (trust_impersonation is not None) or
trustee_user_id or trustor_user_id):
self.set_trust_scope(id=trust_id,
impersonation=trust_impersonation,
trustee_user_id=trustee_user_id,
trustor_user_id=trustor_user_id)
if oauth_access_token_id or oauth_consumer_id:
self.set_oauth(access_token_id=oauth_access_token_id,
consumer_id=oauth_consumer_id)
if audit_chain_id:
self.audit_chain_id = audit_chain_id
@property
def root(self):
return self.setdefault('token', {})
@property
def expires_str(self):
return self.root.get('expires_at')
@expires_str.setter
def expires_str(self, value):
self.root['expires_at'] = value
@property
def expires(self):
return _utils.parse_isotime(self.expires_str)
@expires.setter
def expires(self, value):
self.expires_str = value.isoformat()
@property
def issued_str(self):
return self.root.get('issued_at')
@issued_str.setter
def issued_str(self, value):
self.root['issued_at'] = value
@property
def issued(self):
return _utils.parse_isotime(self.issued_str)
@issued.setter
def issued(self, value):
self.issued_str = value.isoformat()
@property
def _user(self):
return self.root.setdefault('user', {})
@property
def user_id(self):
return self._user.get('id')
@user_id.setter
def user_id(self, value):
self._user['id'] = value
@property
def user_name(self):
return self._user.get('name')
@user_name.setter
def user_name(self, value):
self._user['name'] = value
@property
def _user_domain(self):
return self._user.setdefault('domain', {})
@_user_domain.setter
def _user_domain(self, domain):
self._user['domain'] = domain
@property
def user_domain_id(self):
return self._user_domain.get('id')
@user_domain_id.setter
def user_domain_id(self, value):
self._user_domain['id'] = value
@property
def user_domain_name(self):
return self._user_domain.get('name')
@user_domain_name.setter
def user_domain_name(self, value):
self._user_domain['name'] = value
@property
def methods(self):
return self.root.setdefault('methods', [])
@property
def project_id(self):
return self.root.get('project', {}).get('id')
@project_id.setter
def project_id(self, value):
self.root.setdefault('project', {})['id'] = value
@property
def project_name(self):
return self.root.get('project', {}).get('name')
@project_name.setter
def project_name(self, value):
self.root.setdefault('project', {})['name'] = value
@property
def project_domain_id(self):
return self.root.get('project', {}).get('domain', {}).get('id')
@project_domain_id.setter
def project_domain_id(self, value):
project = self.root.setdefault('project', {})
project.setdefault('domain', {})['id'] = value
@property
def project_domain_name(self):
return self.root.get('project', {}).get('domain', {}).get('name')
@project_domain_name.setter
def project_domain_name(self, value):
project = self.root.setdefault('project', {})
project.setdefault('domain', {})['name'] = value
@property
def domain_id(self):
return self.root.get('domain', {}).get('id')
@domain_id.setter
def domain_id(self, value):
self.root.setdefault('domain', {})['id'] = value
@property
def domain_name(self):
return self.root.get('domain', {}).get('name')
@domain_name.setter
def domain_name(self, value):
self.root.setdefault('domain', {})['name'] = value
@property
def trust_id(self):
return self.root.get('OS-TRUST:trust', {}).get('id')
@trust_id.setter
def trust_id(self, value):
self.root.setdefault('OS-TRUST:trust', {})['id'] = value
@property
def trust_impersonation(self):
return self.root.get('OS-TRUST:trust', {}).get('impersonation')
@trust_impersonation.setter
def trust_impersonation(self, value):
self.root.setdefault('OS-TRUST:trust', {})['impersonation'] = value
@property
def trustee_user_id(self):
trust = self.root.get('OS-TRUST:trust', {})
return trust.get('trustee_user', {}).get('id')
@trustee_user_id.setter
def trustee_user_id(self, value):
trust = self.root.setdefault('OS-TRUST:trust', {})
trust.setdefault('trustee_user', {})['id'] = value
@property
def trustor_user_id(self):
trust = self.root.get('OS-TRUST:trust', {})
return trust.get('trustor_user', {}).get('id')
@trustor_user_id.setter
def trustor_user_id(self, value):
trust = self.root.setdefault('OS-TRUST:trust', {})
trust.setdefault('trustor_user', {})['id'] = value
@property
def oauth_access_token_id(self):
return self.root.get('OS-OAUTH1', {}).get('access_token_id')
@oauth_access_token_id.setter
def oauth_access_token_id(self, value):
self.root.setdefault('OS-OAUTH1', {})['access_token_id'] = value
@property
def oauth_consumer_id(self):
return self.root.get('OS-OAUTH1', {}).get('consumer_id')
@oauth_consumer_id.setter
def oauth_consumer_id(self, value):
self.root.setdefault('OS-OAUTH1', {})['consumer_id'] = value
@property
def audit_id(self):
try:
return self.root.get('audit_ids', [])[0]
except IndexError:
return None
@audit_id.setter
def audit_id(self, value):
audit_chain_id = self.audit_chain_id
        lval = [value, audit_chain_id] if audit_chain_id else [value]
self.root['audit_ids'] = lval
@property
def audit_chain_id(self):
try:
return self.root.get('audit_ids', [])[1]
except IndexError:
return None
@audit_chain_id.setter
def audit_chain_id(self, value):
self.root['audit_ids'] = [self.audit_id, value]
def validate(self):
project = self.root.get('project')
domain = self.root.get('domain')
trust = self.root.get('OS-TRUST:trust')
catalog = self.root.get('catalog')
roles = self.root.get('roles')
scoped = project or domain or trust
if sum((bool(project), bool(domain), bool(trust))) > 1:
msg = 'You cannot scope to multiple targets'
raise exception.FixtureValidationError(msg)
if catalog and not scoped:
msg = 'You cannot have a service catalog on an unscoped token'
raise exception.FixtureValidationError(msg)
        if scoped and not roles:
msg = 'You must have roles on a token to scope it'
raise exception.FixtureValidationError(msg)
if bool(scoped) != bool(roles):
msg = 'You must be scoped to have roles and vice-versa'
raise exception.FixtureValidationError(msg)
def add_role(self, name=None, id=None):
roles = self.root.setdefault('roles', [])
data = {'id': id or uuid.uuid4().hex,
'name': name or uuid.uuid4().hex}
roles.append(data)
return data
def add_service(self, type, name=None, id=None):
service = _Service(type=type, id=id or uuid.uuid4().hex)
if name:
service['name'] = name
self.root.setdefault('catalog', []).append(service)
return service
def set_project_scope(self, id=None, name=None, domain_id=None,
domain_name=None):
self.project_id = id or uuid.uuid4().hex
self.project_name = name or uuid.uuid4().hex
self.project_domain_id = domain_id or uuid.uuid4().hex
self.project_domain_name = domain_name or uuid.uuid4().hex
def set_domain_scope(self, id=None, name=None):
self.domain_id = id or uuid.uuid4().hex
self.domain_name = name or uuid.uuid4().hex
def set_trust_scope(self, id=None, impersonation=False,
trustee_user_id=None, trustor_user_id=None):
self.trust_id = id or uuid.uuid4().hex
self.trust_impersonation = impersonation
self.trustee_user_id = trustee_user_id or uuid.uuid4().hex
self.trustor_user_id = trustor_user_id or uuid.uuid4().hex
def set_oauth(self, access_token_id=None, consumer_id=None):
self.oauth_access_token_id = access_token_id or uuid.uuid4().hex
self.oauth_consumer_id = consumer_id or uuid.uuid4().hex
@property
def service_providers(self):
return self.root.get('service_providers')
def add_service_provider(self, sp_id, sp_auth_url, sp_url):
_service_providers = self.root.setdefault('service_providers', [])
sp = {'id': sp_id, 'auth_url': sp_auth_url, 'sp_url': sp_url}
_service_providers.append(sp)
return sp
class V3FederationToken(Token):
"""A V3 Keystone Federation token that can be used for testing.
Similar to V3Token, this object is designed to allow clients to generate
a correct V3 federation token for use in test code.
"""
FEDERATED_DOMAIN_ID = 'Federated'
def __init__(self, methods=None, identity_provider=None, protocol=None,
groups=None):
methods = methods or ['saml2']
super(V3FederationToken, self).__init__(methods=methods)
self._user_domain = {'id': V3FederationToken.FEDERATED_DOMAIN_ID}
self.add_federation_info_to_user(identity_provider, protocol, groups)
def add_federation_info_to_user(self, identity_provider=None,
protocol=None, groups=None):
data = {
"OS-FEDERATION": {
"identity_provider": identity_provider or uuid.uuid4().hex,
"protocol": protocol or uuid.uuid4().hex,
"groups": groups or [{"id": uuid.uuid4().hex}]
}
}
self._user.update(data)
return data
|
{
"content_hash": "d037e9000405cf82834dd1d691374d44",
"timestamp": "",
"source": "github",
"line_count": 415,
"max_line_length": 79,
"avg_line_length": 33.15421686746988,
"alnum_prop": 0.5939385129733266,
"repo_name": "citrix-openstack-build/keystoneauth",
"id": "b5344245c4cb0570be88c5e7717001d21d9188e1",
"size": "14305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystoneauth1/fixture/v3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "393336"
}
],
"symlink_target": ""
}
|
import unittest
import jupyter_kernel_test
class XeusKernelTests(jupyter_kernel_test.KernelTests):
kernel_name = "test_kernel"
language_name = "cpp"
code_hello_world = "hello, world"
code_page_something = "?"
code_execute_result = [
{'code': '6*7', 'result': '6*7'},
{'code': 'test', 'result': 'test'}
]
completion_samples = [
{'text': 'a.', 'matches': ['a.test1', 'a.test2']}
]
complete_code_samples = ["complete"]
incomplete_code_samples = ["incomplete"]
invalid_code_samples = ["invalid"]
code_inspect_sample = "invalid"
def test_xeus_stderr(self):
reply, output_msgs = self.execute_helper(code='error')
self.assertEqual(output_msgs[0]['msg_type'], 'stream')
self.assertEqual(output_msgs[0]['content']['name'], 'stderr')
self.assertEqual(output_msgs[0]['content']['text'], 'error')
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "e12f064ebf61627d539e9bd202e543b9",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 69,
"avg_line_length": 26.416666666666668,
"alnum_prop": 0.5951629863301787,
"repo_name": "QuantStack/xeus",
"id": "f681a0e557103c7e1a38eb0ec38b01a1ca83ed66",
"size": "951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_kernel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "216957"
},
{
"name": "CMake",
"bytes": "30413"
},
{
"name": "Python",
"bytes": "2867"
}
],
"symlink_target": ""
}
|
try:
__IPYTHON__
import sys
del sys.argv[1:]
except:
pass
import srwl_bl
import srwlib
import srwlpy
def set_optics(v=None):
el = []
pp = []
pp.append([])
return srwlib.SRWLOptC(el, pp)
varParam = srwl_bl.srwl_uti_ext_options([
['name', 's', 'Undulator Radiation', 'simulation name'],
#---Data Folder
['fdir', 's', '', 'folder (directory) name for reading-in input and saving output data files'],
#---Electron Beam
['ebm_nm', 's', 'NSLS-II Low Beta Final', 'standard electron beam name'],
['ebm_nms', 's', '', 'standard electron beam name suffix: e.g. can be Day1, Final'],
['ebm_i', 'f', 0.5, 'electron beam current [A]'],
['ebm_de', 'f', 0.0, 'electron beam average energy deviation [GeV]'],
['ebm_x', 'f', 0.0, 'electron beam initial average horizontal position [m]'],
['ebm_y', 'f', 0.0, 'electron beam initial average vertical position [m]'],
['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'],
['ebm_dr', 'f', -1.54, 'electron beam longitudinal drift [m] to be performed before a required calculation'],
['ebm_ens', 'f', -1, ''],
['ebm_emx', 'f', -1, ''],
['ebm_emy', 'f', -1, ''],
['ebm_xp', 'f', 0, ''],
['ebm_yp', 'f', 0, ''],
#---Undulator
['und_bx', 'f', 0.0, 'undulator horizontal peak magnetic field [T]'],
['und_by', 'f', 0.88770981, 'undulator vertical peak magnetic field [T]'],
['und_phx', 'f', 0.0, 'initial phase of the horizontal magnetic field [rad]'],
['und_phy', 'f', 0.0, 'initial phase of the vertical magnetic field [rad]'],
['und_b2e', '', '', 'estimate undulator fundamental photon energy (in [eV]) for the amplitude of sinusoidal magnetic field defined by und_b or und_bx, und_by', 'store_true'],
['und_e2b', '', '', 'estimate undulator field amplitude (in [T]) for the photon energy defined by w_e', 'store_true'],
['und_per', 'f', 0.02, 'undulator period [m]'],
['und_len', 'f', 3.0, 'undulator length [m]'],
['und_zc', 'f', 0.0, 'undulator center longitudinal position [m]'],
['und_sx', 'i', 1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
['und_sy', 'i', 1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
['und_g', 'f', 6.72, 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'],
['und_ph', 'f', 0.0, 'shift of magnet arrays [mm] for which the field should be set up'],
['und_mfs', 's', '', 'name of magnetic measurements for different gaps summary file'],
#---Calculation Types
#Single-Electron Spectrum vs Photon Energy
['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'],
['ss_ei', 'f', 100.0, 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ef', 'f', 20000.0, 'final photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'],
['ss_x', 'f', 0.0, 'horizontal position [m] for single-e spectrum vs photon energy calculation'],
['ss_y', 'f', 0.0, 'vertical position [m] for single-e spectrum vs photon energy calculation'],
['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'],
['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['ss_mag', 'i', 1, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'],
['ss_ft', 's', 'f', 'presentation/domain: "f"- frequency (photon energy), "t"- time'],
['ss_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'],
['ss_pl', 's', '', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size)
['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'],
['sm_ei', 'f', 100.0, 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ef', 'f', 20000.0, 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'],
['sm_x', 'f', 0.0, 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_nx', 'i', 1, 'number of points vs horizontal position for multi-e spectrum vs photon energy calculation'],
['sm_y', 'f', 0.0, 'vertical center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_ry', 'f', 0.001, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'],
['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum spectrum or intensity distribution: 1- approximate, 2- accurate'],
['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_prl', 'f', 1.0, 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_pra', 'f', 1.0, 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_meth', 'i', -1, 'method to use for spectrum vs photon energy calculation in case of arbitrary input magnetic field: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler", -1- dont use this accurate integration method (rather use approximate if possible)'],
['sm_prec', 'f', 0.01, 'relative precision for spectrum vs photon energy calculation in case of arbitrary input magnetic field (nominal value is 0.01)'],
['sm_nm', 'i', 1, 'number of macro-electrons for calculation of spectrum in case of arbitrary input magnetic field'],
['sm_na', 'i', 10, 'number of macro-electrons to average on each node at parallel (MPI-based) calculation of spectrum in case of arbitrary input magnetic field'],
['sm_ns', 'i', 10, 'saving periodicity (in terms of macro-electrons) for intermediate intensity at calculation of multi-electron spectrum in case of arbitrary input magnetic field'],
['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'],
['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['sm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
    ['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated multi-e spectrum vs photon energy'],
    ['sm_pl', 's', '', 'plot the resulting multi-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#to add options for the multi-e calculation from "accurate" magnetic field
#Power Density Distribution vs horizontal and vertical position
['pw', '', '', 'calculate SR power density distribution', 'store_true'],
['pw_x', 'f', 0.0, 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_rx', 'f', 0.015, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'],
['pw_y', 'f', 0.0, 'central vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ry', 'f', 0.015, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'],
['pw_pr', 'f', 1.0, 'precision factor for calculation of power density distribution'],
['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'],
['pw_zst', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_zfi', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_mag', 'i', 1, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'],
['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'],
['pw_pl', 's', '', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
#Single-Electron Intensity distribution vs horizontal and vertical position
['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'],
#Single-Electron Wavefront Propagation
['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'],
#Multi-Electron (partially-coherent) Wavefront Propagation
['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'],
['w_e', 'f', 9000.0, 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ef', 'f', -1., 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'],
['w_x', 'f', 0.0, 'central horizontal position [m] for calculation of intensity distribution'],
['w_rx', 'f', 0.0004, 'range of horizontal position [m] for calculation of intensity distribution'],
['w_nx', 'i', 100, 'number of points vs horizontal position for calculation of intensity distribution'],
['w_y', 'f', 0.0, 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ry', 'f', 0.0006, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ny', 'i', 100, 'number of points vs vertical position for calculation of intensity distribution'],
['w_smpf', 'f', 1.0, 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'],
['w_meth', 'i', 1, 'method to use for calculation of intensity distribution vs horizontal and vertical position'],
['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'],
['w_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'],
['w_mag', 'i', 1, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'],
['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'],
['si_pl', 's', '', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'],
['ws_pl', 's', '', 'plot the resulting intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['wm_nm', 'i', 100000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'],
['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node for parallel (MPI-based) calculation of multi-electron wavefront propagation'],
['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'],
['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y'],
['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'],
['wm_x0', 'f', 0, 'horizontal center position for mutual intensity cut calculation'],
['wm_y0', 'f', 0, 'vertical center position for mutual intensity cut calculation'],
['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'],
['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'],
#to add options
['op_r', 'f', 20.0, 'longitudinal position of the first optical element [m]'],
# Former appParam:
    ['source_type', 's', 'u', 'source type, (u) idealized undulator, (t) tabulated undulator, (m) multipole, (g) gaussian beam'],
#---User Defined Electron Beam
['ueb', 'i', 0, 'Use user defined beam'],
['ueb_e', 'f', 3.0, 'energy [GeV]'],
['ueb_sig_e', 'f', 0.00089, 'RMS energy spread'],
['ueb_beam_definition', 's', 't', 'definition of the beam using Twiss Parameters (t) or Moments (m)'],
['ueb_emit_x', 'f', 5.5e-10, 'horizontal emittance [m]'],
['ueb_beta_x', 'f', 2.02, 'horizontal beta-function [m]'],
['ueb_alpha_x', 'f', 0.0, 'horizontal alpha-function [rad]'],
['ueb_eta_x', 'f', 0.0, 'horizontal dispersion function [m]'],
['ueb_eta_x_pr', 'f', 0.0, 'horizontal dispersion function derivative [rad]'],
['ueb_emit_y', 'f', 8e-12, 'vertical emittance [m]'],
['ueb_beta_y', 'f', 1.06, 'vertical beta-function [m]'],
['ueb_alpha_y', 'f', 0.0, 'vertical alpha-function [rad]'],
['ueb_eta_y', 'f', 0.0, 'vertical dispersion function [m]'],
['ueb_eta_y_pr', 'f', 0.0, 'vertical dispersion function derivative [rad]'],
['ueb_rms_size_x', 'f', 0.000372612, "horizontal RMS size [m]"],
['ueb_rms_diverg_x', 'f', 1.04666e-05, "horizontal RMS divergence [rad]"],
['ueb_xxpr_x', 'f', 0.0, "horizontal <(x-<x>)(x'-<x'>)> [m]"],
['ueb_rms_size_y', 'f', 9.87421e-06, "vertical RMS size [m]"],
['ueb_rms_diverg_y', 'f', 3.94968e-06, "vertical RMS divergence [rad]"],
['ueb_xxpr_y', 'f', 0.0, "vertical <(x-<x>)(x'-<x'>)> [m]"],
])
if __name__ == '__main__':
v = srwl_bl.srwl_uti_parse_options(varParam)
source_type, mag = srwl_bl.setup_source(v)
op = None
v.si = True
v.si_pl = 'xy'
srwl_bl.SRWLBeamline(_name=v.name, _mag_approx=mag).calc_all(v, op)
|
{
"content_hash": "2cea2bbfae1c95b36c6afe5ac24e12fe",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 440,
"avg_line_length": 86.73979591836735,
"alnum_prop": 0.6757249573554497,
"repo_name": "radiasoft/sirepo",
"id": "5486918274c196a837206f46c5ad6e884dbbafd4",
"size": "17023",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/template/srw_import_data/exported_undulator_radiation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "152"
},
{
"name": "CSS",
"bytes": "65716"
},
{
"name": "HTML",
"bytes": "144600"
},
{
"name": "JavaScript",
"bytes": "3855752"
},
{
"name": "Jinja",
"bytes": "190763"
},
{
"name": "Jupyter Notebook",
"bytes": "1262"
},
{
"name": "Opal",
"bytes": "61806"
},
{
"name": "Perl",
"bytes": "31089"
},
{
"name": "Python",
"bytes": "3022923"
},
{
"name": "SCSS",
"bytes": "29855"
},
{
"name": "Shell",
"bytes": "21259"
}
],
"symlink_target": ""
}
|
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class AvailableAddOnTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.preview.marketplace.available_add_ons("XBXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://preview.twilio.com/marketplace/AvailableAddOns/XBXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "XBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "VoiceBase High Accuracy Transcription",
"description": "Automatic Transcription and Keyword Extract...",
"pricing_type": "per minute",
"configuration_schema": {
"type": "object",
"properties": {
"bad_words": {
"type": "boolean"
}
},
"required": [
"bad_words"
]
},
"url": "https://preview.twilio.com/marketplace/AvailableAddOns/XBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"extensions": "https://preview.twilio.com/marketplace/AvailableAddOns/XBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Extensions"
}
}
'''
))
actual = self.client.preview.marketplace.available_add_ons("XBXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.preview.marketplace.available_add_ons.list()
self.holodeck.assert_has_request(Request(
'get',
'https://preview.twilio.com/marketplace/AvailableAddOns',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"available_add_ons": [
{
"sid": "XBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "VoiceBase High Accuracy Transcription",
"description": "Automatic Transcription and Keyword Extract...",
"pricing_type": "per minute",
"configuration_schema": {
"type": "object",
"properties": {
"bad_words": {
"type": "boolean"
}
},
"required": [
"bad_words"
]
},
"url": "https://preview.twilio.com/marketplace/AvailableAddOns/XBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"extensions": "https://preview.twilio.com/marketplace/AvailableAddOns/XBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Extensions"
}
}
],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://preview.twilio.com/marketplace/AvailableAddOns?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://preview.twilio.com/marketplace/AvailableAddOns?PageSize=50&Page=0",
"next_page_url": null,
"key": "available_add_ons"
}
}
'''
))
actual = self.client.preview.marketplace.available_add_ons.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"available_add_ons": [],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://preview.twilio.com/marketplace/AvailableAddOns?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://preview.twilio.com/marketplace/AvailableAddOns?PageSize=50&Page=0",
"next_page_url": null,
"key": "available_add_ons"
}
}
'''
))
actual = self.client.preview.marketplace.available_add_ons.list()
self.assertIsNotNone(actual)
|
{
"content_hash": "903d22efbd23e0d041b1422efd29ce79",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 144,
"avg_line_length": 37.080882352941174,
"alnum_prop": 0.4824509220701963,
"repo_name": "twilio/twilio-python",
"id": "31f830e66198641e1e3b47fcf01df79380c8c249",
"size": "5058",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/integration/preview/marketplace/test_available_add_on.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
}
|
import datetime
from keystoneclient import exceptions
import requests
from ceilometer.central import plugin
from ceilometer import counter
from ceilometer.openstack.common import log
LOG = log.getLogger(__name__)
class KwapiClient(object):
"""Kwapi API client."""
def __init__(self, url, token=None):
"""Initializes client."""
self.url = url
self.token = token
def iter_probes(self):
"""Returns a list of dicts describing all probes."""
probes_url = self.url + '/probes/'
headers = {}
if self.token is not None:
headers = {'X-Auth-Token': self.token}
request = requests.get(probes_url, headers=headers)
message = request.json
probes = message['probes']
for key, value in probes.iteritems():
probe_dict = value
probe_dict['id'] = key
yield probe_dict
class _Base(plugin.CentralPollster):
"""Base class for the Kwapi pollster, derived from CentralPollster."""
@staticmethod
def get_kwapi_client(ksclient):
"""Returns a KwapiClient configured with the proper url and token."""
endpoint = ksclient.service_catalog.url_for(service_type='energy',
endpoint_type='internalURL'
)
return KwapiClient(endpoint, ksclient.auth_token)
def iter_probes(self, ksclient):
"""Iterate over all probes."""
try:
client = self.get_kwapi_client(ksclient)
except exceptions.EndpointNotFound:
LOG.debug(_("Kwapi endpoint not found"))
return []
return client.iter_probes()
class KwapiPollster(_Base):
"""Kwapi pollster derived from the base class."""
@staticmethod
def get_counter_names():
return ['energy', 'power']
def get_counters(self, manager):
"""Returns all counters."""
for probe in self.iter_probes(manager.keystone):
yield counter.Counter(
name='energy',
type=counter.TYPE_CUMULATIVE,
unit='kWh',
volume=probe['kwh'],
user_id=None,
project_id=None,
resource_id=probe['id'],
timestamp=datetime.datetime.fromtimestamp(
probe['timestamp']).isoformat(),
resource_metadata={}
)
yield counter.Counter(
name='power',
type=counter.TYPE_GAUGE,
unit='W',
volume=probe['w'],
user_id=None,
project_id=None,
resource_id=probe['id'],
timestamp=datetime.datetime.fromtimestamp(
probe['timestamp']).isoformat(),
resource_metadata={}
)
|
{
"content_hash": "7dcf4c295ad98dafa6173f58f619b2a6",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 32.144444444444446,
"alnum_prop": 0.5458002073971656,
"repo_name": "dreamhost/ceilometer",
"id": "a171d6fb3178dc24758a84911173784c7596e6d8",
"size": "3524",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ceilometer/energy/kwapi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "368517"
},
{
"name": "Python",
"bytes": "993129"
}
],
"symlink_target": ""
}
|
__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
from hamcrest.core.base_matcher import BaseMatcher
import six
class IsEqualIgnoringCase(BaseMatcher):
def __init__(self, string):
if not isinstance(string, six.string_types):
raise TypeError('IsEqualIgnoringCase requires string')
self.original_string = string
self.lowered_string = string.lower()
def _matches(self, item):
if not isinstance(item, six.string_types):
return False
return self.lowered_string == item.lower()
def describe_to(self, description):
description.append_description_of(self.original_string) \
.append_text(' ignoring case')
def equal_to_ignoring_case(string):
"""Matches if object is a string equal to a given string, ignoring case
differences.
:param string: The string to compare against as the expected value.
This matcher first checks whether the evaluated object is a string. If so,
it compares it with ``string``, ignoring differences of case.
Example::
equal_to_ignoring_case("hello world")
will match "heLLo WorlD".
"""
return IsEqualIgnoringCase(string)
|
{
"content_hash": "1a24211824f907a631914b322442e382",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 78,
"avg_line_length": 29.232558139534884,
"alnum_prop": 0.6690533015115354,
"repo_name": "axbaretto/beam",
"id": "d1ee2d17fc34d32a503f3be5e7ea38c6d049f7e2",
"size": "1257",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "sdks/python/.tox/py27gcp/lib/python2.7/site-packages/hamcrest/library/text/isequal_ignoring_case.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "Batchfile",
"bytes": "3220"
},
{
"name": "C",
"bytes": "1339873"
},
{
"name": "C++",
"bytes": "1132901"
},
{
"name": "CSS",
"bytes": "124283"
},
{
"name": "Dockerfile",
"bytes": "23950"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2795906"
},
{
"name": "Groovy",
"bytes": "187109"
},
{
"name": "HTML",
"bytes": "238575"
},
{
"name": "Java",
"bytes": "39085315"
},
{
"name": "JavaScript",
"bytes": "1221326"
},
{
"name": "Jupyter Notebook",
"bytes": "7396"
},
{
"name": "Makefile",
"bytes": "354938"
},
{
"name": "Python",
"bytes": "51449019"
},
{
"name": "Roff",
"bytes": "70716"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "351541"
},
{
"name": "TeX",
"bytes": "70920"
},
{
"name": "Thrift",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0029_auto_20200902_1549'),
]
operations = [
migrations.AddField(
model_name='proposedtalkevent',
name='is_remote',
field=models.BooleanField(default=False, verbose_name='is remote'),
),
]
|
{
"content_hash": "5a6d128150c77550744a4e2db4c51cd2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 79,
"avg_line_length": 23.5625,
"alnum_prop": 0.596816976127321,
"repo_name": "pycontw/pycontw2016",
"id": "3a03415f04600c821f11f0d64bfd6f3214d64783",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/events/migrations/0030_proposedtalkevent_is_remote.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "236762"
},
{
"name": "HTML",
"bytes": "605550"
},
{
"name": "JavaScript",
"bytes": "24923"
},
{
"name": "Python",
"bytes": "479686"
},
{
"name": "Shell",
"bytes": "389"
}
],
"symlink_target": ""
}
|
'''Various helpers for doing advanced things with dictionaries.'''
import re
def get_fields(s):
'''Return a set of field names referenced as formatting keys in the given
string. I thought there would be an easier way to get this, but I can't
    find one. E.g. get_fields('%(hello)s %(world)s') returns
    set(['hello', 'world']).'''
return set(re.findall(r'%\(([^)]+)\)', s))
class Guard(object):
'''Representation of a condition required for some action. See usage in
Template.py.'''
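    # Illustrative usage sketch, not part of the original source: wrap a simple
    # predicate and call it.
    #
    #     is_string = Guard(lambda arg: isinstance(arg, str))
    #     is_string('hello')  # -> True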
def __init__(self, guard_fn):
self.guard_fn = guard_fn
def __call__(self, arg):
return self.guard_fn(arg)
|
{
"content_hash": "055781f37d46d3e2a56b5a331ff7f591",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 77,
"avg_line_length": 33.63157894736842,
"alnum_prop": 0.6384976525821596,
"repo_name": "smaccm/camkes-tool",
"id": "265a7c1fb9cf36104debffd7953c6bfebcee811e",
"size": "865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "camkes/internal/dictutils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "3407"
},
{
"name": "C",
"bytes": "383162"
},
{
"name": "C++",
"bytes": "740"
},
{
"name": "Isabelle",
"bytes": "242975"
},
{
"name": "Makefile",
"bytes": "38834"
},
{
"name": "Python",
"bytes": "229476"
},
{
"name": "Shell",
"bytes": "3298"
},
{
"name": "VimL",
"bytes": "3143"
}
],
"symlink_target": ""
}
|
"""Tests for model saving."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import saving
from tensorflow.python.keras.engine import training
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import training as training_module
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class TestWeightSavingAndLoading(test.TestCase, parameterized.TestCase):
def test_weight_loading(self):
with self.test_session():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
model = keras.models.Model(a, b)
x = np.random.random((3, 2))
ref_y = model.predict(x)
weights = model.get_weights()
model.set_weights(weights)
y = model.predict(x)
self.assertAllClose(ref_y, y)
with self.assertRaises(ValueError):
model.set_weights(weights[1:])
with self.assertRaises(ValueError):
model.set_weights(weights[::-1])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
no_extension_path = os.path.join(temp_dir, 'test')
model.save_weights(no_extension_path, save_format='tf')
model.load_weights(no_extension_path)
y = model.predict(x)
self.assertAllClose(ref_y, y)
if h5py is None:
return # Skip rest of test if H5py isn't available.
h5_path = os.path.join(temp_dir, 'test.h5')
model.save_weights(h5_path)
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(ref_y, y)
model.load_weights(h5_path, by_name=True)
y = model.predict(x)
self.assertAllClose(ref_y, y)
model.save_weights(no_extension_path, save_format='hdf5')
model.load_weights(no_extension_path)
y = model.predict(x)
self.assertAllClose(ref_y, y)
def test_weight_preprocessing(self):
input_dim = 3
output_dim = 3
size = 2
cases = [
[
(keras.layers.Bidirectional(keras.layers.SimpleRNN(2))),
[np.random.random((2, 1)), np.random.random((2, 1))],
(None, 3, 2),
],
[
(keras.layers.TimeDistributed(keras.layers.Dense(1))),
[np.random.random((2, 1)), np.random.random((1,))],
(None, 3, 2),
],
[
(keras.layers.Conv1D(output_dim, size, use_bias=False)),
[np.random.random((output_dim, input_dim, size, 1))],
(None, 4, input_dim),
],
[
(keras.layers.Conv2D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_last')),
[np.random.random((size, size, input_dim, output_dim))],
(None, 4, 4, input_dim),
],
[
(keras.layers.Conv3D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size, size))],
(None, input_dim, 4, 4, 4),
],
[
(keras.layers.GRU(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
[
(keras.layers.LSTM(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
]
for layer, weights, input_shape in cases:
layer.build(input_shape)
_ = keras.engine.saving.preprocess_weights_for_loading(
layer, weights, original_keras_version='1')
model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)])
_ = keras.engine.saving.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
x = keras.Input((2,))
y = keras.layers.Dense(2)(x)
model = keras.models.Model(x, y)
_ = keras.engine.saving.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
@parameterized.named_parameters(
('gru', keras.layers.GRU, {
'units': 2,
'input_shape': (3, 5)
}),
('gru_with_reset_after', keras.layers.GRU, {
'units': 2,
'input_shape': (3, 5),
'reset_after': True
}),
('lstm', keras.layers.LSTM, {
'units': 2,
'input_shape': (3, 5)
}),
('cudnngru', keras.layers.CuDNNGRU, {
'units': 2,
'input_shape': (3, 5)
}),
('cudnnlstm', keras.layers.CuDNNLSTM, {
'units': 2,
'input_shape': (3, 5)
}))
def test_preprocess_weights_for_loading_rnn_should_be_idempotent(
self, layer_class, layer_args):
with self.test_session():
layer = layer_class(**layer_args)
layer.build(input_shape=layer_args.get('input_shape'))
weights1 = layer.get_weights()
weights2 = keras.engine.saving.preprocess_weights_for_loading(
layer, weights1)
_ = [
self.assertAllClose(x, y, rtol=1e-05)
for (x, y) in zip(weights1, weights2)
]
def test_sequential_weight_loading(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
x = np.random.random((batch_size, input_dim))
ref_y = model.predict(x)
model.save_weights(h5_path)
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(y, ref_y)
def test_sequential_weight_loading_group_name_with_incorrect_length(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
num_classes = 2
with self.test_session():
ref_model = keras.models.Sequential()
ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
name='d1'))
ref_model.add(keras.layers.Dense(num_classes, name='d2'))
ref_model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
f_ref_model = h5py.File(h5_path, 'w')
saving.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)
f_model = h5py.File(h5_path, 'r')
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, use_bias=False,
input_dim=input_dim, name='d1'))
model.add(keras.layers.Dense(num_classes, name='d2'))
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
with self.assertRaisesRegexp(ValueError,
r'Layer #0 \(named \"d1\"\) expects 1 '
r'weight\(s\), but the saved weights have 2 '
r'element\(s\)\.'):
saving.load_weights_from_hdf5_group_by_name(f_model, model.layers)
def test_sequential_weight_loading_group_name_with_incorrect_shape(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
num_classes = 2
with self.test_session():
ref_model = keras.models.Sequential()
ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
name='d1'))
ref_model.add(keras.layers.Dense(num_classes, name='d2'))
ref_model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
f_ref_model = h5py.File(h5_path, 'w')
saving.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)
f_model = h5py.File(h5_path, 'r')
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden + 5, input_dim=input_dim,
name='d1'))
model.add(keras.layers.Dense(num_classes, name='d2'))
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
with self.assertRaisesRegexp(ValueError,
r'Layer #0 \(named "d1"\), weight '
r'<tf\.Variable \'d1_1\/kernel:0\' '
r'shape=\(3, 10\) dtype=float32> has '
r'shape \(3, 10\), but the saved weight has '
r'shape \(3, 5\)\.'):
saving.load_weights_from_hdf5_group_by_name(f_model, model.layers)
class TestWholeModelSaving(test.TestCase):
def test_sequential_model_saving(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
new_model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# test that new updates are the same with both models
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
new_model.train_on_batch(x, y)
out = model.predict(x)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_sequential_model_saving_without_compile(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
x = np.random.random((1, 3))
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
# Save the model without any compilation or training.
keras.models.save_model(model, fname)
new_model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_sequential_model_saving_2(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
# test with custom optimizer, loss
class CustomOp(keras.optimizers.RMSprop):
pass
def custom_loss(y_true, y_pred):
return keras.losses.mse(y_true, y_pred)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(
fname,
custom_objects={'CustomOp': CustomOp,
'custom_loss': custom_loss})
os.close(fd)
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_functional_model_saving(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
inputs = keras.layers.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_saving_without_compilation(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
def test_saving_with_tf_optimizer(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse',
optimizer=training_module.AdadeltaOptimizer(0.1),
metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
def test_saving_right_after_compilation(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
model._make_train_function()
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
def test_saving_lambda_numpy_array_arguments(self):
with self.test_session():
if h5py is None:
self.skipTest('h5py required to run this test')
mean = np.random.random((4, 2, 3))
std = np.abs(np.random.random((4, 2, 3))) + 1e-5
inputs = keras.layers.Input(shape=(4, 2, 3))
output = keras.layers.Lambda(lambda image, mu, std: (image - mu) / std,
arguments={'mu': mean, 'std': std})(inputs)
model = keras.models.Model(inputs, output)
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
self.assertAllClose(mean, model.layers[1].arguments['mu'])
self.assertAllClose(std, model.layers[1].arguments['std'])
def test_saving_model_with_long_layer_names(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
      # This layer name will make the `layer_names` HDF5 attribute blow
      # out of proportion. Note that it fits into the internal HDF5
      # attribute memory limit on its own, but because h5py converts
      # the list of layer names into a numpy array, which uses the same
      # amount of memory for every item, it increases the memory
      # requirements substantially.
x = keras.Input(shape=(2,), name='input_' + ('x' * (2**15)))
f = x
for i in range(4):
f = keras.layers.Dense(2, name='dense_%d' % (i,))(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
      # Check that the HDF5 file contains a chunked array
      # of layer names.
with h5py.File(fname, 'r') as h5file:
num_names_arrays = len([attr for attr in h5file['model_weights'].attrs
if attr.startswith('layer_names')])
        # The chunking of the layer names array should have happened.
self.assertGreater(num_names_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
def test_saving_model_with_long_weights_names(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
x = keras.Input(shape=(2,), name='nested_model_input')
f = x
for i in range(4):
f = keras.layers.Dense(2, name='nested_model_dense_%d' % (i,))(f)
# This layer name will make the `weights_name`
# HDF5 attribute blow out of proportion.
f = keras.layers.Dense(2, name='nested_model_output' + ('x' * (2**14)))(f)
nested_model = keras.Model(inputs=[x], outputs=[f], name='nested_model')
x = keras.Input(shape=(2,), name='outer_model_input')
f = nested_model(x)
f = keras.layers.Dense(2, name='outer_model_output')(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
      # Check that the HDF5 file contains a chunked array
      # of weight names.
with h5py.File(fname, 'r') as h5file:
num_weight_arrays = len(
[attr for attr in h5file['model_weights']['nested_model'].attrs
if attr.startswith('weight_names')])
        # The chunking of the weight names array should have happened.
self.assertGreater(num_weight_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
def test_model_saving_to_pre_created_h5py_file(self):
if h5py is None:
self.skipTest('h5py required to run this test')
with self.test_session():
inputs = keras.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
outputs = keras.layers.Dense(3)(x)
model = keras.Model(inputs, outputs)
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.Adam(),
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
with h5py.File(fname, mode='r+') as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Test non-default options in h5
with h5py.File('_', driver='core',
backing_store=False) as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
class SubclassedModel(training.Model):
def __init__(self):
super(SubclassedModel, self).__init__()
self.x_layer = keras.layers.Dense(3)
self.b_layer = keras.layers.Dense(1)
def call(self, a):
return self.b_layer(self.x_layer(a))
class TestWeightSavingAndLoadingTFFormat(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_tensorflow_format_overwrite(self):
with self.test_session() as session:
model = SubclassedModel()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
executing_eagerly = context.executing_eagerly()
model(x) # pylint: disable=not-callable
if not executing_eagerly:
session.run([v.initializer for v in model.variables])
model.save_weights(prefix, save_format='tensorflow')
model.save_weights(prefix, save_format='tensorflow', overwrite=True)
with self.assertRaises(EOFError):
# Indirectly tests that the user is prompted
model.save_weights(prefix, save_format='tensorflow', overwrite=False)
def test_no_default_session(self):
with ops.Graph().as_default():
self.assertFalse(ops.get_default_session())
data = np.random.random((1000, 32)).astype(np.float32)
labels = np.random.random((1000, 10)).astype(np.float32)
model = keras.models.Sequential([
keras.layers.Dense(10, activation='softmax'),
keras.layers.Dense(10, activation='softmax')])
model.compile(optimizer=training_module.RMSPropOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels)
fname = os.path.join(self.get_temp_dir(), 'weights', 'ckpt')
model.save_weights(fname)
model.load_weights(fname)
def test_no_graph_pollution(self):
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.test_session(graph) as session:
model = SubclassedModel()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
model(x) # pylint: disable=not-callable
session.run([v.initializer for v in model.variables])
model.save_weights(prefix, save_format='tensorflow')
op_count = len(graph.get_operations())
model.save_weights(prefix, save_format='tensorflow')
self.assertEqual(len(graph.get_operations()), op_count)
model.load_weights(prefix)
op_count = len(graph.get_operations())
model.load_weights(prefix)
self.assertEqual(len(graph.get_operations()), op_count)
def _weight_loading_test_template(self, make_model_fn):
with self.test_session() as session:
model = make_model_fn()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
executing_eagerly = context.executing_eagerly()
ref_y_tensor = model(x)
if not executing_eagerly:
session.run([v.initializer for v in model.variables])
ref_y = self.evaluate(ref_y_tensor)
model.save_weights(prefix, save_format='tf')
for v in model.variables:
self.evaluate(
v.assign(random_ops.random_normal(shape=array_ops.shape(v))))
self.addCleanup(shutil.rmtree, temp_dir)
model.load_weights(prefix)
y = self.evaluate(model(x))
self.assertAllClose(ref_y, y)
# Test restore-on-create if this is a subclassed Model (graph Networks
# will have already created their variables).
load_model = make_model_fn()
load_model.load_weights(prefix)
restore_on_create_y_tensor = load_model(x)
restore_on_create_y = self.evaluate(restore_on_create_y_tensor)
self.assertAllClose(ref_y, restore_on_create_y)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_graph_model(self):
def _make_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
return keras.models.Model(a, b)
self._weight_loading_test_template(_make_graph_model)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_subclassed_model(self):
self._weight_loading_test_template(SubclassedModel)
def _new_layer_weight_loading_test_template(
self, first_model_fn, second_model_fn, restore_init_fn):
with self.test_session() as session:
model = first_model_fn()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
executing_eagerly = context.executing_eagerly()
ref_y_tensor = model(x)
if not executing_eagerly:
session.run([v.initializer for v in model.variables])
ref_y = self.evaluate(ref_y_tensor)
model.save_weights(prefix)
for v in model.variables:
self.evaluate(
v.assign(random_ops.random_normal(shape=array_ops.shape(v))))
self.addCleanup(shutil.rmtree, temp_dir)
second_model = second_model_fn()
second_model.load_weights(prefix)
second_model(x)
self.evaluate(restore_init_fn(second_model))
second_model.save_weights(prefix)
# Check that the second model's checkpoint loads into the original model
model.load_weights(prefix)
y = self.evaluate(model(x))
self.assertAllClose(ref_y, y)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_graph_model_added_layer(self):
def _save_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
b = keras.layers.Dense(1, name='second')(x)
return keras.models.Model(a, b)
def _restore_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
y = keras.layers.Dense(1, name='second')(x)
b = keras.layers.Dense(3, name='secondjr')(y)
return keras.models.Model(a, b)
def _restore_init_fn(restore_model):
return [v.initializer for v in restore_model.layers[-1].variables]
self._new_layer_weight_loading_test_template(
_save_graph_model, _restore_graph_model,
_restore_init_fn)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_graph_model_added_no_weight_layer(self):
def _save_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
b = keras.layers.Dense(1, name='second')(x)
return keras.models.Model(a, b)
def _restore_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
y = keras.layers.Dropout(rate=0.1)(x)
b = keras.layers.Dense(1, name='second')(y)
return keras.models.Model(a, b)
def _restore_init_fn(restore_model):
del restore_model # unused
return []
self._new_layer_weight_loading_test_template(
_save_graph_model, _restore_graph_model,
_restore_init_fn)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_subclassed_model_added_layer(self):
class SubclassedModelRestore(training.Model):
def __init__(self):
super(SubclassedModelRestore, self).__init__()
self.x_layer = keras.layers.Dense(3)
self.y_layer = keras.layers.Dense(3)
self.b_layer = keras.layers.Dense(1)
def call(self, a):
return self.b_layer(self.y_layer(self.x_layer(a)))
def _restore_init_fn(restore_model):
return [v.initializer for v in restore_model.y_layer.variables]
self._new_layer_weight_loading_test_template(
SubclassedModel, SubclassedModelRestore,
_restore_init_fn)
if __name__ == '__main__':
test.main()
|
{
"content_hash": "a7a4c4a7958cbd7dd735d56adfbe3593",
"timestamp": "",
"source": "github",
"line_count": 848,
"max_line_length": 80,
"avg_line_length": 36.14033018867924,
"alnum_prop": 0.6077919535354195,
"repo_name": "jart/tensorflow",
"id": "030328f2a66f0ec406ac271aecfbf2dbebf22f5f",
"size": "31334",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/engine/saving_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "310149"
},
{
"name": "C++",
"bytes": "44871792"
},
{
"name": "CMake",
"bytes": "206735"
},
{
"name": "Go",
"bytes": "1163781"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "799574"
},
{
"name": "Jupyter Notebook",
"bytes": "2455980"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52050"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99265"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "38792793"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "447966"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
import requests
TRACKING_URI = 'https://ssl.google-analytics.com/collect'
def _request(data, extra_headers):
return requests.post(TRACKING_URI, data=data, headers=extra_headers,
timeout=5.0)
def report(tracking_id, client_id, requestable, extra_info=None,
extra_headers=None):
"""Actually report measurements to Google Analytics."""
return [_request(data, extra_headers)
for data, extra_headers in payloads(
tracking_id, client_id, requestable, extra_info, extra_headers)]
def payloads(tracking_id, client_id, requestable, extra_info=None,
extra_headers=None):
"""Get data and headers of API requests for Google Analytics.
Generates a sequence of (data, headers) pairs. Both `data` and `headers`
are dicts.
"""
extra_payload = {
'v': '1',
'tid': tracking_id,
'cid': client_id,
'aip': '1'}
if extra_info:
for payload in extra_info:
extra_payload.update(payload)
for request_payload in requestable:
final_payload = dict(request_payload)
final_payload.update(extra_payload)
yield final_payload, extra_headers
class Requestable(object):
def get_payload(self):
raise NotImplementedError()
def __iter__(self):
yield self.get_payload()
class SystemInfo(Requestable, namedtuple('SystemInfo', 'language')):
def __new__(cls, language=None):
return super(SystemInfo, cls).__new__(cls, language)
def get_payload(self):
payload = {}
if self.language:
payload['ul'] = self.language
return payload
class PageView(
Requestable,
namedtuple('PageView',
'path host_name location title referrer')):
def __new__(cls, path=None, host_name=None, location=None, title=None,
referrer=None):
return super(PageView, cls).__new__(cls, path, host_name, location,
title, referrer)
def get_payload(self):
payload = {'t': 'pageview'}
if self.location:
payload['dl'] = self.location
if self.host_name:
payload['dh'] = self.host_name
if self.path:
payload['dp'] = self.path
if self.title:
payload['dt'] = self.title
if self.referrer:
payload['dr'] = self.referrer
return payload
class Event(Requestable, namedtuple('Event', 'category action label value')):
def __new__(cls, category, action, label=None, value=None):
return super(Event, cls).__new__(cls, category, action, label, value)
def get_payload(self):
payload = {
't': 'event',
'ec': self.category,
'ea': self.action}
if self.label:
payload['el'] = self.label
if self.value:
payload['ev'] = str(int(self.value))
return payload
class Transaction(
Requestable,
namedtuple('Transaction',
'transaction_id items revenue shipping affiliation')):
def __new__(cls, transaction_id, items, revenue=None, shipping=None,
affiliation=None):
if not items:
raise ValueError('You need to specify at least one item')
return super(Transaction, cls).__new__(
cls, transaction_id, items, revenue, shipping, affiliation)
def get_total(self):
if self.revenue:
return self.revenue
prices = [i.get_subtotal() for i in self.items]
total = sum(prices[1:], prices[0])
if self.shipping:
total += self.shipping
return total
def get_payload(self):
payload = {
't': 'transaction',
'ti': self.transaction_id}
if self.affiliation:
payload['ta'] = self.affiliation
total = self.get_total()
payload['tr'] = str(total.gross)
payload['tt'] = str(total.tax)
payload['cu'] = total.currency
if self.shipping:
payload['ts'] = str(self.shipping.gross)
return payload
def __iter__(self):
yield self.get_payload()
for i in self.items:
yield i.get_payload_for_transaction(self.transaction_id)
class Item(namedtuple('Item', 'name unit_price quantity item_id category')):
def __new__(cls, name, unit_price, quantity=None, item_id=None,
category=None):
return super(Item, cls).__new__(cls, name, unit_price, quantity,
item_id, category)
def get_subtotal(self):
if self.quantity:
return self.unit_price * self.quantity
return self.unit_price
def get_payload_for_transaction(self, transaction_id):
payload = {
't': 'item',
'ti': transaction_id,
'in': self.name}
payload['ip'] = str(self.unit_price.gross)
payload['cu'] = self.unit_price.currency
if self.quantity:
payload['iq'] = str(int(self.quantity))
if self.item_id:
payload['ic'] = self.item_id
if self.category:
payload['iv'] = self.category
return payload
class EnhancedItem(namedtuple('EnhancedItem',
'name unit_price quantity item_id category brand variant')):
def __new__(cls, name, unit_price, quantity=None, item_id=None,
category=None, brand=None, variant=None):
return super(EnhancedItem, cls).__new__(cls, name, unit_price,
quantity, item_id, category,
brand, variant)
def get_subtotal(self):
if self.quantity:
return self.unit_price * self.quantity
return self.unit_price
def get_payload_for_transaction(self, position):
payload = {
'pr{0}ps'.format(position): '{0}'.format(position),
'pr{0}nm'.format(position): self.name,
'pr{0}pr'.format(position): self.unit_price}
quantity = self.quantity or 1
payload['pr{0}qt'.format(position)] = '{0}'.format(quantity)
if self.item_id:
payload['pr{0}id'.format(position)] = self.item_id
if self.category:
payload['pr{0}ca'.format(position)] = self.category
if self.brand:
payload['pr{0}br'.format(position)] = self.brand
if self.variant:
payload['pr{0}va'.format(position)] = self.variant
return payload
class EnhancedPurchase(Requestable,
namedtuple('EnhancedPurchase', 'transaction_id items url_page revenue tax shipping host affiliation coupon')):
def __new__(cls, transaction_id, items, url_page, revenue=None, tax=None,
shipping=None, host=None, affiliation=None, coupon=None):
if not items:
raise ValueError('You need to specify at least one item')
return super(EnhancedPurchase, cls).__new__(cls, transaction_id, items,
url_page, revenue, tax,
shipping, host,
affiliation, coupon)
def get_total(self):
if self.revenue:
return self.revenue
prices = [i.get_subtotal() for i in self.items]
total = sum(prices[1:], prices[0])
if self.shipping:
total += self.shipping
if self.tax:
total += self.tax
return total
def get_payload(self):
payload = {
'pa': 'purchase',
'ti': self.transaction_id,
'dp': self.url_page}
tax = self.tax or 0
payload['tt'] = str(tax)
total = self.get_total()
payload['tr'] = '{0}'.format(total)
if self.shipping:
payload['ts'] = str(self.shipping)
if self.host:
payload['dh'] = self.host
if self.affiliation:
payload['ta'] = self.affiliation
if self.coupon:
payload['tcc'] = self.coupon
return payload
def __iter__(self):
event = Event('ecommerce', 'purchase')
yield event.get_payload()
to_return = self.get_payload()
for i in range(len(self.items)):
to_return.update(self.items[i].get_payload_for_transaction(i + 1))
yield to_return
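# Usage sketch (illustrative, not part of the original module): the tracking
# ID 'UA-XXXXX-Y' and client ID 'client-1234' are placeholders. `report` would
# POST to Google Analytics, so this only builds and prints the payload dicts
# that would be sent.
if __name__ == '__main__':
    visit = PageView(path='/checkout', host_name='example.com',
                     title='Checkout')
    for data, _ in payloads('UA-XXXXX-Y', 'client-1234', visit,
                            extra_info=SystemInfo(language='en-us')):
        print(data)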
|
{
"content_hash": "ee0152bcf1936ef9ee86bf1dba7ebf25",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 133,
"avg_line_length": 33.33203125,
"alnum_prop": 0.5568967537794445,
"repo_name": "hadynz/plugin.video.altv",
"id": "62efff1012d8b0f1f6a94f167a26edc9ab2ba0e0",
"size": "8533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/lib/googlemeasurementprotocol/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23753"
}
],
"symlink_target": ""
}
|
import subprocess
import os
import tempfile
import json
from git import Repository
from config import config
def apply_roles(roles, services, ports, host, repository, extra_vars=None, tags=None, skip=None):
"""
:type roles: list[str]
:type host: str
:type repository: Repository
:type extra_vars: dict
:type tags: list[str]
:type skip: list[str]
:rtype: int
"""
# TODO: inject repo path and add .dork directory
# Create the temporary inventory
inventory = tempfile.NamedTemporaryFile(delete=False)
if config.docker_connect == 'yes':
inventory.write("%s ansible_connection=docker" % host + '\n')
else:
inventory.write("%s ansible_ssh_user=root" % host + '\n')
inventory.close()
# Create the temporary playbook
playbook = tempfile.NamedTemporaryFile(delete=False)
if not extra_vars:
extra_vars = {}
extra_vars['dork_services'] = services
extra_vars['dork_ports'] = ports
pblines = ['- hosts: all', ' roles:']
for role in roles:
pblines.append(' - { role: %s, tags:[\'%s\'] }' % (role, role))
playbook.write('\n'.join(pblines) + '\n')
playbook.close()
result = run_playbook(inventory.name, playbook.name, repository, extra_vars, tags, skip)
# Unlink temporary files
os.unlink(inventory.name)
os.unlink(playbook.name)
return result
def run_playbook(inventory, playbook, repository, extra_vars=None, tags=None, skip=None):
"""
:type inventory: str
:type playbook: str
:type repository: Repository
:type extra_vars: dict
:type tags: list[str]
:type skip: list[str]
:return:
"""
command = ['ansible-playbook', '-i', inventory, playbook]
# Process extra variables if provided
variables = tempfile.NamedTemporaryFile(delete=False)
if extra_vars:
json.dump(extra_vars, variables)
variables.close()
command.append('--extra-vars')
command.append("@%s" % variables.name)
# Add --tags flag if available.
if tags:
command.append('--tags')
        command.append(','.join([t for t in tags if t != 'default']))
# Add --skip-tags flag if available.
if skip:
command.append('--skip-tags')
        command.append(','.join([s for s in skip if s != 'default']))
# Run ansible
ansible_library = config.ansible_roles_path
project_library = repository.directory + '/.dork'
if os.path.isdir(project_library):
ansible_library.append(project_library)
environment = os.environ.copy()
environment['ANSIBLE_ROLES_PATH'] = ':'.join(ansible_library)
result = subprocess.call(' '.join(command), shell=True, env=environment)
os.unlink(variables.name)
return result
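# Usage sketch (illustrative, not part of the original module): the roles,
# services/ports mappings and host are hypothetical, and the namedtuple stands
# in for a git.Repository because run_playbook only reads its `directory`
# attribute. Note that running this invokes `ansible-playbook` for real.
if __name__ == '__main__':
    from collections import namedtuple
    FakeRepository = namedtuple('FakeRepository', 'directory')
    rc = apply_roles(roles=['nginx', 'php'],
                     services={'web': 'nginx'},
                     ports={'web': 8080},
                     host='dork-example-container',
                     repository=FakeRepository(directory='/tmp/example'),
                     extra_vars={'dork_environment': 'development'},
                     tags=['nginx'])
    print(rc)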
|
{
"content_hash": "5b7deb4b9aff2293db7eea57c1f5af77",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 97,
"avg_line_length": 30.633333333333333,
"alnum_prop": 0.6409140369967355,
"repo_name": "iamdork/dork",
"id": "f9b32de8df0159f418d8519e7f951ca1a7eec333",
"size": "2757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dork/runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "105401"
}
],
"symlink_target": ""
}
|
"""Print classes which have a virtual method and non-virtual destructor."""
from __future__ import print_function
from __future__ import unicode_literals
from . import ast
from . import metrics
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
def _find_warnings(filename, source, ast_list):
count = 0
for node in ast_list:
if isinstance(node, ast.Class) and node.body:
class_node = node
has_virtuals = False
for node in node.body:
if isinstance(node, ast.Class) and node.body:
_find_warnings(filename, source, [node])
elif (isinstance(node, ast.Function) and
node.modifiers & ast.FUNCTION_VIRTUAL):
has_virtuals = True
if node.modifiers & ast.FUNCTION_DTOR:
break
else:
if has_virtuals and not class_node.bases:
lines = metrics.Metrics(source)
print(
'%s:%d' % (
filename,
lines.get_line_number(
class_node.start)),
end=' ')
print("'{}' has virtual methods without a virtual "
'dtor'.format(class_node.name))
count += 1
return count
def run(filename, source, entire_ast, include_paths, quiet):
return _find_warnings(filename, source, entire_ast)
|
{
"content_hash": "0b3a474dce335db43f735af66aa16119",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 75,
"avg_line_length": 34.63636363636363,
"alnum_prop": 0.5085301837270341,
"repo_name": "susundberg/esp8266-waterpump",
"id": "b1dbd0392b264747637b1e4514ffee240dd7d38c",
"size": "2101",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/tools/cpp/nonvirtual_dtors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "347734"
},
{
"name": "C++",
"bytes": "560623"
},
{
"name": "HTML",
"bytes": "2359"
},
{
"name": "JavaScript",
"bytes": "441"
},
{
"name": "Makefile",
"bytes": "425"
},
{
"name": "Python",
"bytes": "123097"
},
{
"name": "Shell",
"bytes": "1326"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.db import models
class Board(models.Model):
year = models.IntegerField()
officers = models.TextField()
class Meta:
ordering = ("year",)
get_latest_by = "year"
verbose_name = _("Board")
verbose_name_plural = _("Boards")
def __unicode__(self):
return _("Board %(year)s") % {"year": self.year}
ROLE_CHOICES = (
(0, _("Chairman")),
(1, _("Treasurer")),
(2, _("Secretary")),
(3, _("Vice chairman")),
(4, _("Board Member")),
(5, _("Study affairs")),
(6, _("Communications")),
(7, _("Corporate affairs")),
(8, _("RV affairs")),
(9, _("Freshman affairs")),
(20, _("First deputy board member")),
(21, _("Second deputy board member")),
(22, _("Third deputy board member")),
(23, _("Fourth deputy board member")),
)
class BoardMember(models.Model):
board = models.ForeignKey(Board)
name = models.CharField(max_length=100, verbose_name=_("Name"))
face = models.ImageField(upload_to="board_faces/%Y/", verbose_name=_("Mugshot"))
role = models.IntegerField(choices=ROLE_CHOICES, verbose_name=_("Role"))
contact = models.CharField(max_length=100, default=_("firstname.lastname(at)cs.helsinki.fi"), verbose_name=_("Contact"))
def __unicode__(self):
return self.name
class Meta:
ordering = ("role",)
verbose_name = _("Board Member")
verbose_name_plural = _("Board Members")
|
{
"content_hash": "65121b5dc4505c5fd55b0e10077c60d8",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 124,
"avg_line_length": 29.76923076923077,
"alnum_prop": 0.5936692506459949,
"repo_name": "hylje/tekis",
"id": "698813e597edab86d4171a4c2b32fded8eb9d49e",
"size": "1548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tekis/board/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10624"
},
{
"name": "HTML",
"bytes": "19260"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "48440"
}
],
"symlink_target": ""
}
|
import json
import numpy as np
from inversetoon.data.contour import Contour
from inversetoon.data.data import Data
from inversetoon.data.segment import IsophoteSegment
## Curve data class.
#
# Attributes:
# * cvs: list of control vertices (n x 2 numpy.array).
# * segments_cvIDs: list of segment_cvIDs.
# - segment_cvIDs: list of control vertex IDs in the segment.
class Curve(Data):
## Constructor
def __init__(self, cvs=[], segments_cvIDs=[]):
self._cvs = np.array(cvs)
self._segments_cvIDs = segments_cvIDs
def numCVs(self):
return len(self._cvs)
def CVs(self):
return self._cvs
def setCVs(self, cvs):
self._cvs = cvs
def segmentsCVIDs(self):
return self._segments_cvIDs
def setSegmentsCVIDs(self, segments_cvIDs):
self._segments_cvIDs = segments_cvIDs
def setCurve(self, curve):
self._cvs = curve._cvs
self._segments_cvIDs = curve._segments_cvIDs
def setContour(self, contour):
cvs = []
self._segments_cvIDs = []
si = 0
for p_segment in contour.segments():
segment_cvIDs = []
for p in p_segment:
cvs.append(p)
segment_cvIDs.append(si)
si += 1
self._segments_cvIDs.append(segment_cvIDs)
self._cvs = np.array(cvs)
self.setClosing(contour.closing())
def contour(self):
cvs = self._cvs
segments_cvIDs = self._segments_cvIDs
contour_segments = []
for segment_cvIDs in segments_cvIDs:
            contour_segment = cvs[segment_cvIDs]
            contour_segments.append(np.array(contour_segment))
return Contour(contour_segments, self.isClosing())
def setClosing(self, closing=True):
if closing:
if self._segments_cvIDs[0][0] != self._segments_cvIDs[-1][-1]:
self._segments_cvIDs[-1].append(self._segments_cvIDs[0][0])
else:
if len(self._segments_cvIDs) == 0:
return
if self._segments_cvIDs[0][0] == self._segments_cvIDs[-1][-1]:
self._segments_cvIDs[-1] = self._segments_cvIDs[-1][:-1]
def isClosing(self):
return self._segments_cvIDs[0][0] == self._segments_cvIDs[-1][-1]
#################
# Data IO
#################
## dictionary data for writeJson method.
def _dataDict(self):
data = {"cvs": self._cvs.tolist(), "segments": self._segments_cvIDs}
return data
## set dictionary data for loadJson method.
def _setDataDict(self, data):
self._cvs = np.array(data["cvs"])
self._segments_cvIDs = data["segments"]
## Curve data with normals.
#
# Attributes:
# * cvs
# * segments_cvIDs
# * normals: n x 3 normal data.
class NormalCurve(Curve):
## Constructor
def __init__(self, cvs=[], segments_cvIDs=[], normals=[]):
super(NormalCurve, self).__init__(cvs, segments_cvIDs)
self._normals = np.array(normals)
def normals(self):
return self._normals
def setNormals(self, normals):
self._normals = normals
def setNormalImage(self, N_32F):
normals = []
for cv in self._cvs:
normals.append(N_32F[int(cv[1])][int(cv[0])])
self._normals = np.array(normals)
def toCurveSegments(self):
segments = []
cvs = self._cvs
normals = self._normals
for cv_ids in self._segments_cvIDs:
segment = IsophoteSegment(cvs[cv_ids], cv_ids)
segment.setNormals(normals[cv_ids])
segments.append(segment)
return segments
#################
# Data IO
#################
def _dataDict(self):
data = super(NormalCurve, self)._dataDict()
data["normals"] = self._normals.tolist()
return data
def _setDataDict(self, data):
super(NormalCurve, self)._setDataDict(data)
self._normals = np.array(data["normals"])
## Isophote curve data class.
#
# Attributes:
# * cvs
# * segments_cvIDs
# * normals
# * L: light direction.
# * iso_value: luminance value for the isophote.
# * silhouette_cvIDs: control vertex IDs of silhoutte vertices.
class IsophoteCurve(NormalCurve):
## Constructor
def __init__(self, cvs=[], segments=[], normals=[], L=np.array([0, 0, 1]),
iso_value=0, silhouette_cvIDs=[]):
super(IsophoteCurve, self).__init__(cvs, segments, normals)
self._L = L
self._iso_value = iso_value
self._silhouette_cvIDs = silhouette_cvIDs
def LightDir(self):
return self._L
def setLightDir(self, L):
self._L = L
def isoValue(self):
return self._iso_value
def setIsoValue(self, iso_value):
self._iso_value = iso_value
def silhouetteCVIDs(self):
return self._silhouette_cvIDs
def setSilhouetteCVIDs(self, silhouetteCVIDs):
self._silhouette_cvIDs = sorted(list(set(silhouetteCVIDs)))
def setSilhouetteMask(self, S_8U):
cvs = self.CVs()
segments = self.segmentsCVIDs()
silhouetteCVIDs = []
for segment in segments:
for cvID in segment:
p = cvs[cvID]
if S_8U[p[1], p[0]] == 255:
silhouetteCVIDs.append(cvID)
self.setSilhouetteCVIDs(silhouetteCVIDs)
def toCurveSegments(self):
segments = []
cvs = self._cvs
normals = self._normals
for cv_ids in self._segments_cvIDs:
segment = IsophoteSegment(cvs[cv_ids], cv_ids)
segment.setLightDir(self._L)
segment.setIsoValue(self._iso_value)
segment.setNormals(normals[cv_ids])
segments.append(segment)
return segments
#################
# Data IO
#################
def _dataDict(self):
data = super(IsophoteCurve, self)._dataDict()
data["L"] = self._L.tolist()
data["isoValue"] = self._iso_value
data["silhouetteCVIDs"] = list(self._silhouette_cvIDs)
return data
def _setDataDict(self, data):
super(IsophoteCurve, self)._setDataDict(data)
self._L = np.array(data["L"])
self._iso_value = data["isoValue"]
self._silhouette_cvIDs = data["silhouetteCVIDs"]
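# Usage sketch (illustrative, not part of the original module): builds a small
# closed curve from hand-written control vertices; the two segments share the
# first/last control vertex ID, which is how closing is encoded above.
if __name__ == '__main__':
    cvs = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]
    segments_cvIDs = [[0, 1, 2], [2, 3, 0]]
    curve = Curve(cvs, segments_cvIDs)
    print(curve.numCVs())      # 4
    print(curve.isClosing())   # True
    contour = curve.contour()
    print(contour.closing())   # True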
|
{
"content_hash": "7317be0c6f8d19a928e0818946acb51b",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 78,
"avg_line_length": 27.624454148471617,
"alnum_prop": 0.5758773316471704,
"repo_name": "tody411/InverseToon",
"id": "04af59769f55ed2a2c3c4c609b17fcdede94a847",
"size": "6460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inversetoon/data/curve.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "113973"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import os.path
import sys
from unittest import mock
import pytest
import pre_commit.constants as C
from pre_commit.envcontext import envcontext
from pre_commit.languages import python
from pre_commit.prefix import Prefix
from pre_commit.util import make_executable
from pre_commit.util import win_exe
def test_read_pyvenv_cfg(tmpdir):
pyvenv_cfg = tmpdir.join('pyvenv.cfg')
pyvenv_cfg.write(
'# I am a comment\n'
'\n'
'foo = bar\n'
'version-info=123\n',
)
expected = {'foo': 'bar', 'version-info': '123'}
assert python._read_pyvenv_cfg(pyvenv_cfg) == expected
def test_read_pyvenv_cfg_non_utf8(tmpdir):
pyvenv_cfg = tmpdir.join('pyvenv_cfg')
pyvenv_cfg.write_binary('hello = hello john.š\n'.encode())
expected = {'hello': 'hello john.š'}
assert python._read_pyvenv_cfg(pyvenv_cfg) == expected
def test_norm_version_expanduser():
home = os.path.expanduser('~')
if os.name == 'nt': # pragma: nt cover
path = r'~\python343'
expected_path = fr'{home}\python343'
else: # pragma: nt no cover
path = '~/.pyenv/versions/3.4.3/bin/python'
expected_path = f'{home}/.pyenv/versions/3.4.3/bin/python'
result = python.norm_version(path)
assert result == expected_path
def test_norm_version_of_default_is_sys_executable():
assert python.norm_version('default') is None
@pytest.mark.parametrize('v', ('python3.9', 'python3', 'python'))
def test_sys_executable_matches(v):
with mock.patch.object(sys, 'version_info', (3, 9, 10)):
assert python._sys_executable_matches(v)
assert python.norm_version(v) is None
@pytest.mark.parametrize('v', ('notpython', 'python3.x'))
def test_sys_executable_matches_does_not_match(v):
with mock.patch.object(sys, 'version_info', (3, 9, 10)):
assert not python._sys_executable_matches(v)
@pytest.mark.parametrize(
('exe', 'realpath', 'expected'), (
('/usr/bin/python3', '/usr/bin/python3.7', 'python3'),
('/usr/bin/python', '/usr/bin/python3.7', 'python3.7'),
('/usr/bin/python', '/usr/bin/python', None),
('/usr/bin/python3.7m', '/usr/bin/python3.7m', 'python3.7m'),
('v/bin/python', 'v/bin/pypy', 'pypy'),
),
)
def test_find_by_sys_executable(exe, realpath, expected):
with mock.patch.object(sys, 'executable', exe):
with mock.patch.object(os.path, 'realpath', return_value=realpath):
with mock.patch.object(python, 'find_executable', lambda x: x):
assert python._find_by_sys_executable() == expected
@pytest.fixture
def python_dir(tmpdir):
with tmpdir.as_cwd():
prefix = tmpdir.join('prefix').ensure_dir()
prefix.join('setup.py').write('import setuptools; setuptools.setup()')
prefix = Prefix(str(prefix))
yield prefix, tmpdir
def test_healthy_default_creator(python_dir):
prefix, tmpdir = python_dir
python.install_environment(prefix, C.DEFAULT, ())
# should be healthy right after creation
assert python.health_check(prefix, C.DEFAULT) is None
# even if a `types.py` file exists, should still be healthy
tmpdir.join('types.py').ensure()
assert python.health_check(prefix, C.DEFAULT) is None
def test_healthy_venv_creator(python_dir):
# venv creator produces slightly different pyvenv.cfg
prefix, tmpdir = python_dir
with envcontext((('VIRTUALENV_CREATOR', 'venv'),)):
python.install_environment(prefix, C.DEFAULT, ())
assert python.health_check(prefix, C.DEFAULT) is None
def test_unhealthy_python_goes_missing(python_dir):
prefix, tmpdir = python_dir
python.install_environment(prefix, C.DEFAULT, ())
exe_name = win_exe('python')
py_exe = prefix.path(python.bin_dir('py_env-default'), exe_name)
os.remove(py_exe)
ret = python.health_check(prefix, C.DEFAULT)
assert ret == (
f'virtualenv python version did not match created version:\n'
f'- actual version: <<error retrieving version from {py_exe}>>\n'
f'- expected version: {python._version_info(sys.executable)}\n'
)
def test_unhealthy_with_version_change(python_dir):
prefix, tmpdir = python_dir
python.install_environment(prefix, C.DEFAULT, ())
with open(prefix.path('py_env-default/pyvenv.cfg'), 'a+') as f:
f.write('version_info = 1.2.3\n')
ret = python.health_check(prefix, C.DEFAULT)
assert ret == (
f'virtualenv python version did not match created version:\n'
f'- actual version: {python._version_info(sys.executable)}\n'
f'- expected version: 1.2.3\n'
)
def test_unhealthy_system_version_changes(python_dir):
prefix, tmpdir = python_dir
python.install_environment(prefix, C.DEFAULT, ())
with open(prefix.path('py_env-default/pyvenv.cfg'), 'a') as f:
f.write('base-executable = /does/not/exist\n')
ret = python.health_check(prefix, C.DEFAULT)
assert ret == (
f'base executable python version does not match created version:\n'
f'- base-executable version: <<error retrieving version from /does/not/exist>>\n' # noqa: E501
f'- expected version: {python._version_info(sys.executable)}\n'
)
def test_unhealthy_old_virtualenv(python_dir):
prefix, tmpdir = python_dir
python.install_environment(prefix, C.DEFAULT, ())
# simulate "old" virtualenv by deleting this file
os.remove(prefix.path('py_env-default/pyvenv.cfg'))
ret = python.health_check(prefix, C.DEFAULT)
assert ret == 'pyvenv.cfg does not exist (old virtualenv?)'
def test_unhealthy_unexpected_pyvenv(python_dir):
prefix, tmpdir = python_dir
python.install_environment(prefix, C.DEFAULT, ())
# simulate a buggy environment build (I don't think this is possible)
with open(prefix.path('py_env-default/pyvenv.cfg'), 'w'):
pass
ret = python.health_check(prefix, C.DEFAULT)
assert ret == "created virtualenv's pyvenv.cfg is missing `version_info`"
def test_unhealthy_then_replaced(python_dir):
prefix, tmpdir = python_dir
python.install_environment(prefix, C.DEFAULT, ())
# simulate an exe which returns an old version
exe_name = win_exe('python')
py_exe = prefix.path(python.bin_dir('py_env-default'), exe_name)
os.rename(py_exe, f'{py_exe}.tmp')
with open(py_exe, 'w') as f:
f.write('#!/usr/bin/env bash\necho 1.2.3\n')
make_executable(py_exe)
# should be unhealthy due to version mismatch
ret = python.health_check(prefix, C.DEFAULT)
assert ret == (
f'virtualenv python version did not match created version:\n'
f'- actual version: 1.2.3\n'
f'- expected version: {python._version_info(sys.executable)}\n'
)
# now put the exe back and it should be healthy again
os.replace(f'{py_exe}.tmp', py_exe)
assert python.health_check(prefix, C.DEFAULT) is None
|
{
"content_hash": "9d8e7e10a9900a22210a59700b6ff278",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 103,
"avg_line_length": 32.70754716981132,
"alnum_prop": 0.6580617248341506,
"repo_name": "pre-commit/pre-commit",
"id": "54fb98feb5fcd9618b91646f8cfaccda1d4fd931",
"size": "6936",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/languages/python_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "753"
},
{
"name": "Dart",
"bytes": "142"
},
{
"name": "Dockerfile",
"bytes": "508"
},
{
"name": "Go",
"bytes": "240"
},
{
"name": "JavaScript",
"bytes": "128"
},
{
"name": "Lua",
"bytes": "513"
},
{
"name": "Perl",
"bytes": "532"
},
{
"name": "PowerShell",
"bytes": "744"
},
{
"name": "Python",
"bytes": "511310"
},
{
"name": "R",
"bytes": "24268"
},
{
"name": "Ruby",
"bytes": "829"
},
{
"name": "Rust",
"bytes": "56"
},
{
"name": "Shell",
"bytes": "3952"
},
{
"name": "Swift",
"bytes": "181"
}
],
"symlink_target": ""
}
|
from PyQt4 import QtCore, QtGui
app = QtGui.QApplication([''])
mw = QtGui.QMainWindow() # mw = MainWindow
mw.setCentralWidget(None)
mw.showMaximized()
mw.dockWdg1 = QtGui.QDockWidget(mw)
mw.content1 = QtGui.QTreeWidget()
mw.dockWdg1.setWidget(mw.content1)
mw.addDockWidget(QtCore.Qt.TopDockWidgetArea, mw.dockWdg1)
mw.dockWdg1.setWindowTitle("1st dock widget")
mw.dockWdg2 = QtGui.QDockWidget(mw)
mw.content2 = QtGui.QTreeWidget()
mw.dockWdg2.setWidget(mw.content2)
mw.addDockWidget(QtCore.Qt.RightDockWidgetArea, mw.dockWdg2)
mw.dockWdg2.setWindowTitle("2nd dock widget")
mw.dockWdg3 = QtGui.QDockWidget(mw)
mw.content3 = QtGui.QTreeWidget()
mw.dockWdg3.setWidget(mw.content3)
mw.addDockWidget(QtCore.Qt.BottomDockWidgetArea, mw.dockWdg3)
mw.dockWdg3.setWindowTitle("3rd dock widget")
mw.show()
app.exec_()
|
{
"content_hash": "f53fcb1eae757cb27a768b0f53ade095",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 61,
"avg_line_length": 29.071428571428573,
"alnum_prop": 0.7874692874692875,
"repo_name": "dls-controls/qt_python_dock",
"id": "9554620e3b39df0e7cc5977b5680cede52f65b8d",
"size": "836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docktest.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3692"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django_token.models import Token
admin.site.register(Token)
|
{
"content_hash": "782bebcb8712b74868c7196ab38b5f9d",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 37,
"avg_line_length": 20,
"alnum_prop": 0.82,
"repo_name": "jasonbeverage/django-token",
"id": "97aba63d5813daade2383daa5bb2751f56553016",
"size": "100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_token/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "148"
},
{
"name": "Python",
"bytes": "5164"
}
],
"symlink_target": ""
}
|
from numba import njit, types
from numba.tests.gdb_support import GdbMIDriver
from numba.tests.support import TestCase, needs_subprocess
import unittest
@njit(debug=True)
def foo(x):
z = 7 + x
return x, z
@needs_subprocess
class Test(TestCase):
def test(self):
foo(120)
sz = types.intp.bitwidth
driver = GdbMIDriver(__file__)
driver.set_breakpoint(symbol="__main__::foo_241")
driver.run() # will hit cpython symbol match
driver.check_hit_breakpoint(number=1)
driver.cont() # will hit njit symbol match
driver.check_hit_breakpoint(number=1, line=10) # Ensure line number
driver.stack_list_arguments(2)
expect = ('[frame={level="0",args=[{name="x",type="int%s",'
'value="120"}]}]' % sz)
driver.assert_output(expect)
driver.quit()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "9736af86185a6c0efe6ad730def75962",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 75,
"avg_line_length": 27.515151515151516,
"alnum_prop": 0.6156387665198237,
"repo_name": "seibert/numba",
"id": "da74077bbdf5196e9ff5ced0754579210d9295fc",
"size": "980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/tests/gdb/test_break_on_symbol.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6877"
},
{
"name": "C",
"bytes": "639446"
},
{
"name": "C++",
"bytes": "93702"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "8688132"
},
{
"name": "Shell",
"bytes": "13404"
}
],
"symlink_target": ""
}
|
import test_classification
import numpy as np
from SimpleGP import SparseGPPG, SparseArray
from SimpleGP.gppg import PGCl
import os
cl = test_classification.cl
X = test_classification.X
def test_gppg():
x = map(lambda x: SparseArray.fromlist(X[x]), range(X.shape[0]))
fname = 'gppg.npy.gz'
gp = SparseGPPG.run_cl(x, cl, nprototypes=2,
fname_best=fname,
verbose=True, generations=2)
assert len(gp.prototypes) == 2
os.unlink(fname)
def test_tol():
x = map(lambda x: SparseArray.fromlist(X[x]), range(X.shape[0]))
gp = SparseGPPG.run_cl(x, cl, nprototypes=3, tol=0.05, popsize=100,
verbose=True, generations=2)
assert np.all(np.array(map(lambda x: len(x[-1]),
gp.prototypes)) == np.array([3, 1, 3]))
def test_sort_prototypes():
x = map(lambda x: SparseArray.fromlist(X[x]), range(X.shape[0]))
gp = SparseGPPG.run_cl(x, cl, nprototypes=3, tol=0.05, popsize=100,
verbose=True, generations=2)
_, perf = gp.prototypes_performance()
ps = gp._prototypes_argsort
assert np.all((ps[1:] - ps[:-1]) == 1)
def test_recall_d():
class GPD(SparseGPPG):
def distance(self, y, yh):
return -self.recall_distance(y, yh).mean()
x = map(lambda x: SparseArray.fromlist(X[x]), range(X.shape[0]))
gp = GPD.run_cl(x, cl, nprototypes=2,
verbose=True, generations=2)
r = gp.recall_distance(gp._f, gp.eval())
print r
assert np.all(r <= 1)
def test_func_select():
def func_select(x, y):
raise Exception("!!!")
x = map(lambda x: SparseArray.fromlist(X[x]), range(X.shape[0]))
try:
SparseGPPG.run_cl(x, cl, nprototypes=2,
func_select=func_select,
verbose=True, generations=2)
except Exception:
return
assert False
def test_pgcl():
x = map(lambda x: SparseArray.fromlist(X[x]), range(X.shape[0]))
fname = 'pgcl.npy.gz'
gp = PGCl.run_cl(x, cl, nprototypes=2,
fname_best=fname, popsize=100,
verbose=True, generations=2)
os.unlink(fname)
assert gp._model.sigma_.shape[1] > 3
def test_pgcl_prev_prot():
x = map(lambda x: SparseArray.fromlist(X[x]), range(X.shape[0]))
gp = PGCl.run_cl(x, cl, nprototypes=2,
verbose=True, generations=2)
print gp._pg_d.shape
assert gp._pg_d.shape[0] == 150
assert gp._pg_d.shape[1] == 3
def test_pgcl_predict():
x = map(lambda x: SparseArray.fromlist(X[x]), range(X.shape[0]))
fname = 'pgcl.npy.gz'
gp = PGCl.run_cl(x, cl, nprototypes=2,
fname_best=fname, popsize=100,
verbose=True, generations=2)
os.unlink(fname)
assert gp.fitness(gp.best) == -gp.distance(gp._f, gp.predict(x))
def test_get_params():
x = map(lambda x: SparseArray.fromlist(X[x]), range(X.shape[0]))
gp = SparseGPPG.run_cl(x, cl, nprototypes=3, tol=0.05, popsize=10,
verbose=True, generations=2)
p = gp.get_params()
assert p['popsize'] == 10
assert p['generations'] == 2
assert p['tree_cl']
|
{
"content_hash": "17ba3b85e9054a234f84f688ca2482d6",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 71,
"avg_line_length": 32.4,
"alnum_prop": 0.575925925925926,
"repo_name": "mgraffg/simplegp",
"id": "12482b1ff107157da56ff15cc84b2172386b95f3",
"size": "3240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SimpleGP/tests/test_gppg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "452908"
}
],
"symlink_target": ""
}
|
"""
List available ports.
This is a simple version of mido-ports.
"""
from __future__ import print_function
import mido
def print_ports(heading, port_names):
print(heading)
for name in port_names:
print(" '{}'".format(name))
print()
print()
print_ports('Input Ports:', mido.get_input_names())
print_ports('Output Ports:', mido.get_output_names())
|
{
"content_hash": "54f5f2f443e422f98373eb4d56a2ae2d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 53,
"avg_line_length": 19.736842105263158,
"alnum_prop": 0.6613333333333333,
"repo_name": "olemb/mido",
"id": "947a299305543481b41be4aeeff7876e05da48a9",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/ports/list_ports.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "163693"
}
],
"symlink_target": ""
}
|
from django.core.cache import cache
class SingletonMixin:
"""A base model to represents a singleton."""
def set_cache(self):
cache.set(self.__class__.__name__, self)
def save(self, *args, **kwargs): # pylint:disable=arguments-differ
self.pk = 1
super().save(*args, **kwargs)
self.set_cache()
def delete(self, *args, **kwargs): # pylint:disable=arguments-differ
pass
@classmethod
def load(cls):
raise NotImplementedError
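# Usage sketch (illustrative, not part of the original module): a concrete
# singleton would mix this into a Django model defined in an app and implement
# `load` so the cached copy is reused when present. The model and field below
# are hypothetical.
#
#   class SiteSettings(SingletonMixin, models.Model):
#       title = models.CharField(max_length=100, default='My site')
#       @classmethod
#       def load(cls):
#           obj = cache.get(cls.__name__)
#           if obj is None:
#               obj, _ = cls.objects.get_or_create(pk=1)
#               obj.set_cache()
#           return obj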
|
{
"content_hash": "e9b810d3af169be12d8d0e931a8c1eac",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 73,
"avg_line_length": 25,
"alnum_prop": 0.616,
"repo_name": "polyaxon/polyaxon",
"id": "fd94179a3b095d056418e431f3417ead8c687115",
"size": "1105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "platform/coredb/coredb/mixins/singleton.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1989"
},
{
"name": "Python",
"bytes": "5201898"
},
{
"name": "Shell",
"bytes": "1565"
}
],
"symlink_target": ""
}
|
import unittest
import tempfile
import sys
import os
import io
import datashape
from dynd import nd
import numpy as np
from blaze.datadescriptor import (
HDF5DataDescriptor, DyNDDataDescriptor, IDataDescriptor, dd_as_py)
from blaze.py2help import skipIf
from blaze.optional_packages import tables_is_here
if tables_is_here:
import tables as tb
class TestHDF5DataDescriptor(unittest.TestCase):
def setUp(self):
handle, self.hdf5_file = tempfile.mkstemp(".h5")
        os.close(handle)  # close the unneeded file handle
self.a1 = np.array([[1, 2, 3], [4, 5, 6]], dtype="int32")
self.a2 = np.array([[1, 2, 3], [3, 2, 1]], dtype="int64")
self.t1 = np.array([(1, 2, 3), (3, 2, 1)], dtype="i4,i8,f8")
with tb.open_file(self.hdf5_file, "w") as f:
f.create_array(f.root, 'a1', self.a1)
f.create_table(f.root, 't1', self.t1)
f.create_group(f.root, 'g')
f.create_array(f.root.g, 'a2', self.a2)
def tearDown(self):
os.remove(self.hdf5_file)
@skipIf(not tables_is_here, 'pytables is not installed')
def test_basic_object_type(self):
self.assertTrue(issubclass(HDF5DataDescriptor, IDataDescriptor))
dd = HDF5DataDescriptor(self.hdf5_file, '/a1')
# Make sure the right type is returned
self.assertTrue(isinstance(dd, IDataDescriptor))
self.assertEqual(dd_as_py(dd), [[1, 2, 3], [4, 5, 6]])
@skipIf(not tables_is_here, 'pytables is not installed')
def test_descriptor_iter_types(self):
dd = HDF5DataDescriptor(self.hdf5_file, '/a1')
self.assertEqual(dd.dshape, datashape.dshape('2, 3, int32'))
# Iteration should produce DyNDDataDescriptor instances
vals = []
for el in dd:
self.assertTrue(isinstance(el, DyNDDataDescriptor))
self.assertTrue(isinstance(el, IDataDescriptor))
vals.append(dd_as_py(el))
self.assertEqual(vals, [[1, 2, 3], [4, 5, 6]])
@skipIf(not tables_is_here, 'pytables is not installed')
def test_descriptor_getitem_types(self):
dd = HDF5DataDescriptor(self.hdf5_file, '/g/a2')
self.assertEqual(dd.dshape, datashape.dshape('2, 3, int64'))
# Indexing should produce DyNDDataDescriptor instances
self.assertTrue(isinstance(dd[0], DyNDDataDescriptor))
self.assertEqual(dd_as_py(dd[0]), [1,2,3])
self.assertTrue(isinstance(dd[1,2], DyNDDataDescriptor))
self.assertEqual(dd_as_py(dd[1,2]), 1)
@skipIf(not tables_is_here, 'pytables is not installed')
def test_descriptor_setitem(self):
dd = HDF5DataDescriptor(self.hdf5_file, '/g/a2')
self.assertEqual(dd.dshape, datashape.dshape('2, 3, int64'))
dd[1,2] = 10
self.assertEqual(dd_as_py(dd[1,2]), 10)
dd[1] = [10, 11, 12]
self.assertEqual(dd_as_py(dd[1]), [10, 11, 12])
@skipIf(not tables_is_here, 'pytables is not installed')
def test_descriptor_append(self):
dd = HDF5DataDescriptor(self.hdf5_file, '/t1')
tshape = '2, { f0 : int32; f1 : int64; f2 : float64 }'
self.assertEqual(dd.dshape, datashape.dshape(tshape))
dd.append([(10, 11, 12)])
dvals = {'f0': 10, 'f1': 11, 'f2': 12.}
rvals = dd_as_py(dd[2])
is_equal = [(rvals[k] == dvals[k]) for k in dvals]
self.assertEqual(is_equal, [True]*3)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "935f0df95731d80e38084ed0dc0fabc2",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 72,
"avg_line_length": 37.096774193548384,
"alnum_prop": 0.6197101449275362,
"repo_name": "cezary12/blaze",
"id": "c94f43c5e4e6b1c263a4c781b060daba18f49fa9",
"size": "3450",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "blaze/datadescriptor/tests/test_hdf5_data_descriptor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from beritest_tools import BaseBERITestCase
class test_break(BaseBERITestCase):
def test_break_epc(self):
self.assertRegisterEqual(self.MIPS.a0, self.MIPS.a5, "Unexpected EPC")
def test_break_returned(self):
self.assertRegisterEqual(self.MIPS.a1, 1, "flow broken by breakpoint instruction")
def test_break_handled(self):
self.assertRegisterEqual(self.MIPS.a2, 1, "breakpoint exception handler not run")
def test_break_exl_in_handler(self):
self.assertRegisterEqual((self.MIPS.a3 >> 1) & 0x1, 1, "EXL not set in exception handler")
def test_break_cause_bd(self):
self.assertRegisterEqual((self.MIPS.a4 >> 31) & 0x1, 0, "Branch delay (BD) flag improperly set")
def test_break_cause_code(self):
        self.assertRegisterEqual((self.MIPS.a4 >> 2) & 0x1f, 9, "Code not set to Bp")
def test_break_not_exl_after_handler(self):
self.assertRegisterEqual((self.MIPS.a6 >> 1) & 0x1, 0, "EXL still set after ERET")
|
{
"content_hash": "0b135e33fc3948720fb6637d1e94e6dd",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 104,
"avg_line_length": 43,
"alnum_prop": 0.6875631951466128,
"repo_name": "8l/beri",
"id": "93288415a407a53e9138e7591da812aead0a7d14",
"size": "2129",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cheritest/trunk/tests/cp0/test_break.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1629022"
},
{
"name": "Bluespec",
"bytes": "2336405"
},
{
"name": "C",
"bytes": "1058899"
},
{
"name": "C++",
"bytes": "1864"
},
{
"name": "Groff",
"bytes": "14381"
},
{
"name": "Haskell",
"bytes": "11711"
},
{
"name": "Lex",
"bytes": "2894"
},
{
"name": "Makefile",
"bytes": "242450"
},
{
"name": "Mathematica",
"bytes": "291"
},
{
"name": "Objective-C",
"bytes": "2387"
},
{
"name": "OpenEdge ABL",
"bytes": "568"
},
{
"name": "Perl",
"bytes": "19159"
},
{
"name": "Python",
"bytes": "1491002"
},
{
"name": "Shell",
"bytes": "91130"
},
{
"name": "SystemVerilog",
"bytes": "12058"
},
{
"name": "Tcl",
"bytes": "132818"
},
{
"name": "TeX",
"bytes": "4996"
},
{
"name": "Verilog",
"bytes": "125674"
},
{
"name": "Yacc",
"bytes": "5871"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import multiprocessing
from build_swift import argparse
from build_swift import defaults
from swift_build_support.swift_build_support import targets
__all__ = [
'HelpOption',
'SetOption',
'SetTrueOption',
'SetFalseOption',
'DisableOption',
'EnableOption',
'ChoicesOption',
'IntOption',
'StrOption',
'PathOption',
'AppendOption',
'UnsupportedOption',
'IgnoreOption',
'EXPECTED_OPTIONS',
'EXPECTED_DEFAULTS',
]
# -----------------------------------------------------------------------------
EXPECTED_DEFAULTS = {
'android': False,
'android_api_level': '21',
'android_deploy_device_path': '/data/local/tmp',
'android_icu_i18n': None,
'android_icu_i18n_include': None,
'android_icu_uc': None,
'android_icu_uc_include': None,
'android_icu_data': None,
'android_ndk': None,
'android_ndk_gcc_version': '4.9',
'android_arch': 'armv7',
'assertions': True,
'benchmark': False,
'benchmark_num_o_iterations': 3,
'benchmark_num_onone_iterations': 3,
'build_android': False,
'build_args': [],
'build_backdeployconcurrency': False,
'build_benchmarks': True,
'build_clang_tools_extra': True,
'build_cygwin': True,
'build_external_benchmarks': False,
'build_foundation': False,
'build_cmark': True,
'build_swift': True,
'build_llvm': True,
'build_freebsd': True,
'build_ios': True,
'build_ios_device': False,
'build_ios_simulator': False,
'build_jobs': multiprocessing.cpu_count(),
'build_libdispatch': False,
'build_libicu': False,
'build_linux': True,
'build_llbuild': False,
'build_lldb': False,
'build_libcxx': False,
'build_ninja': False,
'build_osx': True,
'build_playgroundsupport': False,
'build_runtime_with_host_compiler': False,
'build_stdlib_deployment_targets': ['all'],
'build_subdir': None,
'build_swift_dynamic_sdk_overlay': True,
'build_swift_dynamic_stdlib': True,
'build_swift_inspect': False,
'build_swift_static_sdk_overlay': False,
'build_swift_static_stdlib': False,
'build_swift_stdlib_unittest_extra': False,
'build_swiftpm': False,
'build_swift_driver': False,
'build_early_swift_driver': True,
'build_swiftsyntax': False,
'build_libparser_only': False,
'build_skstresstester': False,
'build_swiftformat': False,
'build_swiftevolve': False,
'build_indexstoredb': False,
'test_indexstoredb_sanitize_all': False,
'test_sourcekitlsp_sanitize_all': False,
'build_sourcekitlsp': False,
'install_swiftpm': False,
'install_swiftsyntax': False,
'install_swift_driver': False,
'swiftsyntax_verify_generated_files': False,
'install_playgroundsupport': False,
'install_sourcekitlsp': False,
'install_skstresstester': False,
'install_swiftevolve': False,
'build_toolchainbenchmarks': False,
'build_tvos': True,
'build_tvos_device': False,
'build_tvos_simulator': False,
'build_variant': 'Debug',
'build_watchos': True,
'build_watchos_device': False,
'build_watchos_simulator': False,
'build_xctest': False,
'cmake_c_launcher': None,
'cmake_cxx_launcher': None,
'clang_compiler_version': None,
'clang_profile_instr_use': None,
'clang_user_visible_version': defaults.CLANG_USER_VISIBLE_VERSION,
'clean': False,
'cmake': None,
'cmake_generator': 'Ninja',
'cmark_assertions': True,
'cmark_build_variant': 'Debug',
'compiler_vendor': defaults.COMPILER_VENDOR,
'coverage_db': None,
'cross_compile_hosts': [],
'darwin_deployment_version_ios':
defaults.DARWIN_DEPLOYMENT_VERSION_IOS,
'darwin_deployment_version_osx':
defaults.DARWIN_DEPLOYMENT_VERSION_OSX,
'darwin_deployment_version_tvos':
defaults.DARWIN_DEPLOYMENT_VERSION_TVOS,
'darwin_deployment_version_watchos':
defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS,
'darwin_symroot_path_filters': [],
'darwin_xcrun_toolchain': None,
'distcc': False,
'sccache': False,
'dry_run': False,
'dsymutil_jobs': defaults.DSYMUTIL_JOBS,
'enable_asan': False,
'enable_experimental_differentiable_programming': True,
'enable_experimental_concurrency': True,
'enable_experimental_distributed': True,
'enable_lsan': False,
'enable_sanitize_coverage': False,
'disable_guaranteed_normal_arguments': False,
'enable_stdlibcore_exclusivity_checking': False,
'enable_tsan': False,
'enable_tsan_runtime': False,
'enable_ubsan': False,
'export_compile_commands': False,
'extra_cmake_options': [],
'extra_swift_args': [],
'force_optimized_typechecker': False,
'foundation_build_variant': 'Debug',
'host_cc': None,
'host_cxx': None,
'host_libtool': None,
'host_lipo': None,
'host_target': targets.StdlibDeploymentTarget.host_target().name,
'host_test': False,
'only_executable_test': False,
'only_non_executable_test': False,
'infer_dependencies': False,
'install_backdeployconcurrency': False,
'install_prefix': targets.install_prefix(),
'install_symroot': None,
'install_destdir': None,
'install_all': False,
'ios': False,
'ios_all': False,
'legacy_impl': False,
'libdispatch_build_variant': 'Debug',
'libicu_build_variant': 'Debug',
'libswift_mode': None,
'lit_args': '-sv',
'llbuild_assertions': True,
'lldb_assertions': True,
'lldb_build_variant': 'Debug',
'lldb_build_with_xcode': '0',
'llvm_assertions': True,
'llvm_build_variant': 'Debug',
'llvm_ninja_targets': [],
'llvm_ninja_targets_for_cross_compile_hosts': [],
'llvm_max_parallel_lto_link_jobs':
defaults.LLVM_MAX_PARALLEL_LTO_LINK_JOBS,
'llvm_targets_to_build': 'X86;ARM;AArch64;PowerPC;SystemZ;Mips',
'tsan_libdispatch_test': False,
'long_test': False,
'lto_type': None,
'maccatalyst': False,
'maccatalyst_ios_tests': False,
'native_clang_tools_path': None,
'native_llvm_tools_path': None,
'native_swift_tools_path': None,
'dump_config': False,
'reconfigure': False,
'relocate_xdg_cache_home_under_build_subdir': False,
'show_sdks': False,
'skip_build': False,
'skip_local_build': False,
'stdlib_deployment_targets': None,
'stress_test': False,
'swift_analyze_code_coverage': defaults.SWIFT_ANALYZE_CODE_COVERAGE,
'swift_assertions': True,
'swift_build_variant': 'Debug',
'swift_compiler_version': None,
'swift_disable_dead_stripping': False,
'swift_darwin_module_archs': None,
'swift_darwin_supported_archs': None,
'swift_stdlib_assertions': True,
'swift_stdlib_build_variant': 'Debug',
'swift_tools_max_parallel_lto_link_jobs':
defaults.SWIFT_MAX_PARALLEL_LTO_LINK_JOBS,
'swift_user_visible_version': defaults.SWIFT_USER_VISIBLE_VERSION,
'symbols_package': None,
'clean_libdispatch': True,
'clean_foundation': True,
'clean_xctest': True,
'clean_llbuild': True,
'clean_swiftpm': True,
'clean_swift_driver': True,
'clean_early_swift_driver': False,
'test': None,
'test_early_swift_driver': None,
'test_android': False,
'test_android_host': False,
'test_cygwin': False,
'test_freebsd': False,
'test_ios': False,
'test_ios_32bit_simulator': False,
'test_watchos_32bit_simulator': True,
'test_ios_host': False,
'test_ios_simulator': False,
'test_linux': False,
'test_optimize_for_size': None,
'test_optimize_none_with_implicit_dynamic': None,
'test_optimized': None,
'test_osx': False,
'test_paths': [],
'test_swift_inspect': True,
'test_tvos': False,
'test_tvos_host': False,
'test_tvos_simulator': False,
'test_watchos': False,
'test_watchos_host': False,
'test_watchos_simulator': False,
'test_playgroundsupport': True,
'test_cmark': False,
'test_swiftpm': False,
'test_swift_driver': False,
'test_swiftsyntax': False,
'test_indexstoredb': False,
'test_sourcekitlsp': False,
'test_skstresstester': False,
'test_swiftformat': False,
'test_swiftevolve': False,
'test_toolchainbenchmarks': False,
'tvos': False,
'tvos_all': False,
'validation_test': None,
'verbose_build': False,
'watchos': False,
'watchos_all': False,
'llvm_install_components': defaults.llvm_install_components(),
'clean_install_destdir': False,
}
# -----------------------------------------------------------------------------
def _sanitize_option_string(option_string):
if option_string.startswith('--'):
return option_string[2:].replace('-', '_')
if len(option_string) == 2 and option_string[0] == '-':
return option_string[1]
raise ValueError('invalid option_string format: ' + option_string)
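# Illustrative mappings for _sanitize_option_string (a sketch added for
# clarity; not part of the original fixture):
#   _sanitize_option_string('--swift-assertions')  =>  'swift_assertions'
#   _sanitize_option_string('-j')                  =>  'j'
#   _sanitize_option_string('swift')               =>  raises ValueError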
class _BaseOption(object):
def __init__(self, option_string, dest=None, default=None):
if dest is None:
dest = _sanitize_option_string(option_string)
if default is None:
default = EXPECTED_DEFAULTS.get(dest, None)
self.option_string = option_string
self.dest = dest
self.default = default
def sanitized_string(self):
return _sanitize_option_string(self.option_string)
class HelpOption(_BaseOption):
"""Option that prints the help message and exits."""
pass
class SetOption(_BaseOption):
"""Option that accepts no arguments, setting the destination to a
hard-coded value or None.
"""
def __init__(self, *args, **kwargs):
self.value = kwargs.pop('value', None)
super(SetOption, self).__init__(*args, **kwargs)
class SetTrueOption(_BaseOption):
"""Option that accepts no arguments, setting the destination value to True
if parsed and defaulting to False otherwise.
"""
pass
class SetFalseOption(_BaseOption):
"""Option that accepts no arguments, setting the destination value to False
if parsed and defaulting to True otherwise.
"""
pass
class EnableOption(_BaseOption):
"""Option that sets the destination to True when parsed and False by default.
Can be toggled True or False with an optional bool argument.
"""
pass
class DisableOption(_BaseOption):
"""Option that sets the destination to False when parsed and True by default.
Can be toggled True or False with an optional bool argument, which is then
negated. Thus if an option is passed the value 'True' it will set the
destination to False and vice versa.
"""
pass
class ChoicesOption(_BaseOption):
"""Option that accepts an argument from a predifined list of choices."""
def __init__(self, *args, **kwargs):
self.choices = kwargs.pop('choices', None)
super(ChoicesOption, self).__init__(*args, **kwargs)
class IntOption(_BaseOption):
"""Option that accepts an int argument."""
pass
class StrOption(_BaseOption):
"""Option that accepts a str argument."""
pass
class PathOption(_BaseOption):
"""Option that accepts a path argument."""
pass
class AppendOption(_BaseOption):
"""Option that can be called more than once to append argument to internal
list.
"""
pass
class UnsupportedOption(_BaseOption):
"""Option that is not supported."""
pass
class IgnoreOption(_BaseOption):
"""Option that should be ignored when generating tests. Instead a test
should be written manually as the behavior cannot or should not be auto-
generated.
"""
pass
class BuildScriptImplOption(_BaseOption):
"""Option that gets forwarded to build-script-impl by migration.py and is
only listed for disambiguation by argparse.
"""
pass
# -----------------------------------------------------------------------------
EXPECTED_OPTIONS = [
# Ignore the help options since they always call sys.exit(0)
HelpOption('-h', dest='help', default=argparse.SUPPRESS),
HelpOption('--help', dest='help', default=argparse.SUPPRESS),
SetOption('--debug', dest='build_variant', value='Debug'),
SetOption('--debug-cmark', dest='cmark_build_variant', value='Debug'),
SetOption('--debug-foundation',
dest='foundation_build_variant', value='Debug'),
SetOption('--debug-libdispatch',
dest='libdispatch_build_variant', value='Debug'),
SetOption('--debug-libicu', dest='libicu_build_variant', value='Debug'),
SetOption('--debug-lldb', dest='lldb_build_variant', value='Debug'),
SetOption('--lldb-build-with-xcode', dest='lldb_build_with_xcode',
value='1'),
SetOption('--lldb-build-with-cmake', dest='lldb_build_with_xcode',
value='0'),
SetOption('--debug-llvm', dest='llvm_build_variant', value='Debug'),
SetOption('--debug-swift', dest='swift_build_variant', value='Debug'),
SetOption('--debug-swift-stdlib',
dest='swift_stdlib_build_variant', value='Debug'),
SetOption('--eclipse',
dest='cmake_generator', value='Eclipse CDT4 - Ninja'),
SetOption('--make', dest='cmake_generator', value='Unix Makefiles'),
SetOption('--release', dest='build_variant', value='Release'),
SetOption('--release-debuginfo',
dest='build_variant', value='RelWithDebInfo'),
SetOption('--min-size-release',
dest='build_variant', value='MinSizeRel'),
SetOption('--xcode', dest='cmake_generator', value='Xcode'),
SetOption('-R', dest='build_variant', value='Release'),
SetOption('-d', dest='build_variant', value='Debug'),
SetOption('-e', dest='cmake_generator', value='Eclipse CDT4 - Ninja'),
SetOption('-m', dest='cmake_generator', value='Unix Makefiles'),
SetOption('-r', dest='build_variant', value='RelWithDebInfo'),
SetOption('-x', dest='cmake_generator', value='Xcode'),
# FIXME: Convert these options to set_true actions
SetOption('--assertions', value=True),
SetOption('--cmark-assertions', value=True),
SetOption('--lldb-assertions', value=True),
SetOption('--llvm-assertions', value=True),
SetOption('--llbuild-assertions', value=True),
SetOption('--swift-assertions', value=True),
SetOption('--swift-stdlib-assertions', value=True),
SetOption('-T', dest='validation_test', value=True),
SetOption('-o', dest='test_optimized', value=True),
SetOption('-s', dest='test_optimize_for_size', value=True),
SetOption('-y',
dest='test_optimize_none_with_implicit_dynamic', value=True),
SetOption('-t', dest='test', value=True),
SetOption('-a', dest='assertions', value=True),
# FIXME: Convert these options to set_false actions
SetOption('--no-assertions', dest='assertions', value=False),
SetOption('-A', dest='assertions', value=False),
SetOption('--no-lldb-assertions', dest='lldb_assertions', value=False),
SetOption('--no-llvm-assertions', dest='llvm_assertions', value=False),
SetOption('--no-llbuild-assertions',
dest='llbuild_assertions', value=False),
SetOption('--no-swift-assertions', dest='swift_assertions', value=False),
SetOption('--no-swift-stdlib-assertions',
dest='swift_stdlib_assertions', value=False),
SetOption('--skip-ios', dest='ios', value=False),
SetOption('--skip-tvos', dest='tvos', value=False),
SetOption('--skip-watchos', dest='watchos', value=False),
SetOption('--skip-test-early-swift-driver',
dest='test_early_swift_driver', value=False),
SetTrueOption('--back-deploy-concurrency', dest='build_backdeployconcurrency'),
SetTrueOption('--install-back-deploy-concurrency',
dest='install_backdeployconcurrency'),
SetTrueOption('--benchmark'),
SetTrueOption('--clean'),
SetTrueOption('--clean-install-destdir'),
SetTrueOption('--dry-run'),
SetTrueOption('--dump-config'),
SetTrueOption('--disable-guaranteed-normal-arguments'),
SetTrueOption('--enable-stdlibcore-exclusivity-checking'),
SetTrueOption('--force-optimized-typechecker'),
SetTrueOption('--ios'),
SetTrueOption('--llbuild', dest='build_llbuild'),
SetTrueOption('--lldb', dest='build_lldb'),
SetTrueOption('--libcxx', dest='build_libcxx'),
SetTrueOption('--maccatalyst', dest='maccatalyst'),
SetTrueOption('--maccatalyst-ios-tests', dest='maccatalyst_ios_tests'),
SetTrueOption('--playgroundsupport', dest='build_playgroundsupport'),
SetTrueOption('--install-playgroundsupport',
dest='install_playgroundsupport'),
SetTrueOption('--skip-build'),
SetTrueOption('--swiftpm', dest='build_swiftpm'),
SetTrueOption('--swift-driver', dest='build_swift_driver'),
SetTrueOption('--swiftsyntax', dest='build_swiftsyntax'),
SetTrueOption('--build-libparser-only', dest='build_libparser_only'),
SetTrueOption('--skstresstester', dest='build_skstresstester'),
SetTrueOption('--swiftformat', dest='build_swiftformat'),
SetTrueOption('--swiftevolve', dest='build_swiftevolve'),
SetTrueOption('-B', dest='benchmark'),
SetTrueOption('-S', dest='skip_build'),
SetTrueOption('-b', dest='build_llbuild'),
SetTrueOption('-c', dest='clean'),
SetTrueOption('-i', dest='ios'),
SetTrueOption('-l', dest='build_lldb'),
SetTrueOption('-n', dest='dry_run'),
SetTrueOption('-p', dest='build_swiftpm'),
SetTrueOption('--legacy-impl', dest='legacy_impl'),
SetTrueOption('--infer', dest='infer_dependencies'),
SetTrueOption('--reconfigure'),
EnableOption('--android'),
EnableOption('--build-external-benchmarks'),
EnableOption('--build-ninja'),
EnableOption('--build-runtime-with-host-compiler'),
EnableOption('--build-swift-dynamic-sdk-overlay'),
EnableOption('--build-swift-dynamic-stdlib'),
EnableOption('--build-swift-static-sdk-overlay'),
EnableOption('--build-swift-static-stdlib'),
EnableOption('--build-swift-stdlib-unittest-extra'),
EnableOption('--distcc'),
EnableOption('--sccache'),
EnableOption('--enable-asan'),
EnableOption('--enable-experimental-differentiable-programming'),
EnableOption('--enable-experimental-concurrency'),
EnableOption('--enable-experimental-distributed'),
EnableOption('--enable-lsan'),
EnableOption('--enable-sanitize-coverage'),
EnableOption('--enable-tsan'),
EnableOption('--enable-tsan-runtime'),
EnableOption('--enable-ubsan'),
EnableOption('--export-compile-commands'),
EnableOption('--foundation', dest='build_foundation'),
EnableOption('--host-test'),
EnableOption('--only-executable-test'),
EnableOption('--only-non-executable-test'),
EnableOption('--libdispatch', dest='build_libdispatch'),
EnableOption('--libicu', dest='build_libicu'),
EnableOption('--indexstore-db', dest='build_indexstoredb'),
EnableOption('--test-indexstore-db-sanitize-all',
dest='test_indexstoredb_sanitize_all'),
EnableOption('--sourcekit-lsp', dest='build_sourcekitlsp'),
EnableOption('--test-sourcekit-lsp-sanitize-all',
dest='test_sourcekitlsp_sanitize_all'),
EnableOption('--install-swiftsyntax', dest='install_swiftsyntax'),
EnableOption('--swiftsyntax-verify-generated-files',
dest='swiftsyntax_verify_generated_files'),
EnableOption('--install-swiftpm', dest='install_swiftpm'),
EnableOption('--install-swift-driver', dest='install_swift_driver'),
EnableOption('--install-sourcekit-lsp', dest='install_sourcekitlsp'),
EnableOption('--install-skstresstester', dest='install_skstresstester'),
EnableOption('--install-swiftevolve', dest='install_swiftevolve'),
EnableOption('--toolchain-benchmarks', dest='build_toolchainbenchmarks'),
EnableOption('--swift-inspect', dest='build_swift_inspect'),
EnableOption('--tsan-libdispatch-test'),
EnableOption('--long-test'),
EnableOption('--show-sdks'),
EnableOption('--skip-local-build'),
EnableOption('--stress-test'),
EnableOption('--test'),
EnableOption('--test-optimize-for-size'),
EnableOption('--test-optimize-none-with-implicit-dynamic'),
EnableOption('--test-optimized'),
EnableOption('--tvos'),
EnableOption('--validation-test'),
EnableOption('--verbose-build'),
EnableOption('--watchos'),
EnableOption('--xctest', dest='build_xctest'),
EnableOption('--swift-disable-dead-stripping'),
EnableOption('--clean-early-swift-driver', dest='clean_early_swift_driver'),
DisableOption('--skip-build-cmark', dest='build_cmark'),
DisableOption('--skip-build-llvm', dest='build_llvm'),
DisableOption('--skip-build-swift', dest='build_swift'),
DisableOption('--skip-build-android', dest='build_android'),
DisableOption('--skip-build-benchmarks', dest='build_benchmarks'),
DisableOption('--skip-build-cygwin', dest='build_cygwin'),
DisableOption('--skip-build-freebsd', dest='build_freebsd'),
DisableOption('--skip-build-ios', dest='build_ios'),
DisableOption('--skip-build-ios-device', dest='build_ios_device'),
DisableOption('--skip-build-ios-simulator',
dest='build_ios_simulator'),
DisableOption('--skip-build-linux', dest='build_linux'),
DisableOption('--skip-build-osx', dest='build_osx'),
DisableOption('--skip-build-tvos', dest='build_tvos'),
DisableOption('--skip-build-tvos-device', dest='build_tvos_device'),
DisableOption('--skip-build-tvos-simulator',
dest='build_tvos_simulator'),
DisableOption('--skip-build-watchos', dest='build_watchos'),
DisableOption('--skip-build-watchos-device',
dest='build_watchos_device'),
DisableOption('--skip-build-watchos-simulator',
dest='build_watchos_simulator'),
DisableOption('--skip-clean-libdispatch', dest='clean_libdispatch'),
DisableOption('--skip-clean-foundation', dest='clean_foundation'),
DisableOption('--skip-clean-xctest', dest='clean_xctest'),
DisableOption('--skip-clean-llbuild', dest='clean_llbuild'),
DisableOption('--skip-early-swift-driver', dest='build_early_swift_driver'),
DisableOption('--skip-clean-swiftpm', dest='clean_swiftpm'),
DisableOption('--skip-clean-swift-driver', dest='clean_swift_driver'),
DisableOption('--skip-test-android', dest='test_android'),
DisableOption('--skip-test-android-host', dest='test_android_host'),
DisableOption('--skip-test-cygwin', dest='test_cygwin'),
DisableOption('--skip-test-freebsd', dest='test_freebsd'),
DisableOption('--skip-test-ios', dest='test_ios'),
DisableOption('--skip-test-ios-32bit-simulator',
dest='test_ios_32bit_simulator'),
DisableOption('--skip-test-watchos-32bit-simulator',
dest='test_watchos_32bit_simulator'),
DisableOption('--skip-test-ios-host', dest='test_ios_host'),
DisableOption('--skip-test-ios-simulator', dest='test_ios_simulator'),
DisableOption('--skip-test-linux', dest='test_linux'),
DisableOption('--skip-test-osx', dest='test_osx'),
DisableOption('--skip-test-tvos', dest='test_tvos'),
DisableOption('--skip-test-tvos-host', dest='test_tvos_host'),
DisableOption('--skip-test-tvos-simulator',
dest='test_tvos_simulator'),
DisableOption('--skip-test-watchos', dest='test_watchos'),
DisableOption('--skip-test-watchos-host', dest='test_watchos_host'),
DisableOption('--skip-test-watchos-simulator',
dest='test_watchos_simulator'),
DisableOption('--skip-test-playgroundsupport',
dest='test_playgroundsupport'),
DisableOption('--skip-test-cmark', dest='test_cmark'),
DisableOption('--skip-test-swiftpm', dest='test_swiftpm'),
DisableOption('--skip-test-swift-driver', dest='test_swift_driver'),
DisableOption('--skip-test-swiftsyntax', dest='test_swiftsyntax'),
DisableOption('--skip-test-indexstore-db', dest='test_indexstoredb'),
DisableOption('--skip-test-sourcekit-lsp', dest='test_sourcekitlsp'),
DisableOption('--skip-test-skstresstester', dest='test_skstresstester'),
DisableOption('--skip-test-swiftformat', dest='test_swiftformat'),
DisableOption('--skip-test-swiftevolve', dest='test_swiftevolve'),
DisableOption('--skip-test-toolchain-benchmarks',
dest='test_toolchainbenchmarks'),
DisableOption('--skip-test-swift-inspect',
dest='test_swift_inspect'),
DisableOption('--skip-build-clang-tools-extra',
dest='build_clang_tools_extra'),
ChoicesOption('--android-ndk-gcc-version',
choices=['4.8', '4.9']),
ChoicesOption('--compiler-vendor',
choices=['none', 'apple']),
ChoicesOption('--swift-analyze-code-coverage',
choices=['false', 'not-merged', 'merged']),
ChoicesOption('--android-arch',
choices=['armv7', 'aarch64']),
StrOption('--android-api-level'),
StrOption('--build-args'),
StrOption('--build-stdlib-deployment-targets'),
StrOption('--darwin-deployment-version-ios'),
StrOption('--darwin-deployment-version-osx'),
StrOption('--darwin-deployment-version-tvos'),
StrOption('--darwin-deployment-version-watchos'),
StrOption('--darwin-xcrun-toolchain'),
StrOption('--host-target'),
StrOption('--lit-args'),
StrOption('--llvm-targets-to-build'),
StrOption('--stdlib-deployment-targets'),
StrOption('--swift-darwin-module-archs'),
StrOption('--swift-darwin-supported-archs'),
PathOption('--android-deploy-device-path'),
PathOption('--android-icu-i18n'),
PathOption('--android-icu-i18n-include'),
PathOption('--android-icu-uc'),
PathOption('--android-icu-uc-include'),
PathOption('--android-icu-data'),
PathOption('--android-ndk'),
PathOption('--build-subdir'),
SetTrueOption('--relocate-xdg-cache-home-under-build-subdir'),
PathOption('--clang-profile-instr-use'),
PathOption('--cmake'),
PathOption('--coverage-db'),
PathOption('--host-cc'),
PathOption('--host-cxx'),
PathOption('--host-libtool'),
PathOption('--host-lipo'),
PathOption('--install-prefix'),
PathOption('--install-symroot'),
PathOption('--install-destdir'),
EnableOption('--install-all'),
PathOption('--native-clang-tools-path'),
PathOption('--native-llvm-tools-path'),
PathOption('--native-swift-tools-path'),
PathOption('--symbols-package'),
PathOption('--cmake-c-launcher'),
PathOption('--cmake-cxx-launcher'),
IntOption('--benchmark-num-o-iterations'),
IntOption('--benchmark-num-onone-iterations'),
IntOption('--jobs', dest='build_jobs'),
IntOption('--llvm-max-parallel-lto-link-jobs'),
IntOption('--swift-tools-max-parallel-lto-link-jobs'),
IntOption('-j', dest='build_jobs'),
IntOption('--dsymutil-jobs', dest='dsymutil_jobs'),
AppendOption('--cross-compile-hosts'),
AppendOption('--extra-cmake-options'),
AppendOption('--extra-swift-args'),
AppendOption('--test-paths'),
AppendOption('--llvm-ninja-targets'),
AppendOption('--llvm-ninja-targets-for-cross-compile-hosts'),
AppendOption('--darwin-symroot-path-filters'),
UnsupportedOption('--build-jobs'),
UnsupportedOption('--common-cmake-options'),
UnsupportedOption('--only-execute'),
UnsupportedOption('--skip-test-optimize-for-size'),
UnsupportedOption('--skip-test-optimize-none-with-implicit-dynamic'),
UnsupportedOption('--skip-test-optimized'),
    # Options forwarded to build-script-impl
BuildScriptImplOption('--skip-test-swift', dest='impl_skip_test_swift'),
BuildScriptImplOption('--install-swift', dest='impl_install_swift'),
# NOTE: LTO flag is a special case that acts both as an option and has
# valid choices
SetOption('--lto', dest='lto_type'),
ChoicesOption('--lto', dest='lto_type', choices=['thin', 'full']),
SetOption('--libswift', dest='libswift_mode'),
ChoicesOption('--libswift', dest='libswift_mode',
choices=['off', 'hosttools', 'bootstrapping',
'bootstrapping-with-hostlibs']),
# NOTE: We'll need to manually test the behavior of these since they
# validate compiler version strings.
IgnoreOption('--clang-compiler-version'),
IgnoreOption('--clang-user-visible-version'),
IgnoreOption('--swift-compiler-version'),
IgnoreOption('--swift-user-visible-version'),
# TODO: Migrate to unavailable options once new parser is in place
IgnoreOption('-I'),
IgnoreOption('--ios-all'),
IgnoreOption('--tvos-all'),
IgnoreOption('--watchos-all'),
StrOption('--llvm-install-components'),
]
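# Illustrative use of the option classes above (a sketch added for clarity;
# not part of the original fixture):
#   opt = SetTrueOption('--benchmark')
#   opt.dest     =>  'benchmark' (derived via _sanitize_option_string)
#   opt.default  =>  False (looked up in EXPECTED_DEFAULTS)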
|
{
"content_hash": "383f2c16558cd7c9e3ed7b580224a5b3",
"timestamp": "",
"source": "github",
"line_count": 747,
"max_line_length": 83,
"avg_line_length": 38.58902275769746,
"alnum_prop": 0.6511482689238881,
"repo_name": "xwu/swift",
"id": "4004159d2966d03d2ac81154f0184a20b6d24871",
"size": "29172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/build_swift/tests/expected_options.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13232"
},
{
"name": "C",
"bytes": "241220"
},
{
"name": "C++",
"bytes": "35987227"
},
{
"name": "CMake",
"bytes": "576952"
},
{
"name": "D",
"bytes": "1107"
},
{
"name": "DTrace",
"bytes": "2593"
},
{
"name": "Emacs Lisp",
"bytes": "57302"
},
{
"name": "LLVM",
"bytes": "70638"
},
{
"name": "MATLAB",
"bytes": "2576"
},
{
"name": "Makefile",
"bytes": "1841"
},
{
"name": "Objective-C",
"bytes": "435272"
},
{
"name": "Objective-C++",
"bytes": "249701"
},
{
"name": "Python",
"bytes": "1702325"
},
{
"name": "Roff",
"bytes": "3495"
},
{
"name": "Ruby",
"bytes": "2091"
},
{
"name": "Shell",
"bytes": "176554"
},
{
"name": "Swift",
"bytes": "32859849"
},
{
"name": "Vim script",
"bytes": "19645"
},
{
"name": "sed",
"bytes": "1050"
}
],
"symlink_target": ""
}
|
from .._iree_structured_transform_ops_gen import *
|
{
"content_hash": "7dbd8a2783f26a6e191fe3ed04ff71bf",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 50,
"avg_line_length": 51,
"alnum_prop": 0.7647058823529411,
"repo_name": "iree-org/iree",
"id": "563e20bc0b046f38d87809b9db185e78901884c7",
"size": "269",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "integrations/tensorflow/iree-dialects/python/iree/compiler/dialects/transform/iree_structured.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "23010"
},
{
"name": "Batchfile",
"bytes": "353"
},
{
"name": "C",
"bytes": "3830546"
},
{
"name": "C++",
"bytes": "8161374"
},
{
"name": "CMake",
"bytes": "899403"
},
{
"name": "Dockerfile",
"bytes": "28245"
},
{
"name": "GLSL",
"bytes": "2629"
},
{
"name": "HTML",
"bytes": "31018"
},
{
"name": "Java",
"bytes": "31697"
},
{
"name": "JavaScript",
"bytes": "18714"
},
{
"name": "MLIR",
"bytes": "5606822"
},
{
"name": "NASL",
"bytes": "3852"
},
{
"name": "PowerShell",
"bytes": "7893"
},
{
"name": "Python",
"bytes": "1143963"
},
{
"name": "Shell",
"bytes": "248374"
},
{
"name": "Starlark",
"bytes": "600260"
}
],
"symlink_target": ""
}
|
"""
$Id$
"""
from email import message_from_string
from utils import log, log_exc
from exceptions import MailInException
from config import config_instance
from django.shortcuts import render_to_response
from django.http import HttpResponse
class MailInTransport(object):
def __call__(self, request, *args, **kw):
mail = request.POST.get('mail') or request.GET.get('mail')
if not mail:
return HttpResponse('failed', status=500)
# convert mail
try:
msg = message_from_string(mail.encode('utf-8'))
except:
log_exc('Error parsing email')
return HttpResponse('failed on parsing', status=500)
# check message for loops, wrong mta hosts, etc
try:
config_instance.checkMessage(msg, mail, request)
except MailInException, msg:
log(str(msg))
return HttpResponse('failed on checking', status=500)
# process message
try:
config_instance.process(msg)
except MailInException, msg:
log_exc('Error processing email')
return HttpResponse('failed on processing', status=500)
return HttpResponse('success')
transport = MailInTransport()
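# Illustrative urlconf hookup (a sketch only; the URL pattern and urlconf
# style are assumptions, not part of this module):
#   from django.conf.urls import patterns
#   urlpatterns = patterns('',
#       (r'^mailin/$', 'email_processing.transport.transport'),
#   )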
|
{
"content_hash": "94df6b5a323eec935986535790fb1b61",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 67,
"avg_line_length": 27.77777777777778,
"alnum_prop": 0.6304,
"repo_name": "blw0rm/django-email-processing",
"id": "716e02065e7d6753ba2324b7701e0edec0925d74",
"size": "1250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "email_processing/transport.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "322"
},
{
"name": "Python",
"bytes": "14576"
}
],
"symlink_target": ""
}
|
import logging
import requests
import anyjson as json
import urllib2
from daikon import exceptions
# ---------------------
# Logging
# ---------------------
log = logging.getLogger('daikon')
# ---------------------
# Classes
# ---------------------
class Index(object):
def __init__(self, connection):
self._connection = connection
def create(self, index_name, shards, replicas):
try:
data = {"settings": {"number_of_shards": shards,
"number_of_replicas": replicas}}
self._connection.post(index_name, data)
except (requests.RequestException, urllib2.HTTPError), e:
msg = 'Error Creating Index - %s' % (e)
raise exceptions.ActionIndexError(msg)
return index_name
def delete(self, index_name):
try:
self._connection.delete(index_name)
except (requests.RequestException, urllib2.HTTPError), e:
msg = 'Error Deleting Index - %s' % (e)
raise exceptions.ActionIndexError(msg)
return index_name
def open(self, index_name):
try:
url = '%s/_open' % index_name
self._connection.post(url)
except (requests.RequestException, urllib2.HTTPError), e:
msg = 'Error Opening Index - %s' % (e)
raise exceptions.ActionIndexError(msg)
return index_name
def close(self, index_name):
try:
url = '%s/_close' % index_name
self._connection.post(url)
except (requests.RequestException, urllib2.HTTPError), e:
msg = 'Error Closing Index - %s' % (e)
raise exceptions.ActionIndexError(msg)
return index_name
def status(self, index_name, extended=False):
try:
url = '%s/_status' % index_name
res = json.loads(self._connection.get(url).content)
except (requests.RequestException, urllib2.HTTPError), e:
msg = 'Error Fetching Index Status - %s' % (e)
raise exceptions.ActionIndexError(msg)
output = {}
size = {}
doc = {}
merge = {}
if index_name not in res['indices']:
output['Status'] = 'Closed'
return {index_name: output}
else:
output['Status'] = 'Open'
output['Size'] = size
output['Documents'] = doc
output['Merge'] = merge
status = res['indices'][index_name]
size['Primary'] = status['index']['primary_size']
doc['Current'] = status['docs']['num_docs']
merge['Total'] = status['merges']['total']
if extended:
size['Total'] = status['index']['size']
doc['Max'] = status['docs']['max_doc']
doc['Deleted'] = status['docs']['deleted_docs']
merge['Current'] = status['merges']['current']
shards = {}
for shard, value in status['shards'].iteritems():
s_data = {}
value = value[0]
s_data['State'] = value['routing']['state']
s_data['Size'] = value['index']['size']
s_docs = {}
s_docs['Current'] = value['docs']['num_docs']
s_docs['Max'] = value['docs']['max_doc']
s_docs['Deleted'] = value[u'docs']['deleted_docs']
s_data['Documents'] = s_docs
shards['Shard %s' % shard] = s_data
output['Shards'] = shards
return {index_name: output}
def list(self, extended=False):
try:
health = self._connection.health
state = self._connection.state
except (requests.RequestException, urllib2.HTTPError), e:
msg = 'Error Listing Indexes - %s' % (e)
raise exceptions.ActionIndexError(msg)
output = {}
for index in state:
out = {}
if extended:
out['state'] = state[index][u'state']
if out['state'] == 'close':
out['status'] = 'closed'
else:
out['status'] = health[index][u'status']
settings = state[index]['settings']
out['shards'] = settings['index.number_of_shards']
out['replicas'] = settings['index.number_of_replicas']
output[index] = out
return output
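# Illustrative usage (a sketch; assumes a daikon connection object created
# elsewhere, e.g. by the command-line front end):
#   index = Index(connection)
#   index.create('logs', shards=5, replicas=1)
#   print index.status('logs', extended=True)
#   index.close('logs')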
|
{
"content_hash": "2ae0fc522868d7340b530b7e5c459657",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 70,
"avg_line_length": 31.91304347826087,
"alnum_prop": 0.5122615803814714,
"repo_name": "neogenix/daikon",
"id": "4d4a504b3b70e34248a5ffb3e6f5aeaae57db3d7",
"size": "5133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daikon/managers/index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "40380"
}
],
"symlink_target": ""
}
|
"""Generating migration for a PRAW object
Revision ID: 960085fce39c
Revises: 4d46b88366fc
Create Date: 2016-06-13 17:30:49.056215
"""
# revision identifiers, used by Alembic.
revision = '960085fce39c'
down_revision = '4d46b88366fc'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_development():
### commands auto generated by Alembic - please adjust! ###
op.create_table('praw_keys',
sa.Column('id', sa.String(256), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('access_token', sa.String(256), nullable=True),
sa.Column('scope', sa.String(256), nullable=True),
sa.Column('refresh_token', sa.String(256), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade_development():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('praw_keys')
### end Alembic commands ###
def upgrade_test():
### commands auto generated by Alembic - please adjust! ###
op.create_table('praw_keys',
sa.Column('id', sa.String(256), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('access_token', sa.String(256), nullable=True),
sa.Column('scope', sa.String(256), nullable=True),
sa.Column('refresh_token', sa.String(256), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade_test():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('praw_keys')
### end Alembic commands ###
def upgrade_production():
### commands auto generated by Alembic - please adjust! ###
op.create_table('praw_keys',
sa.Column('id', sa.String(256), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('access_token', sa.String(256), nullable=True),
sa.Column('scope', sa.String(256), nullable=True),
sa.Column('refresh_token', sa.String(256), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade_production():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('praw_keys')
### end Alembic commands ###
|
{
"content_hash": "58ee9a8095c99c61959bcd378f17e82c",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 63,
"avg_line_length": 28.32941176470588,
"alnum_prop": 0.6611295681063123,
"repo_name": "c4fcm/CivilServant",
"id": "b232a77fa11a92f0c22876f94369e7164b1f764c",
"size": "2408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alembic/versions/960085fce39c_generating_migration_for_a_praw_object.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "923"
},
{
"name": "Python",
"bytes": "209127"
}
],
"symlink_target": ""
}
|
import os.path
from getpass import getpass
from nexus import Client
# First instantiate a client object either with a dictionary or with a yaml file
pwd = os.path.dirname(__file__)
client = Client(config_file=os.path.join(pwd, 'sample.yml'))
# Generate a url for the end user to use to authorize this client/authenticate.
url = client.generate_request_url()
print "Please authenticate using the following url"
print url
token = raw_input("Please copy the resulting code here: ")
# At this point the end user needs to authenticate with the supplied url. The
# easiest way to do this is: curl -k --user test:test1 "<supplied_url>". The
# result will contain the token in the "code" field. Paste that here.
# Validate the token:
user = client.authenticate_user(token)
if user is not None:
print "Yup, you are {0}".format(user)
else:
print "That is not a valid authorization code"
# Get an access key for yourself using RSA:
print client.request_client_credential(user, lambda: getpass("Private Key Password"))
print "Get a request token using rsa authentication"
print client.rsa_get_request_token(user, lambda: getpass("Private Key Password"))
|
{
"content_hash": "eabb0569328251bada38ee2f7b7a4c57",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 85,
"avg_line_length": 39.89655172413793,
"alnum_prop": 0.7536732929991357,
"repo_name": "kbase/auth_service",
"id": "abba283c230317f05db5f6dff8e1a184a1d9528d",
"size": "1157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-libs/python-nexus-client/sample/use_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "8892"
},
{
"name": "Perl",
"bytes": "3551"
},
{
"name": "Python",
"bytes": "187138"
},
{
"name": "Shell",
"bytes": "932"
}
],
"symlink_target": ""
}
|
import re
import sys
import traceback
import unittest
from robot.utils.asserts import assert_equal, assert_true, assert_raises
from robot.utils.error import get_error_details, get_error_message, ErrorDetails
def format_traceback():
return ''.join(traceback.format_exception(*sys.exc_info())).rstrip()
def format_message():
return ''.join(traceback.format_exception_only(*sys.exc_info()[:2])).rstrip()
class TestGetErrorDetails(unittest.TestCase):
def test_get_error_details(self):
for exception, args, exp_msg in [
(AssertionError, ['My Error'], 'My Error'),
(AssertionError, [None], 'None'),
(AssertionError, [], 'AssertionError'),
(Exception, ['Another Error'], 'Another Error'),
(ValueError, ['Something'], 'ValueError: Something'),
(AssertionError, ['Msg\nin 3\nlines'], 'Msg\nin 3\nlines'),
(ValueError, ['2\nlines'], 'ValueError: 2\nlines')]:
try:
raise exception(*args)
except:
error1 = ErrorDetails()
error2 = ErrorDetails(full_traceback=False)
message1, tb1 = get_error_details()
message2, tb2 = get_error_details(full_traceback=False)
message3 = get_error_message()
python_msg = format_message()
python_tb = format_traceback()
for msg in message1, message2, message3, error1.message, error2.message:
assert_equal(msg, exp_msg)
assert_true(tb1.startswith('Traceback (most recent call last):'))
assert_true(tb1.endswith(exp_msg))
assert_true(tb2.startswith('Traceback (most recent call last):'))
assert_true(exp_msg not in tb2)
assert_equal(tb1, error1.traceback)
assert_equal(tb2, error2.traceback)
assert_equal(tb1, python_tb)
assert_equal(tb1, f'{tb2}\n{python_msg}')
def test_chaining(self):
try:
1/0
except Exception:
try:
raise ValueError
except Exception as err:
try:
raise RuntimeError('last error') from err
except Exception as err:
assert_equal(ErrorDetails(err).traceback, format_traceback())
class TestRemoveRobotEntriesFromTraceback(unittest.TestCase):
def test_both_robot_and_non_robot_entries(self):
def raises():
raise Exception
self._verify_traceback(r'''
Traceback \(most recent call last\):
File ".*", line \d+, in raises
raise Exception
'''.strip(), assert_raises, AssertionError, raises)
def test_remove_entries_with_lambda_and_multiple_entries(self):
def raises():
1/0
raising_lambda = lambda: raises()
self._verify_traceback(r'''
Traceback \(most recent call last\):
File ".*", line \d+, in <lambda.*>
raising_lambda = lambda: raises\(\)
File ".*", line \d+, in raises
1/0
'''.strip(), assert_raises, AssertionError, raising_lambda)
def test_only_robot_entries(self):
self._verify_traceback(r'''
Traceback \(most recent call last\):
None
'''.strip(), assert_equal, 1, 2)
def _verify_traceback(self, expected, method, *args):
try:
method(*args)
except Exception as error:
# first tb entry originates from this file and must be excluded
error.__traceback__ = error.__traceback__.tb_next
tb = ErrorDetails(error).traceback
else:
raise AssertionError
if not re.match(expected, tb):
raise AssertionError('\nExpected:\n%s\n\nActual:\n%s' % (expected, tb))
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "36d6c7466a7a86378d8b6ea700e7ccbd",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 84,
"avg_line_length": 36.198113207547166,
"alnum_prop": 0.5830075579880115,
"repo_name": "HelioGuilherme66/robotframework",
"id": "38cad78c0588a724d061568fab302ffedcceb5e3",
"size": "3837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utest/utils/test_error.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44706"
},
{
"name": "HTML",
"bytes": "86409"
},
{
"name": "JavaScript",
"bytes": "162950"
},
{
"name": "Python",
"bytes": "2671114"
},
{
"name": "RobotFramework",
"bytes": "1231105"
}
],
"symlink_target": ""
}
|
import sys
import os
import time
import datetime
import TestHelper
import re
from tempfile import gettempdir
from OSEHRAHelper import PROMPT
introText = """**************************************************
* Welcome to VistA
**************************************************
*
* Use the following credentials for Robert Alexander
* Access: fakedoc1
* Verify: 1Doc!@#$
* Electronic Signature: ROBA123
*
* Use the following credentials for Mary Smith (Nurse)
* Access: fakenurse1
* Verify: 1Nur!@#$
* Electronic Signature: MARYS123
*
* Use the following credentials for Joe Clerk (Clerk)
* Access: fakeclerk1
* Verify: 1Cle!@#$
* Electronic Signature: CLERKJ123
*
*
* This instance was built from a VistA-M repository from
* %s built on %s
*
* If you have any issue, please email your question to admin@osehra.org
**************************************************
"""
def startFileman(VistA):
# Start FileMan as the programmer user and set XUMF to 1 which lets the user
# change information in Kernel files
# Starts at the VistA Prompt
VistA.wait(PROMPT)
VistA.write('S DUZ=1 S XUMF=1 D Q^DI')
VistA.wait('Select OPTION:')
def signonZU(VistA,acc_code,ver_code):
# Sign a user into the ZU menu system
# The User must have a valid access code and verify code.
# If the user needs to change the Verify Code, the script will append a "!" to the old code
# and use that as the new one.
# Starts at the VistA prompt.
VistA.wait(PROMPT,60)
VistA.write('D ^ZU')
VistA.wait('ACCESS CODE:')
VistA.write(acc_code)
VistA.wait('VERIFY CODE:')
VistA.write(ver_code)
index = VistA.multiwait(['TYPE NAME','verify code:'])
if index==1:
VistA.write(ver_code)
VistA.wait('VERIFY CODE:')
VistA.write(ver_code+"!")
VistA.wait('right:')
VistA.write(ver_code+"!")
VistA.wait('TYPE NAME:')
VistA.write('')
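# Illustrative call (a sketch using the demo credentials from introText above):
#   signonZU(VistA, 'fakedoc1', '1Doc!@#$')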
def initializeFileman(VistA,site_name,site_number):
# Initializes FileMan via the DINIT routine.
# The command needs a site name to change to and a local site number
# Script uses value of CMake variable TEST_VISTA_SETUP_SITE_NAME as the name
# and 6161 as the site number.
VistA.write('D ^DINIT')
VistA.wait('Initialize VA FileMan now?')
VistA.write('Yes')
VistA.wait('SITE NAME:')
VistA.write(site_name)
VistA.wait('SITE NUMBER')
VistA.write(site_number)
# It will also change the operating system file to match the local environment type
# found by the set up.
VistA.wait('Do you want to change the MUMPS OPERATING SYSTEM File?')
VistA.write('Yes')
VistA.wait('TYPE OF MUMPS SYSTEM YOU ARE USING')
if VistA.type=='cache':
VistA.write('CACHE')
else:
VistA.write('GT.M(UNIX)')
VistA.wait(PROMPT,60)
# Use the ZUSET routine to rename the correct ZU* for the system.
VistA.write('D ^ZUSET')
VistA.wait('Rename')
VistA.write('Yes')
def setupPrimaryHFSDir(VistA,hfs_dir):
    # Set up the primary HFS directory in the
    # Kernel System Parameters file.
    #
    # Passing "@" or an empty string resets the entry to the system temp directory.
if (hfs_dir=="@" or hfs_dir=='') : hfs_dir = gettempdir()
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('KERNEL SYSTEM PARAMETERS')
VistA.wait('EDIT WHICH FIELD')
VistA.write('PRIMARY HFS DIRECTORY')
VistA.wait('THEN EDIT FIELD')
VistA.write('')
VistA.wait('DOMAIN NAME')
# `1 is the notation to grab the entry with a number of 1
VistA.write('`1')
VistA.wait('PRIMARY HFS DIRECTORY')
VistA.write(os.path.normpath(hfs_dir))
# Multiwait to capture the possible outcomes:
# SURE YOU WANT TO DELETE: File has an entry and the @ will delete it
# DOMAIN NAME: Entry was an acceptable response
# PRIMARY HFS DIRECTORY: Response was not accepted, could be due to
# deleting an empty file entry
index = VistA.multiwait(['SURE YOU WANT TO DELETE','DOMAIN NAME','PRIMARY HFS DIRECTORY'])
if index == 0:
VistA.write('Y')
VistA.wait('DOMAIN NAME')
if index == 2:
VistA.write("")
VistA.wait("DOMAIN NAME")
VistA.write('')
VistA.wait('Select OPTION:')
VistA.write('')
def removeResourceUsageLogging(VistA):
# By default, prevent logging of Resource Usage
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('VOLUME SET')
VistA.wait('EDIT WHICH FIELD')
VistA.write("LINK ACCESS")
VistA.wait('THEN EDIT FIELD')
VistA.write('')
VistA.wait('VOLUME SET')
# `1 is the notation to grab the entry with a number of 1
VistA.write('`1')
VistA.wait('LINK ACCESS')
VistA.write("NO")
VistA.wait('VOLUME SET')
VistA.write('')
VistA.wait('Select OPTION:')
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('KERNEL SYSTEM PARAMETERS')
VistA.wait('EDIT WHICH FIELD')
VistA.write('LOG RESOURCE USAGE')
VistA.wait('THEN EDIT FIELD')
VistA.write('')
VistA.wait('DOMAIN NAME')
# `1 is the notation to grab the entry with a number of 1
VistA.write('`1')
VistA.wait('LOG RESOURCE USAGE')
VistA.write('NO')
VistA.wait('DOMAIN NAME')
VistA.write('')
VistA.wait('Select OPTION:')
VistA.write('')
def addMPILocalNumber(VistA):
VistA.wait(PROMPT)
VistA.write("W $$SITE^VASITE($$DT^XLFDT)")
VistA.wait(PROMPT)
localNum = re.search("\^([0-9]+)\W", VistA.lastconnection).groups()[0]
VistA.write("")
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('MASTER PATIENT INDEX (LOCAL')
VistA.wait('EDIT WHICH FIELD')
VistA.write('')
VistA.wait('MASTER PATIENT INDEX (LOCAL')
VistA.write('`1')
VistA.wait('SITE ID NUMBER')
VistA.write(localNum)
VistA.wait('LAST NUMBER')
VistA.write('^')
VistA.wait('MASTER PATIENT INDEX (LOCAL')
VistA.write('')
VistA.wait('Select OPTION:')
VistA.write('')
def setupIntroText(VistA, introTextSHA):
# Set up the introduction text for the VistA system
#
    # Normally, this is displayed on CPRS and other GUIs
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('KERNEL SYSTEM PARAMETERS')
VistA.wait('EDIT WHICH FIELD')
VistA.write('INTRO MESSAGE')
VistA.wait('THEN EDIT FIELD')
VistA.write('')
VistA.wait('DOMAIN NAME')
# `1 is the notation to grab the entry with a number of 1
VistA.write('`1')
index = VistA.multiwait(['EDIT Option','1>$'])
if index == 0:
VistA.write("D")
VistA.wait('Delete from line')
VistA.write("1")
VistA.wait('thru')
VistA.write("")
VistA.wait('OK TO REMOVE')
VistA.write("Y")
VistA.wait('ARE YOU SURE')
VistA.write("Y")
VistA.wait('EDIT Option')
VistA.write("A")
VistA.wait('Add lines')
VistA.write((introText % (introTextSHA, datetime.date.today() )) +'\r')
VistA.wait("EDIT Option")
VistA.write("")
VistA.wait("DOMAIN NAME")
VistA.write('')
VistA.wait('Select OPTION:')
VistA.write('')
def configureNULLDevice(VistA):
# Ensure that the null device is correctly configured by adding
# a $I for the correct platform rather than VMS and removing
# sign-on capabilities
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('DEVICE')
VistA.wait('EDIT WHICH FIELD')
VistA.write('$I\rSIGN-ON/SYSTEM DEVICE\r')
VistA.wait('NAME:')
VistA.write('NULL\r1')
VistA.wait('//')
# Path added is dependent on the platform that is being used.
if sys.platform=='win32':
VistA.write('//./nul\rNO\r')
else:
VistA.write('/dev/null\rNO\r')
VistA.wait("Select OPTION")
VistA.write("")
def configureConsoleDevice(VistA):
# Ensure that the console device is correctly configured by adding
# sign-on capabilities
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('DEVICE')
VistA.wait('EDIT WHICH FIELD')
VistA.write('$I\rSIGN-ON/SYSTEM DEVICE\r')
VistA.wait('NAME:')
VistA.write('/dev/tty')
VistA.wait('//')
if sys.platform == 'cygwin':
VistA.write('/dev/pty')
else:
VistA.write('')
VistA.wait('SYSTEM DEVICE')
VistA.write('Y')
index = VistA.multiwait(['SYSTEM DEVICE', 'DEVICE NAME'])
if index == 0:
VistA.write('^')
VistA.wait("Select DEVICE")
VistA.write('')
VistA.wait("Select OPTION")
VistA.write("")
def configureHFSDevice(VistA):
    # Ensure that the HFS device is correctly configured by setting its
    # OPEN PARAMETERS, ASK PARAMETERS, and SUBTYPE fields
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('DEVICE')
VistA.wait('EDIT WHICH FIELD')
VistA.write('OPEN PARAMETERS')
VistA.wait_re("then edit field")
VistA.write('ASK PARAMETERS')
VistA.wait_re("then edit field")
VistA.write('SUBTYPE')
VistA.wait_re("then edit field")
VistA.write('')
VistA.wait('NAME:')
VistA.write('HFS')
VistA.wait('OPEN PARAMETERS')
if VistA.type=='cache':
VistA.write('"NWS"')
else:
VistA.write('"nowrap:stream:newversion"')
VistA.wait("ASK PARAMETERS")
VistA.write("1")
VistA.wait("SUBTYPE")
VistA.write("P-OTHER")
index= VistA.multiwait(['CHOOSE', 'DEVICE NAME'])
if index == 0:
VistA.write('1')
VistA.wait("Select DEVICE")
VistA.write('')
VistA.wait("Select OPTION")
VistA.write("")
def setupVistADomain(VistA,site_name):
# Enter the site name into the DOMAIN file via FileMan
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('DOMAIN\r')
VistA.wait('Select DOMAIN NAME')
VistA.write(site_name)
# Multiwait for possible outcomes:
# Are you adding: Domain is new and will add it to the system
# NAME: Domain exists already
index = VistA.multiwait(["Are you adding","NAME"])
if index == 0:
VistA.write("Y")
else:
VistA.write("")
VistA.wait("FLAGS")
VistA.write('^\r\r')
VistA.wait(PROMPT,60)
# christen the domain via the XMUDCHR routine.
VistA.write('D CHRISTEN^XMUDCHR')
VistA.wait('Are you sure you want to change the name of this facility?')
VistA.write('Yes')
VistA.wait('Select DOMAIN NAME')
VistA.write(site_name)
VistA.wait('PARENT')
VistA.write('')
VistA.wait('TIME ZONE')
# Attempts to pull the timezone from the local machine via Python
# If entry is not accepted, will default to EST
VistA.write(time.strftime('%Z').replace(' Time',''))
index = VistA.multiwait([VistA.prompt,'TIME ZONE'])
if index==1:
VistA.write('EST')
VistA.wait(PROMPT,60)
    # Next, find the IEN of the new site name and add entries for the new
    # domain to the Kernel System Parameters and RPC Broker Site Parameters files
VistA.IEN('DOMAIN',site_name)
VistA.wait(PROMPT,60)
VistA.write('S $P(^XWB(8994.1,1,0),"^")=' + VistA.IENumber)
VistA.write('S $P(^XTV(8989.3,1,0),"^")=' + VistA.IENumber)
reindexFile(VistA, "8989.3")
reindexFile(VistA, "8994.1")
def reindexFile(VistA, fileNo):
# Then, re-index both files with the FileMan Utility.
startFileman(VistA)
VistA.write('UTILITY')
VistA.wait('UTILITY OPTION')
VistA.write('RE')
VistA.wait_re('MODIFY WHAT FILE')
VistA.write(fileNo)
VistA.wait("PARTICULAR INDEX")
VistA.write('NO')
VistA.wait("EXISTING")
VistA.write('Y')
VistA.wait("RE-CROSS-REFERENCE")
VistA.write('Y')
index = VistA.multiwait(['UTILITY OPTION', "Start Time"])
if index == 1:
VistA.write('')
VistA.wait('UTILITY OPTION')
VistA.write("")
VistA.wait('Select OPTION')
VistA.write("")
def setupBoxVolPair(VistA,volume_set,site_name,tcp_port):
# Query the instance for the Box-volume pair of the machine
VistA.getenv(volume_set)
# Rename the first Box-volume entry in the Taskman Site Parameters file
# to match what was queried above
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('14.7')
VistA.wait('ALL//')
VistA.write('BOX-VOLUME')
VistA.wait_re("Then edit field")
VistA.write('Manager Startup Delay')
VistA.wait_re("Then edit field")
VistA.write('')
VistA.wait('Select TASKMAN SITE PARAMETERS BOX-VOLUME PAIR:')
VistA.write('`1')
VistA.wait('PAIR')
VistA.write(VistA.boxvol)
VistA.wait('Manager Startup')
VistA.write('1')
VistA.wait('Select TASKMAN SITE PARAMETERS BOX-VOLUME PAIR:')
VistA.write('')
#time.sleep(5)
# Add the Box-volume pair to the RPC Broker parameters for the local domain
# Also adds the information for the new style RPC Broker Listener on the supplied TCP port
# if a Cache system, will start a task to start the Listener, and put the
# listener under the Listener Starter's control
# if a GT.M system, will create the information but not start it.
VistA.wait('Select OPTION')
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('8994.1')
VistA.wait('EDIT WHICH FIELD')
VistA.write('LISTENER')
VistA.wait("SUB-FIELD")
VistA.write("")
VistA.wait("THEN EDIT FIELD")
VistA.write("")
VistA.wait('Select RPC BROKER SITE PARAMETERS DOMAIN NAME')
VistA.write(site_name)
VistA.wait("OK")
VistA.write("Y")
VistA.wait("BOX-VOLUME PAIR")
VistA.write(VistA.boxvol)
VistA.wait("OK")
VistA.write("Y")
index = VistA.multiwait(["BOX-VOLUME","Select PORT"])
if index == 0:
VistA.write("")
VistA.wait("Select PORT")
VistA.write(tcp_port + '\rY')
VistA.write('1\r1\r1\r')
VistA.wait("Select OPTION")
VistA.write("")
def setupVolumeSet(VistA,site_name,volume_set,namespace=""):
# Rename first entry in the Volume Set file to match
# the CMake value of TEST_VISTA_SETUP_VOLUME_SET.
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('14.5\r')
VistA.wait('Select VOLUME SET')
VistA.write('`1')
VistA.wait('VOLUME SET:')
VistA.write(volume_set+ '\r\r\r\r\r')
VistA.wait('TASKMAN FILES UCI')
if VistA.type=='cache':
VistA.write(namespace+'\r\r\r\r\r\r')
else:
VistA.write(volume_set +'\r\r\r\r\r\r')
# Add the Volume set information to the Kernel System Parameters File
VistA.wait('Select OPTION')
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('KERNEL SYSTEM PARAMETERS\rVOLUME SET\r\r')
VistA.wait('Select KERNEL SYSTEM PARAMETERS DOMAIN NAME:')
VistA.write(site_name + '\r')
VistA.wait('VOLUME SET')
VistA.write(volume_set)
index = VistA.multiwait(['Are you adding','VOLUME SET'])
if index==0:
VistA.write('Y')
elif index==1:
VistA.write('')
# Set up basic information about sign-on to the domain via the Volume Set
VistA.wait('MAX SIGNON ALLOWED')
VistA.write('500')
VistA.wait('LOG SYSTEM RT')
VistA.write('N')
VistA.wait('VOLUME SET')
VistA.write('\r\r')
def scheduleOption(VistA,optionName, scheduleValue, scheduleTime="0030"):
    # Schedule a Kernel option to run via TaskMan. On Cache systems this is
    # used, for example, to start the XWB Listener Starter at TaskMan startup.
VistA.wait(PROMPT)
VistA.write('S DUZ=1 D ^XUP')
VistA.wait('Select OPTION NAME')
VistA.write('EVE\r1')
VistA.wait('Systems Manager Menu')
VistA.write('Taskman Management')
VistA.wait('Select Taskman Management')
VistA.write('SCHED')
VistA.wait('reschedule:')
VistA.write(optionName + '\rY')
VistA.wait('COMMAND:')
if scheduleValue == 'STARTUP':
VistA.write('\r^SPECIAL QUEUEING\rSTARTUP')
else:
VistA.write('^RESCHEDULING FREQUENCY\r%s' % scheduleValue)
VistA.write('^QUEUED TO RUN AT WHAT TIME\rT+1@%s' % scheduleTime)
VistA.write('^')
VistA.write('S\rE')
VistA.wait('reschedule:')
VistA.write('')
VistA.wait('Select Taskman Management')
VistA.write('')
VistA.wait('Systems Manager Menu')
VistA.write('')
VistA.wait('Do you really want to halt')
VistA.write('Y')
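# Illustrative call (a sketch; the option name follows the comment above and
# is an assumption about typical usage):
#   scheduleOption(VistA, 'XWB LISTENER STARTER', 'STARTUP')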
def startTaskMan(VistA):
# Starts the TaskMan instance via cold boot entry point ^ZTMB
VistA.wait(PROMPT)
VistA.write('DO ^ZTMB')
def addSystemManager(VistA):
# Add the super user System Manager via the User Management Menu
# Set basic information about the user: Name,SSN, Sex ....
VistA.wait(PROMPT,60)
VistA.write('S DUZ=1 D ^XUP')
VistA.wait('Select OPTION NAME')
VistA.write('EVE\r1')
VistA.wait('Systems Manager Menu')
VistA.write('USER MANAGEMENT')
VistA.wait('User Management')
VistA.write('ADD')
VistA.wait('Enter NEW PERSON')
VistA.write('MANAGER,SYSTEM')
index = VistA.multiwait(['Are you adding','Want to reactivate'])
if index == 0:
VistA.write('Y')
VistA.wait('INITIAL:')
VistA.write('SM')
VistA.wait('SSN:')
VistA.write('000000001')
VistA.wait('SEX:')
VistA.write('M')
VistA.wait('NPI')
VistA.write('')
VistA.wait('NAME COMPONENTS')
# A ScreenMan form opens at this point, and the following information is set:
# Primary Menu: EVE
# Secondary Menu: OR PARAM COORDINATOR MENU, TIU IRM MAINTENANCE MENU,
# XPAR MENU TOOLS,DG REGISTER PATIENT
# Access Code: SM1234
# Verify Code: SM1234!!
VistA.write('\r\r\r\r\r^PRIMARY MENU OPTION\rEVE\r1\r^Want to edit ACCESS CODE\rY\rSM1234\rSM1234\r^Want to edit VERIFY CODE\rY\rSM1234!!\rSM1234!!\r^SECONDARY MENU OPTIONS\rOR PARAM COORDINATOR MENU\rY\r\r\r\rTIU IRM MAINTENANCE MENU\rY\r\r\r\rXPAR MENU TOOLS\rY\r\r\r\rDG REGISTER PATIENT\rY\r\r\r\r^MULTIPLE SIGN-ON\r1\r1\r99\r^SERVICE/SECTION\rIRM\rS\rE')
# Exiting the ScreenMan form, Allocate Security Keys
# For Kernel Access: XUMGR, XUPROG, XUPROGMODE
# and Scheduling Access: SD SUPERVISOR, SDWL PARAMETER, SDWL MENU
VistA.wait('User Account Access Letter')
VistA.write('NO')
VistA.wait('wish to allocate security keys?')
VistA.write('Y')
VistA.wait('Allocate key')
VistA.write('XUMGR')
VistA.wait('Another key')
VistA.write('XUPROG\r1')
VistA.wait('Another key')
VistA.write('XUPROGMODE')
VistA.wait('Another key')
VistA.write('SD SUPERVISOR')
VistA.wait('Another key')
VistA.write('SDWL PARAMETER')
VistA.wait('Another key')
VistA.write('SDWL MENU')
VistA.wait('Another key')
VistA.write('')
VistA.wait('Another holder')
VistA.write('')
VistA.wait('YES//')
VistA.write('')
VistA.wait('mail groups?')
VistA.write('\r')
VistA.wait('Systems Manager Menu')
VistA.write('\rY')
VistA.wait(PROMPT,60)
# Get the record number of the user that was just created
VistA.IEN('NEW PERSON','MANAGER,SYSTEM')
VistA.wait(PROMPT,60)
# Set a piece of the New Person global corresponding to the MANAGER,SYSTEM
# to "@" to tell FileMan that user is a programmer
VistA.write('S DUZ=' + VistA.IENumber + ' S $P(^VA(200,DUZ,0),"^",4)="@"')
def addInstitution(VistA,inst_name,station_number):
# In FileMan, add a entry to the Institution file
# Pass in the name and number as arguments to allow for
# multiple additions.
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE:')
VistA.write('4')
VistA.wait('EDIT WHICH FIELD')
VistA.write('STATION NUMBER')
VistA.wait('THEN EDIT FIELD')
VistA.write('')
VistA.wait('Select INSTITUTION NAME:')
VistA.write(inst_name)
index = VistA.multiwait(['Are you adding','STATION NUMBER'])
if index==0:
VistA.write('Y')
VistA.wait('STATION NUMBER:')
VistA.write(station_number)
VistA.wait('Select INSTITUTION NAME:')
VistA.write('')
VistA.wait('Select OPTION:')
VistA.write('')
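# Illustrative call (a sketch; the institution name is a placeholder, and 6161
# matches the default site number mentioned in initializeFileman above):
#   addInstitution(VistA, 'DEMO HOSPITAL', '6161')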
def addDivision(VistA,div_name, facility_number,station_number):
# Adds a division to the VistA instance via FileMan,
# Each Division needs a name and a facility number. The station number
# points back to the recently created Institution
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE:')
VistA.write('40.8')
VistA.wait('EDIT WHICH FIELD')
VistA.write('FACILITY NUMBER')
VistA.wait('THEN EDIT FIELD')
VistA.write('INSTITUTION FILE POINTER')
VistA.wait('THEN EDIT FIELD')
VistA.write('')
VistA.wait('DIVISION NAME')
VistA.write(div_name)
VistA.wait('Are you adding')
VistA.write('Y')
VistA.wait('MEDICAL CENTER DIVISION NUM:')
VistA.write('')
VistA.wait('FACILITY NUMBER')
VistA.write(facility_number)
VistA.write('')
VistA.wait('INSTITUTION FILE POINTER')
VistA.write(station_number)
VistA.wait('DIVISION NAME')
VistA.write('')
VistA.wait('Select OPTION')
VistA.write('')
def setupWard(VistA, division, institution, ward_name, clinic_name, order, specialty='Cardiac Surgery', bed_array = [["1-A","testBed1"]] ):
# Set up an inpatient ward for patient lodging and inpatient medication prescriptions,
# taken from the ADTActions script of Registration Roll-And-Scroll testing
VistA.wait(PROMPT)
VistA.write('S DUZ=1 D ^XUP')
VistA.wait('OPTION NAME:')
# DEFINE THE WARD
VistA.write('WARD DEFINITION ENTRY')
VistA.wait('NAME:')
VistA.write(ward_name)
VistA.wait('No//')
VistA.write('YES')
VistA.wait('POINTER:')
VistA.write(clinic_name)
VistA.wait('ORDER:')
VistA.write(order)
VistA.wait(ward_name)
VistA.write('')
VistA.wait('WRISTBAND:')
VistA.write('YES')
VistA.wait('DIVISION:')
VistA.write(division)
VistA.wait('INSTITUTION:')
VistA.write(institution)
VistA.wait('6100')
VistA.write('')
VistA.wait('BEDSECTION:')
VistA.write('bedselect')
VistA.wait('SPECIALTY:')
VistA.write(specialty)
VistA.wait('SERVICE:')
VistA.write('S')
VistA.wait('LOCATION:')
VistA.write('north')
VistA.wait('WARD:')
VistA.write('1')
VistA.wait('DATE:')
VistA.write('T')
VistA.wait('No//')
VistA.write('YES')
VistA.wait('BEDS:')
VistA.write('20')
VistA.wait('ILL:')
VistA.write('1')
VistA.wait('SYNONYM:')
VistA.write('')
VistA.wait('G&L ORDER:')
VistA.write('')
VistA.wait('TOTALS:')
VistA.write('')
VistA.wait('NAME:')
VistA.write('')
addBedsToWard(VistA, ward_name, bed_array)
def addBedsToWard(VistA, ward_name, bed_array):
VistA.wait(PROMPT)
VistA.write('S DUZ=1 D ^XUP')
# SETUP BEDS
VistA.wait('OPTION NAME:')
VistA.write('ADT SYSTEM')
VistA.wait('Option:')
VistA.write('ADD')
for sitem in bed_array:
VistA.wait('NAME:')
VistA.write(sitem[0])
VistA.wait('No//')
VistA.write('yes')
VistA.wait('NAME:')
VistA.write('')
VistA.wait('DESCRIPTION:')
VistA.write(sitem[1])
VistA.wait('No//')
VistA.write('yes')
VistA.wait('ASSIGN:')
VistA.write(ward_name)
VistA.wait('No//')
VistA.write('yes')
VistA.wait('ASSIGN:')
VistA.write('')
VistA.wait('NAME:')
VistA.write('')
VistA.wait('Option:')
VistA.write('')
VistA.wait('YES//')
VistA.write('')
def modifyDVBParams(VistA):
VistA.wait(PROMPT)
VistA.write('D ^XUP')
# ADD ENTRY TO FILE 395 DVB PARAMETERS
VistA.wait('NAME:')
VistA.write('ZZFILEMAN')
VistA.wait('OPTION:')
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('395')
VistA.wait('EDIT WHICH FIELD')
VistA.write('ALL')
VistA.wait('Select DVB PARAMETERS ONE:')
VistA.write('1')
VistA.wait('No//')
VistA.write('yes')
VistA.wait('SCREENS?:')
VistA.write('NO')
VistA.wait('DAY:')
VistA.write('^NEW IDCU INTERFACE')
VistA.wait('INTERFACE:')
VistA.write('0')
VistA.wait('Difference:')
VistA.write('')
VistA.wait('DIVISION:')
VistA.write('YES')
VistA.wait('GROUP:')
VistA.write('^')
VistA.wait('Select DVB PARAMETERS ONE:')
VistA.write('')
VistA.wait('OPTION:')
VistA.write('')
def addtoMASParameter(VistA, institution, medical_center):
# ADD ENTRY TO MAS PARAMETER
VistA.wait(PROMPT)
VistA.write('D ^XUP')
VistA.write('1')
VistA.wait('Select OPTION NAME')
VistA.write('ADT SYSTEM')
VistA.wait('ADT System Definition Menu')
VistA.write('MAS Parameter Entry')
VistA.wait('Enter 1-3 to EDIT, or RETURN to QUIT')
VistA.write('1')
VistA.wait('MEDICAL CENTER NAME')
VistA.write(medical_center)
VistA.wait('AFFILIATED')
VistA.write('NO')
VistA.wait('MULTIDIVISION MED CENTER')
VistA.write('NO')
VistA.wait('NURSING HOME WARDS')
VistA.write('')
VistA.wait('DOMICILIARY WARDS')
VistA.write('')
VistA.wait('SYSTEM TIMEOUT')
VistA.write('30')
VistA.wait('AUTOMATIC PTF MESSAGES')
VistA.write('')
VistA.wait('PRINT PTF MESSAGES')
VistA.write('')
VistA.wait('DEFAULT PTF MESSAGE PRINTER')
VistA.write('')
VistA.wait('SHOW STATUS SCREEN')
VistA.write('YES')
VistA.wait('USE HIGH INTENSITY ON SCREENS')
VistA.write('^^')
VistA.wait('Enter 1-3 to EDIT, or RETURN to QUIT')
VistA.write('2')
VistA.wait('DAYS TO UPDATE MEDICAID')
VistA.write('365')
VistA.wait('DAYS TO MAINTAIN G&L CORR')
VistA.write('30')
VistA.wait('TIME FOR LATE DISPOSITION')
VistA.write('30')
VistA.wait('SUPPLEMENTAL 10/10')
VistA.write('0')
VistA.wait(':')
VistA.write('^ASK DEVICE IN REGISTRATION')
VistA.wait('ASK DEVICE IN REGISTRATION')
VistA.write('YES')
VistA.wait('DAYS TO MAINTAIN SENSITIVITY')
VistA.write('30')
VistA.wait(':')
VistA.write('^^')
VistA.wait('Enter 1-3 to EDIT, or RETURN to QUIT')
VistA.write('3')
VistA.wait(':')
VistA.write('^INSTITUTION FILE POINTER')
VistA.wait('INSTITUTION FILE POINTER')
VistA.write(institution)
VistA.wait(':')
VistA.write('^^')
VistA.wait('Enter 1-3 to EDIT, or RETURN to QUIT')
VistA.write('')
VistA.wait('ADT System Definition Menu')
VistA.write('')
VistA.wait('YES//')
VistA.write('')
VistA.wait(PROMPT)
VistA.write('')
def setupNursLocation(VistA, unit_name):
# Set up a NURS LOCATION entity so that BCMA can connect to the system.
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE:')
VistA.write('NURS LOCATION')
VistA.wait('EDIT WHICH FIELD')
VistA.write('')
VistA.wait('NURSING UNIT NAME')
VistA.write(unit_name)
VistA.wait('Are you adding')
VistA.write('Y')
VistA.wait('Are you adding')
VistA.write('Y')
VistA.wait('PRODUCT LINE')
VistA.write('NURSING')
VistA.wait('CARE SETTING')
VistA.write("INPATIENT")
VistA.wait('UNIT TYPE')
VistA.write("CLINICAL")
VistA.wait('INPATIENT DSS DEPARTMENT')
VistA.write('')
VistA.wait('PATIENT CARE FLAG')
VistA.write('A')
VistA.wait('INACTIVE FLAG')
VistA.write('A')
VistA.wait('MAS WARD')
VistA.write('')
VistA.wait('AMIS BED SECTION')
VistA.write('')
VistA.wait('PROFESSIONAL PERCENTAGE')
VistA.write('')
VistA.wait('UNIT EXPERIENCE')
VistA.write('')
VistA.wait('POC DATA ENTRY PERSONNEL')
VistA.write('')
VistA.wait('POC DATA APPROVAL PERSONNEL')
VistA.write('')
VistA.wait('SERVICE DATE')
VistA.write('')
VistA.wait('SERVICE DATE')
VistA.write('')
VistA.wait('STATUS')
VistA.write('')
VistA.wait('NURSING UNIT NAME')
VistA.write('')
VistA.wait('Select OPTION')
VistA.write('')
def setupStrepTest(VistA):
# The Sikuli test for CPRS orders a Streptozyme test for the patient
# This information ensures the test can be ordered at the VistA Health Care
# facility.
# Add a NUMERIC IDENTIFIER to the Chemistry ACCESSION Area
# This is necessary to add a laboratory test to an Accession
# area at an Institution.
startFileman(VistA)
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('ACCESSION\r1')
VistA.wait('EDIT WHICH FIELD')
VistA.write('.4\r')
VistA.wait('Select ACCESSION AREA')
VistA.write('CHEMISTRY')
VistA.wait('NUMERIC IDENTIFIER')
VistA.write('CH\r')
# Change the STREPTOZYME test to be accessioned through the Chemistry
# area at the Vista Health Care institution
VistA.wait('OPTION')
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('LABORATORY TEST')
VistA.wait('EDIT WHICH FIELD')
VistA.write('ACCESSION AREA\r\r')
VistA.wait('Select LABORATORY TEST NAME')
VistA.write('STREPTOZYME')
VistA.wait('Select INSTITUTION')
VistA.write('VISTA HEALTH CARE')
VistA.wait('ACCESSION AREA')
VistA.write('CHEMISTRY')
VistA.wait('Select LABORATORY TEST NAME')
VistA.write('')
# Change the Package Prefix of the ONCE schedule to be
# used by the Laboratory
VistA.wait('OPTION')
VistA.write('1')
VistA.wait_re('INPUT TO WHAT FILE')
VistA.write('ADMINISTRATION SCHEDULE')
VistA.wait('EDIT WHICH FIELD')
VistA.write('PACKAGE PREFIX\r')
VistA.wait('Select ADMINISTRATION SCHEDULE NAME')
VistA.write('ONCE')
VistA.wait('P')
VistA.write('LR')
VistA.wait('ADMINISTRATION SCHEDULE')
VistA.write('')
VistA.wait('Select OPTION')
VistA.write('')
# Set up the Quick Order entry for the Strep Throat test.
# Defaults to a one-time swab collection.
VistA.wait(PROMPT)
VistA.write('K D ^XUP')
VistA.wait("Access Code")
VistA.write("SM1234")
index = VistA.multiwait(['Select OPTION NAME','TERMINAL TYPE NAME'])
if index ==1:
VistA.write("C-VT220")
VistA.wait("Select OPTION NAME")
VistA.write("Systems Manager Menu")
VistA.wait('Systems Manager Menu')
VistA.write('CPRS Configuration')
VistA.wait('CPRS Configuration')
VistA.write('MM')
VistA.wait('Order Menu Management')
VistA.write('QO')
VistA.wait('Select QUICK ORDER NAME')
VistA.write('LRZ STREP TEST')
VistA.wait('Are you adding')
VistA.write('Y')
VistA.wait('TYPE OF QUICK ORDER')
VistA.write('LAB\r')
VistA.wait('DISPLAY TEXT')
VistA.write('STREP TEST')
VistA.wait('VERIFY ORDER')
VistA.write('Y')
VistA.wait('DESCRIPTION')
VistA.write('N\r')
VistA.wait('Lab Test')
VistA.write('STREP\r2')
VistA.wait('Collected By')
VistA.write('SP')
VistA.wait('Collection Sample')
VistA.write('SWAB\r')
VistA.wait('Collection Date/Time')
VistA.write('TODAY\r')
VistA.wait('How often')
VistA.write('ONCE')
VistA.wait('PLACE//')
VistA.write('\r\r')
VistA.wait('Option')
VistA.write('ST')
VistA.wait('Select ORDER SET NAME')
VistA.write('STREP TEST')
VistA.wait('Are you adding')
VistA.write('Y')
VistA.wait('Do you wish to copy')
VistA.write('No\r')
VistA.wait('DISPLAY TEXT')
VistA.write('Strep Test\r\r\r')
VistA.wait('COMPONENT SEQUENCE')
VistA.write('10\r')
VistA.wait('ITEM:')
VistA.write('LRZ STREP TEST\r\r\r\r') # Return to EVE menu
VistA.wait("Systems Manager Menu")
VistA.write("")
VistA.wait("Do you really")
VistA.write("Y")
def registerVitalsCPRS(VistA):
# Register the DLL versions for Vitals and the executable version for
# CPRS through the XPAR Menu. This information should match the versions
# that will be used during testing.
# Files can be downloaded: http://www.osehra.org/document/guis-used-automatic-functional-testing
VistA.wait(PROMPT,60)
VistA.write('S GMVDLL=\"5.0.38.3\"')
VistA.wait(PROMPT,60)
VistA.write('D EN^XPAR(\"SYS\",\"GMV DLL VERSION\",GMVDLL,1)')
VistA.wait(PROMPT,60)
VistA.write('S GMVGUI=\"VITALSMANAGER.EXE:5.0.38.3\"')
VistA.wait(PROMPT,60)
VistA.write('D EN^XPAR(\"SYS\",\"GMV GUI VERSION\",GMVGUI,1)')
VistA.wait(PROMPT,60)
VistA.write('S GMVGUI=\"VITALS.EXE:5.0.38.3\"')
VistA.wait(PROMPT,60)
VistA.write('D EN^XPAR(\"SYS\",\"GMV GUI VERSION\",GMVGUI,1)')
def removeCAPRILogin(VistA):
VistA.wait(PROMPT,60)
VistA.write('D EN^XPAR(\"SYS\",\"XU522\",1,\"Y\")')
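# registerVitalsCPRS and removeCAPRILogin above call EN^XPAR directly to set
# system-level parameters. A thin wrapper such as this sketch would avoid
# repeating the quoting boilerplate; setSystemParameter is a hypothetical
# helper added for illustration (it assumes the EN^XPAR(entity, parameter,
# instance, value) interface used above) and is not called by this script.
def setSystemParameter(VistA, parameter, instance, value):
    # Wait for the programmer prompt, then set one SYS-level parameter.
    # Numeric or M-variable arguments would need different quoting.
    VistA.wait(PROMPT, 60)
    VistA.write('D EN^XPAR("SYS","%s","%s","%s")' % (parameter, instance, value))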
def addDoctor(VistA,name,init,SSN,sex,AC,VC1):
# Adds a Doctor user into the system via the User Management Menu as
# the System Manager.
# Needs:
# Doctor Name, Doctor Initials, SSN, Sex, Access Code, Verify Code
VistA.write('USER MANAGEMENT')
VistA.wait('User Management')
VistA.write('ADD')
VistA.wait('name')
VistA.write(name+'\rY')
VistA.wait('INITIAL:')
VistA.write(init)
VistA.wait('SSN:')
VistA.write(SSN)
VistA.wait('SEX:')
VistA.write(sex)
VistA.wait('NPI')
VistA.write('')
VistA.wait('NAME COMPONENTS')
# A ScreenMan form opens at this point, and the following information is set:
# Primary Menu: XUCORE
# Secondary Menu: PSB GUI CONTEXT, GMPL MGT MENU, OR CPRS GUI CHART, GMV V/M GUI,
# Access Code: <passed as argument>
# Verify Code: <passed as argument>
# No restriction on Patient Selection
# Allowed multiple sign-ons
# Allopathic and Osteopathic Physicians as the Person Class
# Core CPRS Tab access
VistA.write('\r\r\r\r\r^PRIMARY MENU OPTION\rXUCOR\r^SECONDARY MENU OPTIONS\rPSB GUI CONTEXT\rY\r\r\r\rGMPL MGT MENU\rY\r\r\r\rOR CPRS GUI CHART\rY\r\r\r\rGMV V/M GUI\rY\r\r\r\r^Want to edit ACCESS CODE\rY\r'+AC+'\r'+AC+'\r^Want to edit VERIFY CODE\rY\r'+VC1+'\r'+VC1+'\rVISTA HEALTH CARE\rY\r\r\r\r\r^SERVICE/SECTION\rIRM\r^Language\r\r767\rY\rY\rT-1\r\r^RESTRICT PATIENT SELECTION\r0\r\rCOR\rY\rT-1\r\r^MULTIPLE SIGN-ON\r1\r1\r99\r^\rS\rE')
# Exiting the ScreenMan form, Allocate Security Keys
# PROVIDER, GMV MANAGER, LRLAB, LRVERIFY, ORES, SD SUPERVISOR,
# SDWL PARAMETER, SDWL MENU, PSB MANAGER
VistA.wait('User Account Access Letter')
VistA.write('NO')
VistA.wait('wish to allocate security keys?')
VistA.write('Y')
VistA.wait('Allocate key')
VistA.write('PROVIDER\r1')
VistA.wait('Another key')
VistA.write('GMV MANAGER')
VistA.wait('Another key')
VistA.write('LRLAB')
VistA.wait('Another key')
VistA.write('LRVERIFY')
VistA.wait('Another key')
VistA.write('ORES')
VistA.wait('Another key')
VistA.write('SD SUPERVISOR')
VistA.wait('Another key')
VistA.write('SDWL PARAMETER')
VistA.wait('Another key')
VistA.write('SDWL MENU')
VistA.wait('Another key')
VistA.write('PSB MANAGER')
VistA.wait('Another key')
VistA.write('')
VistA.wait('Another holder')
VistA.write('')
VistA.wait('Do you wish to proceed')
VistA.write('Yes')
VistA.wait('add this user to mail groups')
VistA.write('NO')
VistA.wait("User Management")
VistA.write("")
def addNurse(VistA,name,init,SSN,sex,AC,VC1):
# Adds a Nurse user into the system via the User Management Menu as
# the System Manager.
# Needs:
# Nurse Name, Nurse Initials, SSN, Sex, Access Code, Verify Code
VistA.wait("Systems Manager Menu")
VistA.write("User Management")
VistA.wait('User Management')
VistA.write('ADD')
VistA.wait('name')
VistA.write(name+'\rY')
VistA.wait('INITIAL:')
VistA.write(init)
VistA.wait('SSN:')
VistA.write(SSN)
VistA.wait('SEX:')
VistA.write(sex)
VistA.wait('NPI')
VistA.write('')
VistA.wait('NAME COMPONENTS')
# A ScreenMan form opens at this point, and the following information is set:
# Primary Menu: XUCORE
# Secondary Menu: PSB GUI CONTEXT, GMPL MGT MENU, OR CPRS GUI CHART, GMV V/M GUI,
# Access Code: <passed as argument>
# Verify Code: <passed as argument>
# No restriction on Patient Selection
# Allowed multiple sign-ons
# Nursing Service Provider as the Person Class
# Core CPRS Tab access
VistA.write('\r\r\r\r\r^PRIMARY MENU OPTION\rXUCOR\r^SECONDARY MENU OPTIONS\rPSB GUI CONTEXT\rY\r\r\r\rGMPL MGT MENU\rY\r\r\r\rOR CPRS GUI CHART\rY\r\r\r\rGMV V/M GUI\rY\r\r\r\r^Want to edit ACCESS CODE\rY\r'+AC+'\r'+AC+'\r^Want to edit VERIFY CODE\rY\r'+VC1+'\r'+VC1+'\rVISTA HEALTH CARE\rY\r\r\r\r\r^SERVICE/SECTION\rIRM\r^Language\r\r289\rY\rY\rT-1\r\r^RESTRICT PATIENT SELECTION\r0\r\rCOR\rY\rT-1\r\r^MULTIPLE SIGN-ON\r1\r1\r99\r^\rS\rE')
# Exiting the ScreenMan form, Allocate Security Keys
# PSB MANAGER, PROVIDER, ORELSE
VistA.wait('User Account Access Letter')
VistA.write('NO')
VistA.wait('wish to allocate security keys?')
VistA.write('Y')
VistA.wait('Allocate key')
VistA.write('PSB MANAGER')
VistA.wait('Another key')
VistA.write('PROVIDER\r1')
VistA.wait('Another key')
VistA.write('ORELSE\r')
VistA.wait('Another holder')
VistA.write('')
VistA.wait('Do you wish to proceed')
VistA.write('Yes')
VistA.wait('add this user to mail groups')
VistA.write('NO')
VistA.wait("User Management")
VistA.write("")
def addClerk(VistA,name,init,SSN,sex,AC,VC1):
# Adds a Clerk user into the system via the User Management Menu as
# the System Manager.
# Needs:
# Clerk Name, Clerk Initials, SSN, Sex, Access Code, Verify Code
VistA.wait("Systems Manager Menu")
VistA.write("User Management")
VistA.wait('User Management')
VistA.write('ADD')
VistA.wait('name')
VistA.write(name+'\rY')
VistA.wait('INITIAL:')
VistA.write(init)
VistA.wait('SSN:')
VistA.write(SSN)
VistA.wait('SEX:')
VistA.write(sex)
VistA.wait('NPI')
VistA.write('')
VistA.wait('NAME COMPONENTS')
# A ScreenMan form opens at this point, and the following information is set:
# Primary Menu: XUCORE
# Secondary Menus: GMPL DATA ENTRY, OR CPRS GUI CHART, GMV V/M GUI
# Access Code: <passed as argument>
# Verify Code: <passed as argument>
# No restriction on Patient Selection
# Allowed multiple sign-ons
# Core CPRS Tab access
VistA.write('\r\r\r\r\r^PRIMARY MENU OPTION\rXUCOR\r^SECONDARY MENU OPTIONS\rGMPL DATA ENTRY\rY\r\r\r\rOR CPRS GUI CHART\rY\r\r\r\rGMV V/M GUI\rY\r\r\r\r^Want to edit ACCESS CODE\rY\r'+AC+'\r'+AC+'\r^Want to edit VERIFY CODE\rY\r'+VC1+'\r'+VC1+'\rVISTA HEALTH CARE\rY\r\r\r\r\r^SERVICE/SECTION\rIRM\r^RESTRICT PATIENT SELECTION\r0\r\rCOR\rY\rT-1\r\r^MULTIPLE SIGN-ON\r1\r1\r99\r^\rS\rE')
# Exiting the ScreenMan form, Allocate Security Key
# ORELSE
VistA.wait('User Account Access Letter')
VistA.write('NO')
VistA.wait('wish to allocate security keys?')
VistA.write('Y')
VistA.wait('Allocate key')
VistA.write('ORELSE')
VistA.wait('Another key')
VistA.write('')
VistA.wait('Another holder')
VistA.write('')
VistA.wait('Do you wish to proceed')
VistA.write('Yes')
VistA.wait('add this user to mail groups')
VistA.write('NO')
VistA.wait("User Management")
VistA.write("")
def createOrderMenu(VistA):
# Create the Quick Order Menu to have the LRZ Strep Test as a selectable option while
# not removing the old entries.
VistA.wait('Systems Manager Menu')
VistA.write('CPRS Configuration') # We can jump straight to the CPRS (Clin Coord) menu
VistA.wait('CPRS Configuration')
VistA.write('MM') # Order Menu Management
VistA.wait('Order Menu Management')
VistA.write('MN') # Enter/edit order menus
VistA.wait('ORDER MENU:')
VistA.write('ORZ GEN MED WRITE ORDERS LIST') # New menu name
VistA.wait('Are you adding')
VistA.write('Y')
VistA.wait('Do you wish to copy an existing menu')
VistA.write('N')
VistA.wait('DISPLAY TEXT')
VistA.write('') # Ignored by GUI
VistA.wait('Edit') # DESCRIPTION field
VistA.write('N')
#VistA.write('General Medicine Write Orders list') # Menu description
#VistA.wait('2')
#VistA.write('') # End of DESCRIPTION
#VistA.wait('EDIT') # Editor options
#VistA.write('') # We are done with the DESCRIPTION
VistA.wait('COLUMN WIDTH')
VistA.write('80') # Default to 80 characters
VistA.wait('MNEMONIC WIDTH')
VistA.write('') # Ignored by GUI
VistA.wait('PATH SWITCH')
VistA.write('') # Ignored by GUI
VistA.wait('ENTRY ACTION')
VistA.write('') # Shown because we have programmer access - Ignore this field
VistA.wait('EXIT ACTION')
VistA.write('') # Shown because we have programmer access - Ignore this field
# Begin ScreenMan form
VistA.wait('Action')
VistA.write('Add')
VistA.wait('Add')
VistA.write('Menu Items') # Add Menu Items to this Order Menu
# Add items to menu - repeat for each menu item
# Begin 'Add New Orders' menu
VistA.wait('ITEM')
VistA.write('OR ADD MENU CLINICIAN')
VistA.wait('ROW')
VistA.write('1')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Add New Orders'
# Begin 'Allergies' package menu
VistA.wait('ITEM')
VistA.write('GMRAOR ALLERGY ENTER/EDIT')
VistA.wait('ROW')
VistA.write('2')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Allergies'
# Begin 'Diet' package menu
VistA.wait('ITEM')
VistA.write('FHW1')
VistA.wait('ROW')
VistA.write('3')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Diet'
# Begin 'Meds, Inpatient' package menu
VistA.wait('ITEM')
VistA.write('PSJ OR PAT OE')
VistA.wait('ROW')
VistA.write('4')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Meds, Inpatient'
# Begin 'Meds, Non-VA' package menu
VistA.wait('ITEM')
VistA.write('PSH OERR')
VistA.wait('ROW')
VistA.write('5')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Meds, Non-VA'
# Begin 'Meds, Outpatient' package menu
VistA.wait('ITEM')
VistA.write('PSO OERR')
VistA.wait('ROW')
VistA.write('6')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Meds, Outpatient'
# Begin 'IV Fluids' package menu
VistA.wait('ITEM')
VistA.write('PSJI OR PAT FLUID OE')
VistA.wait('ROW')
VistA.write('7')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'IV Fluids'
# Begin 'Lab Tests' package menu
VistA.wait('ITEM')
VistA.write('LR OTHER LAB TESTS')
VistA.wait('ROW')
VistA.write('8')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Lab Tests'
# Begin 'Imaging' package menu
VistA.wait('ITEM')
VistA.write('RA OERR EXAM')
VistA.wait('ROW')
VistA.write('9')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Imaging'
# Begin 'Consult' package menu
VistA.wait('ITEM')
VistA.write('GMRCOR CONSULT')
VistA.wait('ROW')
VistA.write('10')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Consult'
# Begin 'Procedure' package menu
VistA.wait('ITEM')
VistA.write('GMRCOR REQUEST')
VistA.wait('ROW')
VistA.write('11')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Procedure'
# Begin 'Vitals' package menu
VistA.wait('ITEM')
VistA.write('GMRVOR')
VistA.wait('CHOOSE') # There is more than one GMRVOR* menu
VistA.write('1') # GMRVOR is the entire menu name and is the first one
VistA.wait('ROW')
VistA.write('12')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Vitals'
# Begin 'Text Only Order' package menu
VistA.wait('ITEM')
VistA.write('OR GXTEXT WORD PROCESSING ORDER')
VistA.wait('ROW')
VistA.write('13')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'Text Only Order'
# Begin 'STREP TEST' quick order menu
VistA.wait('ITEM')
VistA.write('LRZ STREP TEST')
VistA.wait('ROW')
VistA.write('14')
VistA.wait('COLUMN')
VistA.write('1')
VistA.wait('DISPLAY TEXT')
VistA.write('')
VistA.wait('MNEMONIC')
VistA.write('')
# End 'STREP TEST'
VistA.wait('ITEM')
VistA.write('') # Done adding menus
VistA.wait('Action')
VistA.write('Quit') # Done editing this menu
VistA.wait('Order Menu Management') # Need to get to CPRS Manager Menu
VistA.write('General Parameter Tools')
VistA.wait('General Parameter Tools') # The System Manager has this as a secondary menu (can jump to it)
VistA.write('EP') # Edit Parameter
VistA.wait('PARAMETER DEFINITION NAME')
VistA.write('ORWDX WRITE ORDERS LIST') # Parameter used to control Write Orders list
VistA.wait('selection')
VistA.write('8') # Set it for the entire System
VistA.wait('Order Dialog')
VistA.write('ORZ GEN MED WRITE ORDERS LIST') # Order menu we want to use
VistA.write('\r\r\r\r') # we are done. Stay at the EVE menu
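# The menu built above repeats the same ITEM/ROW/COLUMN/DISPLAY TEXT/MNEMONIC
# dialog for every entry. A data-driven helper such as this sketch would
# express the same sequence more compactly; _addOrderMenuItem is a
# hypothetical illustration, is not used by createOrderMenu, and would need
# extra handling for entries that trigger a CHOOSE prompt (e.g. GMRVOR).
def _addOrderMenuItem(VistA, item, row, column='1'):
    # Answer one round of the menu-item prompts, accepting the defaults for
    # the display text and mnemonic.
    VistA.wait('ITEM')
    VistA.write(item)
    VistA.wait('ROW')
    VistA.write(row)
    VistA.wait('COLUMN')
    VistA.write(column)
    VistA.wait('DISPLAY TEXT')
    VistA.write('')
    VistA.wait('MNEMONIC')
    VistA.write('')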
def addAllergiesPermission(VistA):
# Add permissions for all users to mark an Allergy as "Entered in error"
# in CPRS. Done in the CPRS Configuration menu.
# Start from the Systems Manager Menu
# Exits to Systems Manager Menu
VistA.wait('Systems Manager Menu')
VistA.write('CPRS Configuration')
VistA.wait('CPRS Configuration')
VistA.write('GUI PARAMETERS')
VistA.wait('GUI Parameters')
VistA.write('GUI Mark Allergy Entered in Error')
VistA.wait('Enter selection')
VistA.write('4\rY\r\r')
def addTemplatePermission(VistA,init):
# Add permission for the Nurse to create note templates that can be
# shared in the domain.
VistA.wait('Systems Manager Menu')
VistA.write('TIU Maintenance')
VistA.wait('TIU Maintenance')
VistA.write('User Class Management')
VistA.wait('User Class Management')
VistA.write('List Membership by User')
VistA.wait('Select USER')
VistA.write('MS\rAdd\rClinical Coordinator\rT-1\r\r\r')
VistA.wait('Option')
VistA.write('\r')
def setNonExpiringCodes(VistA, nameArray):
startFileman(VistA)
VistA.write('ENTER')
VistA.wait('Input to what File')
VistA.write('NEW PERSON')
VistA.wait_re('EDIT WHICH FIELD')
VistA.write('7.2')
VistA.wait('THEN EDIT')
VistA.write('')
for name in nameArray:
VistA.wait("NEW PERSON NAME")
VistA.write(name)
VistA.wait('VERIFY CODE never expires')
VistA.write('Y')
VistA.wait("NEW PERSON NAME")
VistA.write('')
VistA.wait_re('Select OPTION')
VistA.write('')
def createClinic(VistA,name,abbrv,service):
# Add a clinic via the XUP menu to allow scheduling
# Clinic Information:
# Clinic meets at the Facility: Yes
# Non-Count clinic: No
# Stop Code: 301 (General Internal Medicine)
# Allowable consecutive no-shows: 0
# Max # days for booking in future: 90
# Hour clinic display begins: 8
# Max # days for Auto-rebook: 90
# Maximum Overbooks per day: 0
# Length of Appointment: 30
# Variable Length Appointments?: Y
# Display increments per hour: 2
VistA.wait(PROMPT)
VistA.write('W $$NOSEND^VAFHUTL')
VistA.wait('0')
VistA.write('S DUZ=1 D ^XUP')
VistA.wait('OPTION NAME:')
VistA.write('SDBUILD')
VistA.wait('CLINIC NAME:')
VistA.write(name)
VistA.wait('Are you adding')
VistA.write('Y')
VistA.wait('NAME')
VistA.write('')
VistA.wait('ABBREVIATION')
VistA.write(abbrv)
while True:
index = VistA.multiwait(['SERVICE','CLINIC MEETS','PATIENT FRIENDLY NAME','ALLOW DIRECT PATIENT','DISPLAY CLIN APPT'])
if index == 0:
break;
if index == 2:
VistA.write('')
else:
VistA.write('Y')
VistA.write(service)
VistA.wait('NON-COUNT CLINIC')
VistA.write('N')
VistA.wait('STOP CODE NUMBER')
VistA.write('301\r\r')
VistA.wait('TELEPHONE')
VistA.write('555-555-1414\r\r\r\r\r\r\r\r\r\r\r')
index = VistA.multiwait(['ALLOWABLE CONSECUTIVE NO-SHOWS','WORKLOAD VALIDATION'])
if index == 1:
VistA.write('')
VistA.wait('ALLOWABLE CONSECUTIVE NO-SHOWS')
VistA.write('0')
VistA.wait('FUTURE BOOKING')
VistA.write('90')
VistA.wait('HOUR CLINIC DISPLAY BEGINS')
VistA.write('8\r')
VistA.wait('AUTO-REBOOK')
VistA.write('90\r\r\r\r\r')
VistA.wait('MAXIMUM')
VistA.write('0\r')
VistA.wait('LENGTH OF APP')
VistA.write('30')
VistA.wait('VARIABLE')
VistA.write('Yes')
VistA.wait('DISPLAY INCREMENTS PER HOUR')
VistA.write('2')
# Set availability for the clinic. The dates below cover one work week
# (Mon-Fri): four appointment slots from 8am to noon and from 12:30pm to 3pm,
# with a half-hour lunch break of no appointments in between. The same
# pattern will apply to all future weekdays.
dates = ['JUL 2,2012','JUL 3,2012','JUL 4,2012','JUL 5,2012','JUL 6,2012']
for date in dates:
VistA.wait('AVAILABILITY DATE')
VistA.write(date)
VistA.wait('TIME')
VistA.write('0800-1200\r4')
VistA.wait('TIME')
VistA.write('1230-1500\r4')
VistA.wait('TIME')
VistA.write('')
VistA.wait('PATTERN OK')
VistA.write('Yes')
VistA.wait('AVAILABILITY DATE')
VistA.write('')
VistA.wait('CLINIC NAME:')
VistA.write('')
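# Example invocation of createClinic (hypothetical sample values; this helper
# is illustrative and is not called by the script):
def _exampleCreateClinic(VistA):
    # Creates a clinic named GENERAL MEDICINE, abbreviated GM, under the
    # MEDICINE service, with the scheduling defaults documented above.
    createClinic(VistA, 'GENERAL MEDICINE', 'GM', 'MEDICINE')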
def setupElectronicSignature(VistA,AC,VC1,VC2,sigcode):
# Signs a created user into the ZU Menu system to add a signature code for
# document signing. It will force the user to change the verify code,
VistA.wait(PROMPT,60)
VistA.write('D ^ZU')
VistA.wait('ACCESS CODE:')
VistA.write(AC)
VistA.wait('VERIFY CODE:')
VistA.write(VC1)
VistA.wait('verify code:')
VistA.write(VC1)
VistA.wait('VERIFY CODE:')
VistA.write(VC2)
VistA.wait('right:')
VistA.write(VC2)
VistA.wait('TYPE NAME')
VistA.write('')
# then will enter the User's Toolbox to change the signature information.
VistA.wait('Core Applications')
VistA.write('USER\'s TOOLBOX')
VistA.wait('Toolbox')
VistA.write('ELE')
VistA.wait('INITIAL')
VistA.write('')
VistA.wait('SIGNATURE BLOCK PRINTED NAME')
VistA.write('')
VistA.wait('SIGNATURE BLOCK TITLE')
VistA.write('\r\r\r')
VistA.wait('SIGNATURE CODE')
VistA.write(sigcode)
VistA.wait('SIGNATURE CODE FOR VERIFICATION')
VistA.write(sigcode)
VistA.wait('Toolbox')
VistA.write('\r\r\r')
# Add patients through the Registration menu, reading them from a CSV file.
# Function arguments:
# VistA, path to a patient CSV file (fullname, sex, dob, ssn, type,
# veteran, service, twin, cityob, stateob)
def addPatient(VistA, pfile):
'''Add ALL patients from specified CSV '''
preader = TestHelper.CSVFileReader()
prec = preader.getfiledata(pfile)
for key in sorted(prec):
patient_data = prec[key]
VistA.write('L S DUZ=1 D ^XUP')
VistA.wait('Select OPTION NAME')
VistA.write('Core Applications\r')
VistA.wait("Select Core Applications")
VistA.write("ADT Manager Menu")
while True:
index = VistA.multiwait(['to continue','Select ADT Manager Menu',"Select Registration Menu"])
if index == 0:
VistA.write('')
elif index == 1:
VistA.write("Registration Menu")
elif index == 2:
VistA.write('Register a Patient')
break
index = VistA.multiwait(['PATIENT NAME',"Select 1010 printer"])
if index == 1:
VistA.write("NULL")
VistA.wait('PATIENT NAME')
VistA.write(patient_data['fullname'].strip())
index = VistA.multiwait(['ARE YOU ADDING','Enterprise Search'])
VistA.write('Y')
if index == 1:
while True:
index = VistA.multiwait(['FAMILY','GIVEN','MIDDLE NAME','PREFIX','SUFFIX',
'DEGREE','SOCIAL SECURITY','DATE OF BIRTH','SEX',
'MAIDEN NAME','CITY','STATE', 'MULTIPLE BIRTH',
'PHONE NUMBER','ARE YOU ADDING'])
if index == 14:
VistA.write('Y')
break
elif index == 6:
VistA.write(patient_data['ssn'])
elif index == 7:
VistA.write(patient_data['dob'].strip())
elif index == 8:
VistA.write(patient_data['sex'].strip())
else:
VistA.write('')
VistA.wait('to continue')
VistA.write('')
VistA.wait('MULTIPLE BIRTH INDICATOR')
VistA.write('')
VistA.wait('MAIDEN NAME:')
VistA.write('')
else:
VistA.wait('SEX')
VistA.write(patient_data['sex'].strip())
VistA.wait('DATE OF BIRTH')
VistA.write(patient_data['dob'].strip())
VistA.wait('SOCIAL SECURITY NUMBER')
VistA.write(patient_data['ssn'])
VistA.wait('TYPE')
VistA.write(patient_data['type'].strip())
VistA.wait('PATIENT VETERAN')
VistA.write(patient_data['veteran'].strip())
VistA.wait('SERVICE CONNECTED')
VistA.write(patient_data['service'].strip())
VistA.wait('MULTIPLE BIRTH INDICATOR')
VistA.write(patient_data['twin'].strip())
index = VistA.multiwait(["Do you still",'FAMILY'])
if index == 0:
VistA.write('Y')
VistA.wait("FAMILY")
VistA.write('^\r')
VistA.wait('MAIDEN NAME:')
VistA.write('')
VistA.wait('[CITY]')
VistA.write(patient_data['cityob'].strip())
VistA.wait('[STATE]')
VistA.write(patient_data['stateob'].strip())
VistA.wait('ALIAS')
VistA.write('')
while True:
waitIndex = VistA.multiwait(['Patient Data', 'to exit:'])
if waitIndex == 0:
break
VistA.write('')
VistA.write('Y')
index = VistA.multiwait(['QUIT','Do you want to edit'])
if index == 1:
VistA.write('N')
VistA.wait("QUIT")
# VistA.write("^1.1")
# VistA.wait("QUIT")
# VistA.write('1')
# VistA.wait('COUNTRY')
# VistA.write('')
# VistA.wait('ADDRESS')
# VistA.write('834 Ocean Vista Avenue\r')
# VistA.wait('ZIP')
# VistA.write('90401')
# VistA.wait('CITY')
# VistA.write('1')
# VistA.wait('PHONE NUMBER')
# VistA.write('310-555-2233\r')
# VistA.wait('ADDRESS changes')
# VistA.write('Y\r')
# VistA.wait('PHONE changes')
# VistA.write('Y\r')
# VistA.wait("Copy the Residential")
# VistA.write('N')
# VistA.wait('QUIT')
# VistA.write('^3')
# VistA.wait('QUIT')
# VistA.write('1')
# VistA.wait('PRIMARY NOK')
# VistA.write('Carter,David J Sr')
# VistA.wait('RELATIONSHIP')
# VistA.write('FATHER')
# VistA.wait('ADDRESS')
# VistA.write('Y')
# VistA.wait('WORK PHONE')
# VistA.write('310-555-9876\r^')
VistA.write("^")
VistA.wait('condition')
VistA.write('N')
VistA.wait('today')
VistA.write('N')
VistA.wait('Registration login')
VistA.write('NOW')
VistA.wait("TYPE OF BENEFIT")
VistA.write('3')
VistA.wait("TYPE OF CARE")
VistA.write('5')
VistA.wait("REGISTRATION ELIGIBILITY CODE")
VistA.write('')
VistA.wait("NEED RELATED TO AN ACCIDENT")
VistA.write('N')
VistA.wait("NEED RELATED TO OCCUPATION")
VistA.write('N')
index = VistA.multiwait(["VA Patient Enrollment", "PRINT 10"])
if index == 0:
VistA.write('No')
VistA.wait("as soon as available")
VistA.write('No')
VistA.wait("PRINT 10")
VistA.write('N')
VistA.wait("ROUTING SLIP")
VistA.write('N')
VistA.wait_re("SELECT PATIENT NAME")
VistA.write('^')
while True:
index = VistA.multiwait(["to halt","Core Applications", "to continue",
"Select ADT Manager Menu", "Registration Menu"])
VistA.write('')
if index == 0:
break
VistA.wait(PROMPT)
|
{
"content_hash": "1f4fa3cf06831cce75a38981d41935c9",
"timestamp": "",
"source": "github",
"line_count": 1704,
"max_line_length": 444,
"avg_line_length": 31.916079812206572,
"alnum_prop": 0.6617081915969477,
"repo_name": "josephsnyder/VistA",
"id": "0c3f64766ad70abc8b220dba86810f69ba15e8b2",
"size": "55149",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/vista/OSEHRASetup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6315"
},
{
"name": "Brightscript",
"bytes": "297"
},
{
"name": "CMake",
"bytes": "120273"
},
{
"name": "CSS",
"bytes": "132661"
},
{
"name": "Genshi",
"bytes": "72951258"
},
{
"name": "HTML",
"bytes": "2296661"
},
{
"name": "JavaScript",
"bytes": "2341060"
},
{
"name": "M",
"bytes": "483901"
},
{
"name": "PHP",
"bytes": "6750"
},
{
"name": "Pascal",
"bytes": "17825658"
},
{
"name": "Python",
"bytes": "1473431"
},
{
"name": "Ruby",
"bytes": "12147"
},
{
"name": "Shell",
"bytes": "99067"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdatePriceAndQuantity(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the UpdatePriceAndQuantity Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(UpdatePriceAndQuantity, self).__init__(temboo_session, '/Library/Amazon/Marketplace/Feeds/UpdatePriceAndQuantity')
def new_input_set(self):
return UpdatePriceAndQuantityInputSet()
def _make_result_set(self, result, path):
return UpdatePriceAndQuantityResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return UpdatePriceAndQuantityChoreographyExecution(session, exec_id, path)
class UpdatePriceAndQuantityInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the UpdatePriceAndQuantity
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AWSAccessKeyId(self, value):
"""
Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
"""
super(UpdatePriceAndQuantityInputSet, self)._set_input('AWSAccessKeyId', value)
def set_AWSMarketplaceId(self, value):
"""
Set the value of the AWSMarketplaceId input for this Choreo. ((required, string) The Marketplace ID provided by Amazon Web Services.)
"""
super(UpdatePriceAndQuantityInputSet, self)._set_input('AWSMarketplaceId', value)
def set_AWSMerchantId(self, value):
"""
Set the value of the AWSMerchantId input for this Choreo. ((required, string) The Merchant ID provided by Amazon Web Services.)
"""
super(UpdatePriceAndQuantityInputSet, self)._set_input('AWSMerchantId', value)
def set_AWSSecretKeyId(self, value):
"""
Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
"""
super(UpdatePriceAndQuantityInputSet, self)._set_input('AWSSecretKeyId', value)
def set_Endpoint(self, value):
"""
Set the value of the Endpoint input for this Choreo. ((conditional, string) The base URL for the MWS endpoint. Defaults to mws.amazonservices.co.uk.)
"""
super(UpdatePriceAndQuantityInputSet, self)._set_input('Endpoint', value)
def set_Price(self, value):
"""
Set the value of the Price input for this Choreo. ((required, decimal) Enter the unit price for this product. The price must be greater than 0.00. Do NOT include the currency symbol (e.g. $).)
"""
super(UpdatePriceAndQuantityInputSet, self)._set_input('Price', value)
def set_Quantity(self, value):
"""
Set the value of the Quantity input for this Choreo. ((required, integer) Enter the quantity of the product you have for sale. The quantity must be a whole number, and should be greater than zero.)
"""
super(UpdatePriceAndQuantityInputSet, self)._set_input('Quantity', value)
def set_SKU(self, value):
"""
Set the value of the SKU input for this Choreo. ((required, string) A SKU is a "Stock Keeping Unit" which you can assign to your products to track your inventory. Provide the SKU that you want to modify.)
"""
super(UpdatePriceAndQuantityInputSet, self)._set_input('SKU', value)
def set_TimeToWait(self, value):
"""
Set the value of the TimeToWait input for this Choreo. ((optional, integer) By default, the Choreo will wait for 10 minutes to see if the report is ready for retrieval. Max is 120 minutes.)
"""
super(UpdatePriceAndQuantityInputSet, self)._set_input('TimeToWait', value)
class UpdatePriceAndQuantityResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the UpdatePriceAndQuantity Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Amazon after submitting the feed.)
"""
return self._output.get('Response', None)
def get_ProcessingStatus(self):
"""
Retrieve the value for the "ProcessingStatus" output from this Choreo execution. ((string) The processing status of the feed submission which is parsed from the Amazon response.)
"""
return self._output.get('ProcessingStatus', None)
def get_SubmissionId(self):
"""
Retrieve the value for the "SubmissionId" output from this Choreo execution. ((integer) The submission id parsed from the Amazon response.)
"""
return self._output.get('SubmissionId', None)
def get_SubmissionResult(self):
"""
Retrieve the value for the "SubmissionResult" output from this Choreo execution. ((string) The submission result returned from Amazon.)
"""
return self._output.get('SubmissionResult', None)
class UpdatePriceAndQuantityChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return UpdatePriceAndQuantityResultSet(response, path)
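# Typical usage, sketched under the assumption of the standard Temboo session
# and execution API; the SKU, price and quantity below are placeholder values
# and the required AWS credential inputs are elided. This helper is
# illustrative and is not part of the generated SDK file.
def _example_usage(temboo_session):
    choreo = UpdatePriceAndQuantity(temboo_session)
    inputs = choreo.new_input_set()
    inputs.set_SKU('MY-SKU-001')   # placeholder SKU
    inputs.set_Price('19.99')      # placeholder unit price
    inputs.set_Quantity('5')       # placeholder quantity
    results = choreo.execute_with_results(inputs)
    return results.get_SubmissionId(), results.get_ProcessingStatus()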
|
{
"content_hash": "693dd95608e3311fb544a35433da35be",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 212,
"avg_line_length": 50.567567567567565,
"alnum_prop": 0.6976661321931231,
"repo_name": "jordanemedlock/psychtruths",
"id": "83a3efe41edf87106d7a105c7ae96124d0626005",
"size": "6526",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "temboo/core/Library/Amazon/Marketplace/Feeds/UpdatePriceAndQuantity.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
}
|
'''
Integration test: with the management and public networks separated, bring the
public network down and verify that management node HA recovers.

@author: SyZhao
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.node_operations as node_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.operations.resource_operations as res_ops
import test_stub
import time
import os
vm = None
mn_host = None
pub_mn_ip = None
mag_mn_ip = None
def test():
global vm
global mn_host
global pub_mn_ip
global mag_mn_ip
test_stub.skip_if_scenario_not_multiple_networks()
mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
if len(mn_host) != 1:
test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
pub_mn_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
mag_mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mag_mn_ip
test_util.test_logger("shutdown host's network [%s] that mn vm is running on" % (mn_host[0].ip_))
test_stub.shutdown_host_network(mn_host[0], test_lib.all_scenario_config, downMagt=False)
test_util.test_logger("wait for 20 seconds to see if management node VM starts on another host")
time.sleep(20)
new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
if new_mn_host_ip == "" or new_mn_host_ip == mn_host[0].ip_ or new_mn_host_ip == mn_host[0].managementIp_:
test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host[0].ip_))
count = 120
while count > 0:
new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
if len(new_mn_host) == 1:
test_util.test_logger("management node VM run after its former host down for 120s")
break
elif len(new_mn_host) > 1:
test_util.test_fail("management node VM runs on more than one host after its former host down")
time.sleep(5)
count -= 1
if len(new_mn_host) == 0:
test_util.test_fail("management node VM does not run after its former host down for 120s")
elif len(new_mn_host) > 1:
test_util.test_fail("management node VM runs on more than one host after its former host down")
#node_ops.wait_for_management_server_start()
test_stub.wrapper_of_wait_for_management_server_start(600)
test_stub.ensure_hosts_connected(exclude_host=[mn_host[0]])
test_stub.ensure_bss_host_connected_from_sep_net_down(test_lib.scenario_file, test_lib.all_scenario_config, downMagt=False)
test_stub.ensure_bss_connected()
test_stub.ensure_pss_connected()
test_stub.return_pass_ahead_if_3sites("TEST PASS")
vm = test_stub.create_basic_vm()
vm.check()
vm.destroy()
test_util.test_pass('Create VM Test Success')
# Will be called whatever the test result is.
def env_recover():
global mn_host
global pub_mn_ip
global mag_mn_ip
test_stub.reopen_host_network(mn_host[0], test_lib.all_scenario_config)
test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config, test_lib.scenario_file)
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = pub_mn_ip
#test_stub.recover_host(mn_host[0], test_lib.all_scenario_config, test_lib.deploy_config)
# Will be called only if an exception happens in test().
def error_cleanup():
global vm
if vm:
try:
vm.destroy()
except:
pass
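# The 120-iteration retry loop in test() above is a poll-until pattern that
# recurs throughout these scenarios. A generic helper along these lines could
# replace it; wait_until is an illustrative sketch, not part of the
# zstackwoodpecker API.
def wait_until(predicate, timeout=600, interval=5):
    # Poll predicate() every `interval` seconds until it returns True or the
    # timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False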
|
{
"content_hash": "4e073cac0ce3203a7e380fc4901fe433",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 150,
"avg_line_length": 39.915789473684214,
"alnum_prop": 0.6703586497890295,
"repo_name": "zstackorg/zstack-woodpecker",
"id": "563d2acc84d08b86383e92bed9ace438f0240d60",
"size": "3792",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integrationtest/vm/mn_ha/test_sep_man_1_mn_hst_pub_net_dwn_crt_vm.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "46522"
},
{
"name": "Makefile",
"bytes": "692"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "2891030"
},
{
"name": "Shell",
"bytes": "54266"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
def index(request, pageData):
return render(request, 'index.html', pageData)
def orders(request, pageData):
return render(request, 'orders.html', pageData)
def orderers(request, pageData):
return render(request, 'orderers.html', pageData)
def products(request, pageData):
return render(request, 'products.html', pageData)
def departments(request, pageData):
return render(request, 'departments.html', pageData)
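# These views take an extra pageData argument, so they cannot be mapped in a
# URLconf directly. A thin adapter such as this sketch (build_page_data is a
# hypothetical context builder, not defined in this project) would give them
# the (request, **kwargs) signature Django expects:
def make_view(view_func, build_page_data):
    def wrapped(request, **kwargs):
        return view_func(request, build_page_data(request, **kwargs))
    return wrapped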
|
{
"content_hash": "8757bfee5bcc60627c2666d324f379c0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 53,
"avg_line_length": 29.4375,
"alnum_prop": 0.7473460721868365,
"repo_name": "sanchaez/python_labs",
"id": "eace6681b3c66ed44c23adca56752a8623ba92d4",
"size": "471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lab2/Lab2/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "134"
},
{
"name": "HTML",
"bytes": "24445"
},
{
"name": "JavaScript",
"bytes": "18616"
},
{
"name": "Python",
"bytes": "53410"
}
],
"symlink_target": ""
}
|
from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register
from bvspca.animals.models import Animal
class AnimalModelAdmin(ModelAdmin):
model = Animal
menu_label = 'Animals'
menu_icon = 'fa-paw'
menu_order = 100
add_to_settings_menu = False
list_display = ('title', 'petpoint_id', 'adoption_date', 'live')
search_fields = ('title', 'description', 'petpoint_id', 'adoption_message')
list_filter = ('species', 'sex',)
ordering = ('-live', '-adoption_date', '-petpoint_id',)
list_per_page = 20
modeladmin_register(AnimalModelAdmin)
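# If more model admins are added later, they could be grouped under a single
# menu entry with ModelAdminGroup. A sketch (the group label and icon are
# illustrative, and registering the group would replace the registration
# above, so it is left commented out):
#
# from wagtail.contrib.modeladmin.options import ModelAdminGroup
#
# class AnimalsGroup(ModelAdminGroup):
#     menu_label = 'Animal Records'
#     menu_icon = 'fa-paw'
#     items = (AnimalModelAdmin,)
#
# modeladmin_register(AnimalsGroup)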
|
{
"content_hash": "9647d3dfe6806c7dc2ecaf0a61d1365a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 79,
"avg_line_length": 31.263157894736842,
"alnum_prop": 0.6818181818181818,
"repo_name": "nfletton/bvspca",
"id": "e0a9217332472ebbfe59a3bc0f4f55caf37de6ce",
"size": "594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bvspca/animals/wagtail_hooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "57765"
},
{
"name": "JavaScript",
"bytes": "44372"
},
{
"name": "Python",
"bytes": "259812"
},
{
"name": "SCSS",
"bytes": "61500"
},
{
"name": "Shell",
"bytes": "349"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLSpatialBNTest(hu.HypothesisTestCase):
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
#order=st.sampled_from(["NCHW", "NHWC"]),
order=st.sampled_from(["NCHW"]),
epsilon=st.floats(1e-5, 1e-2),
**mu.gcs)
def test_mkl_BN(self, size, input_channels, batch_size, seed, order, epsilon,
gc, dc):
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["Y"],
order=order,
is_test=True,
epsilon=epsilon,
)
self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var], [0])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
#order=st.sampled_from(["NCHW", "NHWC"]),
order=st.sampled_from(["NCHW"]),
epsilon=st.floats(1e-5, 1e-2),
**mu.gcs)
def test_spatialbn_train_mode(
self, size, input_channels, batch_size, seed, order, epsilon,
gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "running_mean", "running_var"],
["Y", "running_mean", "running_var", "saved_mean", "saved_var"],
order=order,
is_test=False,
epsilon=epsilon,
)
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var],
[0, 1, 2, 3, 4])
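# For reference, inference-mode SpatialBN in NCHW order reduces to a
# per-channel affine transform. This NumPy sketch is illustrative only and is
# not used by the tests above:
def _reference_spatial_bn_inference(X, scale, bias, mean, var, epsilon):
    # Reshape the per-channel statistics so they broadcast over (N, C, H, W).
    shape = (1, X.shape[1], 1, 1)
    inv_std = 1.0 / np.sqrt(var.reshape(shape) + epsilon)
    return scale.reshape(shape) * (X - mean.reshape(shape)) * inv_std + \
        bias.reshape(shape)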
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "fc2cea6ebb024db7d2e0dd36d37a9be1",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 81,
"avg_line_length": 36.95061728395062,
"alnum_prop": 0.5766789174741063,
"repo_name": "bwasti/caffe2",
"id": "9c91ee0a34234eadf1254d3e10d2fb3ace13c14b",
"size": "2993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caffe2/python/mkl/mkl_sbn_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4514"
},
{
"name": "C",
"bytes": "58731"
},
{
"name": "C++",
"bytes": "2743591"
},
{
"name": "CMake",
"bytes": "131386"
},
{
"name": "CSS",
"bytes": "2196"
},
{
"name": "Cuda",
"bytes": "455661"
},
{
"name": "HTML",
"bytes": "5203"
},
{
"name": "Jupyter Notebook",
"bytes": "4615340"
},
{
"name": "Makefile",
"bytes": "527"
},
{
"name": "Metal",
"bytes": "29686"
},
{
"name": "Objective-C",
"bytes": "828"
},
{
"name": "Objective-C++",
"bytes": "147470"
},
{
"name": "Python",
"bytes": "2137478"
},
{
"name": "Shell",
"bytes": "20688"
}
],
"symlink_target": ""
}
|
from interaction import *
from optimization import *
from sketch import *
from visualization import *
from connectorBehavior import *
import regionToolset
session.journalOptions.setValues(replayGeometry=COORDINATE,recoverGeometry=COORDINATE)
beamLength=4.0
cLoad=10000  # concentrated load magnitude; only sets the scale of the results
#-----------------------------------------------------
# Create a model.
myModel = mdb.Model(name='ssBeamModel')
#-----------------------------------------------------
from part import *
# Create a sketch for the base feature.
mySketch = myModel.ConstrainedSketch(name='beamSketch',sheetSize=10.0)
# Create the line.
mySketch.Line(point1=(0.0, 0.0), point2=(beamLength/2, 0.0))
mySketch.Line(point1=(beamLength/2, 0.0), point2=(beamLength, 0.0))
# Create a three-dimensional, deformable part.
myBeamPart = myModel.Part(name='beamPart', dimensionality=THREE_D, type=DEFORMABLE_BODY)
# Create the part's base feature
myBeamPart.BaseWire(sketch=mySketch)
#-----------------------------------------------------
from material import *
# Create a material.
#mySteel = myModel.Material(name='Steel')
# Create the elastic properties
#elasticProperties = (209.E9, 0.28)
#mySteel.Elastic(table=(elasticProperties, ) )
#-------------------------------------------------------
from section import *
# Create the beam section.
myModel.GeneralizedProfile(name='GProfile', area=3.80e-3, i11=2.292667e-5, i12=0, i22=1.681667e-6, j=1.30e-7, gammaO=0.0, gammaW=0.0)
mySection=myModel.BeamSection(name='beamSection', profile='GProfile',
poissonRatio=0.28, integration=BEFORE_ANALYSIS,
table=((210000000000.0, 82030000000.0), ), alphaDamping=0.0, beamShape=CONSTANT,
betaDamping=0.0, centroid=(0.0, 0.0), compositeDamping=0.0,
consistentMassMatrix=False, dependencies=0, shearCenter=(0.0, 0.0),
temperatureDependency=OFF, thermalExpansion=OFF)
# Assign the section to the region. The region refers
# to the single cell in this model.
#mdb.models['Model-1'].parts['Part-1'].SectionAssignment(offset=0.0,
# offsetField='', offsetType=MIDDLE_SURFACE, region=
# mdb.models['Model-1'].parts['Part-1'].sets['Set-1'], sectionName=
# 'Section-1', thicknessAssignment=FROM_SECTION)
#beamRegion = (myBeamPart.cells,)
beamRegion=regionToolset.Region(edges=myBeamPart.edges)
myBeamPart.SectionAssignment(region=beamRegion, sectionName='beamSection',
offset=0.0, offsetField='',offsetType=MIDDLE_SURFACE,
thicknessAssignment=FROM_SECTION)
myModel.parts['beamPart'].assignBeamSectionOrientation(method=
N1_COSINES, n1=(0.0, 0.0, 1.0), region=Region(
edges=myBeamPart.edges.findAt(((0.5, 0.0, 0.0),
), ((2.5, 0.0, 0.0), ), )))
#-------------------------------------------------------
from assembly import *
# Create a part instance.
myAssembly = myModel.rootAssembly
myAssembly.DatumCsysByDefault(CARTESIAN)
myInstance = myAssembly.Instance(name='beamInstance',
part=myBeamPart, dependent=OFF)
#-------------------------------------------------------
from step import *
# Create a step. The time period of the static step is 1.0,
# and the initial incrementation is 0.1; the step is created
# after the initial step.
myModel.StaticStep(name='beamStep', previous='Initial',
nlgeom=OFF, description='Load of the beam.')
myModel.FieldOutputRequest(name='F-Output-2',
createStepName='beamStep', variables=('SF',))
#-------------------------------------------------------
from load import *
#mdb.models['Model-1'].rootAssembly.Set(name='Set-1', vertices=
# mdb.models['Model-1'].rootAssembly.instances['Part-1-1'].vertices.findAt(((
# 0.0, 0.0, 0.0), )))
#v=myAssembly.instances('beamInstance').vertices
#verts=v.findAt(((0.0, 0.0, 0.0), ),)
v=myAssembly.instances['beamInstance'].vertices
verts=v.findAt(((0.0, 0.0, 0.0), ),)
myAssembly.Set(vertices=verts,name='Set-fix1')
#mdb.models['Model-1'].DisplacementBC(amplitude=UNSET, createStepName='Step-1',
# distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name=
# 'BC-1', region=mdb.models['Model-1'].rootAssembly.sets['Set-1'], u1=0.0,
# u2=0.0, u3=0.0, ur1=0.0, ur2=0.0, ur3=UNSET)
region=myAssembly.sets['Set-fix1']
myModel.DisplacementBC(name='BC-1', createStepName='beamStep',
region=region, u1=0.0, u2=0.0, u3=UNSET, ur1=0.0, ur2=0.0, ur3=UNSET,
amplitude=UNSET, fixed=OFF, distributionType=UNIFORM,fieldName='',
localCsys=None)
#mdb.models['Model-1'].rootAssembly.Set(name='Set-2', vertices=
# mdb.models['Model-1'].rootAssembly.instances['Part-1-1'].vertices.findAt(((
# 4.0, 0.0, 0.0), )))
v=myAssembly.instances['beamInstance'].vertices
verts=v.findAt(((beamLength, 0.0, 0.0), ),)
myAssembly.Set(vertices=verts, name='Set-fix2')
#mdb.models['Model-1'].DisplacementBC(amplitude=UNSET, createStepName='Step-1',
# distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name=
# 'BC-2', region=mdb.models['Model-1'].rootAssembly.sets['Set-2'], u1=UNSET,
# u2=0.0, u3=0.0, ur1=0.0, ur2=0.0, ur3=UNSET)
region=myAssembly.sets['Set-fix2']
myModel.DisplacementBC(name='BC-2', createStepName='beamStep',
region=region, u1=UNSET, u2=0.0, u3=0.0, ur1=0.0, ur2=0.0, ur3=UNSET,
amplitude=UNSET, fixed=OFF, distributionType=UNIFORM, fieldName='',
localCsys=None)
#mdb.models['Model-1'].rootAssembly.Set(name='Set-3', vertices=
# mdb.models['Model-1'].rootAssembly.instances['Part-1-1'].vertices.findAt(((
# 2.0, 0.0, 0.0), )))
v=myAssembly.instances['beamInstance'].vertices
verts=v.findAt(((beamLength/2, 0.0, 0.0), ),)
myAssembly.Set(vertices=verts, name='Set-force')
region=myAssembly.sets['Set-force']
myModel.ConcentratedForce(name='beamLoad', createStepName='beamStep',
region=region, cf2=-1.0*cLoad, distributionType=UNIFORM, field='',
localCsys=None)
#-------------------------------------------------------
from mesh import *
# Assign an element type to the part instance.
#region = (myInstance.cells,)
#elemType = mesh.ElemType(elemCode=B31, elemLibrary=STANDARD)
#myAssembly.setElementType(regions=region, elemTypes=(elemType,))
# Seed the part instance.
myAssembly.seedPartInstance(regions=(myInstance,), size=0.01,
deviationFactor=0.1, minSizeFactor=0.1)
# Mesh the part instance.
myAssembly.generateMesh(regions=(myInstance,))
#-------------------------------------------------------
myAssembly.regenerate()
#-------------------------------------------------------
from job import *
# Create an analysis job for the model and submit it.
jobName='ssBeam'
myJob=mdb.Job(name=jobName, model='ssBeamModel')
myJob.submit(consistencyChecking=OFF)
# Save by ldn
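# A minimal post-processing sketch, following the commented-out style used
# above. It assumes the job has finished and that the step and field names
# match those created in this script; it is not part of the original run.
#myJob.waitForCompletion()
#from odbAccess import openOdb
#odb = openOdb(jobName + '.odb')
#lastFrame = odb.steps['beamStep'].frames[-1]
#displacement = lastFrame.fieldOutputs['U']
#print(displacement.values[0].data)
#odb.close()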
|
{
"content_hash": "04b69a465f9555e39686eb9fb1ce8369",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 134,
"avg_line_length": 32.077669902912625,
"alnum_prop": 0.6613196125907991,
"repo_name": "zjkl19/AbaqusPython",
"id": "45fc387e780c912036456ae686c28b4f19702e6d",
"size": "6884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CompareGeneralProfileIProfile(BEAM)/SimpleSupportBeam(GeneralProfile).py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "512"
},
{
"name": "Python",
"bytes": "387298"
}
],
"symlink_target": ""
}
|
from flask import url_for
from flask.ext.testing import TestCase
from webapp import app, db
from webapp.models import Monkey, Friendship, BestFriend
from config import HEROKU_POSTGRESQL_CHARCOAL_URL
def setup_module():
app.config['SQLALCHEMY_DATABASE_URI'] = HEROKU_POSTGRESQL_CHARCOAL_URL
app.config['WTF_CSRF_ENABLED'] = False
db.create_all()
def teardown_module():
db.session.remove()
db.drop_all()
class TestProfilePage(TestCase):
def create_app(self):
return app
def test_profile_page_is_working(self):
monkey = Monkey(name='Sajjad', age='21', email='Sajjad@nono.com')
db.session.add(monkey)
db.session.commit()
response = self.client.get(url_for('profile',
monkey_id=monkey.monkey_id))
self.assert_200(response)
def test_edit_info_inside_profile_page_valid_input_works(self):
monkey = Monkey(name='Sajjad', age='21', email='Sajjad@nono.com')
db.session.add(monkey)
db.session.commit()
response = self.client.get(url_for('profile',
monkey_id=monkey.monkey_id))
assert b'Sajjad@nono.com' in response.data
data = {
'name': 'Sajjad',
'age': '21',
'email': 'Sajjad@gmail.com'
}
response = self.client.post(url_for('profile',
monkey_id=monkey.monkey_id),
data=data,
follow_redirects=True)
self.assert_200(response)
assert b'Sajjad@gmail.com' in response.data
def test_best_friend_status_not_yet_and_name_of_bf_works(self):
monkey = Monkey(name='Sajjad', age='21', email='Sajjad@nono.com')
db.session.add(monkey)
db.session.commit()
response = self.client.get(url_for('profile',
monkey_id=monkey.monkey_id))
assert b'Best friend: Not yet' in response.data
bf = Monkey(name='Alice', age='100', email='Alice@nono.com')
db.session.add(bf)
db.session.commit()
best_friend = BestFriend(monkey_id=monkey.monkey_id,
best_friend_id=bf.monkey_id)
db.session.add(best_friend)
db.session.commit()
response = self.client.get(url_for('profile',
monkey_id=monkey.monkey_id))
assert b'Best friend: Alice' in response.data
def test_terminate_monkey_works(self):
monkey = Monkey(name='About2Terminate', age='21', email='ATT@no.com')
monkey2 = Monkey(name='PoorFriend', age='21', email='ATT@no.com')
monkey3 = Monkey(name='EvenPoorer', age='21', email='ATT@no.com')
db.session.add(monkey)
db.session.add(monkey2)
db.session.add(monkey3)
db.session.commit()
friendship = Friendship(monkey_id=monkey.monkey_id,
friend_id=monkey2.monkey_id)
friendship_reciprocal = Friendship(monkey_id=monkey2.monkey_id,
friend_id=monkey.monkey_id)
best_friend = BestFriend(monkey_id=monkey.monkey_id,
best_friend_id=monkey3.monkey_id)
db.session.add(friendship)
db.session.add(best_friend)
db.session.commit()
response = self.client.get(url_for('terminate',
monkey_id=monkey.monkey_id),
follow_redirects=True)
self.assert_200(response)
assert b'About2Terminate was terminated' in response.data
assert monkey not in Monkey.query.all()
assert friendship not in Friendship.query.all()
assert best_friend not in BestFriend.query.all()
def test_edit_info_a_non_existent_monkey_gets_handled(self):
data = {
'name': 'Sajjad',
'age': '21',
'email': 'Sajjad@gmail.com'
}
response = self.client.post(url_for('profile',
monkey_id=999999999),
data=data)
self.assert_200(response)
assert b'500: Internal Server Error' in response.data
|
{
"content_hash": "cef997cb50ce7bf4a6de3c3504a40f14",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 77,
"avg_line_length": 40.429906542056074,
"alnum_prop": 0.5570966250577901,
"repo_name": "Sajjadhosn/web-app-monkey",
"id": "7382c7b12baff31286e473ec01414571d9a9f4e5",
"size": "4326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_profile_page.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "499"
},
{
"name": "HTML",
"bytes": "13996"
},
{
"name": "Python",
"bytes": "31060"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import six
from azure.mgmt.eventgrid.models import EventSubscription, EventSubscriptionFilter
from c7n_azure.session import Session
from c7n.utils import local_session
class AzureEvents(object):
"""A mapping of resource types to events."""
azure_events = {
'AppServicePlanWrite': {
'resource_provider': 'Microsoft.Web/serverFarms',
'event': 'write'},
'BatchWrite': {
'resource_provider': 'Microsoft.Batch/batchAccounts',
'event': 'write'},
'CdnProfileWrite': {
'resource_provider': 'Microsoft.Cdn/profiles',
'event': 'write'},
'CognitiveServiceWrite': {
'resource_provider': 'Microsoft.CognitiveServices/account',
'event': 'write'},
'ContainerServiceWrite': {
'resource_provider': 'Microsoft.ContainerService/managedClusters',
'event': 'write'},
'CosmosDbWrite': {
'resource_provider': 'Microsoft.DocumentDB/databaseAccounts',
'event': 'write'},
'DataFactoryWrite': {
'resource_provider': 'Microsoft.DataFactory/factories',
'event': 'write'},
'DataLakeWrite': {
'resource_provider': 'Microsoft.DataLakeStore/accounts',
'event': 'write'},
'DiskWrite': {
'resource_provider': 'Microsoft.Compute/disks',
'event': 'write'},
'IotHubWrite': {
'resource_provider': 'Microsoft.Devices/IotHubs',
'event': 'write'},
'KeyVaultWrite': {
'resource_provider': 'Microsoft.KeyVault/vaults',
'event': 'write'},
'LoadBalancerWrite': {
'resource_provider': 'Microsoft.Network/loadBalancers',
'event': 'write'},
'NetworkInterfaceWrite': {
'resource_provider': 'Microsoft.Network/networkInterfaces',
'event': 'write'},
'NetworkSecurityGroupWrite': {
'resource_provider': 'Microsoft.Network/networkSecurityGroups',
'event': 'write'},
'PublicIpWrite': {
'resource_provider': 'Microsoft.Network/publicIPAddresses',
'event': 'write'},
'RedisWrite': {
'resource_provider': 'Microsoft.Cache/Redis',
'event': 'write'},
'ResourceGroupWrite': {
'resource_provider': 'Microsoft.Resources/subscriptions/resourceGroups',
'event': 'write'},
'SqlServerWrite': {
'resource_provider': 'Microsoft.Sql/servers',
'event': 'write'},
'StorageWrite': {
'resource_provider': 'Microsoft.Storage/storageAccounts',
'event': 'write'},
'VmWrite': {
'resource_provider': 'Microsoft.Compute/virtualMachines',
'event': 'write'},
'VmssWrite': {
'resource_provider': 'Microsoft.Compute/virtualMachineScaleSets',
'event': 'write'},
'VnetWrite': {
'resource_provider': 'Microsoft.Network/virtualNetworks',
'event': 'write'},
'WebAppWrite': {
'resource_provider': 'Microsoft.Web/sites',
'event': 'write'}
}
@classmethod
def get(cls, event):
return cls.azure_events.get(event)
@classmethod
def get_event_operations(cls, events):
event_operations = []
for e in events:
if isinstance(e, six.string_types):
event = cls.get(e)
event_operations.append('%s/%s' % (event['resource_provider'], event['event']))
else:
event_operations.append('%s/%s' % (e['resourceProvider'], e['event']))
return event_operations
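# Illustrative example (not part of the original module): string event names are
# expanded by AzureEvents.get_event_operations against the table above, e.g.
#   AzureEvents.get_event_operations(['VmWrite', 'KeyVaultWrite'])
#   -> ['Microsoft.Compute/virtualMachines/write',
#       'Microsoft.KeyVault/vaults/write']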
class AzureEventSubscription(object):
@staticmethod
def create(destination, name, subscription_id, session=None, event_filter=None):
s = session or local_session(Session)
event_filter = event_filter or EventSubscriptionFilter()
event_info = EventSubscription(destination=destination, filter=event_filter)
scope = '/subscriptions/%s' % subscription_id
client = s.client('azure.mgmt.eventgrid.EventGridManagementClient')
event_subscription = client.event_subscriptions.create_or_update(scope, name, event_info)
return event_subscription.result()
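# Illustrative sketch (assumption, not from the original source): wiring the
# helper above to a webhook destination. The destination class and its
# endpoint_url argument are assumed from the azure-mgmt-eventgrid SDK models.
#   from azure.mgmt.eventgrid.models import WebHookEventSubscriptionDestination
#   dest = WebHookEventSubscriptionDestination(endpoint_url='https://example.com/hook')
#   AzureEventSubscription.create(dest, 'my-subscription', '<subscription-guid>')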
|
{
"content_hash": "f2995611940eb06451ad85b4157e903f",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 97,
"avg_line_length": 31.964028776978417,
"alnum_prop": 0.5838397479180734,
"repo_name": "kapilt/cloud-custodian",
"id": "abeecf3707c566b7365274bcc33edb3f57c84f26",
"size": "5033",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/c7n_azure/c7n_azure/azure_events.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "8163"
},
{
"name": "Go",
"bytes": "146630"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9971"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "5354902"
},
{
"name": "Shell",
"bytes": "13032"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('climatemodels', '0066_auto_20170120_1625'),
]
operations = [
migrations.AlterField(
model_name='impactmodel',
name='responsible_person',
field=models.CharField(blank=True, help_text='Contact information for person responsible for model simulations in this simulation round', max_length=500, null=True, verbose_name='Person responsible for model simulations in this simulation round'),
),
]
|
{
"content_hash": "2abf0cbce1d8796ab088a97713064fce",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 259,
"avg_line_length": 33.888888888888886,
"alnum_prop": 0.6836065573770492,
"repo_name": "bruecksen/isimip",
"id": "6f4191bf22a1f53b17f1101f6fa3e7d60d4c40e2",
"size": "683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isi_mip/climatemodels/migrations/0067_auto_20170120_1630.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36731"
},
{
"name": "HTML",
"bytes": "106877"
},
{
"name": "JavaScript",
"bytes": "30564"
},
{
"name": "Python",
"bytes": "4244200"
},
{
"name": "Shell",
"bytes": "789"
}
],
"symlink_target": ""
}
|
from ..util import TestConfig
from biokbase.workspace.baseclient import ServerError
class MockClients:
"""
Mock KBase service clients as needed for Narrative backend tests.
Use this with the Python mock library to mock the biokbase.narrative.clients.get call
as a test function decorator, like this:
<top of file>
from mockclients import get_mock_client
class MyTestCase(unittest.TestCase):
@mock.patch('biokbase.narrative.jobs.appmanager.clients.get', get_mock_client)
def test_my_function(self):
... test code that calls out to AppManager ...
    This case will monkeypatch the clients.get call that the appmanager uses so that
    it returns the result of get_mock_client, which is an instance of this class.
the appmanager uses will go through here, so be sure that it's mocked in this class.
This really only works because each client has different function names to call, so
these all return different things. If you have something that overlaps, you might need
to make another class. That'll mean either changing get_mock_client to return a new
client in some cases, or doing interesting things with your duplicate function.
These are specially put together to handle common use cases, and in some cases, expected test
inputs (e.g. special workspace names)
Will likely be removed (or modified, at least), when a minified KBase deploy becomes available.
Then we don't need to mock as much.
"""
def __init__(self, token=None):
if token is not None:
assert isinstance(token, str)
self.config = TestConfig()
self.job_info = self.config.load_json_file(self.config.get('jobs', 'job_info_file'))
self.ee2_job_info = self.config.load_json_file(self.config.get('jobs', 'ee2_job_info_file'))
self.test_job_id = self.config.get('app_tests', 'test_job_id')
# ----- User and Job State functions -----
def list_jobs2(self, params):
return self.job_info.get('job_info')
def delete_job(self, job):
return "bar"
def check_workspace_jobs(self, params):
return self.ee2_job_info
# ----- Narrative Method Store functions ------
def list_methods_spec(self, params):
return self.config.load_json_file(self.config.get('specs', 'app_specs_file'))
def list_categories(self, params):
return self.config.load_json_file(self.config.get('specs', 'type_specs_file'))
def get_method_full_info(self, params):
return self.config.load_json_file(self.config.get('specs', 'app_infos_file'))
# ----- Workspace functions -----
def ver(self):
return "0.0.0"
def get_workspace_info(self, params):
"""
Some magic workspace ids.
12345 - the standard one.
678 - doesn't have useful narrative info in its metadata
789 - raises a permissions error
890 - raises a deleted workspace error
otherwise, returns workspace info with narrative = 1, and narrative name = 'Fake'
"""
wsid = params.get('id', 12345)
name = params.get('workspace', 'some_workspace')
if wsid == 678:
return [wsid, name, 'owner', 'moddate', 'largestid', 'a', 'n', 'unlocked', {}]
elif wsid == 789:
raise ServerError("JSONRPCError", -32500, "User you may not read workspace 789")
elif wsid == 890:
raise ServerError("JSONRPCError", -32500, "Workspace 890 is deleted")
elif name != 'invalid_workspace':
return [wsid, name, 'owner', 'moddate', 'largestid', 'a', 'n', 'unlocked', {'is_temporary': 'false', 'narrative': '1', 'narrative_nice_name': 'Fake'}]
else:
raise Exception('not found')
def get_object_info_new(self, params):
"""
Returns a (more or less) random object.
But we introspect the params a little bit to return something crafted to the test.
Add more to this if it's helpful.
"""
random_obj_info = [5, 'Sbicolor2', 'KBaseGenomes.Genome-12.3', '2017-03-31T23:42:59+0000', 1,
'wjriehl', 18836, 'wjriehl:1490995018528', '278abf8f0dbf8ab5ce349598a8674a6e', 109180038, None]
obj_info = random_obj_info
infos = []
for obj_ident in params.get('objects', [{'name': 'Sbicolor2', 'workspace': 'whatever'}]):
if obj_ident.get('name') == 'rhodobacterium.art.q20.int.PE.reads':
infos.append([7,
'rhodobacterium.art.q20.int.PE.reads',
'KBaseFile.PairedEndLibrary-2.1',
'2018-06-26T19:31:41+0000',
1,
'wjriehl',
12345,
'random_workspace',
'a20f2df66f973de41b84164f2c2bedd3',
765,
None])
elif obj_ident.get('name') == 'rhodobacterium.art.q10.PE.reads':
infos.append([8,
'rhodobacterium.art.q10.PE.reads',
'KBaseFile.PairedEndLibrary-2.1',
'2018-08-13T23:13:09+0000',
1,
'wjriehl',
12345,
'random_workspace',
'9f014a3c08368537a40fa2e4b90f9cab',
757,
None])
else:
infos.append(random_obj_info)
return infos
def get_object_info3(self, params):
infos = [[5, 'Sbicolor2', 'KBaseGenomes.Genome-12.3', '2017-03-31T23:42:59+0000', 1,
'wjriehl', 18836, 'wjriehl:1490995018528', '278abf8f0dbf8ab5ce349598a8674a6e',
109180038, None]]
paths = [['18836/5/1']]
num_objects = len(params.get('objects', [0]))
return {
'infos': infos * num_objects,
'paths': paths * num_objects
}
# ----- Narrative Job Service functions -----
def run_job(self, params):
return self.test_job_id
def cancel_job(self, job_id):
return "done"
def check_job_canceled(self, params):
return {
"finished": 0,
"canceled": 0,
"job_id": params.get("job_id")
}
def get_job_params(self, job_id):
return self.ee2_job_info.get(job_id, {}).get('job_input', {})
def check_job(self, params):
job_id = params.get('job_id')
if not job_id:
return {}
info = self.ee2_job_info.get(job_id, {})
if "exclude_fields" in params:
for f in params["exclude_fields"]:
if f in info:
del info[f]
return info
def check_jobs(self, params):
job_ids = params.get('job_ids')
infos = dict()
for job in job_ids:
infos[job] = self.check_job({'job_id': job, 'exclude_fields': params.get('exclude_fields', [])})
return infos
def get_job_logs(self, params):
"""
params: job_id, skip_lines
skip_lines = number of lines to skip, get all the rest
single line: {
is_error 0,1
line: string
}
there are only 100 "log lines" in total.
"""
total_lines = 100
skip = params.get('skip_lines', 0)
lines = list()
if skip < total_lines:
for i in range(total_lines-skip):
lines.append({
"is_error": 0,
"line": "This is line {}".format(i+skip)
})
return {
'last_line_number': max(total_lines, skip),
'lines': lines
}
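    # Illustrative example (not part of the original class): with the 100-line cap
    # above, get_job_logs({'job_id': 'x', 'skip_lines': 98}) returns
    # last_line_number == 100 and two entries whose 'line' values are
    # "This is line 98" and "This is line 99".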
# ----- Service Wizard functions -----
def sync_call(self, call, params):
if call == "NarrativeService.list_objects_with_sets":
return self._mock_ns_list_objects_with_sets(params)
def _mock_ns_list_objects_with_sets(self, params):
"""
Always returns the same several objects. Should be enough to
cover all data cases.
"""
params = params[0]
user_id = "some_user"
ws_name = "some_workspace"
ws_id = 1
types = params.get('types', [])
with_meta = True if params.get('includeMetadata') else False
if params.get('ws_name'):
ws_name = params['ws_name']
if params.get('ws_id'):
ws_id = params['ws_id']
if params.get('workspaces'):
ws_name = params['workspaces'][0]
dp_id = 999
dp_ref = "{}/{}".format(ws_id, dp_id)
data = {
'data': [{
'object_info': [
6, 'NarrativeObj', 'KBaseNarrative.Narrative-4.0', '2018-08-10T16:45:12+0000', 1, user_id, ws_id, ws_name, "checksum", 12345, None
]
}, {
'object_info': [
1, 'obj1', 'ModuleA.TypeA-1.0', '2018-08-10T16:47:36+0000', 2, user_id, ws_id, ws_name, "checksum", 12345, None
]
}, {
'object_info': [
7, 'obj7', 'ModuleA.TypeA-1.0', '2018-08-10T16:47:36+0000', 2, user_id, ws_id, ws_name, "checksum", 12345, None
]
}, {
'object_info': [
8, 'obj8', 'ModuleA.TypeA-1.0', '2018-08-10T16:47:36+0000', 2, user_id, ws_id, ws_name, "checksum", 12345, None
]
}, {
'object_info': [
9, 'obj9', 'ModuleB.TypeB-1.0', '2018-08-10T16:47:36+0000', 3, user_id, ws_id, ws_name, "checksum", 12345, None
]
}, {
'object_info': [
3, 'obj3', 'ModuleC.TypeC-1.0', '2018-08-10T16:47:36+0000', 4, user_id, ws_id, ws_name, "checksum", 12345, None
]
}, {
'object_info': [
4, 'obj4', 'ModuleD.TypeD-1.0', '2018-08-10T16:47:36+0000', 5, user_id, ws_id, ws_name, "checksum", 12345, None
],
'dp_info': {
'ref': dp_ref,
'refs': [dp_ref]
}
}, {
'object_info': [
5, 'obj5', 'Module5.Type5-1.0', '2018-08-10T16:47:36+0000', 6, user_id, ws_id, ws_name, "checksum", 12345, None
],
'dp_info': {
'ref': dp_ref,
'refs': [dp_ref]
}
}],
'data_palette_refs': {
str(ws_id) : dp_ref
}
}
# filter on type
if types:
# kinda ew, but kinda pretty, too.
# check if any member of types is the start of any object_info type, pass the filter if so
data['data'] = list(filter(lambda x: any([x['object_info'][2].lower().startswith(t.lower()) for t in types]), data['data']))
if with_meta:
# fake, uniform metadata. fun!
for d in data['data']:
d['object_info'][10] = {'key1': 'value1', 'key2': 'value2'}
return [data]
def get_mock_client(client_name, token=None):
return MockClients(token=token)
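# Illustrative sketch (not part of the original module): wiring get_mock_client
# into a test, following the pattern described in the MockClients docstring.
# The patch target below is the one named in that docstring and is an assumption,
# not verified here.
#
#   from unittest import mock
#
#   class MyAppManagerTest(unittest.TestCase):
#       @mock.patch('biokbase.narrative.jobs.appmanager.clients.get', get_mock_client)
#       def test_run_job_returns_test_id(self):
#           # every clients.get(...) call inside appmanager now yields a MockClients
#           # instance, so a submitted job reports the configured test_job_id
#           ...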
def get_failing_mock_client(client_name, token=None):
return FailingMockClient(token=token)
class FailingMockClient:
def __init__(self, token=None):
pass
def check_workspace_jobs(self, params):
raise ServerError("JSONRPCError", -32000, "Job lookup failed.")
def cancel_job(self, params):
raise ServerError("JSONRPCError", -32000, "Can't cancel job")
def check_job_canceled(self, params):
raise ServerError("JSONRPCError", 1, "Can't cancel job")
def get_job_logs(self, params):
raise ServerError("JSONRPCError", 2, "Can't get job logs")
class MockStagingHelper:
def list(self):
"""
Mock the call to the staging service to get the "user's" files.
        This returns a total of 7 files, 6 of which have "file" in the name,
and 3 are paths.
"""
return [
'file1',
'file2',
'file3',
'path1/file1',
'path2/file2',
'omg/this/is/a/long/path/to/a/file',
'filterme'
]
|
{
"content_hash": "51313d707c085a17e51b4b6f39d7b98e",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 162,
"avg_line_length": 38.3109756097561,
"alnum_prop": 0.5380391532707306,
"repo_name": "pranjan77/narrative",
"id": "09d1bdf465e3a61434af60e00283b443a51bf79e",
"size": "12566",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/biokbase/narrative/tests/narrative_mock/mockclients.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "159688"
},
{
"name": "HTML",
"bytes": "113401"
},
{
"name": "JavaScript",
"bytes": "8064370"
},
{
"name": "Lua",
"bytes": "89680"
},
{
"name": "Makefile",
"bytes": "9726"
},
{
"name": "PHP",
"bytes": "1691"
},
{
"name": "Python",
"bytes": "3697412"
},
{
"name": "R",
"bytes": "39956"
},
{
"name": "Ruby",
"bytes": "3328"
},
{
"name": "Shell",
"bytes": "18976"
},
{
"name": "Smarty",
"bytes": "9051"
}
],
"symlink_target": ""
}
|
"""
Created on Thur Apr 20 13:56:10 2017
@author: richard.mathie@amey.co.uk
"""
class MailGunException(Exception):
pass
|
{
"content_hash": "e7594ce9ad27cc0ec9b4f801cfeab700",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 36,
"avg_line_length": 14.11111111111111,
"alnum_prop": 0.7007874015748031,
"repo_name": "amey-sam/Flask-MailGun",
"id": "50de4d0321e07abdc4e3d0978b3b4e1fb7021898",
"size": "151",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "flask_mailgun/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1741"
},
{
"name": "Python",
"bytes": "28900"
}
],
"symlink_target": ""
}
|
from flask import Flask, jsonify
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from flask_jwt import JWT, jwt_required
from models import Users, Contacts
from flask import request, Response
from sqlalchemy.exc import IntegrityError
from sqlalchemy import func
import json
engine = create_engine('sqlite:///task2.db')
Session = sessionmaker(bind=engine)
app = Flask(__name__)
db = SQLAlchemy(app)
app.config['SECRET_KEY'] = 'super-secret'
app.config['JWT_EXPIRATION_DELTA'] = 3600 # time life token
jwt = JWT(app)
class User(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
@jwt.authentication_handler
def authenticate(username, password):
if username == 'joe' and password == 'pass':
return User(id=1, username='joe')
@jwt.user_handler
def load_user(payload):
if payload['user_id'] == 1:
return User(id=1, username='joe')
#@app.route('/login', methods=['GET', 'POST'])
@app.route('/user/create', methods=['POST'])
@jwt_required()
def create_user():
try:
session = Session()
user = Users(name=request.form.get('name'),
surname=request.form.get('surname'),
salt=request.form.get('salt'),
enc_passwd=request.form.get('enc_passwd'))
session.add(user)
session.commit()
status = {'status': 'Success!', 'User_id': user.id}
except IntegrityError:
status = {'status': 'Error!', 'message': 'creating user'}
return Response(response=json.dumps(status),
status=200,
mimetype="application/json")
@app.route('/user/update/<int:id>', methods=['PUT'])
@jwt_required()
def update_user(id):
session = Session()
user = Users.query.filter_by(id=id).first()
req = dict(request.form)
for attr, value in req.iteritems():
setattr(user, attr, value[0])
session.merge(user)
session.commit()
status = {'status': 'Success!'}
return Response(response=json.dumps(status),
status=200,
mimetype="application/json")
@app.route('/contact/create', methods=['POST'])
@jwt_required()
def create_contact():
try:
user = Users.query.filter_by(id=request.form.get('user_id', None)).first()
user_id = user.id
session = Session()
if request.form.get('contact') and request.form.get('contact_type') and request.form.get('user_id'):
obj = Contacts(contact=request.form.get('contact'),
contact_type=request.form.get('contact_type'),
users_id=user_id)
session.add(obj)
session.commit()
status = {'status': 'Success!', 'Contact_id': obj.id}
else:
status = {'status': 'Error!', 'message': 'creating contact'}
except AttributeError:
status = {'status': 'Error!', 'message': 'user does not exist'}
return Response(response=json.dumps(status),
status=200,
mimetype="application/json")
@app.route('/contact/update/<int:id>', methods=['PUT'])
@jwt_required()
def update_contact(id):
session = Session()
user = Contacts.query.filter_by(id=id).first()
req = dict(request.form)
for attr, value in req.iteritems():
setattr(user, attr, value[0])
session.merge(user)
session.commit()
status = {'status': 'Success!'}
return Response(response=json.dumps(status),
status=200,
mimetype="application/json")
#@app.route('/user/search/<name>', methods=['GET'])
@app.route('/user/search', methods=['GET'])
@jwt_required()
def search_user():
result = []
if request.args.getlist('name') and request.args.getlist('surname'):
name = request.args.getlist('name')[0]
surname = request.args.getlist('surname')[0]
user = Users.query.filter(func.lower(Users.name) == func.lower(name),
func.lower(Users.surname) == func.lower(surname)).all() # .first()#.all()
cols = ['id', 'name', 'surname', 'salt', 'enc_passwd']
result = [{col: getattr(d, col) for col in cols} for d in user]
elif request.args.getlist('name'):
name = request.args.getlist('name')[0]
user = Users.query.filter(func.lower(Users.name) == func.lower(name)).all() # .first()#.all()
cols = ['id', 'name', 'surname', 'salt', 'enc_passwd']
result = [{col: getattr(d, col) for col in cols} for d in user]
elif request.args.getlist('surname'):
surname = request.args.getlist('surname')[0]
user = Users.query.filter(func.lower(Users.surname) == func.lower(surname)).all() # .first()#.all()
cols = ['id', 'name', 'surname', 'salt', 'enc_passwd']
result = [{col: getattr(d, col) for col in cols} for d in user]
else:
        print 'no search parameters supplied'
if result:
for i in range(len(result)):
contacts = Contacts.query.filter_by(users_id=result[i]['id']).all()
cols = ['id', 'contact', 'contact_type']
contacts = [{col: getattr(d, col) for col in cols} for d in contacts]
result[i]['contacts'] = contacts
else:
result = {'status': 'Error!'}
return Response(response=json.dumps(result),
status=200,
mimetype="application/json")
@app.errorhandler(404)
def page_not_found(error):
return 'This page does not exist', 404
if __name__ == '__main__':
# init DB
#db.create_all()
# print 'created'
# app.run()
print 'run'
app.run(debug=True, port=5000)
|
{
"content_hash": "9aab3044675ae24cffefd8380867de2a",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 108,
"avg_line_length": 31.11891891891892,
"alnum_prop": 0.5893694632621157,
"repo_name": "Sergey010289/Address_book_flask",
"id": "108d0ec12fb24207a3b5f47d6eabf0a94a4ca070",
"size": "5782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7343"
}
],
"symlink_target": ""
}
|
StreamSubscriptions = {}
|
{
"content_hash": "3651643a95890a7afba803c03adc1668",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 24,
"avg_line_length": 14,
"alnum_prop": 0.6785714285714286,
"repo_name": "AlexPereverzyev/html5stream",
"id": "a811ff8be255af1201f99d702898e4ce720831ac",
"size": "28",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "html5stream/streaming/stream_subscriptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4284"
},
{
"name": "JavaScript",
"bytes": "12132"
},
{
"name": "Nginx",
"bytes": "1583"
},
{
"name": "Python",
"bytes": "5241"
}
],
"symlink_target": ""
}
|
import argparse
import pprint
from pathlib import Path
from git import Git
from PIL import Image, ImageDraw
# Bold Farmhouse https://www.color-hex.com/color-palette/104687
PALETTE = ["#830015", "#000068", "#8fb178", "#f2e8cf", "#ae976d"]
# Dull Shell https://www.color-hex.com/color-palette/104668
# PALETTE = ["#dfb2b2", "#bf8a8a", "#b67777", "#985d5d", "#784343"]
def line_count(path):
    # Count lines without leaking the file handle.
    with open(path) as fh:
        return sum(1 for _ in fh)
def ls_files(gitobj, pat):
return gitobj.ls_files(pat).split("\n")
def count_project(project):
gitproj = Git(project)
return {
"python": len(ls_files(gitproj, "*.py")),
"html": len(ls_files(gitproj, "*.html")),
"c": len(ls_files(gitproj, "*.[ch]")),
# "test": len(ls_files(gitproj, "test*")),
"total": len(ls_files(gitproj, "*")),
}
def cmd_stat(project):
stat = count_project(project)
label = str(project).upper()
print(label)
pprint.pprint(stat)
def calc_project(project):
gitproj = Git(project)
source_names = ls_files(gitproj, "*.[ch]")
sourcedb = dict.fromkeys(source_names)
for name in sourcedb:
sourcedb[name] = {"path": Path(gitproj.working_dir) / name}
for name, info in sourcedb.items():
sourcedb[name]["line_count"] = line_count(info["path"])
total_lines = sum([info["line_count"] for info in sourcedb.values()])
return dict(
gitproj=gitproj,
sourcedb=sourcedb,
total_lines=total_lines,
)
def run(cmd):
    # Run the command and return its stdout split into lines.
    import subprocess
    out = subprocess.run(cmd, capture_output=True, text=True)
    return out.stdout.split("\n")
def calc_year(project):
date_expr = "@{1 year ago}"
cmd = [
"git",
f"--git-dir={project}/.git",
"rev-list",
"-1",
f"--before='{date_expr}'",
"master",
]
print(cmd)
year_out = run(cmd)
year_hash = year_out[0]
cmd = [
"git",
f"--git-dir={project}/.git",
"diff",
"--numstat",
"HEAD",
year_hash,
"*.[ch]",
]
print(cmd)
print(run(cmd))
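    # Note (assumption about the git plumbing, not from the original source):
    # `git diff --numstat` emits one tab-separated record per file, e.g.
    #   12      3       src/main.c
    # (lines added, lines removed, path), so the second run() above is expected to
    # yield those per-file deltas for *.c / *.h between HEAD and the commit from
    # one year ago.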
def cmd_render(project):
calc_year(project)
info = calc_project(project)
sourcedb = info["sourcedb"]
img = Image.new("RGB", [1000] * 2, (88, 88, 88))
scale_x = img.width / info["total_lines"]
draw = ImageDraw.Draw(img)
cursor = 0
labels = []
important_width = img.width * 0.04
for i, name in enumerate(sourcedb):
info = sourcedb[name]
print(name, info["line_count"])
width = scale_x * info["line_count"]
color = PALETTE[i % len(PALETTE)]
if width > important_width:
print(name)
labels.append(dict(x=cursor + 3, y=20, text=name))
draw.rectangle(
[cursor, 0, cursor + width, img.height],
fill=color,
# outline="gainsboro",
# width=2,
)
cursor += width
for label in labels:
draw.text([label["x"], label["y"]], label["text"], fill="white")
img.save("z.png", "PNG")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--stat", dest="cmd", action="store_const", const=cmd_stat)
parser.add_argument(
"projects", nargs="+", type=Path, help="Git directory for project"
)
args = parser.parse_args()
cmd = args.cmd or cmd_render
cmd(project=args.projects[0])
if __name__ == "__main__":
main()
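# Example invocations (assumed from the argparse setup above, not documented in
# the original source):
#   python sizes.py /path/to/git/repo          # render the per-file size strip to z.png
#   python sizes.py --stat /path/to/git/repo   # print per-language file counts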
|
{
"content_hash": "a93a14a202d35ececf2592c2dabfa6b6",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 83,
"avg_line_length": 24.13013698630137,
"alnum_prop": 0.5642917967641214,
"repo_name": "johntellsall/shotglass",
"id": "bcc8b9814c0ba60e3878d70b2ba1abef88fd59bc",
"size": "3536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "color-dir/sizes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "503"
},
{
"name": "HTML",
"bytes": "132808"
},
{
"name": "Jupyter Notebook",
"bytes": "932983"
},
{
"name": "Makefile",
"bytes": "12518"
},
{
"name": "Python",
"bytes": "200303"
},
{
"name": "Shell",
"bytes": "3099"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Employee.image'
db.add_column(u'employee_employee', 'image',
self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Employee.image'
db.delete_column(u'employee_employee', 'image')
models = {
u'employee.employee': {
'Add1': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'Add2': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'City': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'Designation': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'Major_Subject': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'Meta': {'object_name': 'Employee'},
'Qualification': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'Skill_sets': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'Visa_Status': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'Zip_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'bill': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {}),
'doj': ('django.db.models.fields.DateField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '50'}),
'exp': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
'id': ('django.db.models.fields.IntegerField', [], {'max_length': '6', 'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mobile': ('django.db.models.fields.IntegerField', [], {'max_length': '12'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'personal_email': ('django.db.models.fields.EmailField', [], {'max_length': '50', 'blank': 'True'}),
'proj': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['employee.Project']"}),
'start_date': ('django.db.models.fields.DateField', [], {'blank': 'True'})
},
u'employee.project': {
'Meta': {'object_name': 'Project'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['employee']
|
{
"content_hash": "afd466d7af4083366cbc86ef881a58d0",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 126,
"avg_line_length": 58.166666666666664,
"alnum_prop": 0.55396370582617,
"repo_name": "asm-technologies/management",
"id": "debc9969d33d47dac2fdd7d400e39a9680620ff5",
"size": "3165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "employee/migrations/0003_auto__add_field_employee_image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "936170"
},
{
"name": "JavaScript",
"bytes": "1284158"
},
{
"name": "Python",
"bytes": "79748"
}
],
"symlink_target": ""
}
|
from py2neo import Graph
graph = Graph()
def buildNodes(aNodeRecord):
data = {"id": aNodeRecord.n._id}
data.update(aNodeRecord.n.properties)
return {"data": data}
def buildRelations(aRelationRecord):
data = {"source": aRelationRecord.r.start_node._id, "target": aRelationRecord.r.end_node._id}
def getGraphData():
# elements["nodes"].append({"data": {"id": rrh_id, "name": type + rrh_name, "faveColor":"#EDA1ED", "weight":30}})
# elements["edges"].append({"data": {"source": enb_id, "target": rrh_id, "weight":5}})
nodeRecords = graph.cypher.execute('MATCH n return n')
print(nodeRecords)
nodes = map(buildNodes, nodeRecords)
print(nodes)
relationRecords = graph.cypher.execute('MATCH ()-[r]->() RETURN r')
relations = map(buildRelations, relationRecords)
elements = {"nodes": nodes, "edges": relations}
return elements
getGraphData()
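# Shape of the structure produced above, as consumed by cytoscape.js
# (illustrative values only, not from the original source):
#   {"nodes": [{"data": {"id": 123, "name": "..."}}, ...],
#    "edges": [{"data": {"source": 123, "target": 456}}, ...]}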
|
{
"content_hash": "95b5e47e47d52f5caa3df1c6648eb030",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 117,
"avg_line_length": 28.34375,
"alnum_prop": 0.659316427783903,
"repo_name": "zhongzhu/cytoscape_py2neo",
"id": "3e62e4023124a603704e36d660508a9ebd9451fe",
"size": "907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "haha.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "827"
},
{
"name": "HTML",
"bytes": "2029"
},
{
"name": "JavaScript",
"bytes": "19694"
},
{
"name": "Python",
"bytes": "3431"
}
],
"symlink_target": ""
}
|
import json
from contextlib import contextmanager
import mock
import os
import random
import tempfile
import unittest
import shutil
import copy
import time
from collections import defaultdict, Counter
from swift.common.exceptions import RingBuilderError
from swift.common.ring import RingBuilder, Ring
from swift.common.ring.composite_builder import (
compose_rings, CompositeRingBuilder, CooperativeRingBuilder)
def make_device_iter():
x = 0
base_port = 6000
while True:
        yield {'region': 0,  # Note that region may be replaced in the tests
'zone': 0,
'ip': '10.0.0.%s' % x,
'replication_ip': '10.0.0.%s' % x,
'port': base_port + x,
'replication_port': base_port + x,
'device': 'sda',
'weight': 100.0, }
x += 1
class BaseTestCompositeBuilder(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.device_iter = make_device_iter()
self.output_ring = os.path.join(self.tmpdir, 'composite.ring.gz')
def pop_region_device(self, region):
dev = next(self.device_iter)
dev.update({'region': region})
return dev
def tearDown(self):
try:
shutil.rmtree(self.tmpdir, True)
except OSError:
pass
def save_builder_with_no_id(self, builder, fname):
orig_to_dict = builder.to_dict
def fake_to_dict():
res = orig_to_dict()
res.pop('id')
return res
with mock.patch.object(builder, 'to_dict', fake_to_dict):
builder.save(fname)
def save_builders(self, builders, missing_ids=None, prefix='builder'):
missing_ids = missing_ids or []
builder_files = []
for i, builder in enumerate(builders):
fname = os.path.join(self.tmpdir, '%s_%s.builder' % (prefix, i))
if i in missing_ids:
self.save_builder_with_no_id(builder, fname)
else:
builder.save(fname)
builder_files.append(fname)
return builder_files
def create_sample_ringbuilders(self, num_builders=2, rebalance=True):
"""
Create sample rings with four devices
:returns: a list of ring builder instances
"""
builders = []
for region in range(num_builders):
fname = os.path.join(self.tmpdir, 'builder_%s.builder' % region)
builder = RingBuilder(6, 3, 0)
for _ in range(5):
dev = self.pop_region_device(region)
builder.add_dev(dev)
# remove last dev to simulate a ring with some history
builder.remove_dev(dev['id'])
# add a dev that won't be assigned any parts
new_dev = self.pop_region_device(region)
new_dev['weight'] = 0
builder.add_dev(new_dev)
if rebalance:
builder.rebalance()
builder.save(fname)
self.assertTrue(os.path.exists(fname))
builders.append(builder)
return builders
def add_dev(self, builder, weight=None, region=None):
if region is None:
dev = next(builder._iter_devs())
region = dev['region']
new_dev = self.pop_region_device(region)
if weight is not None:
new_dev['weight'] = weight
builder.add_dev(new_dev)
def add_dev_and_rebalance(self, builder, weight=None):
self.add_dev(builder, weight)
builder.rebalance()
def assertDevices(self, composite_ring, builders):
"""
:param composite_ring: a Ring instance
:param builders: a list of RingBuilder instances for assertion
"""
# assert all component devices are in composite device table
builder_devs = []
for builder in builders:
builder_devs.extend([
(dev['ip'], dev['port'], dev['device'])
for dev in builder._iter_devs()])
got_devices = [
(dev['ip'], dev['port'], dev['device'])
for dev in composite_ring.devs if dev]
self.assertEqual(sorted(builder_devs), sorted(got_devices),
"composite_ring mismatched with part of the rings")
# assert composite device ids correctly index into the dev list
dev_ids = []
for i, dev in enumerate(composite_ring.devs):
if dev:
self.assertEqual(i, dev['id'])
dev_ids.append(dev['id'])
self.assertEqual(len(builder_devs), len(dev_ids))
def uniqueness(dev):
return (dev['ip'], dev['port'], dev['device'])
# assert part assignment is ordered by ring order
part_count = composite_ring.partition_count
for part in range(part_count):
primaries = [uniqueness(primary) for primary in
composite_ring.get_part_nodes(part)]
offset = 0
for builder in builders:
sub_primaries = [uniqueness(primary) for primary in
builder.get_part_devices(part)]
self.assertEqual(
primaries[offset:offset + builder.replicas],
sub_primaries,
"composite ring is not ordered by ring order, %s, %s"
% (primaries, sub_primaries))
offset += builder.replicas
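        # Illustrative note (not from the original source): for two 3-replica
        # components the composite's six primaries for any partition are expected
        # in component order, i.e. [b0_r0, b0_r1, b0_r2, b1_r0, b1_r1, b1_r2],
        # which is what the offset arithmetic above verifies.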
def check_composite_ring(self, ring_file, builders):
got_ring = Ring(ring_file)
self.assertEqual(got_ring.partition_count, builders[0].parts)
self.assertEqual(got_ring.replica_count,
sum(b.replicas for b in builders))
self.assertEqual(got_ring._part_shift, builders[0].part_shift)
self.assertDevices(got_ring, builders)
def check_composite_meta(self, cb_file, builder_files, version=1):
with open(cb_file) as fd:
actual = json.load(fd)
builders = [RingBuilder.load(fname) for fname in builder_files]
expected_metadata = {
'saved_path': os.path.abspath(cb_file),
'serialization_version': 1,
'version': version,
'components': [
{'id': builder.id,
'version': builder.version,
'replicas': builder.replicas,
}
for builder in builders
],
'component_builder_files':
dict((builder.id, os.path.abspath(builder_files[i]))
for i, builder in enumerate(builders))
}
self.assertEqual(expected_metadata, actual)
def _make_composite_builder(self, builders):
# helper to compose a ring, save it and sanity check it
builder_files = self.save_builders(builders)
cb = CompositeRingBuilder(builder_files)
cb.compose().save(self.output_ring)
self.check_composite_ring(self.output_ring, builders)
return cb, builder_files
class TestCompositeBuilder(BaseTestCompositeBuilder):
def test_compose_rings(self):
def do_test(builder_count):
builders = self.create_sample_ringbuilders(builder_count)
rd = compose_rings(builders)
rd.save(self.output_ring)
self.check_composite_ring(self.output_ring, builders)
do_test(2)
do_test(3)
do_test(4)
def test_composite_same_region_in_the_different_rings_error(self):
builder_1 = self.create_sample_ringbuilders(1)
builder_2 = self.create_sample_ringbuilders(1)
builders = builder_1 + builder_2
with self.assertRaises(ValueError) as cm:
compose_rings(builders)
self.assertIn('Same region found in different rings',
cm.exception.message)
def test_composite_only_one_ring_in_the_args_error(self):
builders = self.create_sample_ringbuilders(1)
with self.assertRaises(ValueError) as cm:
compose_rings(builders)
self.assertIn(
'Two or more component builders are required.',
cm.exception.message)
def test_composite_same_device_in_the_different_rings_error(self):
builders = self.create_sample_ringbuilders(2)
same_device = copy.deepcopy(builders[0].devs[0])
# create one more ring which duplicates a device in the first ring
builder = RingBuilder(6, 3, 1)
_, fname = tempfile.mkstemp(dir=self.tmpdir)
# add info to feed to add_dev
same_device.update({'region': 2, 'weight': 100})
builder.add_dev(same_device)
# add rest of the devices, which are unique
for _ in range(3):
dev = self.pop_region_device(2)
builder.add_dev(dev)
builder.rebalance()
builder.save(fname)
# sanity
self.assertTrue(os.path.exists(fname))
builders.append(builder)
with self.assertRaises(ValueError) as cm:
compose_rings(builders)
self.assertIn(
'Duplicate ip/port/device combination %(ip)s/%(port)s/%(device)s '
'found in builders at indexes 0 and 2' %
same_device, cm.exception.message)
def test_different_part_power_error(self):
# create a ring builder
# (default, part power is 6 with create_sample_ringbuilders)
builders = self.create_sample_ringbuilders(1)
# prepare another ring which has different part power
incorrect_builder = RingBuilder(4, 3, 1)
_, fname = tempfile.mkstemp(dir=self.tmpdir)
for _ in range(4):
dev = self.pop_region_device(1)
incorrect_builder.add_dev(dev)
incorrect_builder.rebalance()
incorrect_builder.save(fname)
# sanity
self.assertTrue(os.path.exists(fname))
# sanity
correct_builder = builders[0]
self.assertNotEqual(correct_builder.part_shift,
incorrect_builder.part_shift)
self.assertNotEqual(correct_builder.part_power,
incorrect_builder.part_power)
builders.append(incorrect_builder)
with self.assertRaises(ValueError) as cm:
compose_rings(builders)
self.assertIn("All builders must have same value for 'part_power'",
cm.exception.message)
def test_compose_rings_float_replica_count_builder_error(self):
builders = self.create_sample_ringbuilders(1)
# prepare another ring which has float replica count
incorrect_builder = RingBuilder(6, 1.5, 1)
_, fname = tempfile.mkstemp(dir=self.tmpdir)
for _ in range(4):
dev = self.pop_region_device(1)
incorrect_builder.add_dev(dev)
incorrect_builder.rebalance()
incorrect_builder.save(fname)
# sanity
self.assertTrue(os.path.exists(fname))
self.assertEqual(1.5, incorrect_builder.replicas)
# the first replica has 2 ** 6 partitions
self.assertEqual(
2 ** 6, len(incorrect_builder._replica2part2dev[0]))
# but the second replica has the half of the first partitions
self.assertEqual(
2 ** 5, len(incorrect_builder._replica2part2dev[1]))
builders.append(incorrect_builder)
with self.assertRaises(ValueError) as cm:
compose_rings(builders)
self.assertIn("Problem with builders", cm.exception.message)
self.assertIn("Non integer replica count", cm.exception.message)
def test_compose_rings_rebalance_needed(self):
builders = self.create_sample_ringbuilders(2)
# add a new device to builder 1 but no rebalance
dev = self.pop_region_device(1)
builders[1].add_dev(dev)
self.assertTrue(builders[1].devs_changed) # sanity check
with self.assertRaises(ValueError) as cm:
compose_rings(builders)
self.assertIn("Problem with builders", cm.exception.message)
self.assertIn("Builder needs rebalance", cm.exception.message)
# after rebalance, that works (sanity)
builders[1].rebalance()
compose_rings(builders)
def test_different_replica_count_works(self):
# create a ring builder
# (default, part power is 6 with create_sample_ringbuilders)
builders = self.create_sample_ringbuilders(1)
# prepare another ring which has different replica count
builder = RingBuilder(6, 1, 1)
_, fname = tempfile.mkstemp(dir=self.tmpdir)
for _ in range(4):
dev = self.pop_region_device(1)
builder.add_dev(dev)
builder.rebalance()
builder.save(fname)
# sanity
self.assertTrue(os.path.exists(fname))
builders.append(builder)
rd = compose_rings(builders)
rd.save(self.output_ring)
got_ring = Ring(self.output_ring)
self.assertEqual(got_ring.partition_count, 2 ** 6)
self.assertEqual(got_ring.replica_count, 4) # 3 + 1
self.assertEqual(got_ring._part_shift, 26)
self.assertDevices(got_ring, builders)
def test_ring_swap(self):
# sanity
builders = sorted(self.create_sample_ringbuilders(2))
rd = compose_rings(builders)
rd.save(self.output_ring)
got_ring = Ring(self.output_ring)
self.assertEqual(got_ring.partition_count, 2 ** 6)
self.assertEqual(got_ring.replica_count, 6)
self.assertEqual(got_ring._part_shift, 26)
self.assertDevices(got_ring, builders)
# even if swapped, it works
reverse_builders = sorted(builders, reverse=True)
self.assertNotEqual(reverse_builders, builders)
rd = compose_rings(reverse_builders)
rd.save(self.output_ring)
got_ring = Ring(self.output_ring)
self.assertEqual(got_ring.partition_count, 2 ** 6)
self.assertEqual(got_ring.replica_count, 6)
self.assertEqual(got_ring._part_shift, 26)
self.assertDevices(got_ring, reverse_builders)
# but if the composite rings are different order, the composite ring
# *will* be different. Note that the CompositeRingBuilder class will
# check builder order against the existing ring and fail if the order
# is different (actually checking the metadata). See also
# test_compose_different_builder_order
with self.assertRaises(AssertionError) as cm:
self.assertDevices(got_ring, builders)
self.assertIn("composite ring is not ordered by ring order",
cm.exception.message)
class TestCompositeRingBuilder(BaseTestCompositeBuilder):
def test_compose_with_builder_files(self):
cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json')
builders = self.create_sample_ringbuilders(2)
cb, _ = self._make_composite_builder(builders)
cb.save(cb_file)
for i, b in enumerate(builders):
self.add_dev_and_rebalance(b)
self.save_builders(builders)
cb = CompositeRingBuilder.load(cb_file)
cb.compose().save(self.output_ring)
self.check_composite_ring(self.output_ring, builders)
def test_compose_ok(self):
cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json')
builders = self.create_sample_ringbuilders(2)
# make first version of composite ring
cb, builder_files = self._make_composite_builder(builders)
# check composite builder persists ok
cb.save(cb_file)
self.assertTrue(os.path.exists(cb_file))
self.check_composite_meta(cb_file, builder_files)
# and reloads ok
cb = CompositeRingBuilder.load(cb_file)
self.assertEqual(1, cb.version)
# compose detects if no component builder changes, if we ask it to...
with self.assertRaises(ValueError) as cm:
cb.compose(require_modified=True)
self.assertIn('None of the component builders has been modified',
cm.exception.message)
self.assertEqual(1, cb.version)
# ...but by default will compose again despite no changes to components
cb.compose(force=True).save(self.output_ring)
self.check_composite_ring(self.output_ring, builders)
self.assertEqual(2, cb.version)
# check composite builder persists ok again
cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json2')
cb.save(cb_file)
self.assertTrue(os.path.exists(cb_file))
self.check_composite_meta(cb_file, builder_files, version=2)
def test_compose_modified_component_builders(self):
# check it's ok to compose again with same but modified builders
cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json')
builders = self.create_sample_ringbuilders(2)
cb, builder_files = self._make_composite_builder(builders)
ring = Ring(self.output_ring)
orig_devs = [dev for dev in ring.devs if dev]
self.assertEqual(10, len(orig_devs)) # sanity check
self.add_dev_and_rebalance(builders[1])
builder_files = self.save_builders(builders)
cb.compose().save(self.output_ring)
self.check_composite_ring(self.output_ring, builders)
ring = Ring(self.output_ring)
modified_devs = [dev for dev in ring.devs if dev]
self.assertEqual(len(orig_devs) + 1, len(modified_devs))
# check composite builder persists ok
cb.save(cb_file)
self.assertTrue(os.path.exists(cb_file))
self.check_composite_meta(cb_file, builder_files, version=2)
# and reloads ok
cb = CompositeRingBuilder.load(cb_file)
# and composes ok after reload
cb.compose(force=True).save(self.output_ring)
self.check_composite_ring(self.output_ring, builders)
# check composite builder persists ok again
cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json2')
cb.save(cb_file)
self.assertTrue(os.path.exists(cb_file))
self.check_composite_meta(cb_file, builder_files, version=3)
def test_compose_override_component_builders(self):
# check passing different builder files to the compose() method
# overrides loaded builder files
cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json')
builders = self.create_sample_ringbuilders(2)
cb, builder_files = self._make_composite_builder(builders)
# modify builders and save in different files
self.add_dev_and_rebalance(builders[1])
with self.assertRaises(ValueError):
# sanity check - originals are unchanged
cb.compose(builder_files, require_modified=True)
other_files = self.save_builders(builders, prefix='other')
cb.compose(other_files, require_modified=True).save(self.output_ring)
self.check_composite_ring(self.output_ring, builders)
# check composite builder persists ok
cb.save(cb_file)
self.assertTrue(os.path.exists(cb_file))
self.check_composite_meta(cb_file, other_files, version=2)
# and reloads ok
cb = CompositeRingBuilder.load(cb_file)
# and composes ok after reload
cb.compose(force=True).save(self.output_ring)
self.check_composite_ring(self.output_ring, builders)
# check composite builder persists ok again
cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json2')
cb.save(cb_file)
self.assertTrue(os.path.exists(cb_file))
self.check_composite_meta(cb_file, other_files, version=3)
def test_abs_paths_persisted(self):
cwd = os.getcwd()
try:
os.chdir(self.tmpdir)
builders = self.create_sample_ringbuilders(2)
builder_files = self.save_builders(builders)
rel_builder_files = [os.path.basename(bf) for bf in builder_files]
cb = CompositeRingBuilder(rel_builder_files)
cb.compose().save(self.output_ring)
self.check_composite_ring(self.output_ring, builders)
cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json')
rel_cb_file = os.path.basename(cb_file)
cb.save(rel_cb_file)
self.check_composite_meta(rel_cb_file, rel_builder_files)
finally:
os.chdir(cwd)
def test_load_errors(self):
bad_file = os.path.join(self.tmpdir, 'bad_file.json')
with self.assertRaises(IOError):
CompositeRingBuilder.load(bad_file)
def check_bad_content(content):
with open(bad_file, 'wb') as fp:
fp.write(content)
try:
with self.assertRaises(ValueError) as cm:
CompositeRingBuilder.load(bad_file)
self.assertIn(
"File does not contain valid composite ring data",
cm.exception.message)
except AssertionError as err:
raise AssertionError('With content %r: %s' % (content, err))
for content in ('', 'not json', json.dumps({}), json.dumps([])):
check_bad_content(content)
good_content = {
'components': [
{'version': 1, 'id': 'uuid_x', 'replicas': 12},
{'version': 2, 'id': 'uuid_y', 'replicas': 12}
],
'builder_files': {'uuid_x': '/path/to/file_x',
'uuid_y': '/path/to/file_y'},
'version': 99}
for missing in good_content:
bad_content = dict(good_content)
bad_content.pop(missing)
check_bad_content(json.dumps(bad_content))
def test_save_errors(self):
cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json')
def do_test(cb):
with self.assertRaises(ValueError) as cm:
cb.save(cb_file)
self.assertIn("No composed ring to save", cm.exception.message)
do_test(CompositeRingBuilder())
do_test(CompositeRingBuilder([]))
do_test(CompositeRingBuilder(['file1', 'file2']))
def test_rebalance(self):
@contextmanager
def mock_rebalance():
# captures component builder rebalance call results, yields a dict
# that maps builder -> results
calls = defaultdict(list)
orig_func = RingBuilder.rebalance
def func(builder, **kwargs):
result = orig_func(builder, **kwargs)
calls[builder].append(result)
return result
with mock.patch('swift.common.ring.RingBuilder.rebalance', func):
yield calls
def check_results():
self.assertEqual(2, len(rebalance_calls)) # 2 builders called
for calls in rebalance_calls.values():
self.assertFalse(calls[1:]) # 1 call to each builder
self.assertEqual(sorted(expected_ids),
sorted([b.id for b in rebalance_calls]))
self.assertEqual(sorted(expected_versions),
sorted([b.version for b in rebalance_calls]))
for b in rebalance_calls:
self.assertEqual(set(rebalance_calls.keys()),
set(b.parent_builder._builders))
# check the rebalanced builders were saved
written_builders = [RingBuilder.load(f) for f in builder_files]
self.assertEqual(expected_ids,
[b.id for b in written_builders])
self.assertEqual(expected_versions,
[b.version for b in written_builders])
# check returned results, should be in component order
self.assertEqual(2, len(results))
self.assertEqual(builder_files,
[r['builder_file'] for r in results])
self.assertEqual(expected_versions,
[r['builder'].version for r in results])
self.assertEqual(expected_ids, [r['builder'].id for r in results])
self.assertEqual(
[rebalance_calls[r['builder']][0] for r in results],
[r['result'] for r in results])
# N.B. the sample builders have zero min_part_hours
builders = self.create_sample_ringbuilders(2)
expected_versions = [b.version + 1 for b in builders]
expected_ids = [b.id for b in builders]
# test rebalance loads component builders
builder_files = self.save_builders(builders)
cb = CompositeRingBuilder(builder_files)
with mock_rebalance() as rebalance_calls:
results = cb.rebalance()
check_results()
# test loading builder files via load_components
# revert builder files to original builder state
builder_files = self.save_builders(builders)
cb = CompositeRingBuilder()
cb.load_components(builder_files)
with mock_rebalance() as rebalance_calls:
results = cb.rebalance()
check_results()
def test_rebalance_errors(self):
cb = CompositeRingBuilder()
with self.assertRaises(ValueError) as cm:
cb.rebalance()
self.assertIn('Two or more component builders are required',
cm.exception.message)
builders = self.create_sample_ringbuilders(2)
cb, builder_files = self._make_composite_builder(builders)
with mock.patch('swift.common.ring.RingBuilder.rebalance',
side_effect=RingBuilderError('test')):
with mock.patch('swift.common.ring.composite_builder.shuffle',
lambda x: x):
with self.assertRaises(RingBuilderError) as cm:
cb.rebalance()
self.assertIn('An error occurred while rebalancing component %s' %
builder_files[0], str(cm.exception))
self.assertIsNone(cb._builders)
with mock.patch('swift.common.ring.RingBuilder.validate',
side_effect=RingBuilderError('test')):
with mock.patch('swift.common.ring.composite_builder.shuffle',
lambda x: x):
with self.assertRaises(RingBuilderError) as cm:
cb.rebalance()
self.assertIn('An error occurred while rebalancing component %s' %
builder_files[0], str(cm.exception))
self.assertIsNone(cb._builders)
def test_rebalance_with_unrebalanced_builders(self):
# create 2 non-rebalanced rings
builders = self.create_sample_ringbuilders(rebalance=False)
# save builders
builder_files = self.save_builders(builders)
cb = CompositeRingBuilder(builder_files)
# sanity, it is impossible to compose un-rebalanced component rings
with self.assertRaises(ValueError) as cm:
cb.compose()
self.assertIn("Builder needs rebalance", cm.exception.message)
# but ok to compose after rebalance
cb.rebalance()
rd = cb.compose()
rd.save(self.output_ring)
rebalanced_builders = [RingBuilder.load(f) for f in builder_files]
self.check_composite_ring(self.output_ring, rebalanced_builders)
class TestLoadComponents(BaseTestCompositeBuilder):
# Tests for the loading of component builders.
def _call_method_under_test(self, cb, *args, **kwargs):
# Component builder loading is triggered by the load_components method
# and the compose method. This method provides a hook for subclasses to
# configure a different method to repeat the component loading tests.
cb.load_components(*args, **kwargs)
def test_load_components(self):
builders = self.create_sample_ringbuilders(2)
builder_files = self.save_builders(builders)
cb = CompositeRingBuilder(builder_files)
# check lazy loading
self.assertEqual(builder_files, cb._builder_files)
self.assertFalse(cb._builders) # none loaded yet
# check loading configured files
self._call_method_under_test(cb)
self.assertEqual(builder_files, cb._builder_files)
for i, builder in enumerate(cb._builders):
self.assertEqual(builders[i].id, builder.id)
self.assertEqual(builders[i].devs, builder.devs)
# modify builders and save in different files
self.add_dev_and_rebalance(builders[0])
other_files = self.save_builders(builders, prefix='other')
# reload from other files
self._call_method_under_test(cb, other_files)
self.assertEqual(other_files, cb._builder_files)
for i, builder in enumerate(cb._builders):
self.assertEqual(builders[i].id, builder.id)
self.assertEqual(builders[i].devs, builder.devs)
# modify builders again and save in same files
self.add_dev_and_rebalance(builders[1])
self.save_builders(builders, prefix='other')
# reload from same files
self._call_method_under_test(cb)
self.assertEqual(other_files, cb._builder_files)
for i, builder in enumerate(cb._builders):
self.assertEqual(builders[i].id, builder.id)
self.assertEqual(builders[i].devs, builder.devs)
def test_load_components_insufficient_builders(self):
def do_test(builder_files, force):
cb = CompositeRingBuilder(builder_files)
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb, builder_files,
force=force)
self.assertIn('Two or more component builders are required',
cm.exception.message)
cb = CompositeRingBuilder()
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb, builder_files,
force=force)
self.assertIn('Two or more component builders are required',
cm.exception.message)
builders = self.create_sample_ringbuilders(3)
builder_files = self.save_builders(builders)
do_test([], force=False)
do_test([], force=True) # this error is never ignored
do_test(builder_files[:1], force=False)
do_test(builder_files[:1], force=True) # this error is never ignored
def test_load_components_missing_builder_id(self):
def check_missing_id(cb, builders):
# not ok to load builder_files that have no id assigned
orig_version = cb.version
no_id = random.randint(0, len(builders) - 1)
# rewrite the builder files so that one has missing id
builder_files = self.save_builders(builders, missing_ids=[no_id])
def do_check(force):
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb, builder_files,
force=force)
error_lines = cm.exception.message.split('\n')
self.assertIn("Problem with builder at index %s" % no_id,
error_lines[0])
self.assertIn("id attribute has not been initialised",
error_lines[0])
self.assertFalse(error_lines[1:])
self.assertEqual(orig_version, cb.version)
do_check(False)
do_check(True) # we never ignore this error
# check with compose not previously called, cb has no existing metadata
builders = self.create_sample_ringbuilders(3)
cb = CompositeRingBuilder()
check_missing_id(cb, builders)
# now save good copies of builders and compose so this cb has
# existing component metadata
builder_files = self.save_builders(builders)
cb = CompositeRingBuilder(builder_files)
cb.compose() # cb now has component metadata
check_missing_id(cb, builders)
def test_load_components_duplicate_builder_ids(self):
builders = self.create_sample_ringbuilders(3)
builders[2]._id = builders[0]._id
cb = CompositeRingBuilder(self.save_builders(builders))
def do_check(force):
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb, force=force)
error_lines = cm.exception.message.split('\n')
self.assertIn("Builder id %r used at indexes 0, 2" %
builders[0].id, error_lines[0])
self.assertFalse(error_lines[1:])
self.assertEqual(0, cb.version)
do_check(False)
do_check(True)
def test_load_components_unchanged_builders(self):
def do_test(cb, builder_files, **kwargs):
orig_version = cb.version
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb, builder_files, **kwargs)
error_lines = cm.exception.message.split('\n')
self.assertIn("None of the component builders has been modified",
error_lines[0])
self.assertFalse(error_lines[1:])
self.assertEqual(orig_version, cb.version)
builders = self.create_sample_ringbuilders(2)
cb, builder_files = self._make_composite_builder(builders)
# ok to load same *unchanged* builders
self._call_method_under_test(cb, builder_files)
# unless require_modified is set
do_test(cb, builder_files, require_modified=True)
# even if we rewrite the files
builder_files = self.save_builders(builders)
do_test(cb, builder_files, require_modified=True)
# even if we rename the files
builder_files = self.save_builders(builders, prefix='other')
do_test(cb, builder_files, require_modified=True)
# force trumps require_modified
self._call_method_under_test(cb, builder_files, force=True,
require_modified=True)
def test_load_components_older_builder(self):
# make first version of composite ring
builders = self.create_sample_ringbuilders(2)
cb, builder_files = self._make_composite_builder(builders)
old_builders = [copy.deepcopy(b) for b in builders]
# update components and reload
for i, b in enumerate(builders):
self.add_dev_and_rebalance(b)
self.assertLess(old_builders[i].version, b.version)
self.save_builders(builders)
self._call_method_under_test(cb)
orig_version = cb.version
cb.compose() # compose with newer builder versions
self.assertEqual(orig_version + 1, cb.version) # sanity check
# not ok to use old versions of same builders
self.save_builders([old_builders[0], builders[1]])
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb)
error_lines = cm.exception.message.split('\n')
self.assertIn("Invalid builder change at index 0", error_lines[0])
self.assertIn("Older builder version", error_lines[0])
self.assertFalse(error_lines[1:])
self.assertEqual(orig_version + 1, cb.version)
# not even if one component ring has changed
self.add_dev_and_rebalance(builders[1])
self.save_builders([old_builders[0], builders[1]])
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb)
error_lines = cm.exception.message.split('\n')
self.assertIn("Invalid builder change at index 0", error_lines[0])
self.assertIn("Older builder version", error_lines[0])
self.assertFalse(error_lines[1:])
self.assertEqual(orig_version + 1, cb.version)
self.assertIsNone(cb._builders)
# unless we ignore errors
self._call_method_under_test(cb, force=True)
self.assertEqual(old_builders[0].version, cb._builders[0].version)
def test_load_components_different_number_builders(self):
# not ok to use a different number of component rings
builders = self.create_sample_ringbuilders(4)
def do_test(bad_builders):
cb, builder_files = self._make_composite_builder(builders[:3])
# expect an error
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(
cb, self.save_builders(bad_builders))
error_lines = cm.exception.message.split('\n')
self.assertFalse(error_lines[1:])
self.assertEqual(1, cb.version)
# unless we ignore errors
self._call_method_under_test(cb, self.save_builders(bad_builders),
force=True)
self.assertEqual(len(bad_builders), len(cb._builders))
return error_lines
error_lines = do_test(builders[:2]) # too few
self.assertIn("Missing builder at index 2", error_lines[0])
error_lines = do_test(builders) # too many
self.assertIn("Unexpected extra builder at index 3", error_lines[0])
def test_load_components_different_builders(self):
# not ok to change component rings
builders = self.create_sample_ringbuilders(3)
cb, builder_files = self._make_composite_builder(builders[:2])
# ensure builder[0] is newer version so that's not the problem
self.add_dev_and_rebalance(builders[0])
different_files = self.save_builders([builders[0], builders[2]])
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb, different_files)
error_lines = cm.exception.message.split('\n')
self.assertIn("Invalid builder change at index 1", error_lines[0])
self.assertIn("Attribute mismatch for id", error_lines[0])
self.assertFalse(error_lines[1:])
self.assertEqual(1, cb.version)
# ok if we force
self._call_method_under_test(cb, different_files, force=True)
self.assertEqual(different_files, cb._builder_files)
def test_load_component_different_builder_order(self):
# not ok to change order of component rings
builders = self.create_sample_ringbuilders(4)
cb, builder_files = self._make_composite_builder(builders)
builder_files.reverse()
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb, builder_files)
error_lines = cm.exception.message.split('\n')
for i, line in enumerate(error_lines):
self.assertIn("Invalid builder change at index %s" % i, line)
self.assertIn("Attribute mismatch for id", line)
self.assertEqual(1, cb.version)
# ok if we force
self._call_method_under_test(cb, builder_files, force=True)
self.assertEqual(builder_files, cb._builder_files)
def test_load_components_replica_count_changed(self):
# not ok to change the number of replicas in a ring
builders = self.create_sample_ringbuilders(3)
cb, builder_files = self._make_composite_builder(builders)
builders[0].set_replicas(4)
self.save_builders(builders)
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb)
error_lines = cm.exception.message.split('\n')
for i, line in enumerate(error_lines):
self.assertIn("Invalid builder change at index 0", line)
self.assertIn("Attribute mismatch for replicas", line)
self.assertEqual(1, cb.version)
# ok if we force
self._call_method_under_test(cb, force=True)
class TestComposeLoadComponents(TestLoadComponents):
def _call_method_under_test(self, cb, *args, **kwargs):
cb.compose(*args, **kwargs)
def test_load_components_replica_count_changed(self):
        # For the compose method this test differs from the superclass when
        # the force flag is used: although the force flag causes
        # load_components to skip checks, the actual ring composition fails.
# not ok to change the number of replicas in a ring
builders = self.create_sample_ringbuilders(3)
cb, builder_files = self._make_composite_builder(builders)
builders[0].set_replicas(4)
self.save_builders(builders)
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb)
error_lines = cm.exception.message.split('\n')
for i, line in enumerate(error_lines):
self.assertIn("Invalid builder change at index 0", line)
self.assertIn("Attribute mismatch for replicas", line)
self.assertEqual(1, cb.version)
# if we force, then load_components succeeds but the compose pre
# validate will fail because the builder needs rebalancing
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb, force=True)
error_lines = cm.exception.message.split('\n')
self.assertIn("Problem with builders", error_lines[0])
self.assertIn("Builder needs rebalance", error_lines[1])
self.assertFalse(error_lines[2:])
self.assertEqual(1, cb.version)
class TestCooperativeRingBuilder(BaseTestCompositeBuilder):
def _make_coop_builder(self, region, composite_builder, rebalance=False,
min_part_hours=1):
rb = CooperativeRingBuilder(8, 3, min_part_hours, composite_builder)
if composite_builder._builders is None:
composite_builder._builders = [rb]
for i in range(3):
self.add_dev(rb, region=region)
if rebalance:
rb.rebalance()
self.assertEqual(self._partition_counts(rb),
{0: 256, 1: 256, 2: 256}) # sanity check
return rb
def _partition_counts(self, builder):
"""
        Returns a dictionary mapping each device id to the number of
        partitions assigned to that device.
"""
return Counter(builder.devs[dev_id]['id']
for part2dev_id in builder._replica2part2dev
for dev_id in part2dev_id)
def get_moved_parts(self, after, before):
def uniqueness(dev):
return dev['ip'], dev['port'], dev['device']
moved_parts = set()
for p in range(before.parts):
if ({uniqueness(dev) for dev in before._devs_for_part(p)} !=
{uniqueness(dev) for dev in after._devs_for_part(p)}):
moved_parts.add(p)
return moved_parts
def num_parts_can_move(self, builder):
# note that can_part_move() gives consideration to the
# _part_moved_bitmap which is only reset when a rebalance starts
return len(
[p for p in range(builder.parts)
if super(CooperativeRingBuilder, builder)._can_part_move(p)])
@mock.patch('swift.common.ring.builder.time')
def _check_rebalance_respects_cobuilder_part_moves(
self, min_part_hours, mock_time):
mock_time.return_value = now = int(time.time())
builder_files = []
cb = CompositeRingBuilder()
for i in (1, 2, 3):
b = self._make_coop_builder(i, cb, min_part_hours=min_part_hours)
fname = os.path.join(self.tmpdir, 'builder_%s.builder' % i)
b.save(fname)
builder_files.append(fname)
builder_files, builders = cb.load_components(builder_files)
# all cobuilders can perform initial rebalance
cb.rebalance()
exp = {0: 256, 1: 256, 2: 256}
self.assertEqual(exp, self._partition_counts(builders[0]))
self.assertEqual(exp, self._partition_counts(builders[1]))
self.assertEqual(exp, self._partition_counts(builders[2]))
exp = min_part_hours * 3600
self.assertEqual(exp, builders[0].min_part_seconds_left)
self.assertEqual(exp, builders[1].min_part_seconds_left)
self.assertEqual(exp, builders[2].min_part_seconds_left)
# jump forwards min_part_hours
now += min_part_hours * 3600
mock_time.return_value = now
old_builders = []
for builder in builders:
old_builder = CooperativeRingBuilder(8, 3, min_part_hours, None)
old_builder.copy_from(copy.deepcopy(builder.to_dict()))
old_builders.append(old_builder)
for builder in builders:
self.add_dev(builder)
# sanity checks: all builders are ready for rebalance
self.assertEqual(0, builders[0].min_part_seconds_left)
self.assertEqual(0, builders[1].min_part_seconds_left)
self.assertEqual(0, builders[2].min_part_seconds_left)
# ... but last_part_moves not yet updated to current epoch
if min_part_hours > 0:
self.assertEqual(0, self.num_parts_can_move(builders[0]))
self.assertEqual(0, self.num_parts_can_move(builders[1]))
self.assertEqual(0, self.num_parts_can_move(builders[2]))
with mock.patch('swift.common.ring.composite_builder.shuffle',
lambda x: x):
cb.rebalance()
rb1_parts_moved = self.get_moved_parts(builders[0], old_builders[0])
self.assertEqual(192, len(rb1_parts_moved))
self.assertEqual(self._partition_counts(builders[0]),
{0: 192, 1: 192, 2: 192, 3: 192})
rb2_parts_moved = self.get_moved_parts(builders[1], old_builders[1])
self.assertEqual(64, len(rb2_parts_moved))
counts = self._partition_counts(builders[1])
self.assertEqual(counts[3], 64)
self.assertEqual([234, 235, 235], sorted(counts.values()[:3]))
self.assertFalse(rb2_parts_moved.intersection(rb1_parts_moved))
# rb3 can't rebalance - all parts moved while rebalancing rb1 and rb2
self.assertEqual(
0, len(self.get_moved_parts(builders[2], old_builders[2])))
# jump forwards min_part_hours, all builders can move all parts again,
# so now rb2 should be able to further rebalance
now += min_part_hours * 3600
mock_time.return_value = now
old_builders = []
for builder in builders:
old_builder = CooperativeRingBuilder(8, 3, min_part_hours, None)
old_builder.copy_from(copy.deepcopy(builder.to_dict()))
old_builders.append(old_builder)
with mock.patch('swift.common.ring.composite_builder.shuffle',
lambda x: x):
cb.rebalance()
rb2_parts_moved = self.get_moved_parts(builders[1], old_builders[1])
self.assertGreater(len(rb2_parts_moved), 64)
self.assertGreater(self._partition_counts(builders[1])[3], 64)
self.assertLess(self.num_parts_can_move(builders[2]), 256)
self.assertEqual(256, self.num_parts_can_move(builders[0]))
# and rb3 should also have been able to move some parts
rb3_parts_moved = self.get_moved_parts(builders[2], old_builders[2])
self.assertGreater(len(rb3_parts_moved), 0)
self.assertFalse(rb3_parts_moved.intersection(rb2_parts_moved))
        # but cobuilders won't prevent a new rb rebalancing for the first time
rb4 = self._make_coop_builder(4, cb, rebalance=False,
min_part_hours=min_part_hours)
builders.append(rb4)
builder_files = []
for i, builder in enumerate(builders):
fname = os.path.join(self.tmpdir, 'builder_%s.builder' % i)
builder.save(fname)
builder_files.append(fname)
cb = CompositeRingBuilder()
builder_files, builders = cb.load_components(builder_files)
cb.rebalance()
self.assertEqual(256, len(self.get_moved_parts(builders[3], rb4)))
def test_rebalance_respects_cobuilder_part_moves(self):
self._check_rebalance_respects_cobuilder_part_moves(1)
self._check_rebalance_respects_cobuilder_part_moves(0)
@mock.patch('swift.common.ring.builder.time')
def _check_rebalance_cobuilder_states(
self, min_part_hours, mock_time):
@contextmanager
def mock_rebalance():
# wrap rebalance() in order to capture builder states before and
# after each component rebalance
orig_rebalance = RingBuilder.rebalance
# a dict mapping builder -> (list of captured builder states)
captured_builder_states = defaultdict(list)
def update_states():
for b in cb._builders:
rb = CooperativeRingBuilder(8, 3, min_part_hours, None)
rb.copy_from(copy.deepcopy(b.to_dict()))
rb._part_moved_bitmap = bytearray(b._part_moved_bitmap)
captured_builder_states[b].append(rb)
def wrap_rebalance(builder_instance):
update_states()
results = orig_rebalance(builder_instance)
update_states()
return results
with mock.patch('swift.common.ring.RingBuilder.rebalance',
wrap_rebalance):
yield captured_builder_states
mock_time.return_value = now = int(time.time())
builder_files = []
cb = CompositeRingBuilder()
for i in (1, 2, 3):
b = self._make_coop_builder(i, cb, min_part_hours=min_part_hours)
fname = os.path.join(self.tmpdir, 'builder_%s.builder' % i)
b.save(fname)
builder_files.append(fname)
builder_files, builders = cb.load_components(builder_files)
# all cobuilders can perform initial rebalance
cb.rebalance()
# jump forwards min_part_hours
now += min_part_hours * 3600
mock_time.return_value = now
for builder in builders:
self.add_dev(builder)
with mock.patch('swift.common.ring.composite_builder.shuffle',
lambda x: x):
with mock_rebalance() as captured_states:
cb.rebalance()
# sanity - state captured before and after each component rebalance
self.assertEqual(len(builders), len(captured_states))
for states in captured_states.values():
self.assertEqual(2 * len(builders), len(states))
        # for each component we have a list of its builder states
rb1s = captured_states[builders[0]]
rb2s = captured_states[builders[1]]
rb3s = captured_states[builders[2]]
# rebalancing will update epoch for all builders' last_part_moves
self.assertEqual(now, rb1s[0]._last_part_moves_epoch)
self.assertEqual(now, rb2s[0]._last_part_moves_epoch)
self.assertEqual(now, rb3s[0]._last_part_moves_epoch)
# so, in state before any component rebalance, all can now move parts
        # N.B. num_parts_can_move calls the superclass's (i.e. RingBuilder's)
        # _can_part_move so that it does not consult cobuilder state.
self.assertEqual(256, self.num_parts_can_move(rb1s[0]))
self.assertEqual(256, self.num_parts_can_move(rb2s[0]))
self.assertEqual(256, self.num_parts_can_move(rb3s[0]))
# after first component has been rebalanced it has moved parts
self.assertEqual(64, self.num_parts_can_move(rb1s[1]))
self.assertEqual(256, self.num_parts_can_move(rb2s[2]))
self.assertEqual(256, self.num_parts_can_move(rb3s[2]))
rb1_parts_moved = self.get_moved_parts(rb1s[1], rb1s[0])
self.assertEqual(192, len(rb1_parts_moved))
self.assertEqual(self._partition_counts(rb1s[1]),
{0: 192, 1: 192, 2: 192, 3: 192})
# rebalancing rb2 - rb2 in isolation could potentially move all parts
# so would move 192 parts to new device, but it is constrained by rb1
# only having 64 parts that can move
rb2_parts_moved = self.get_moved_parts(rb2s[3], rb2s[2])
self.assertEqual(64, len(rb2_parts_moved))
counts = self._partition_counts(rb2s[3])
self.assertEqual(counts[3], 64)
self.assertEqual([234, 235, 235], sorted(counts.values()[:3]))
self.assertFalse(rb2_parts_moved.intersection(rb1_parts_moved))
self.assertEqual(192, self.num_parts_can_move(rb2s[3]))
self.assertEqual(64, self.num_parts_can_move(rb1s[3]))
# rb3 can't rebalance - all parts moved while rebalancing rb1 and rb2
self.assertEqual(0, len(self.get_moved_parts(rb3s[5], rb3s[0])))
def test_rebalance_cobuilder_states(self):
self._check_rebalance_cobuilder_states(1)
self._check_rebalance_cobuilder_states(0)
def _check_rebalance_cobuilders_calls(self, min_part_hours):
# verify that co-builder methods are called during one builder's
# rebalance
@contextmanager
def mock_update_last_part_moves():
            # intercept calls to RingBuilder._update_last_part_moves (yes, the
            # superclass method) and record each builder instance on which the
            # method was called
calls = []
orig_func = RingBuilder._update_last_part_moves
def fake_update(builder):
calls.append(builder)
return orig_func(builder)
with mock.patch(
'swift.common.ring.RingBuilder._update_last_part_moves',
fake_update):
yield calls
@contextmanager
def mock_can_part_move():
            # intercept calls to RingBuilder._can_part_move (yes, the
            # superclass method) and populate a dict mapping each builder
            # instance to the list of parts it was called with
calls = defaultdict(list)
orig_func = RingBuilder._can_part_move
def fake_can_part_move(builder, part):
calls[builder].append(part)
return orig_func(builder, part)
with mock.patch('swift.common.ring.RingBuilder._can_part_move',
fake_can_part_move):
yield calls
cb = CompositeRingBuilder()
rb1 = self._make_coop_builder(1, cb, min_part_hours=min_part_hours)
rb2 = self._make_coop_builder(2, cb, min_part_hours=min_part_hours)
cb._builders = [rb1, rb2]
# composite rebalance updates last_part_moves before any component
# rebalance - after that expect no more updates
with mock_update_last_part_moves() as update_calls:
cb.update_last_part_moves()
self.assertEqual(sorted([rb1, rb2]), sorted(update_calls))
with mock_update_last_part_moves() as update_calls:
with mock_can_part_move() as can_part_move_calls:
rb2.rebalance()
self.assertFalse(update_calls)
# rb1 has never been rebalanced so no calls propagate from its
# can_part_move method to its superclass _can_part_move method
self.assertEqual([rb2], can_part_move_calls.keys())
with mock_update_last_part_moves() as update_calls:
with mock_can_part_move() as can_part_move_calls:
rb1.rebalance()
self.assertFalse(update_calls)
# rb1 is being rebalanced so gets checked, and rb2 also gets checked
self.assertEqual(sorted([rb1, rb2]), sorted(can_part_move_calls))
self.assertEqual(768, len(can_part_move_calls[rb1]))
self.assertEqual(768, len(can_part_move_calls[rb2]))
def test_rebalance_cobuilders_calls(self):
self._check_rebalance_cobuilders_calls(1)
self._check_rebalance_cobuilders_calls(0)
def test_save_then_load(self):
cb = CompositeRingBuilder()
coop_rb = self._make_coop_builder(1, cb, rebalance=True)
builder_file = os.path.join(self.tmpdir, 'test.builder')
coop_rb.save(builder_file)
cb = CompositeRingBuilder()
loaded_coop_rb = CooperativeRingBuilder.load(builder_file,
parent_builder=cb)
self.assertIs(cb, loaded_coop_rb.parent_builder)
self.assertEqual(coop_rb.to_dict(), loaded_coop_rb.to_dict())
# check can be loaded as superclass
loaded_rb = RingBuilder.load(builder_file)
self.assertEqual(coop_rb.to_dict(), loaded_rb.to_dict())
# check can load a saved superclass
rb = RingBuilder(6, 3, 0)
for _ in range(3):
self.add_dev(rb, region=1)
rb.save(builder_file)
cb = CompositeRingBuilder()
loaded_coop_rb = CooperativeRingBuilder.load(builder_file,
parent_builder=cb)
self.assertIs(cb, loaded_coop_rb.parent_builder)
self.assertEqual(rb.to_dict(), loaded_coop_rb.to_dict())
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "272d01741d3b5fe4bc0bd804dce8e387",
"timestamp": "",
"source": "github",
"line_count": 1295,
"max_line_length": 79,
"avg_line_length": 44.186100386100385,
"alnum_prop": 0.610545079603642,
"repo_name": "nadeemsyed/swift",
"id": "9e952611f550c50d4628ecca8d8f6745efbc340f",
"size": "57815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/common/ring/test_composite_builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "248"
},
{
"name": "PHP",
"bytes": "377"
},
{
"name": "Python",
"bytes": "8547418"
},
{
"name": "Shell",
"bytes": "1804"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from builtins import object
import logging
class BaseConfig(object):
""" Base class for all configuration objects """
defaults = {}
search_path = ['config']
def __init__(self, config=None):
if config is None:
self.config = {}
else:
self.config = config
self._init_logger()
self._load_config()
def _init_logger(self):
""" Initializes self.logger """
self.logger = logging.getLogger(__name__)
def _load_config(self):
""" Performs the logic to initialize self.config """
pass
def __getattr__(self, name):
tree = name.split('__')
if name.startswith('_'):
raise AttributeError('Attribute {} not found'.format(name))
value = None
value_found = False
for attr in self.search_path:
config = getattr(self, attr)
if len(tree) > 1:
# Walk through the config dictionary using __ as a delimiter
for key in tree[:-1]:
config = config.get(key)
if config is None:
break
if config is None:
continue
if tree[-1] in config:
value = config[tree[-1]]
value_found = True
break
if value_found:
return value
else:
return self.defaults.get(name)
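

# A minimal usage sketch (illustration only, not part of the original module).
# It assumes a hypothetical subclass whose _load_config fills self.config, and
# shows how the double-underscore attribute syntax walks nested dictionaries,
# falling back to the class-level defaults when a key is missing.
if __name__ == '__main__':
    class DemoConfig(BaseConfig):
        defaults = {'services__api__timeout': 30}

        def _load_config(self):
            self.config = {
                'services': {'api': {'url': 'https://example.invalid'}}
            }

    cfg = DemoConfig()
    print(cfg.services__api__url)      # -> 'https://example.invalid'
    print(cfg.services__api__timeout)  # -> 30 (from defaults)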
|
{
"content_hash": "23b38758e8c209d4d4ae7f9842bc044b",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 76,
"avg_line_length": 28.037735849056602,
"alnum_prop": 0.5114401076716016,
"repo_name": "e02d96ec16/CumulusCI",
"id": "ca63037c8c1c32226082ea7cb61f58a799320f07",
"size": "1486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cumulusci/core/config/BaseConfig.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2303"
},
{
"name": "Python",
"bytes": "641697"
},
{
"name": "RobotFramework",
"bytes": "9270"
},
{
"name": "Shell",
"bytes": "5555"
}
],
"symlink_target": ""
}
|
import numpy as np

a = 2.  # distribution parameter
s = np.random.zipf(a, 1000)
# Display the histogram of the samples, along with
# the probability density function:
import matplotlib.pyplot as plt
import scipy.special as sps
# Truncate s values at 50 so plot is interesting
count, bins, ignored = plt.hist(s[s<50], 50, normed=True)
x = np.arange(1., 50.)
y = x**(-a)/sps.zetac(a)
plt.plot(x, y/max(y), linewidth=2, color='r')
plt.show()
|
{
"content_hash": "6850dbfbb906b3faea41200673ed83c2",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 57,
"avg_line_length": 29.142857142857142,
"alnum_prop": 0.6985294117647058,
"repo_name": "leesavide/pythonista-docs",
"id": "bd6c5db2e433d86fc87ac8094b1c165f6b63131f",
"size": "447",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Documentation/numpy/reference/generated/numpy-random-RandomState-zipf-1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "84392"
},
{
"name": "HTML",
"bytes": "70040156"
},
{
"name": "JavaScript",
"bytes": "89777"
},
{
"name": "Python",
"bytes": "884325"
}
],
"symlink_target": ""
}
|
import argparse
import os
from six.moves import configparser
import sys
from launchpadlib import launchpad
from gerrit_dash_creator.cmd import creator
CACHE_DIR = os.path.expanduser('~/.cache/launchpadlib/')
SERVICE_ROOT = 'production'
def print_dash_url(projects, bugs):
config = configparser.ConfigParser()
config.add_section('dashboard')
    config.set('dashboard', 'title',
               'Prioritized Bug Fix Dashboard')
config.set('dashboard', 'description', 'Bug Fix Inbox')
proj_q = ['project:openstack/%s' % proj for proj in projects]
config.set('dashboard', 'foreach',
'(%s) status:open ' % ' OR '.join(proj_q))
for label in bugs:
for prio in bugs[label]:
if len(bugs[label][prio]) == 0:
continue
sect = 'section "%s Importance %s"' % (label, prio)
config.add_section(sect)
config.set(sect, 'query',
' OR '.join(['change:%s' % bug
for bug in bugs[label][prio]]))
print(creator.generate_dashboard_url(config))
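

# Illustration (an assumed example, not produced by the script itself): with
# projects=['nova'] and bugs={'Milestone:m1': {'High': ['12345', '67890']}},
# the config assembled above corresponds to an INI document roughly like
#
#   [dashboard]
#   title = Prioritized Bug Fix Dashboard
#   description = Bug Fix Inbox
#   foreach = (project:openstack/nova) status:open
#
#   [section "Milestone:m1 Importance High"]
#   query = change:12345 OR change:67890
#
# which creator.generate_dashboard_url encodes as a Gerrit dashboard URL.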
def pretty_milestone(milestone_url):
if milestone_url is None:
return 'all'
# https://api.launchpad.net/1.0/heat/+milestone/next:
return str(milestone_url).split('/')[-1]
def review_id_from_bug(bug):
reviews = set()
reviews_merged = set()
    for msg in bug.bug.messages:
        # 'ix proposed' / 'ix merged to' match both capitalisations of "Fix"
        if 'ix proposed' in msg.subject:
            for line in str(msg.content).split('\n'):
                if 'Review' in line:
                    reviews.add(line.split('/')[-1])
        if 'ix merged to' in msg.subject:
for line in str(msg.content).split('\n'):
if 'Reviewed: ' in line:
for rev in reviews:
if rev in line:
reviews_merged.add(rev)
return (reviews - reviews_merged)
def get_options():
"""Parse command line arguments and options."""
parser = argparse.ArgumentParser(
        description='Create a Gerrit dashboard URL from launchpad '
                    '"In Progress" bugs')
parser.add_argument('projects', nargs='+',
metavar='projects',
help='Launchpad Projects')
parser.add_argument('--milestone', default=None,
help='Project Milestone')
parser.add_argument('--tag', default=None,
help='Project Tag')
return parser.parse_args()
def process_project(lp, opts, project_name, bugs):
project = lp.projects[project_name]
review_bugtasks = project.searchTasks(status=['In Progress'])
for bug in review_bugtasks:
importance = bug.importance
milestone = pretty_milestone(bug.milestone)
tags = bug.bug.tags
label = None
if opts.tag is not None:
if opts.tag in tags:
label = 'Tag:%s' % opts.tag
if opts.milestone is not None:
if milestone == opts.milestone:
label = 'Milestone:%s' % milestone
if label is None:
continue
if label not in bugs:
bugs[label] = {}
if importance not in bugs[label]:
bugs[label][importance] = []
for rev_no in review_id_from_bug(bug):
bugs[label][importance].append(rev_no)
print('[%s] %s -> %s' % (importance,
bug, rev_no))
def main():
"""Entrypoint."""
opts = get_options()
lpad = launchpad.Launchpad.login_anonymously(sys.argv[0],
SERVICE_ROOT,
CACHE_DIR)
bugs = {}
for proj in opts.projects:
process_project(lpad, opts, proj, bugs)
print('')
print_dash_url(opts.projects, bugs)
return 0
if __name__ == '__main__':
sys.exit(main())
|
{
"content_hash": "306e6bec2f6f347bf40a852a19df6361",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 67,
"avg_line_length": 30.763779527559056,
"alnum_prop": 0.5446634246224725,
"repo_name": "asalkeld/review-bugs",
"id": "6e43cbdec3364cdcc36ee2054e356bfe460a2bab",
"size": "4477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "review-bugs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6833"
}
],
"symlink_target": ""
}
|
class Solution:
def guessNumber(self, n: int) -> int:
if n < 1: return 0 # unexpected
left, right = 1, n
while left <= right:
mid = (left + right) // 2 # mid number
is_picked = guess(mid)
if is_picked == 0:
return mid
elif is_picked == -1:
right = mid - 1
elif is_picked == 1:
left = mid + 1
else:
break # unexpected
return 0 # unexpected
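

# Local test harness (not part of a LeetCode submission): the judge normally
# supplies guess(); this stub assumes a fixed picked number so the solution
# can be exercised standalone.
if __name__ == '__main__':
    _picked = 42

    def guess(num):
        # LeetCode contract: -1 if picked < num, 1 if picked > num, 0 if equal
        if num > _picked:
            return -1
        if num < _picked:
            return 1
        return 0

    print(Solution().guessNumber(100))  # expected: 42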
|
{
"content_hash": "8ecf2e874dbcf27f50659afd221b52b7",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 50,
"avg_line_length": 32.375,
"alnum_prop": 0.4266409266409266,
"repo_name": "tanchao/algo",
"id": "dad32d37a41b551828b468371268ca5a6e5fbcd8",
"size": "698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leetcode/py/374_guess_number.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "73906"
},
{
"name": "Python",
"bytes": "81289"
}
],
"symlink_target": ""
}
|
"""
Tests the fugato app
"""
##########################################################################
## Imports
##########################################################################
from unittest import skip
from fugato.models import *
from voting.models import *
from stream.signals import stream
from stream.models import StreamItem
from django.test import TestCase, Client
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.test import APIClient
from urllib.parse import urlsplit
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
##########################################################################
## Fixtures
##########################################################################
fixtures = {
'user': {
'username': 'jdoe',
'first_name': 'John',
'last_name': 'Doe',
'email': 'jdoe@example.com',
'password': 'supersecret',
},
'voter' : {
'username': 'bobbyd',
'first_name': 'Bob',
'last_name': 'Dylan',
'email': 'bobby@example.com',
'password': 'dontguessthis',
},
'question': {
'text': 'Why did the chicken cross the road?',
'author': None
},
'answer': {
'question': None,
'author': None,
'text': 'To get to the other side.',
}
}
##########################################################################
## Fugato models tests
##########################################################################
class QuestionModelTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(**fixtures['user'])
fixtures['question']['author'] = self.user
def test_question_ask_send_stream(self):
"""
Assert that when a question is created it sends the "ask" stream signal
"""
handler = MagicMock()
stream.connect(handler)
question = Question.objects.create(**fixtures['question'])
# Ensure that the signal was sent once with required arguments
handler.assert_called_once_with(verb='ask', sender=Question,
timestamp=question.created, actor=self.user,
target=question, signal=stream)
def test_question_asked_activity(self):
"""
Assert that when a question is asked, there is an activity stream item
"""
question = Question.objects.create(**fixtures['question'])
target_content_type = ContentType.objects.get_for_model(question)
target_object_id = question.id
query = StreamItem.objects.filter(verb='ask', actor=self.user,
target_content_type=target_content_type, target_object_id=target_object_id)
self.assertEqual(query.count(), 1, "no stream item created!")
class AnswerModelTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(**fixtures['user'])
fixtures['question']['author'] = self.user
fixtures['answer']['author'] = self.user
self.question = Question.objects.create(**fixtures['question'])
fixtures['answer']['question'] = self.question
def test_question_answer_send_stream(self):
"""
Assert that when an Answer is created it sends the "answer" stream signal
"""
handler = MagicMock()
stream.connect(handler)
answer = Answer.objects.create(**fixtures['answer'])
# Ensure that the signal was sent once with required arguments
handler.assert_called_once_with(verb='answer', sender=Answer,
timestamp=answer.created, actor=self.user, target=answer,
signal=stream)
def test_question_answered_activity(self):
"""
Assert that when a question is answered, there is an activity stream item
"""
answer = Answer.objects.create(**fixtures['answer'])
target_content_type = ContentType.objects.get_for_model(answer)
target_object_id = answer.id
query = {
'verb': 'answer',
'actor': self.user,
'target_content_type': target_content_type,
'target_object_id': target_object_id,
}
query = StreamItem.objects.filter(**query)
self.assertEqual(query.count(), 1, "no stream item created!")
##########################################################################
## Fugato API Views tests
##########################################################################
class QuestionAPIViewSetTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(**fixtures['user'])
fixtures['question']['author'] = self.user
self.client = APIClient()
def login(self):
credentials = {
'username': fixtures['user']['username'],
'password': fixtures['user']['password'],
}
return self.client.login(**credentials)
def logout(self):
        return self.client.logout()
def test_question_list_auth(self):
"""
Assert GET /api/question/ returns 403 when not logged in
"""
endpoint = reverse('api:question-list')
response = self.client.get(endpoint)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_question_create_auth(self):
"""
Assert POST /api/question/ returns 403 when not logged in
"""
endpoint = reverse('api:question-list')
response = self.client.post(endpoint, {'text': 'Where are my keys?'}, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_question_retrieve_auth(self):
"""
Assert GET /api/question/:id/ returns 403 when not logged in
"""
question = Question.objects.create(**fixtures['question'])
endpoint = question.get_api_detail_url()
response = self.client.get(endpoint)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_question_update_auth(self):
"""
Assert PUT /api/question/:id/ returns 403 when not logged in
"""
question = Question.objects.create(**fixtures['question'])
endpoint = question.get_api_detail_url()
response = self.client.put(endpoint, {'text': 'Why did the bear cross the road?'}, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_question_delete_auth(self):
"""
Assert DELETE /api/question/:id/ returns 403 when not logged in
"""
question = Question.objects.create(**fixtures['question'])
endpoint = question.get_api_detail_url()
response = self.client.delete(endpoint)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_question_vote_post_auth(self):
"""
Assert POST /api/question/:id/vote returns 403 when not logged in
"""
question = Question.objects.create(**fixtures['question'])
endpoint = question.get_api_detail_url() + "vote/"
response = self.client.post(endpoint, {'vote': 1}, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_question_answers_list_auth(self):
"""
Assert GET /api/question/:id/answers returns 403 when not logged in
"""
question = Question.objects.create(**fixtures['question'])
endpoint = question.get_api_detail_url() + "answers/"
response = self.client.get(endpoint)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_question_vote_get_auth(self):
"""
        Assert GET /api/question/:id/vote returns 403 when not logged in
        and 405 when logged in
"""
question = Question.objects.create(**fixtures['question'])
endpoint = question.get_api_detail_url() + "vote/"
response = self.client.get(endpoint)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.login()
response = self.client.get(endpoint)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
@skip("pending implementation")
def test_question_list(self):
"""
Test GET /api/question/ returns question list
"""
self.login()
endpoint = reverse('api:question-list')
response = self.client.get(endpoint)
self.assertEqual(response.status_code, status.HTTP_200_OK)
@skip("pending implementation")
def test_question_create(self):
"""
Test POST /api/question/ creates a question
"""
self.login()
endpoint = reverse('api:question-list')
response = self.client.post(endpoint, {'text': 'Where are my keys?'}, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
@skip("pending implementation")
def test_question_retrieve(self):
"""
Test GET /api/question/:id/ returns a question detail
"""
self.login()
question = Question.objects.create(**fixtures['question'])
endpoint = question.get_api_detail_url()
response = self.client.get(endpoint)
self.assertEqual(response.status_code, status.HTTP_200_OK)
@skip("pending implementation")
def test_question_update(self):
"""
Test PUT /api/question/:id/ updates a question
"""
self.login()
question = Question.objects.create(**fixtures['question'])
endpoint = question.get_api_detail_url()
response = self.client.put(endpoint, {'text': 'Why did the bear cross the road?'}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_question_delete(self):
"""
Test DELETE /api/question/:id/ deletes a question
"""
self.login()
question = Question.objects.create(**fixtures['question'])
endpoint = question.get_api_detail_url()
response = self.client.delete(endpoint)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
response = self.client.get(endpoint)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertFalse(Question.objects.filter(pk=question.pk).exists())
def test_question_create_vote(self):
"""
Assert POST /api/question/:id/vote creates a vote for a user
"""
self.login()
question = Question.objects.create(**fixtures['question'])
endpoint = question.get_api_detail_url() + "vote/"
self.assertEqual(question.votes.count(), 0)
response = self.client.post(endpoint, {'vote': 1}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected = {'created': True, 'status': 'vote recorded', 'display': 'upvote'}
self.assertDictContainsSubset(expected, response.data)
self.assertEqual(question.votes.count(), 1)
def test_question_update_vote(self):
"""
Assert POST /api/question/:id/vote updates if already voted
"""
self.login()
question = Question.objects.create(**fixtures['question'])
vote, _ = Vote.objects.punch_ballot(content=question, user=self.user, vote=1)
endpoint = question.get_api_detail_url() + "vote/"
self.assertEqual(question.votes.count(), 1)
response = self.client.post(endpoint, {'vote': -1}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected = {'created': False, 'status': 'vote recorded', 'display': 'downvote'}
self.assertDictContainsSubset(expected, response.data)
self.assertEqual(question.votes.count(), 1)
def test_question_vote_response(self):
"""
Ensure POST /api/question/:id/vote response contains expected data
"""
self.login()
question = Question.objects.create(**fixtures['question'])
endpoint = question.get_api_detail_url() + "vote/"
self.assertEqual(question.votes.count(), 0)
response = self.client.post(endpoint, {'vote': 1}, format='json')
expected = {
'created': True,
'status': 'vote recorded',
'display': 'upvote',
'upvotes': 1, # Required for Question FE app (resets button counts)
'downvotes': 0, # Required for Question FE app (resets button counts)
}
self.assertEqual(response.status_code, status.HTTP_200_OK)
for key, val in expected.items():
self.assertIn(key, response.data)
self.assertEqual(val, response.data[key])
@skip("pending implementation")
def test_question_answers_list(self):
"""
Ensure GET /api/question/:id/answers response works
"""
pass
class AnswerAPIViewSetTest(TestCase):
def setUp(self):
self.usera = User.objects.create_user(**fixtures['user'])
self.userb = User.objects.create_user(**fixtures['voter'])
fixtures['question']['author'] = self.usera
fixtures['answer']['author'] = self.userb
self.question = Question.objects.create(**fixtures['question'])
fixtures['answer']['question'] = self.question
self.client = APIClient()
def login(self):
credentials = {
'username': fixtures['user']['username'],
'password': fixtures['user']['password'],
}
return self.client.login(**credentials)
def logout(self):
        return self.client.logout()
def test_answer_url_view_kwarg(self):
"""
Check that the answer provides a url
"""
answer = Answer.objects.create(**fixtures['answer'])
endpoint = answer.get_api_detail_url()
self.login()
response = self.client.get(endpoint)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('url', response.data)
url = urlsplit(response.data.get('url', '')).path
self.assertEqual(url, endpoint)
|
{
"content_hash": "4ccdaa055066904cb66482f0b6cf877b",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 105,
"avg_line_length": 35.49751243781095,
"alnum_prop": 0.5906797477224948,
"repo_name": "DistrictDataLabs/minimum-entropy",
"id": "3f0c632c674304a14a5c1077e5fbc16a33b02106",
"size": "14553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fugato/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5678"
},
{
"name": "HTML",
"bytes": "97821"
},
{
"name": "JavaScript",
"bytes": "10997"
},
{
"name": "Makefile",
"bytes": "2037"
},
{
"name": "Python",
"bytes": "173040"
}
],
"symlink_target": ""
}
|
import os
import sys
#Experiment Variables
eiger1 = "104.236.140.240"
eiger2 = "188.226.251.145"
eiger3 = "104.236.191.32"
eiger4 = "192.241.215.97"
eiger5 = "104.236.152.144"
DC0 = [eiger1,eiger3,eiger4,eiger5] #add nodes here
DC1 = [eiger2]
NODES = DC0 + DC1
ALL_NODES = [eiger1,eiger2,eiger3,eiger4,eiger5]
VAL_SIZES = ["10"] #bytes
LATENCY_MAX = ["0"] #deviation of normally distributed simulated mutation delay
RATIO_WRITES = ["0.3","0.1","0.01"] #writes/read
#LATENCY_MAX = ["0"] #ms
#RATIO_WRITES = ["0.125"] #writes/read
NUM_OPERATIONS = 10000
TEST_TYPE = "regular-stress" #facebook-stress | regular-stress
FACEBOOK_USE_EIGER = "no" # yes | no (default no)
NUM_USERS_FACEBOOK = 20
base_dir = "regular-results-num-nodes" #where the output will go
def mkdir(path):
os.system("mkdir {}".format(path))
def experiment():
print("Generating Files")
NUM_NODES = len(DC0)
mkdir(base_dir)
base_path_node = "{}/{}_node".format(base_dir,NUM_NODES)
mkdir(base_path_node)
for val in VAL_SIZES:
base_path_val = "{}/{}_val".format(base_path_node, val)
mkdir(base_path_val)
for latency in LATENCY_MAX:
base_path_lat = "{}/{}_lat".format(base_path_val, latency)
mkdir(base_path_lat)
for ratio in RATIO_WRITES:
base_path_rat = "{}/{}_write_ratio".format(base_path_lat, ratio)
mkdir(base_path_rat)
perform_experiment(val, latency, ratio)
files = []
for indx, address in enumerate(DC0):
filename = "{}/eiger{}.log".format(base_path_rat,indx)
files.append(filename)
copy_logs(address, filename)
generate_report(files, "{}/report.txt".format(base_path_rat), "{}/report.csv".format(base_dir), ratio, latency, val, NUM_NODES)
def reset_nodes(latency):
print("Reseting all nodes....")
for node in ALL_NODES:
cmd = "sshpass -p $eiger_pass ssh eiger@{} 'cd eiger; bash deegan_burn_it_all.bash;'".format(node)
print(cmd)
os.system(cmd)
for node in NODES:
cmd = "sshpass -p $eiger_pass ssh eiger@{} 'cd eiger; bash deegan_datacenter_launcher.bash {} skip'".format(node, latency)
if node == NODES[-1]:
#last node
cmd = "sshpass -p $eiger_pass ssh eiger@{} 'cd eiger; bash deegan_datacenter_launcher.bash {}'".format(node, latency)
print(cmd)
os.system(cmd)
def perform_experiment(val, latency, ratio):
print("Performing Experiment VAL:{} LAT:{} RAT:{}".format(val, latency, ratio))
reset_nodes(latency)
print("Running Client Stress Tests on {}".format(eiger2))
cmd = "sshpass -p $eiger_pass ssh eiger@{} 'cd eiger; export testType={}; export useEiger={}; export num_facebook_users={}; export num_operations={}; export chance_of_write={}; export value_size={}; export CASSANDRA_HOME=/home/eiger/eiger; env; ./deegan_client_launcher.bash > out.log'".format(eiger2, TEST_TYPE, FACEBOOK_USE_EIGER, NUM_USERS_FACEBOOK, NUM_OPERATIONS, ratio, val)
print(cmd)
os.system(cmd)
def copy_logs(address, path):
print("Copying logs from {} to {}".format(address, path))
os.system("sshpass -p $eiger_pass scp eiger@{}:/home/eiger/eiger/cassandra_var/cassandra_system.0.log {}".format(address,path));
def generate_report(input_files, output_path, csv_path, ratio, latency, val, num_nodes):
print("Generating report...")
print(input_files)
print(output_path)
f = open(output_path, "w")
f.write("Write/Read ratio: {}\n".format(ratio))
f.write("Latency: {}ms\n".format(latency))
f.write("Value Size: {}b\n\n".format(val))
f.write("Total Operiations: {}b\n\n".format(NUM_OPERATIONS))
all_numbers = []
for input_file in input_files:
f.write("\n\n{}:\n\n".format(input_file))
numbers = []
for line in open(input_file):
if("Completion Complete" in line):
i = line.index("e (") + 3
j = line.index("ms")
numString = line[i:j]
numbers.append(int(numString))
        print(numbers)
if(len(numbers) == 0):
continue
all_numbers = all_numbers + numbers
total = sum(numbers)
average = total/len(numbers)
median = sorted(numbers)[len(numbers)/2]
f.write("Average: {}ms\n".format(average))
f.write("Median: {}ms\n".format(median))
f.write(str(numbers))
f.close()
csv = open(csv_path, "a")
    csv.write(str(num_nodes) + ',' + str(val) + ',' + str(latency) + ',' + str(ratio) + ',' + ','.join([str(x) for x in all_numbers]) + '\n')
if __name__ == "__main__":
if(len(sys.argv) != 2):
print("Usage: runner.py (reset | experiment)")
elif(sys.argv[1] == "reset"):
reset_nodes(0)
elif(sys.argv[1] == "experiment"):
experiment()
else:
print("Usage: runner.py (reset | experiment)")
|
{
"content_hash": "52443e843f016753c42b9b42b2bb07f4",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 384,
"avg_line_length": 37.961832061068705,
"alnum_prop": 0.6016489040820431,
"repo_name": "tadeegan/eiger-application-aware",
"id": "469902d464a7cabbabcb63b4ef90d21188f3bec3",
"size": "4973",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "deegan_client/runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14437"
},
{
"name": "GAP",
"bytes": "37819"
},
{
"name": "Java",
"bytes": "4616769"
},
{
"name": "PigLatin",
"bytes": "402"
},
{
"name": "Python",
"bytes": "301308"
},
{
"name": "Shell",
"bytes": "178756"
},
{
"name": "Thrift",
"bytes": "36549"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template import Context
_mail_context_processors = None
# This is a function rather than module-level procedural code because we only
# want it to execute if somebody uses MailContext.
def get_mail_processors():
global _mail_context_processors
if _mail_context_processors is None:
processors = []
for path in getattr(settings, 'CAMPAIGN_CONTEXT_PROCESSORS', ('campaign.context_processors.recipient',)):
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = __import__(module, {}, {}, [attr])
except ImportError as e:
raise ImproperlyConfigured('Error importing campaign processor module %s: "%s"' % (module, e))
try:
func = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" callable campaign processor' % (module, attr))
processors.append(func)
_mail_context_processors = tuple(processors)
return _mail_context_processors
class MailContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in CAMPAIGN_CONTEXT_PROCESSORS.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, subscriber, dict_=None, processors=None, autoescape=True,
use_l10n=None, use_tz=None):
Context.__init__(self, dict_, autoescape=autoescape,
use_l10n=use_l10n, use_tz=use_tz)
if processors is None:
processors = ()
else:
processors = tuple(processors)
updates = dict()
for processor in get_mail_processors() + processors:
updates.update(processor(subscriber))
self.update(updates)
def flatten(self):
"""
Returns self.dicts as one dictionary
"""
flat = {}
for d in self.dicts:
flat.update(d)
return flat
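

# A minimal sketch of a custom processor (an assumption for illustration, not
# shipped with this app): entries in CAMPAIGN_CONTEXT_PROCESSORS are dotted
# paths to callables that take the subscriber and return a dict to merge into
# the MailContext, e.g.
#
#   def unsubscribe_link(subscriber):
#       return {'unsubscribe_url': '/unsubscribe/%s/' % subscriber.pk}
#
# and in settings:
#
#   CAMPAIGN_CONTEXT_PROCESSORS = (
#       'campaign.context_processors.recipient',
#       'myapp.processors.unsubscribe_link',
#   )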
|
{
"content_hash": "b4e5b38c8c7e2697ee3eb5887df89d20",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 125,
"avg_line_length": 38.07017543859649,
"alnum_prop": 0.6248847926267281,
"repo_name": "arneb/django-campaign",
"id": "a8c982a20f46a178e9f2948088724ea80e3d8678",
"size": "2213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "campaign/context.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4889"
},
{
"name": "Python",
"bytes": "63831"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0005_auto_20170323_1743'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='parent',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='api.Comment'),
),
migrations.AlterField(
model_name='comment',
name='post',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='api.Post'),
),
]
|
{
"content_hash": "a057c8de4cae5c08c1eea153c6dd8be7",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 135,
"avg_line_length": 29.833333333333332,
"alnum_prop": 0.6201117318435754,
"repo_name": "frostblooded/kanq",
"id": "b32c28c415ebf781bb05e71f418ea90cd5462a7b",
"size": "789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/migrations/0006_auto_20170323_2105.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "876"
},
{
"name": "HTML",
"bytes": "8928"
},
{
"name": "JavaScript",
"bytes": "23135"
},
{
"name": "Python",
"bytes": "93176"
},
{
"name": "Shell",
"bytes": "129"
},
{
"name": "TypeScript",
"bytes": "44890"
}
],
"symlink_target": ""
}
|
"""
Generates a new configuration file for use with Astrometrica
"""
import os

import pyfits
__pkg_root__ = os.path.dirname(__file__)
__resources_dir__ = os.path.join(__pkg_root__, os.pardir, 'resources')
__batch_dir__ = os.path.join(__resources_dir__, 'fits_files')
__output_dir__ = os.path.join(__pkg_root__, os.pardir, 'output')
class ConfigFile(object):
def __init__(self):
"""
Initialize instance variables representing the fits filename,
the template for the configuration file, the new configuration file
output name, and the two parameters to replace.
"""
self.stdout_ra = None
self.stdout_dec = None
self.stdout_pixelscale = None
self.focal_length = None
self.field_rotation = None
self.template_filename = os.path.join(__resources_dir__,
'config_template.txt')
def process(self, fits_filename, stdout_filename):
"""
Sets instance variables for extracted stdout values,
sets objctra and objctdec fits headers based on those values,
and determines the focal length based on the given equation
focal length = (206265*.03)/platescale.
Creates a new file.
"""
self.get_stdout_values(stdout_filename)
self.set_fits_headers(fits_filename)
self.determine_focal_length()
config_name = os.path.splitext(fits_filename)[0] + ".cfg"
self.set_new_cfg_headers(config_name)
def get_stdout_values(self, stdout_filename):
"""
Sets instance variables for extracted stdout values
"""
with open(stdout_filename, "r") as f:
for line in f:
if "pixel scale" in line:
line_list = line.split(" ")
self.stdout_pixelscale = line_list[7]
elif "(RA H:M:S, Dec D:M:S)" in line:
line_list = line.split(" ")
self.stdout_ra = line_list[7][1:-1].replace(":", " ")
self.stdout_dec = line_list[8][:-3].replace(":", " ")
elif "Field rotation angle" in line:
line_list = line.split(" ")
self.field_rotation = line_list[5]
return None
def set_fits_headers(self, fits_filename):
"""
Sets objctra and objctdec fits headers based on instance variables
"""
        # TODO unit test for this
        # Open in update mode so header changes are written back to disk
        fits_file = pyfits.open(fits_filename, mode='update')
        # Assignment sets each header keyword, creating it if it is absent
        fits_file[0].header["objctra"] = self.stdout_ra
        fits_file[0].header["objctdec"] = self.stdout_dec
        fits_file.flush()
        fits_file.close()
        return None
def determine_focal_length(self):
"""
        Set the focal length instance variable using the equation
        focal length = (206265 * 0.03) / pixel scale
"""
self.focal_length = (206265 * 0.03) / float(self.stdout_pixelscale)
return None
def set_new_cfg_headers(self, config_output_filename):
"""
Creates a new file based on self.new_cfg_filename and replaces necessary
parameters.
:param config_output_filename:
"""
template = open(self.template_filename, "r")
new_cfg = open(config_output_filename, "w")
for line in template:
if "FocalLength" in line:
new_cfg.write("FocalLength=" + str(self.focal_length) +"\n")
elif "PA" in line and "VarPA" not in line:
new_cfg.write("PA=" + str(self.field_rotation) +"\n")
else:
new_cfg.write(line)
template.close()
new_cfg.close()
return None
if __name__=="__main__":
new = ConfigFile()
new.get_stdout_values("resources/sample_stdout.txt")
new.set_fits_headers("output/solve_field_output/example.new")
new.determine_focal_length()
new.set_new_cfg_headers('output/example.cfg')
|
{
"content_hash": "528286059b4cf55dce8c88babbda6eb7",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 91,
"avg_line_length": 35.34188034188034,
"alnum_prop": 0.5823458282950423,
"repo_name": "aesoll/astrogen",
"id": "6518783a14e4e19850bc76fe0d7979935cccdb3d",
"size": "4433",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astrogen/configuration_gen.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "37169"
},
{
"name": "Shell",
"bytes": "96"
}
],
"symlink_target": ""
}
|
import json
import logging
import sys
import time
from telemetry.core import util
class InspectorPage(object):
def __init__(self, inspector_backend, timeout=60):
self._inspector_backend = inspector_backend
self._inspector_backend.RegisterDomain(
'Page',
self._OnNotification,
self._OnClose)
self._navigation_pending = False
self._navigation_url = ""
self._script_to_evaluate_on_commit = None
# Turn on notifications. We need them to get the Page.frameNavigated event.
self._EnablePageNotifications(timeout=timeout)
def _OnNotification(self, msg):
logging.debug('Notification: %s', json.dumps(msg, indent=2))
if msg['method'] == 'Page.frameNavigated' and self._navigation_pending:
url = msg['params']['frame']['url']
if (self._navigation_url == url or
(not url == 'chrome://newtab/' and not url == 'about:blank'
and not 'parentId' in msg['params']['frame'])):
# Marks the navigation as complete and unblocks the
# PerformActionAndWaitForNavigate call.
self._navigation_pending = False
def _OnClose(self):
pass
def _SetScriptToEvaluateOnCommit(self, source):
existing_source = (self._script_to_evaluate_on_commit and
self._script_to_evaluate_on_commit['source'])
if source == existing_source:
return
if existing_source:
request = {
'method': 'Page.removeScriptToEvaluateOnLoad',
'params': {
'identifier': self._script_to_evaluate_on_commit['id'],
}
}
self._inspector_backend.SyncRequest(request)
self._script_to_evaluate_on_commit = None
if source:
request = {
'method': 'Page.addScriptToEvaluateOnLoad',
'params': {
'scriptSource': source,
}
}
res = self._inspector_backend.SyncRequest(request)
self._script_to_evaluate_on_commit = {
'id': res['result']['identifier'],
'source': source
}
def _EnablePageNotifications(self, timeout=60):
request = {
'method': 'Page.enable'
}
res = self._inspector_backend.SyncRequest(request, timeout)
assert len(res['result'].keys()) == 0
def PerformActionAndWaitForNavigate(self, action_function, timeout=60):
"""Executes action_function, and waits for the navigation to complete.
    action_function is expected to result in a navigation. This function returns
when the navigation is complete or when the timeout has been exceeded.
"""
start_time = time.time()
remaining_time = timeout
action_function()
self._navigation_pending = True
try:
while self._navigation_pending and remaining_time > 0:
remaining_time = max(timeout - (time.time() - start_time), 0.0)
self._inspector_backend.DispatchNotifications(remaining_time)
except util.TimeoutException:
# Since we pass remaining_time to DispatchNotifications, we need to
# list the full timeout time in this message.
raise util.TimeoutException('Timed out while waiting %ds for navigation. '
'Error=%s' % (timeout, sys.exc_info()[1]))
def Navigate(self, url, script_to_evaluate_on_commit=None, timeout=60):
"""Navigates to |url|.
If |script_to_evaluate_on_commit| is given, the script source string will be
evaluated when the navigation is committed. This is after the context of
the page exists, but before any script on the page itself has executed.
"""
def DoNavigate():
self._SetScriptToEvaluateOnCommit(script_to_evaluate_on_commit)
# Navigate the page. However, there seems to be a bug in chrome devtools
# protocol where the request id for this event gets held on the browser
# side pretty much indefinitely.
#
# So, instead of waiting for the event to actually complete, wait for the
# Page.frameNavigated event.
request = {
'method': 'Page.navigate',
'params': {
'url': url,
}
}
self._inspector_backend.SendAndIgnoreResponse(request)
self._navigation_url = url
self.PerformActionAndWaitForNavigate(DoNavigate, timeout)
def GetCookieByName(self, name, timeout=60):
"""Returns the value of the cookie by the given |name|."""
request = {
'method': 'Page.getCookies'
}
res = self._inspector_backend.SyncRequest(request, timeout)
cookies = res['result']['cookies']
for cookie in cookies:
if cookie['name'] == name:
return cookie['value']
return None
def CollectGarbage(self, timeout=60):
request = {
'method': 'HeapProfiler.CollectGarbage'
}
self._inspector_backend.SyncRequest(request, timeout)
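# A minimal usage sketch (assumes an inspector_backend object implementing
# RegisterDomain/SyncRequest/SendAndIgnoreResponse/DispatchNotifications as
# used above; the URL and cookie name are illustrative):
#
#   page = InspectorPage(inspector_backend, timeout=30)
#   page.Navigate('http://example.com',
#                 script_to_evaluate_on_commit='window.__injected = true;')
#   session = page.GetCookieByName('session')
#   page.CollectGarbage()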
|
{
"content_hash": "86123529f91b31d157956c976a01c1ac",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 80,
"avg_line_length": 36.24812030075188,
"alnum_prop": 0.6403235843186061,
"repo_name": "patrickm/chromium.src",
"id": "2874268caef4c93e983778c32cacaf0b098c164b",
"size": "4983",
"binary": false,
"copies": "4",
"ref": "refs/heads/nw",
"path": "tools/telemetry/telemetry/core/backends/chrome/inspector_page.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "52960"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "40737238"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "207930633"
},
{
"name": "CSS",
"bytes": "939170"
},
{
"name": "Java",
"bytes": "5844934"
},
{
"name": "JavaScript",
"bytes": "17837835"
},
{
"name": "Mercury",
"bytes": "10533"
},
{
"name": "Objective-C",
"bytes": "886228"
},
{
"name": "Objective-C++",
"bytes": "6667789"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "672770"
},
{
"name": "Python",
"bytes": "10857933"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1326032"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
}
|
from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Rss201rev2Feed
from wagtail.wagtailimages.models import Filter
from wagtail.wagtailcore.models import Site
from django.conf import settings
from datetime import datetime, time
from django.utils.html import strip_tags
from django.apps import apps
from wagtail.wagtailcore.rich_text import expand_db_html
from bs4 import BeautifulSoup
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from django.utils import feedgenerator
feed_app_label = getattr(settings, "FEED_APP_LABEL")
feed_model_name = getattr(settings, "FEED_MODEL_NAME")
feed_model = apps.get_model(app_label=feed_app_label,
model_name=feed_model_name)
class CustomFeedGenerator(Rss201rev2Feed):
def root_attributes(self):
attrs = super(CustomFeedGenerator, self).root_attributes()
attrs['xmlns:content'] = 'http://purl.org/rss/1.0/modules/content/'
return attrs
def add_item_elements(self, handler, item):
super(CustomFeedGenerator, self).add_item_elements(handler, item)
handler.startElement(u"content:encoded", {})
content = '<![CDATA['
if item['image'] != "":
content += '<img src="%s"><hr>' % (item['image'])
content += item['content']
content += ']]>'
        # Adding the content this way does not escape it, which keeps the
        # markup intact for Feedburner and similar services. Using
        # handler.characters(content) would escape the content and break
        # those services.
handler._write(content)
handler.endElement(u"content:encoded")
class BasicFeed(Feed):
# FEED TYPE
feed_type = feedgenerator.Rss201rev2Feed
# The RSS information that gets shown at the top of the feed.
title = getattr(settings, "FEED_TITLE", "")
link = getattr(settings, "FEED_LINK", "")
description = getattr(settings, "FEED_DESCRIPTION", "Blog Feed")
author_email = getattr(settings, "FEED_AUTHOR_EMAIL", "")
author_link = getattr(settings, "FEED_AUTHOR_LINK", "")
item_description_field = getattr(settings, "FEED_ITEM_DESCRIPTION_FIELD")
item_content_field = getattr(settings, "FEED_ITEM_CONTENT_FIELD")
def items(self):
return feed_model.objects.order_by('-date').live()
def item_pubdate(self, item):
return datetime.combine(item.date, time())
def item_link(self, item):
return item.full_url
def item_author_name(self, item):
pass
class ExtendedFeed(Feed):
# FEED TYPE
feed_type = CustomFeedGenerator
# The RSS information that gets shown at the top of the feed.
title = getattr(settings, "FEED_TITLE", "")
link = getattr(settings, "FEED_LINK", "")
description = getattr(settings, "FEED_DESCRIPTION", "Blog Feed")
author_email = getattr(settings, "FEED_AUTHOR_EMAIL", "")
author_link = getattr(settings, "FEED_AUTHOR_LINK", "")
item_description_field = getattr(settings, "FEED_ITEM_DESCRIPTION_FIELD")
item_content_field = getattr(settings, "FEED_ITEM_CONTENT_FIELD")
def get_site_url(self):
site = Site.objects.get(is_default_site=True)
return site.root_url
def items(self):
return feed_model.objects.order_by('-date').live()
def item_pubdate(self, item):
return datetime.combine(item.date, time())
def item_title(self, item):
return item.title
def item_description(self, item):
content = strip_tags(getattr(item, self.item_description_field))
return content
def item_link(self, item):
return item.full_url
def item_author_name(self, item):
return u'Jonh Blog'
def item_extra_kwargs(self, item):
"""
Returns an extra keyword arguments dictionary that is used with
the 'add_item' call of the feed generator.
Add the fields of the item, to be used by the custom feed generator.
"""
        feed_image = item.feed_image
        image_complete_url = ""
        if feed_image:
            filter, _ = Filter.objects.get_or_create(spec='width-1200')
            img = feed_image.get_rendition(filter)
            image_complete_url = urljoin(self.get_site_url(), img.url)
        content = expand_db_html(getattr(item, self.item_content_field))
        soup = BeautifulSoup(content, 'html.parser')
        for img_tag in soup.findAll('img'):
            img_tag['src'] = urljoin(self.get_site_url(), img_tag['src'])
        return {
            'image': image_complete_url,
            'content': soup.prettify(formatter="html")
        }
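# Minimal wiring sketch (hypothetical settings and URLconf; the FEED_* names
# must point at a Wagtail page model exposing `date`, `feed_image` and the
# configured description/content fields):
#
#     # settings.py
#     FEED_APP_LABEL = 'blog'
#     FEED_MODEL_NAME = 'BlogPage'
#     FEED_ITEM_DESCRIPTION_FIELD = 'intro'
#     FEED_ITEM_CONTENT_FIELD = 'body'
#
#     # urls.py
#     from feeds.feeds import ExtendedFeed
#     urlpatterns += [url(r'^feed/$', ExtendedFeed())]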
|
{
"content_hash": "9bd1b5ab61d3b0705050906d0103e027",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 78,
"avg_line_length": 34.02173913043478,
"alnum_prop": 0.6585729499467519,
"repo_name": "DonaldTrumpHasTinyHands/tiny_hands_pac",
"id": "b75f0fb748a96251739cd3de2c2feaa1eba90f42",
"size": "4728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feeds/feeds.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1397831"
},
{
"name": "HTML",
"bytes": "98379"
},
{
"name": "JavaScript",
"bytes": "3579725"
},
{
"name": "Makefile",
"bytes": "5052"
},
{
"name": "Python",
"bytes": "210664"
},
{
"name": "Ruby",
"bytes": "527"
},
{
"name": "Shell",
"bytes": "2381"
}
],
"symlink_target": ""
}
|
import os
import shutil
import textwrap
from ..util.compat import u, has_pep3147, get_current_bytecode_suffixes
from ..script import Script, ScriptDirectory
from .. import util
from . import engines
from . import provision
def _get_staging_directory():
if provision.FOLLOWER_IDENT:
return "scratch_%s" % provision.FOLLOWER_IDENT
else:
return 'scratch'
def staging_env(create=True, template="generic", sourceless=False):
from alembic import command, script
cfg = _testing_config()
if create:
path = os.path.join(_get_staging_directory(), 'scripts')
if os.path.exists(path):
shutil.rmtree(path)
command.init(cfg, path, template=template)
if sourceless:
try:
# do an import so that a .pyc/.pyo is generated.
util.load_python_file(path, 'env.py')
except AttributeError:
# we don't have the migration context set up yet
# so running the .env py throws this exception.
# theoretically we could be using py_compiler here to
# generate .pyc/.pyo without importing but not really
# worth it.
pass
assert sourceless in (
"pep3147_envonly", "simple", "pep3147_everything"), sourceless
make_sourceless(
os.path.join(path, "env.py"),
"pep3147" if "pep3147" in sourceless else "simple"
)
sc = script.ScriptDirectory.from_config(cfg)
return sc
def clear_staging_env():
shutil.rmtree(_get_staging_directory(), True)
def script_file_fixture(txt):
dir_ = os.path.join(_get_staging_directory(), 'scripts')
path = os.path.join(dir_, "script.py.mako")
with open(path, 'w') as f:
f.write(txt)
def env_file_fixture(txt):
dir_ = os.path.join(_get_staging_directory(), 'scripts')
txt = """
from alembic import context
config = context.config
""" + txt
path = os.path.join(dir_, "env.py")
pyc_path = util.pyc_file_from_path(path)
if pyc_path:
os.unlink(pyc_path)
with open(path, 'w') as f:
f.write(txt)
def _sqlite_file_db(tempname="foo.db"):
dir_ = os.path.join(_get_staging_directory(), 'scripts')
url = "sqlite:///%s/%s" % (dir_, tempname)
return engines.testing_engine(url=url)
def _sqlite_testing_config(sourceless=False):
dir_ = os.path.join(_get_staging_directory(), 'scripts')
url = "sqlite:///%s/foo.db" % dir_
return _write_config_file("""
[alembic]
script_location = %s
sqlalchemy.url = %s
sourceless = %s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
""" % (dir_, url, "true" if sourceless else "false"))
def _multi_dir_testing_config(sourceless=False, extra_version_location=''):
dir_ = os.path.join(_get_staging_directory(), 'scripts')
url = "sqlite:///%s/foo.db" % dir_
return _write_config_file("""
[alembic]
script_location = %s
sqlalchemy.url = %s
sourceless = %s
version_locations = %%(here)s/model1/ %%(here)s/model2/ %%(here)s/model3/ %s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
""" % (dir_, url, "true" if sourceless else "false",
extra_version_location))
def _no_sql_testing_config(dialect="postgresql", directives=""):
"""use a postgresql url with no host so that
connections guaranteed to fail"""
dir_ = os.path.join(_get_staging_directory(), 'scripts')
return _write_config_file("""
[alembic]
script_location = %s
sqlalchemy.url = %s://
%s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
""" % (dir_, dialect, directives))
def _write_config_file(text):
cfg = _testing_config()
with open(cfg.config_file_name, 'w') as f:
f.write(text)
return cfg
def _testing_config():
from alembic.config import Config
if not os.access(_get_staging_directory(), os.F_OK):
os.mkdir(_get_staging_directory())
return Config(os.path.join(_get_staging_directory(), 'test_alembic.ini'))
def write_script(
scriptdir, rev_id, content, encoding='ascii', sourceless=False):
old = scriptdir.revision_map.get_revision(rev_id)
path = old.path
content = textwrap.dedent(content)
if encoding:
content = content.encode(encoding)
with open(path, 'wb') as fp:
fp.write(content)
pyc_path = util.pyc_file_from_path(path)
if pyc_path:
os.unlink(pyc_path)
script = Script._from_path(scriptdir, path)
old = scriptdir.revision_map.get_revision(script.revision)
if old.down_revision != script.down_revision:
raise Exception("Can't change down_revision "
"on a refresh operation.")
scriptdir.revision_map.add_revision(script, _replace=True)
if sourceless:
make_sourceless(
path,
"pep3147" if sourceless == "pep3147_everything" else "simple"
)
def make_sourceless(path, style):
import py_compile
py_compile.compile(path)
if style == "simple" and has_pep3147():
pyc_path = util.pyc_file_from_path(path)
suffix = get_current_bytecode_suffixes()[0]
filepath, ext = os.path.splitext(path)
simple_pyc_path = filepath + suffix
shutil.move(pyc_path, simple_pyc_path)
pyc_path = simple_pyc_path
elif style == "pep3147" and not has_pep3147():
raise NotImplementedError()
else:
assert style in ("pep3147", "simple")
pyc_path = util.pyc_file_from_path(path)
assert os.access(pyc_path, os.F_OK)
os.unlink(path)
def three_rev_fixture(cfg):
a = util.rev_id()
b = util.rev_id()
c = util.rev_id()
script = ScriptDirectory.from_config(cfg)
script.generate_revision(a, "revision a", refresh=True)
write_script(script, a, """\
"Rev A"
revision = '%s'
down_revision = None
from alembic import op
def upgrade():
op.execute("CREATE STEP 1")
def downgrade():
op.execute("DROP STEP 1")
""" % a)
script.generate_revision(b, "revision b", refresh=True)
write_script(script, b, u("""# coding: utf-8
"Rev B, méil"
revision = '%s'
down_revision = '%s'
from alembic import op
def upgrade():
op.execute("CREATE STEP 2")
def downgrade():
op.execute("DROP STEP 2")
""") % (b, a), encoding="utf-8")
script.generate_revision(c, "revision c", refresh=True)
write_script(script, c, """\
"Rev C"
revision = '%s'
down_revision = '%s'
from alembic import op
def upgrade():
op.execute("CREATE STEP 3")
def downgrade():
op.execute("DROP STEP 3")
""" % (c, b))
return a, b, c
def _multidb_testing_config(engines):
"""alembic.ini fixture to work exactly with the 'multidb' template"""
dir_ = os.path.join(_get_staging_directory(), 'scripts')
databases = ", ".join(
engines.keys()
)
engines = "\n\n".join(
"[%s]\n"
"sqlalchemy.url = %s" % (key, value.url)
for key, value in engines.items()
)
return _write_config_file("""
[alembic]
script_location = %s
sourceless = false
databases = %s
%s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
""" % (dir_, databases, engines)
)
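# Typical fixture flow (a sketch of how the helpers above compose inside a
# test case):
#
#     env = staging_env()                # scratch scripts/ directory
#     cfg = _sqlite_testing_config()     # ini pointing at that directory
#     a, b, c = three_rev_fixture(cfg)   # three chained revisions
#     ...exercise upgrade/downgrade against cfg...
#     clear_staging_env()                # remove the scratch directory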
|
{
"content_hash": "15f3991cc716da4e4d3620dc184fb916",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 78,
"avg_line_length": 22.614754098360656,
"alnum_prop": 0.6266763320043494,
"repo_name": "Widiot/simpleblog",
"id": "7e328fda306c0b8f913d499b7b714861c93fcd9a",
"size": "8295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python3.5/site-packages/alembic/testing/env.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "HTML",
"bytes": "69273"
},
{
"name": "JavaScript",
"bytes": "6748"
},
{
"name": "Mako",
"bytes": "10018"
},
{
"name": "Python",
"bytes": "10622849"
},
{
"name": "Shell",
"bytes": "3300"
}
],
"symlink_target": ""
}
|
from collections import Sequence # noqa
import logging
from django.conf import settings
from horizon import exceptions
import six
__all__ = ('APIResourceWrapper', 'APIDictWrapper',
'get_service_from_catalog', 'url_for',)
LOG = logging.getLogger(__name__)
class APIVersionManager(object):
"""Object to store and manage API versioning data and utility methods."""
SETTINGS_KEY = "OPENSTACK_API_VERSIONS"
def __init__(self, service_type, preferred_version=None):
self.service_type = service_type
self.preferred = preferred_version
self._active = None
self.supported = {}
# As a convenience, we can drop in a placeholder for APIs that we
# have not yet needed to version. This is useful, for example, when
# panels such as the admin metadata_defs wants to check the active
# version even though it's not explicitly defined. Previously
# this caused a KeyError.
if self.preferred:
self.supported[self.preferred] = {"version": self.preferred}
@property
def active(self):
if self._active is None:
self.get_active_version()
return self._active
def load_supported_version(self, version, data):
self.supported[version] = data
def get_active_version(self):
if self._active is not None:
return self.supported[self._active]
key = getattr(settings, self.SETTINGS_KEY, {}).get(self.service_type)
if key is None:
# TODO(gabriel): support API version discovery here; we'll leave
# the setting in as a way of overriding the latest available
# version.
key = self.preferred
# Since we do a key lookup in the supported dict the type matters,
# let's ensure people know if they use a string when the key isn't.
if isinstance(key, six.string_types):
msg = ('The version "%s" specified for the %s service should be '
'either an integer or a float, not a string.' %
(key, self.service_type))
raise exceptions.ConfigurationError(msg)
# Provide a helpful error message if the specified version isn't in the
# supported list.
if key not in self.supported:
choices = ", ".join(str(k) for k in six.iterkeys(self.supported))
msg = ('%s is not a supported API version for the %s service, '
' choices are: %s' % (key, self.service_type, choices))
raise exceptions.ConfigurationError(msg)
self._active = key
return self.supported[self._active]
def clear_active_cache(self):
self._active = None
class APIResourceWrapper(object):
"""Simple wrapper for api objects.
Define _attrs on the child class and pass in the
api object as the only argument to the constructor
"""
_attrs = []
_apiresource = None # Make sure _apiresource is there even in __init__.
def __init__(self, apiresource):
self._apiresource = apiresource
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError:
if attr not in self._attrs:
raise
# __getattr__ won't find properties
return getattr(self._apiresource, attr)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
dict((attr, getattr(self, attr))
for attr in self._attrs
if hasattr(self, attr)))
def to_dict(self):
obj = {}
for key in self._attrs:
obj[key] = getattr(self._apiresource, key, None)
return obj
class APIDictWrapper(object):
"""Simple wrapper for api dictionaries
Some api calls return dictionaries. This class provides identical
behavior as APIResourceWrapper, except that it will also behave as a
dictionary, in addition to attribute accesses.
Attribute access is the preferred method of access, to be
consistent with api resource objects from novaclient.
"""
_apidict = {} # Make sure _apidict is there even in __init__.
def __init__(self, apidict):
self._apidict = apidict
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError:
if attr not in self._apidict:
raise
return self._apidict[attr]
def __getitem__(self, item):
try:
return getattr(self, item)
except (AttributeError, TypeError) as e:
# caller is expecting a KeyError
raise KeyError(e)
def __contains__(self, item):
try:
return hasattr(self, item)
except TypeError:
return False
def get(self, item, default=None):
try:
return getattr(self, item)
except (AttributeError, TypeError):
return default
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self._apidict)
def __cmp__(self, other):
if hasattr(other, '_apidict'):
return cmp(self._apidict, other._apidict)
return cmp(self._apidict, other)
def to_dict(self):
return self._apidict
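# Illustration of the dual access modes described above (hypothetical
# payload):
#
#     server = APIDictWrapper({'id': '42', 'status': 'ACTIVE'})
#     server.status                      # -> 'ACTIVE' (preferred)
#     server['status']                   # -> 'ACTIVE' (dict-style)
#     server.get('flavor', 'unknown')    # -> 'unknown'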
class Quota(object):
"""Wrapper for individual limits in a quota."""
def __init__(self, name, limit):
self.name = name
self.limit = limit
def __repr__(self):
return "<Quota: (%s, %s)>" % (self.name, self.limit)
class QuotaSet(Sequence):
"""Wrapper for client QuotaSet objects which turns the individual quotas
into Quota objects for easier handling/iteration.
`QuotaSet` objects support a mix of `list` and `dict` methods; you can use
the bracket notation (`qs["my_quota"] = 0`) to add new quota values, and
use the `get` method to retrieve a specific quota, but otherwise it
behaves much like a list or tuple, particularly in supporting iteration.
"""
def __init__(self, apiresource=None):
self.items = []
if apiresource:
if hasattr(apiresource, '_info'):
items = apiresource._info.items()
else:
items = apiresource.items()
for k, v in items:
if k == 'id':
continue
self[k] = v
def __setitem__(self, k, v):
v = int(v) if v is not None else v
q = Quota(k, v)
self.items.append(q)
def __getitem__(self, index):
return self.items[index]
def __add__(self, other):
"""Merge another QuotaSet into this one. Existing quotas are
not overridden.
"""
if not isinstance(other, QuotaSet):
msg = "Can only add QuotaSet to QuotaSet, " \
"but received %s instead" % type(other)
raise ValueError(msg)
for item in other:
if self.get(item.name).limit is None:
self.items.append(item)
return self
def __len__(self):
return len(self.items)
def __repr__(self):
return repr(self.items)
def get(self, key, default=None):
match = [quota for quota in self.items if quota.name == key]
return match.pop() if len(match) else Quota(key, default)
def add(self, other):
return self.__add__(other)
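# Example of the mixed list/dict behavior documented above (illustrative
# quota names and values):
#
#     qs = QuotaSet()
#     qs['instances'] = 10          # bracket assignment appends a Quota
#     qs['cores'] = None            # unset limits stay None
#     qs.get('instances').limit     # -> 10
#     len(qs)                       # -> 2
#     [q.name for q in qs]          # -> ['instances', 'cores'] (iterable)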
def get_service_from_catalog(catalog, service_type):
if catalog:
for service in catalog:
if 'type' not in service:
continue
if service['type'] == service_type:
return service
return None
def get_version_from_service(service):
if service and service.get('endpoints'):
endpoint = service['endpoints'][0]
if 'interface' in endpoint:
return 3
else:
return 2.0
return 2.0
# Mapping of V2 Catalog Endpoint_type to V3 Catalog Interfaces
ENDPOINT_TYPE_TO_INTERFACE = {
'publicURL': 'public',
'internalURL': 'internal',
'adminURL': 'admin',
}
def get_url_for_service(service, region, endpoint_type):
if 'type' not in service:
return None
identity_version = get_version_from_service(service)
service_endpoints = service.get('endpoints', [])
available_endpoints = [endpoint for endpoint in service_endpoints
if region == _get_endpoint_region(endpoint)]
"""if we are dealing with the identity service and there is no endpoint
in the current region, it is okay to use the first endpoint for any
identity service endpoints and we can assume that it is global
"""
if service['type'] == 'identity' and not available_endpoints:
available_endpoints = [endpoint for endpoint in service_endpoints]
for endpoint in available_endpoints:
try:
if identity_version < 3:
return endpoint.get(endpoint_type)
else:
interface = \
ENDPOINT_TYPE_TO_INTERFACE.get(endpoint_type, '')
if endpoint.get('interface') == interface:
return endpoint.get('url')
except (IndexError, KeyError):
"""it could be that the current endpoint just doesn't match the
type, continue trying the next one
"""
pass
return None
def url_for(request, service_type, endpoint_type=None, region=None):
endpoint_type = endpoint_type or getattr(settings,
'OPENSTACK_ENDPOINT_TYPE',
'publicURL')
fallback_endpoint_type = getattr(settings, 'SECONDARY_ENDPOINT_TYPE', None)
catalog = request.user.service_catalog
service = get_service_from_catalog(catalog, service_type)
if service:
if not region:
region = request.user.services_region
url = get_url_for_service(service,
region,
endpoint_type)
if not url and fallback_endpoint_type:
url = get_url_for_service(service,
region,
fallback_endpoint_type)
if url:
return url
raise exceptions.ServiceCatalogException(service_type)
def is_service_enabled(request, service_type):
service = get_service_from_catalog(request.user.service_catalog,
service_type)
if service:
region = request.user.services_region
for endpoint in service.get('endpoints', []):
if 'type' not in service:
continue
# ignore region for identity
if service['type'] == 'identity' or \
_get_endpoint_region(endpoint) == region:
return True
return False
def _get_endpoint_region(endpoint):
"""Common function for getting the region from endpoint.
In Keystone V3, region has been deprecated in favor of
region_id.
This method provides a way to get region that works for
both Keystone V2 and V3.
"""
return endpoint.get('region_id') or endpoint.get('region')
|
{
"content_hash": "19903e8d79e6c240b43f783174bbe7ea",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 79,
"avg_line_length": 33.43952802359882,
"alnum_prop": 0.5845977417078334,
"repo_name": "melon-li/openstack-dashboard",
"id": "d08b016aae430f5760325b84518ffa12971c1e52",
"size": "12100",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "openstack_dashboard/api/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2107919"
},
{
"name": "HTML",
"bytes": "519704"
},
{
"name": "JavaScript",
"bytes": "980733"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4960498"
},
{
"name": "Shell",
"bytes": "18658"
}
],
"symlink_target": ""
}
|
from moto.core import BaseBackend
import boto.logs
from moto.core.utils import unix_time_millis
from .exceptions import (
ResourceNotFoundException,
ResourceAlreadyExistsException
)
class LogEvent:
_event_id = 0
def __init__(self, ingestion_time, log_event):
self.ingestionTime = ingestion_time
self.timestamp = log_event["timestamp"]
self.message = log_event['message']
self.eventId = self.__class__._event_id
self.__class__._event_id += 1
def to_filter_dict(self):
return {
"eventId": self.eventId,
"ingestionTime": self.ingestionTime,
# "logStreamName":
"message": self.message,
"timestamp": self.timestamp
}
def to_response_dict(self):
return {
"ingestionTime": self.ingestionTime,
"message": self.message,
"timestamp": self.timestamp
}
class LogStream:
_log_ids = 0
def __init__(self, region, log_group, name):
self.region = region
self.arn = "arn:aws:logs:{region}:{id}:log-group:{log_group}:log-stream:{log_stream}".format(
region=region, id=self.__class__._log_ids, log_group=log_group, log_stream=name)
self.creationTime = unix_time_millis()
self.firstEventTimestamp = None
self.lastEventTimestamp = None
self.lastIngestionTime = None
self.logStreamName = name
self.storedBytes = 0
        self.uploadSequenceToken = 0  # counter returned as the sequenceToken expected by put_log_events
self.events = []
self.__class__._log_ids += 1
def _update(self):
# events can be empty when stream is described soon after creation
self.firstEventTimestamp = min([x.timestamp for x in self.events]) if self.events else None
self.lastEventTimestamp = max([x.timestamp for x in self.events]) if self.events else None
def to_describe_dict(self):
# Compute start and end times
self._update()
res = {
"arn": self.arn,
"creationTime": self.creationTime,
"logStreamName": self.logStreamName,
"storedBytes": self.storedBytes,
}
if self.events:
rest = {
"firstEventTimestamp": self.firstEventTimestamp,
"lastEventTimestamp": self.lastEventTimestamp,
"lastIngestionTime": self.lastIngestionTime,
"uploadSequenceToken": str(self.uploadSequenceToken),
}
res.update(rest)
return res
def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token):
# TODO: ensure sequence_token
# TODO: to be thread safe this would need a lock
self.lastIngestionTime = unix_time_millis()
# TODO: make this match AWS if possible
self.storedBytes += sum([len(log_event["message"]) for log_event in log_events])
self.events += [LogEvent(self.lastIngestionTime, log_event) for log_event in log_events]
self.uploadSequenceToken += 1
return '{:056d}'.format(self.uploadSequenceToken)
def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head):
def filter_func(event):
if start_time and event.timestamp < start_time:
return False
if end_time and event.timestamp > end_time:
return False
return True
events = sorted(filter(filter_func, self.events), key=lambda event: event.timestamp, reverse=start_from_head)
back_token = next_token
if next_token is None:
next_token = 0
events_page = [event.to_response_dict() for event in events[next_token: next_token + limit]]
next_token += limit
if next_token >= len(self.events):
next_token = None
return events_page, back_token, next_token
def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved):
def filter_func(event):
if start_time and event.timestamp < start_time:
return False
if end_time and event.timestamp > end_time:
return False
return True
events = []
for event in sorted(filter(filter_func, self.events), key=lambda x: x.timestamp):
event_obj = event.to_filter_dict()
event_obj['logStreamName'] = self.logStreamName
events.append(event_obj)
return events
class LogGroup:
def __init__(self, region, name, tags):
self.name = name
self.region = region
self.arn = "arn:aws:logs:{region}:1:log-group:{log_group}".format(
region=region, log_group=name)
self.creationTime = unix_time_millis()
self.tags = tags
self.streams = dict() # {name: LogStream}
def create_log_stream(self, log_stream_name):
if log_stream_name in self.streams:
raise ResourceAlreadyExistsException()
self.streams[log_stream_name] = LogStream(self.region, self.name, log_stream_name)
def delete_log_stream(self, log_stream_name):
if log_stream_name not in self.streams:
raise ResourceNotFoundException()
del self.streams[log_stream_name]
def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by):
# responses only logStreamName, creationTime, arn, storedBytes when no events are stored.
log_streams = [(name, stream.to_describe_dict()) for name, stream in self.streams.items() if name.startswith(log_stream_name_prefix)]
def sorter(item):
return item[0] if order_by == 'logStreamName' else item[1].get('lastEventTimestamp', 0)
if next_token is None:
next_token = 0
log_streams = sorted(log_streams, key=sorter, reverse=descending)
new_token = next_token + limit
log_streams_page = [x[1] for x in log_streams[next_token: new_token]]
if new_token >= len(log_streams):
new_token = None
return log_streams_page, new_token
def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token):
if log_stream_name not in self.streams:
raise ResourceNotFoundException()
stream = self.streams[log_stream_name]
return stream.put_log_events(log_group_name, log_stream_name, log_events, sequence_token)
def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head):
if log_stream_name not in self.streams:
raise ResourceNotFoundException()
stream = self.streams[log_stream_name]
return stream.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head)
def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved):
streams = [stream for name, stream in self.streams.items() if not log_stream_names or name in log_stream_names]
events = []
for stream in streams:
events += stream.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved)
if interleaved:
events = sorted(events, key=lambda event: event['timestamp'])
if next_token is None:
next_token = 0
events_page = events[next_token: next_token + limit]
next_token += limit
if next_token >= len(events):
next_token = None
searched_streams = [{"logStreamName": stream.logStreamName, "searchedCompletely": True} for stream in streams]
return events_page, next_token, searched_streams
def to_describe_dict(self):
return {
"arn": self.arn,
"creationTime": self.creationTime,
"logGroupName": self.name,
"metricFilterCount": 0,
"retentionInDays": 30,
"storedBytes": sum(s.storedBytes for s in self.streams.values()),
}
class LogsBackend(BaseBackend):
def __init__(self, region_name):
self.region_name = region_name
self.groups = dict() # { logGroupName: LogGroup}
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def create_log_group(self, log_group_name, tags):
if log_group_name in self.groups:
raise ResourceAlreadyExistsException()
self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags)
def ensure_log_group(self, log_group_name, tags):
if log_group_name in self.groups:
return
self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags)
def delete_log_group(self, log_group_name):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
del self.groups[log_group_name]
def describe_log_groups(self, limit, log_group_name_prefix, next_token):
if log_group_name_prefix is None:
log_group_name_prefix = ''
if next_token is None:
next_token = 0
groups = sorted(group.to_describe_dict() for name, group in self.groups.items() if name.startswith(log_group_name_prefix))
groups_page = groups[next_token:next_token + limit]
next_token += limit
if next_token >= len(groups):
next_token = None
return groups_page, next_token
def create_log_stream(self, log_group_name, log_stream_name):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.create_log_stream(log_stream_name)
def delete_log_stream(self, log_group_name, log_stream_name):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.delete_log_stream(log_stream_name)
def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.describe_log_streams(descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by)
def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token):
# TODO: add support for sequence_tokens
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.put_log_events(log_group_name, log_stream_name, log_events, sequence_token)
def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head)
def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
log_group = self.groups[log_group_name]
return log_group.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved)
logs_backends = {region.name: LogsBackend(region.name) for region in boto.logs.regions()}
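# Round-trip sketch against this backend (assumes moto's public mock
# decorator and boto3 are installed; the region and names are illustrative):
#
#     import boto3
#     from moto import mock_logs
#
#     @mock_logs
#     def test_round_trip():
#         client = boto3.client('logs', region_name='us-east-1')
#         client.create_log_group(logGroupName='group')
#         client.create_log_stream(logGroupName='group', logStreamName='stream')
#         client.put_log_events(
#             logGroupName='group', logStreamName='stream',
#             logEvents=[{'timestamp': 0, 'message': 'hello'}])
#         resp = client.get_log_events(logGroupName='group',
#                                      logStreamName='stream')
#         assert resp['events'][0]['message'] == 'hello'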
|
{
"content_hash": "4d35bfe359e671a7941c5c09da616a20",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 150,
"avg_line_length": 40.86986301369863,
"alnum_prop": 0.6344896933132227,
"repo_name": "botify-labs/moto",
"id": "a4ff9db46114fb53fa9694f1b7058d32e555b230",
"size": "11934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/logs/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1143"
},
{
"name": "Python",
"bytes": "4578457"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
}
|
from .mcpp_lib import *
|
{
"content_hash": "5a2e8c3a4e71d92b6441c51b517c0e10",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 23,
"avg_line_length": 23,
"alnum_prop": 0.7391304347826086,
"repo_name": "filonik/clibs",
"id": "c64b6d4e039ded4eaf38c81f4d7fad092ca869cd",
"size": "23",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clibs/mcpp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "194397"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_snapshot_group
short_description: NetApp E-Series manage snapshot groups
description:
- Create, update, delete snapshot groups for NetApp E-series storage arrays
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
state:
description:
- Whether to ensure the group is present or absent.
required: True
choices:
- present
- absent
name:
description:
- The name to give the snapshot group
required: True
base_volume_name:
description:
- The name of the base volume or thin volume to use as the base for the new snapshot group.
- If a snapshot group with an identical C(name) already exists but with a different base volume
an error will be returned.
required: True
repo_pct:
description:
- The size of the repository in relation to the size of the base volume
required: False
default: 20
warning_threshold:
description:
- The repository utilization warning threshold, as a percentage of the repository volume capacity.
required: False
default: 80
delete_limit:
description:
- The automatic deletion indicator.
- If non-zero, the oldest snapshot image will be automatically deleted when creating a new snapshot image to keep the total number of
snapshot images limited to the number specified.
- This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
required: False
default: 30
full_policy:
description:
- The behavior on when the data repository becomes full.
- This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
required: False
default: purgepit
choices:
- purgepit
- unknown
- failbasewrites
- __UNDEFINED
storage_pool_name:
required: True
description:
- The name of the storage pool on which to allocate the repository volume.
rollback_priority:
required: False
description:
- The importance of the rollback operation.
- This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
choices:
- highest
- high
- medium
- low
- lowest
- __UNDEFINED
default: medium
"""
EXAMPLES = """
- name: Configure Snapshot group
netapp_e_snapshot_group:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
base_volume_name: SSGroup_test
    name: OOSS_Group
repo_pct: 20
warning_threshold: 85
delete_limit: 30
full_policy: purgepit
storage_pool_name: Disk_Pool_1
rollback_priority: medium
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
sample: json facts for newly created snapshot group.
"""
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as err:
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
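# Typical call shape for the helper above (the names are this module's own;
# url and ssid values are illustrative):
#
#     rc, data = request(url + 'storage-systems/%s/volumes' % ssid,
#                        headers=HEADERS, url_username=user,
#                        url_password=pwd, validate_certs=certs)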
class SnapshotGroup(object):
def __init__(self):
argument_spec = basic_auth_argument_spec()
argument_spec.update(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
state=dict(required=True, choices=['present', 'absent']),
base_volume_name=dict(required=True),
name=dict(required=True),
repo_pct=dict(default=20, type='int'),
warning_threshold=dict(default=80, type='int'),
delete_limit=dict(default=30, type='int'),
full_policy=dict(default='purgepit', choices=['unknown', 'failbasewrites', 'purgepit']),
rollback_priority=dict(default='medium', choices=['highest', 'high', 'medium', 'low', 'lowest']),
storage_pool_name=dict(type='str'),
ssid=dict(required=True),
)
self.module = AnsibleModule(argument_spec=argument_spec)
self.post_data = dict()
self.warning_threshold = self.module.params['warning_threshold']
self.base_volume_name = self.module.params['base_volume_name']
self.name = self.module.params['name']
self.repo_pct = self.module.params['repo_pct']
self.delete_limit = self.module.params['delete_limit']
self.full_policy = self.module.params['full_policy']
self.rollback_priority = self.module.params['rollback_priority']
self.storage_pool_name = self.module.params['storage_pool_name']
self.state = self.module.params['state']
self.url = self.module.params['api_url']
self.user = self.module.params['api_username']
self.pwd = self.module.params['api_password']
self.certs = self.module.params['validate_certs']
self.ssid = self.module.params['ssid']
if not self.url.endswith('/'):
self.url += '/'
self.changed = False
@property
def pool_id(self):
pools = 'storage-systems/%s/storage-pools' % self.ssid
url = self.url + pools
try:
(rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd)
except Exception as err:
self.module.fail_json(msg="Snapshot group module - Failed to fetch storage pools. " +
"Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
for pool in data:
if pool['name'] == self.storage_pool_name:
self.pool_data = pool
return pool['id']
self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.name)
@property
def volume_id(self):
volumes = 'storage-systems/%s/volumes' % self.ssid
url = self.url + volumes
try:
rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
validate_certs=self.certs)
except Exception as err:
self.module.fail_json(msg="Snapshot group module - Failed to fetch volumes. " +
"Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
qty = 0
for volume in data:
if volume['name'] == self.base_volume_name:
qty += 1
if qty > 1:
self.module.fail_json(msg="More than one volume with the name: %s was found, "
"please ensure your volume has a unique name" % self.base_volume_name)
else:
Id = volume['id']
self.volume = volume
try:
return Id
except NameError:
self.module.fail_json(msg="No volume with the name: %s, was found" % self.base_volume_name)
@property
def snapshot_group_id(self):
url = self.url + 'storage-systems/%s/snapshot-groups' % self.ssid
try:
rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
validate_certs=self.certs)
except Exception as err:
self.module.fail_json(msg="Failed to fetch snapshot groups. " +
"Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
for ssg in data:
if ssg['name'] == self.name:
self.ssg_data = ssg
return ssg['id']
return None
@property
def ssg_needs_update(self):
if self.ssg_data['fullWarnThreshold'] != self.warning_threshold or \
self.ssg_data['autoDeleteLimit'] != self.delete_limit or \
self.ssg_data['repFullPolicy'] != self.full_policy or \
self.ssg_data['rollbackPriority'] != self.rollback_priority:
return True
else:
return False
def create_snapshot_group(self):
self.post_data = dict(
baseMappableObjectId=self.volume_id,
name=self.name,
repositoryPercentage=self.repo_pct,
warningThreshold=self.warning_threshold,
autoDeleteLimit=self.delete_limit,
fullPolicy=self.full_policy,
storagePoolId=self.pool_id,
)
snapshot = 'storage-systems/%s/snapshot-groups' % self.ssid
url = self.url + snapshot
try:
rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
except Exception as err:
self.module.fail_json(msg="Failed to create snapshot group. " +
"Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
self.ssid,
to_native(err)))
        # The creation response in self.ssg_data already carries the new
        # group's id; snapshot_group_id is a read-only property that
        # re-queries the array, so it must not be assigned here.
if self.ssg_needs_update:
self.update_ssg()
else:
self.module.exit_json(changed=True, **self.ssg_data)
def update_ssg(self):
self.post_data = dict(
warningThreshold=self.warning_threshold,
autoDeleteLimit=self.delete_limit,
fullPolicy=self.full_policy,
rollbackPriority=self.rollback_priority
)
url = self.url + "storage-systems/%s/snapshot-groups/%s" % (self.ssid, self.snapshot_group_id)
try:
rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
except Exception as err:
self.module.fail_json(msg="Failed to update snapshot group. " +
"Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
self.ssid,
to_native(err)))
def apply(self):
if self.state == 'absent':
if self.snapshot_group_id:
try:
rc, resp = request(
self.url + 'storage-systems/%s/snapshot-groups/%s' % (self.ssid, self.snapshot_group_id),
method='DELETE', headers=HEADERS, url_password=self.pwd, url_username=self.user,
validate_certs=self.certs)
except Exception as err:
self.module.fail_json(msg="Failed to delete snapshot group. " +
"Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
self.ssid,
to_native(err)))
self.module.exit_json(changed=True, msg="Snapshot group removed", **self.ssg_data)
else:
self.module.exit_json(changed=False, msg="Snapshot group absent")
elif self.snapshot_group_id:
if self.ssg_needs_update:
self.update_ssg()
self.module.exit_json(changed=True, **self.ssg_data)
else:
self.module.exit_json(changed=False, **self.ssg_data)
else:
self.create_snapshot_group()
def main():
vg = SnapshotGroup()
vg.apply()
if __name__ == '__main__':
main()
|
{
"content_hash": "c0fa840c8040667706a780ff26f143d7",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 145,
"avg_line_length": 39.69315068493151,
"alnum_prop": 0.5584621755935947,
"repo_name": "SergeyCherepanov/ansible",
"id": "0b678fe818c09758bda0a479e8e43d6b1045fe35",
"size": "14625",
"binary": false,
"copies": "51",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/storage/netapp/netapp_e_snapshot_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
from project import db
import datetime
class Movie(db.Document):
created = db.DateTimeField(default=datetime.datetime.now, required=True)
title = db.StringField(max_length=255, required=True)
summary = db.StringField(max_length=10000, required=True)
tags = db.ListField(db.StringField(max_length=50))
cast = db.ListField(db.StringField())
director = db.StringField()
tmdb_id = db.IntField()
runtime = db.IntField()
poster = db.StringField()
popularity = db.FloatField()
def addTag(self,tag):
if tag not in self.tags:
self.tags.append(tag)
return self
def removeTag(self,tag):
if tag in self.tags:
self.tags.remove(tag)
return self
def getLoan(self,user):
from Loan import Loan
loan = Loan.objects(movie=self,user=user).first()
return loan
def __str__(self):
return self.title
def __repr__(self):
return self.__str__()
def toJSON(self):
import json
return json.dumps({'created': self.created.isoformat(), 'title': self.title, 'summary': self.summary, 'tags': str(self.tags), 'id':str(self.id)})
@staticmethod
def convertMovie(movie):
result = Movie()
result.tmdb_id = int(movie.id)
result.title = str(movie.title)
result.summary = str(movie.overview.encode('utf-8'))
if movie.poster:
sizes = movie.poster.sizes()
if len(sizes) > 0:
medium = int(len(sizes)/2)
result.poster = str(movie.poster.geturl(sizes[medium]))
result.popularity = float(movie.userrating)
result.runtime = int(movie.runtime)
tags = movie.keywords
for tag in tags:
result.addTag(str(tag.name.encode('utf-8')))
genres = movie.genres
for genre in genres:
result.addTag(str(genre.name.encode('utf-8')))
cast = movie.cast
for actor in cast:
result.cast.append("%s:%s" % (actor.name,actor.character))
crew = movie.crew
for person in crew:
job = person.job.encode('utf-8')
if 'director' == job.lower():
result.director = person.name.encode('utf-8')
result.save()
return result
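# Hypothetical usage with a tmdb3-style result object (the attribute names
# mirror those consumed by convertMovie above):
#
#     movie = tmdb3.searchMovie('Blade Runner')[0]
#     doc = Movie.convertMovie(movie)      # persists via .save()
#     doc.addTag('classic').save()         # addTag returns self for chaining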
|
{
"content_hash": "816d1151317aa3ccbf42a4deb0ad17f2",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 147,
"avg_line_length": 28.420289855072465,
"alnum_prop": 0.6925038245792963,
"repo_name": "ndbills/MyMovieLibrary",
"id": "08ab6b4bcf6271c8afb9c8d9e038e1e9f71987ac",
"size": "1961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/model/Movie.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1543"
},
{
"name": "JavaScript",
"bytes": "21284"
},
{
"name": "Python",
"bytes": "24366"
}
],
"symlink_target": ""
}
|
import time
import copy
import numpy as np
from .. import pick_channels
from ..utils import logger, verbose
from ..epochs import _BaseEpochs
from ..event import _find_events
from ..io.proj import setup_proj
class RtEpochs(_BaseEpochs):
"""Realtime Epochs
Can receive epochs in real time from an RtClient.
For example, to get some epochs from a running mne_rt_server on
'localhost', you could use::
client = mne.realtime.RtClient('localhost')
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.realtime.RtEpochs(client, event_id, tmin, tmax)
epochs.start() # start the measurement and start receiving epochs
evoked_1 = epochs.average() # computed over all epochs
evoked_2 = epochs[-5:].average() # computed over the last 5 epochs
Parameters
----------
client : instance of mne.realtime.RtClient
The realtime client.
event_id : int | list of int
The id of the event to consider. If int, only events with the
ID specified by event_id are considered. Multiple event ID's
can be specified using a list.
tmin : float
Start time before event.
tmax : float
End time after event.
stim_channel : string or list of string
Name of the stim channel or all the stim channels affected by
the trigger.
sleep_time : float
Time in seconds to wait between checking for new epochs when epochs
are requested and the receive queue is empty.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
picks : array-like of int | None (default)
Indices of channels to include (if None, all channels are used).
name : string
Comment that describes the Evoked data created.
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # uV (EEG channels)
eog=250e-6 # uV (EOG channels))
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
proj : bool, optional
Apply SSP projection vectors
decim : int
Factor by which to downsample the data from the raw file upon import.
Warning: This simply selects every nth sample, data is not filtered
here. If data is not properly filtered, aliasing artifacts may occur.
reject_tmin : scalar | None
Start of the time window used to reject epochs (with the default None,
the window will start with tmin).
reject_tmax : scalar | None
End of the time window used to reject epochs (with the default None,
the window will end with tmax).
detrend : int | None
If 0 or 1, the data channels (MEG and EEG) will be detrended when
loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None
is no detrending. Note that detrending is performed before baseline
correction. If no DC offset is preferred (zeroth order detrending),
either turn off baseline correction, as this may introduce a DC
shift, or set baseline correction to use the entire time interval
(will yield equivalent results but be slower).
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
    isi_max : float
        The maximum time in seconds between epochs. If no epoch
        arrives within the next isi_max seconds, iteration over the
        RtEpochs stops.
find_events : dict
The arguments to the real-time `find_events` method as a dictionary.
If `find_events` is None, then default values are used.
Valid keys are 'output' | 'consecutive' | 'min_duration' | 'mask'.
Example (also default values)::
find_events = dict(output='onset', consecutive='increasing',
min_duration=0, mask=0)
See mne.find_events for detailed explanation of these options.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to client.verbose.
Attributes
----------
info : dict
Measurement info.
event_id : dict
        Names of conditions corresponding to event_ids.
ch_names : list of string
List of channels' names.
events : array, shape (n_events, 3)
The events associated with the epochs currently in the queue.
verbose : bool, str, int, or None
See above.
"""
@verbose
def __init__(self, client, event_id, tmin, tmax, stim_channel='STI 014',
sleep_time=0.1, baseline=(None, 0), picks=None,
name='Unknown', reject=None, flat=None, proj=True,
decim=1, reject_tmin=None, reject_tmax=None, detrend=None,
add_eeg_ref=True, isi_max=2., find_events=None, verbose=None):
info = client.get_measurement_info()
# the measurement info of the data as we receive it
self._client_info = copy.deepcopy(info)
verbose = client.verbose if verbose is None else verbose
# call _BaseEpochs constructor
super(RtEpochs, self).__init__(
info, event_id, tmin, tmax, baseline=baseline, picks=picks,
name=name, reject=reject, flat=flat, decim=decim,
reject_tmin=reject_tmin, reject_tmax=reject_tmax, detrend=detrend,
add_eeg_ref=add_eeg_ref, verbose=verbose)
self._projector, self.info = setup_proj(self.info, add_eeg_ref,
activate=proj)
self._client = client
if not isinstance(stim_channel, list):
stim_channel = [stim_channel]
stim_picks = pick_channels(self._client_info['ch_names'],
include=stim_channel, exclude=[])
if len(stim_picks) == 0:
raise ValueError('No stim channel found to extract event '
'triggers.')
self._stim_picks = stim_picks
# find_events default options
self._find_events_kwargs = dict(output='onset',
consecutive='increasing',
min_duration=0, mask=0)
# update default options if dictionary is provided
if find_events is not None:
self._find_events_kwargs.update(find_events)
min_samples = (self._find_events_kwargs['min_duration'] *
self.info['sfreq'])
self._find_events_kwargs.pop('min_duration', None)
self._find_events_kwargs['min_samples'] = min_samples
self._sleep_time = sleep_time
# add calibration factors
cals = np.zeros(self._client_info['nchan'])
for k in range(self._client_info['nchan']):
cals[k] = (self._client_info['chs'][k]['range'] *
self._client_info['chs'][k]['cal'])
self._cals = cals[:, None]
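        # shape (nchan, 1) so that ``self._cals * raw_buffer`` broadcasts the
        # per-channel scaling across all time samples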
# FIFO queues for received epochs and events
self._epoch_queue = list()
self._events = list()
# variables needed for receiving raw buffers
self._last_buffer = None
self._first_samp = 0
self._event_backlog = list()
# Number of good and bad epochs received
self._n_good = 0
self._n_bad = 0
self._started = False
self._last_time = time.time()
self.isi_max = isi_max
@property
def events(self):
"""The events associated with the epochs currently in the queue."""
return np.array(self._events)
def start(self):
"""Start receiving epochs
The measurement will be started if it has not already been started.
"""
if not self._started:
# register the callback
self._client.register_receive_callback(self._process_raw_buffer)
# start the measurement and the receive thread
nchan = self._client_info['nchan']
self._client.start_receive_thread(nchan)
self._started = True
self._last_time = np.inf # init delay counter. Will stop iters
def stop(self, stop_receive_thread=False, stop_measurement=False):
"""Stop receiving epochs
Parameters
----------
stop_receive_thread : bool
Stop the receive thread. Note: Other RtEpochs instances will also
stop receiving epochs when the receive thread is stopped. The
receive thread will always be stopped if stop_measurement is True.
stop_measurement : bool
Also stop the measurement. Note: Other clients attached to the
server will also stop receiving data.
"""
if self._started:
self._client.unregister_receive_callback(self._process_raw_buffer)
self._started = False
if stop_receive_thread or stop_measurement:
self._client.stop_receive_thread(stop_measurement=stop_measurement)
def next(self, return_event_id=False):
"""To make iteration over epochs easy.
Parameters
----------
return_event_id : bool
            If True, return both an epoch and an event_id.
Returns
-------
epoch : instance of Epochs
The epoch.
event_id : int
The event id. Only returned if ``return_event_id`` is ``True``.
"""
first = True
while True:
current_time = time.time()
if current_time > (self._last_time + self.isi_max):
logger.info('Time of %s seconds exceeded.' % self.isi_max)
raise StopIteration
if len(self._epoch_queue) > self._current:
epoch = self._epoch_queue[self._current]
event_id = self._events[self._current][-1]
self._current += 1
self._last_time = current_time
if return_event_id:
return epoch, event_id
else:
return epoch
if self._started:
if first:
logger.info('Waiting for epoch %d' % (self._current + 1))
first = False
time.sleep(self._sleep_time)
else:
raise RuntimeError('Not enough epochs in queue and currently '
'not receiving epochs, cannot get epochs!')
def _get_data_from_disk(self):
"""Return the data for n_epochs epochs"""
epochs = list()
for epoch in self:
epochs.append(epoch)
data = np.array(epochs)
return data
def _process_raw_buffer(self, raw_buffer):
"""Process raw buffer (callback from RtClient)
        Note: Do not print log messages during regular use. They will be
        printed asynchronously, which is annoying when working in an
        interactive shell.
Parameters
----------
raw_buffer : array of float, shape=(nchan, n_times)
The raw buffer.
"""
verbose = 'ERROR'
sfreq = self.info['sfreq']
n_samp = len(self._raw_times)
# relative start and stop positions in samples
tmin_samp = int(round(sfreq * self.tmin))
tmax_samp = tmin_samp + n_samp
last_samp = self._first_samp + raw_buffer.shape[1] - 1
# apply calibration without inplace modification
raw_buffer = self._cals * raw_buffer
# detect events
data = np.abs(raw_buffer[self._stim_picks]).astype(np.int)
data = np.atleast_2d(data)
buff_events = _find_events(data, self._first_samp, verbose=verbose,
**self._find_events_kwargs)
events = self._event_backlog
for event_id in self.event_id.values():
idx = np.where(buff_events[:, -1] == event_id)[0]
events.extend(zip(list(buff_events[idx, 0]),
list(buff_events[idx, -1])))
events.sort()
event_backlog = list()
for event_samp, event_id in events:
epoch = None
if (event_samp + tmin_samp >= self._first_samp and
event_samp + tmax_samp <= last_samp):
# easy case: whole epoch is in this buffer
start = event_samp + tmin_samp - self._first_samp
stop = event_samp + tmax_samp - self._first_samp
epoch = raw_buffer[:, start:stop]
elif (event_samp + tmin_samp < self._first_samp and
event_samp + tmax_samp <= last_samp):
# have to use some samples from previous buffer
if self._last_buffer is None:
continue
n_last = self._first_samp - (event_samp + tmin_samp)
n_this = n_samp - n_last
epoch = np.c_[self._last_buffer[:, -n_last:],
raw_buffer[:, :n_this]]
elif event_samp + tmax_samp > last_samp:
# we need samples from the future
# we will process this epoch with the next buffer
event_backlog.append((event_samp, event_id))
else:
                raise RuntimeError('Unhandled case.')
if epoch is not None:
self._append_epoch_to_queue(epoch, event_samp, event_id)
# set things up for processing of next buffer
self._event_backlog = event_backlog
n_buffer = raw_buffer.shape[1]
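        # _last_buffer is a rolling window of past samples: it is grown until
        # it holds roughly one epoch length plus one raw buffer of history,
        # then shifted in place so epochs that straddle buffer boundaries can
        # still be assembled.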
if self._last_buffer is None:
self._last_buffer = raw_buffer
self._first_samp = last_samp + 1
elif self._last_buffer.shape[1] <= n_samp + n_buffer:
self._last_buffer = np.c_[self._last_buffer, raw_buffer]
else:
# do not increase size of _last_buffer any further
self._first_samp = self._first_samp + n_buffer
self._last_buffer[:, :-n_buffer] = self._last_buffer[:, n_buffer:]
self._last_buffer[:, -n_buffer:] = raw_buffer
def _append_epoch_to_queue(self, epoch, event_samp, event_id):
"""Append a (raw) epoch to queue
        Note: Do not print log messages during regular use. They will be
        printed asynchronously, which is annoying when working in an
        interactive shell.
Parameters
----------
epoch : array of float, shape=(nchan, n_times)
The raw epoch (only calibration has been applied) over all
channels.
event_samp : int
The time in samples when the epoch occurred.
event_id : int
The event ID of the epoch.
"""
# select the channels
epoch = epoch[self.picks, :]
# handle offset
if self._offset is not None:
epoch += self._offset
# apply SSP
if self.proj and self._projector is not None:
epoch = np.dot(self._projector, epoch)
# Detrend, baseline correct, decimate
epoch = self._preprocess(epoch, verbose='ERROR')
# Decide if this is a good epoch
is_good, _ = self._is_good_epoch(epoch, verbose='ERROR')
if is_good:
self._epoch_queue.append(epoch)
self._events.append((event_samp, 0, event_id))
self._n_good += 1
else:
self._n_bad += 1
def __repr__(self):
        s = 'good / bad epochs received: %d / %d, epochs in queue: %d'\
% (self._n_good, self._n_bad, len(self._epoch_queue))
s += ', tmin : %s (s)' % self.tmin
s += ', tmax : %s (s)' % self.tmax
s += ', baseline : %s' % str(self.baseline)
return '<RtEpochs | %s>' % s
|
{
"content_hash": "6efc4f7ece87f5f1e2e1ffe82dc955ff",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 79,
"avg_line_length": 39.266666666666666,
"alnum_prop": 0.5750060635459617,
"repo_name": "Odingod/mne-python",
"id": "acec512083c66aabb2cc91288b7111d134587deb",
"size": "16809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/realtime/epochs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3403"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "3741370"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import orm
from scheduler_service import pg_db
from . import metadata
from .mixin import CRUDMixin
class Task(orm.Model, CRUDMixin):
__tablename__ = 'task'
__metadata__ = metadata
__database__ = pg_db
id = orm.Integer(primary_key=True)
name = orm.String(max_length=32)
interval = orm.Integer(allow_null=True)
start_time = orm.DateTime(default=datetime.utcnow)
cookies = orm.JSON(allow_null=True)
user_id = orm.Integer()
def to_dict(self) -> dict:
return {
"id": self.id,
"name": self.name,
"email": self.interval,
"start_time": self.start_time,
"cookies": self.cookies
}
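    # Usage sketch (hypothetical values; assumes the async create helper that
    # encode/orm-style models expose -- adjust to the actual CRUDMixin API):
    #
    #     task = await Task.objects.create(name='crawl', interval=3600,
    #                                      user_id=1)
    #     print(task.to_dict())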
class URLDetail(orm.Model, CRUDMixin):
__tablename__ = 'url_detail'
__metadata__ = metadata
__database__ = pg_db
id = orm.Integer(primary_key=True)
name = orm.String(max_length=32)
request_url = orm.String(max_length=128)
callback_url = orm.String(max_length=128)
params = orm.JSON()
task_id = orm.Integer()
# class Response(Document):
# time = DateTimeField(default=datetime.now)
# body = JsonField()
|
{
"content_hash": "181c137574a004fd772b431c989db5a1",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 54,
"avg_line_length": 24.081632653061224,
"alnum_prop": 0.6161016949152542,
"repo_name": "moonlitlaputa/scheduler-service",
"id": "8154b5472ef14bdefd2127813b64842d839e2682",
"size": "1180",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scheduler_service/models/task.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Nginx",
"bytes": "2510"
},
{
"name": "Python",
"bytes": "19798"
},
{
"name": "Shell",
"bytes": "209"
}
],
"symlink_target": ""
}
|
"""
Copyright 2016-present Tony Peng
Implementation of the papers "Perceptual Losses for Real-Time Style Transfer and Super-Resolution"
by Justin Johnson, Alexandre Alahi, and Li Fei-Fei and "A Neural Algorithm of Artistic Style"
by Leon Gatys, Alexander S Ecker, and Matthias Bethge.
"""
import nets
import numpy as np
import os
import shutil
import tensorflow as tf
import utils
from random import shuffle
CONTENT_WEIGHT = 5
STYLE_WEIGHT = 85
DENOISE_WEIGHT = 5
LEARNING_RATE = 1e-3
EPOCHS = 2
DEVICE = '/gpu:0'
MODEL_OUTPUT_PATH = 'models/trained/WhiteLine'
MODEL_NAME = 'model'
TRAIN_DATASET_PATH = '/home/ubuntu/dataset/train2014'
VGG_MODEL_PATH = 'models/vgg/imagenet-vgg-verydeep-19.mat'
STYLE_IMAGE_PATH = 'runs/WhiteLine/style.jpg'
CONTENT_IMAGE_SIZE = (256, 256) # (height, width)
STYLE_SCALE = 1.0
MINI_BATCH_SIZE = 16
VALIDATION_IMAGE_PATH = 'runs/WhiteLine/content.jpg'
OUTPUT_PATH = 'runs/WhiteLine'
PREVIEW_ITERATIONS = 50
CHECKPOINT_ITERATIONS = 500
CONTENT_LAYER = 'relu4_2'
# layer: w_l
STYLE_LAYERS = {
'relu1_1': 0.2,
'relu2_1': 0.2,
'relu3_1': 0.2,
'relu4_1': 0.2,
'relu5_1': 0.2,
}
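# Each w_l weights that layer's Gram-matrix term in the style loss; the five
# VGG layers contribute equally here and the weights sum to 1.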
# batch shape is (batch, height, width, channels)
batch_shape = (MINI_BATCH_SIZE, ) + CONTENT_IMAGE_SIZE + (3, )
style_image = utils.read_image(STYLE_IMAGE_PATH,
size=tuple(int(d * STYLE_SCALE) for d in CONTENT_IMAGE_SIZE))
train_data = utils.get_train_data_filepaths(TRAIN_DATASET_PATH)
print("Training dataset loaded: " + str(len(train_data)) + " images.")
validation_image = utils.read_image(VALIDATION_IMAGE_PATH, size=CONTENT_IMAGE_SIZE)
def evaluate_stylzr_output(t, feed_dict=None):
return t.eval(feed_dict=feed_dict)
output_evaluator = evaluate_stylzr_output
# Overrides for Gatys style transfer
# CONTENT_IMAGE_PATH = 'KillianCourt.jpg'
# gatys_content_image = utils.read_image(CONTENT_IMAGE_PATH)
# CONTENT_IMAGE_SIZE = gatys_content_image.shape[:2]
# train_data = np.array([CONTENT_IMAGE_PATH])
# batch_shape = (1, ) + gatys_content_image.shape
# style_image = utils.read_image(STYLE_IMAGE_PATH,
# size=tuple(gatys_content_image.shape[:2]))
#
# def evaluate_gatys_output(t, **kwargs):
# return np.clip(t.eval(), 0, 255).astype(np.uint8)
#
# output_evaluator = evaluate_gatys_output
# End overrides for Gatys style transfer
g = tf.Graph()
with g.as_default(), g.device(DEVICE), tf.Session(
config=tf.ConfigProto(allow_soft_placement=True)) as sess:
style_input = tf.placeholder(tf.float32, (1,) + style_image.shape)
content_batch = tf.placeholder(tf.float32, shape=batch_shape,
name="input_content_batch")
# Pre-compute style gram matrices
print("1. Pre-computing style Gram matrices...")
style_net, style_layers = nets.vgg(VGG_MODEL_PATH, style_input)
grams = {}
for layer, _ in STYLE_LAYERS.items():
feature_maps = style_layers[layer].eval(
feed_dict={style_input: np.array([style_image])})
grams[layer] = utils.gram_matrix(feature_maps[0])
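    # For a feature map F of shape (H, W, C), the Gram matrix is the C x C
    # matrix of inner products between channel activations (normalised as
    # implemented in utils.gram_matrix); it captures texture/style while
    # discarding spatial layout.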
# Clean up
style_net = None
style_layers = None
# Create content target
print("2. Creating content target...")
content_net, content_layers = nets.vgg(VGG_MODEL_PATH, content_batch)
content_target = content_layers[CONTENT_LAYER]
# Construct transfer network
print("3. Constructing style transfer network...")
# transfer_net = nets.gatys(gatys_content_image.shape)
transfer_net = nets.stylzr(content_batch)
# Set up losses
print("4. Constructing loss network...")
loss_network, loss_layers = nets.vgg(VGG_MODEL_PATH, transfer_net)
print("5. Creating losses...")
loss_content = (tf.nn.l2_loss(loss_layers[CONTENT_LAYER] - content_target)
/ tf.to_float(tf.size(content_target)))
loss_style = 0
for layer, w_l in STYLE_LAYERS.items():
feature_maps = loss_layers[layer]
gram = utils.tf_batch_gram_matrix(feature_maps)
gram_target = grams[layer]
loss_style += w_l * tf.nn.l2_loss(gram_target - gram) / (gram_target.size * MINI_BATCH_SIZE)
loss_tv = (
(tf.nn.l2_loss(transfer_net[:, 1:, :, :] - transfer_net[:, :batch_shape[1]-1, :, :]) / tf.to_float(tf.size(transfer_net[0, 1:, :, :]))
+ tf.nn.l2_loss(transfer_net[:, :, 1:, :] - transfer_net[:, :, :batch_shape[2]-1, :]) / tf.to_float(tf.size(transfer_net[0, :, 1:, :])))
/ MINI_BATCH_SIZE
)
loss = CONTENT_WEIGHT * loss_content + STYLE_WEIGHT * loss_style + DENOISE_WEIGHT * loss_tv
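    # Overall objective as in Johnson et al.: a weighted sum of the feature
    # (content) reconstruction loss, the Gram-matrix style loss, and a
    # total-variation smoothness term on the generated images.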
# Optimize
print("6. Optimizing...")
optimize = (tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
.minimize(loss))
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver()
global_it = 0
for n in range(EPOCHS):
shuffle(train_data)
for s in range(0, len(train_data), MINI_BATCH_SIZE):
global_it_num = global_it + 1
batch = np.array([utils.read_image(f, size=CONTENT_IMAGE_SIZE)
for f in train_data[s:s+MINI_BATCH_SIZE]])
if len(batch) < MINI_BATCH_SIZE:
print(
"Skipping mini-batch because there are not enough samples.")
continue
_, curr_loss = sess.run([optimize, loss],
feed_dict={content_batch: batch})
print("Iteration "+str(global_it_num)+": Loss="+str(curr_loss))
if global_it_num % PREVIEW_ITERATIONS == 0:
curr_styled_images = output_evaluator(transfer_net,
feed_dict={content_batch: batch})
# take the first images
curr_styled_image = curr_styled_images[0]
curr_orig_image = batch[0]
styled_output_path = utils.get_output_filepath(OUTPUT_PATH,
'styled', str(global_it_num))
orig_output_path = utils.get_output_filepath(OUTPUT_PATH,
'orig', str(global_it_num))
utils.write_image(curr_styled_image, styled_output_path)
utils.write_image(curr_orig_image, orig_output_path)
valid_styled_image = output_evaluator(transfer_net,
feed_dict={content_batch: np.array([validation_image]*MINI_BATCH_SIZE)})
valid_output_path = utils.get_output_filepath(OUTPUT_PATH,
'valid', str(global_it_num))
utils.write_image(valid_styled_image[0], valid_output_path)
if global_it_num % CHECKPOINT_ITERATIONS == 0:
utils.save_model_with_backup(sess, saver, MODEL_OUTPUT_PATH, MODEL_NAME)
global_it += 1
utils.save_model_with_backup(sess, saver, MODEL_OUTPUT_PATH, MODEL_NAME)
print("7: Profit!")
|
{
"content_hash": "c3d67f611e20fb91a57bf688eb5898ac",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 148,
"avg_line_length": 38.23728813559322,
"alnum_prop": 0.6323877068557919,
"repo_name": "tonypeng/tensorstyle",
"id": "b4e6ab7bd5faf76321f0b2699244e51a7d87b956",
"size": "6768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16073"
}
],
"symlink_target": ""
}
|