text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from .._util import weak_method
from ..api_wrapper_interface import ApiWrapperInterface
from ..helpers import log_and_show_message
from ..server_resource_interface import ServerStatus
from .api_decorator import register_decorated_handlers
from .interface import ClientHandlerInterface
from functools import partial
from LSP.plugin import ClientConfig
from LSP.plugin import LanguageHandler
from LSP.plugin import Notification
from LSP.plugin import read_client_config
from LSP.plugin import Request
from LSP.plugin import Response
from LSP.plugin import WorkspaceFolder
from LSP.plugin.core.typing import Any, Callable, Dict
from sublime_lib import ActivityIndicator
from weakref import ref
import sublime
__all__ = ['ClientHandler']
ApiNotificationHandler = Callable[[Any], None]
ApiRequestHandler = Callable[[Any, Callable[[Any], None]], None]
class ApiWrapper(ApiWrapperInterface):
    """Implements ``ApiWrapperInterface`` on top of a weakly referenced client.

    Every operation dereferences the weak client reference first and silently
    becomes a no-op once the client has been garbage collected. Handlers are
    wrapped with ``weak_method`` so that registering them does not keep their
    owner alive.
    """

    def __init__(self, client: 'ref[LanguageHandler]'):
        # Weak reference to the LanguageHandler; calling it may yield None.
        self.__client = client

    # --- ApiWrapperInterface -----------------------------------------------------------------------------------------

    def on_notification(self, method: str, handler: Callable[[Any], None]) -> None:
        def dispatch_notification(weak_handler: ApiNotificationHandler, params: Any) -> None:
            weak_handler(params)

        client = self.__client()
        if client:
            client.on_notification(method, partial(dispatch_notification, weak_method(handler)))

    def on_request(self, method: str, handler: ApiRequestHandler) -> None:
        def respond(request_id, result):
            # Re-resolve the weak reference at response time; the client may
            # have gone away while the handler was computing the result.
            client = self.__client()
            if client:
                client.send_response(Response(request_id, result))

        def dispatch_request(weak_handler: ApiRequestHandler, params: Any, request_id: Any) -> None:
            weak_handler(params, lambda result: respond(request_id, result))

        client = self.__client()
        if client:
            client.on_request(method, partial(dispatch_request, weak_method(handler)))

    def send_notification(self, method: str, params: Any) -> None:
        client = self.__client()
        if client:
            client.send_notification(Notification(method, params))

    def send_request(self, method: str, params: Any, handler: Callable[[Any, bool], None]) -> None:
        client = self.__client()
        if client:
            # The second handler argument flags whether the reply is an error.
            on_success = lambda result: handler(result, False)
            on_error = lambda result: handler(result, True)
            client.send_request(Request(method, params), on_success, on_error)
class ClientHandler(LanguageHandler, ClientHandlerInterface):
    """Glue between the LSP plugin's ST3 ``LanguageHandler`` API and lsp_utils'
    ``ClientHandlerInterface``.

    Subclasses describe a concrete language server; this base class handles
    reading settings into a ``ClientConfig``, deciding whether the server may
    start for a window, and installing a managed server when needed.
    """

    # Guards setup() so its one-time logic only runs once per plugin lifecycle.
    _setup_called = False

    # --- LanguageHandler handlers ------------------------------------------------------------------------------------

    @property
    def name(self) -> str:
        # Configuration name used by LSP, derived from the displayed name.
        return self.get_displayed_name().lower()

    @classmethod
    def additional_variables(cls) -> Dict[str, str]:
        # Extra variables available for expansion in settings values.
        return cls.get_additional_variables()

    @property
    def config(self) -> ClientConfig:
        """Build the ClientConfig from the package settings file.

        Missing keys are filled from the default schema, the config is
        disabled when a managed server is not yet available, and an empty
        'command' falls back to get_command().
        """
        settings, filepath = self.read_settings()
        settings_dict = {}
        for key, default in self.get_default_settings_schema().items():
            settings_dict[key] = settings.get(key, default)
        if self.manages_server():
            # A managed server may only be enabled once it exists.
            can_enable = self.get_server() is not None
        else:
            can_enable = True
        enabled = settings_dict.get('enabled', True) and can_enable
        settings_dict['enabled'] = enabled
        if not settings_dict['command']:
            settings_dict['command'] = self.get_command()
        client_config = read_client_config(self.name, settings_dict, filepath)
        # Let the subclass react to the (possibly changed) settings.
        self.on_settings_changed(client_config.settings)
        return client_config

    @classmethod
    def on_start(cls, window: sublime.Window) -> bool:
        """Return True when the server is allowed to start for this window."""
        if cls.manages_server():
            server = cls.get_server()
            if server is None or server.get_status() != ServerStatus.READY:
                log_and_show_message('{}: Server not ready'.format(cls.get_displayed_name()))
                return False
        startup_view = window.active_view()
        workspace_folders = [WorkspaceFolder.from_path(folder) for folder in window.folders()]
        # is_allowed_to_start() returns an error message, or None when allowed.
        message = cls.is_allowed_to_start(window, startup_view, workspace_folders)
        if message:
            log_and_show_message('{}: {}'.format(cls.get_displayed_name(), message))
            return False
        return True

    def on_initialized(self, client) -> None:
        # Wrap the client weakly so registered handlers don't keep it alive,
        # then expose the API to decorated handlers and the subclass.
        api = ApiWrapper(ref(client))
        register_decorated_handlers(self, api)
        self.on_ready(api)

    # --- ClientHandlerInterface --------------------------------------------------------------------------------------

    @classmethod
    def setup(cls) -> None:
        """One-time setup; schedules install/update of the managed server if needed."""
        if cls._setup_called:
            return
        cls._setup_called = True
        super().setup()
        if cls.manages_server():
            name = cls.package_name
            server = cls.get_server()
            if not server:
                return
            try:
                if not server.needs_installation():
                    return
            except Exception as exception:
                log_and_show_message('{}: Error checking if server was installed: {}'.format(name, str(exception)))
                return

            def perform_install() -> None:
                # Runs on Sublime's async thread; shows progress while installing.
                try:
                    message = '{}: Installing server in path: {}'.format(name, cls.storage_path())
                    log_and_show_message(message, show_in_status=False)
                    with ActivityIndicator(sublime.active_window(), message):
                        server.install_or_update()
                    log_and_show_message('{}: Server installed. Sublime Text restart is required.'.format(name))
                except Exception as exception:
                    log_and_show_message('{}: Server installation error: {}'.format(name, str(exception)))

            sublime.set_timeout_async(perform_install)

    @classmethod
    def cleanup(cls) -> None:
        super().cleanup()
        # Allow setup() to run again after cleanup (e.g. on plugin reload).
        cls._setup_called = False

    # --- Internals ---------------------------------------------------------------------------------------------------

    @classmethod
    def get_default_settings_schema(cls) -> Dict[str, Any]:
        # Keys every settings file is expected to provide, with their defaults.
        return {
            'command': [],
            'enabled': True,
            'env': {},
            'experimental_capabilities': {},
            'initializationOptions': {},
            'languages': [],
            'settings': {},
        }

    def __init__(self):
        super().__init__()
        # Calling setup() also here as this might run before `plugin_loaded`.
        # Will be a no-op if already ran.
        # See https://github.com/sublimelsp/LSP/issues/899
        self.setup()
|
{
"content_hash": "31fcd4a455e5b61bc4a59851716243c2",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 119,
"avg_line_length": 39.406976744186046,
"alnum_prop": 0.5824727058129242,
"repo_name": "dmilith/SublimeText3-dmilith",
"id": "4bb280e572703d68763d9b02431e251018a220f1",
"size": "6793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Packages/lsp_utils/st3/lsp_utils/_client_handler/language_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "659"
},
{
"name": "C",
"bytes": "3406335"
},
{
"name": "C++",
"bytes": "532143"
},
{
"name": "CSS",
"bytes": "8658"
},
{
"name": "DTrace",
"bytes": "12324"
},
{
"name": "Elixir",
"bytes": "346"
},
{
"name": "Go",
"bytes": "282"
},
{
"name": "HTML",
"bytes": "123"
},
{
"name": "JavaScript",
"bytes": "119153"
},
{
"name": "Makefile",
"bytes": "242"
},
{
"name": "PHP",
"bytes": "4545"
},
{
"name": "Python",
"bytes": "9755009"
},
{
"name": "Roff",
"bytes": "20024"
},
{
"name": "Ruby",
"bytes": "45"
},
{
"name": "Shell",
"bytes": "7754"
}
],
"symlink_target": ""
}
|
from sublime_plugin import TextCommand
from ..libraries.tools import add_library_to_sketch
class DeviotInsertLibraryCommand(TextCommand):
    """Text command inserting an include for the library at ``path`` into the
    sketch shown in the current view."""

    def run(self, edit, path):
        # All the actual work lives in the shared tools helper.
        view = self.view
        add_library_to_sketch(view, edit, path)
|
{
"content_hash": "74aebebc60746d485931604186b8b0b2",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 52,
"avg_line_length": 32,
"alnum_prop": 0.7589285714285714,
"repo_name": "gepd/Deviot",
"id": "0d9d5744a190bae82a70f9450cf0646d0a32fbce",
"size": "224",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "commands/deviot_insert_library.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "260"
},
{
"name": "Python",
"bytes": "541555"
}
],
"symlink_target": ""
}
|
import ddt
import six
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
import testtools
from testtools import testcase as tc
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
CONF = config.CONF
@ddt.ddt
class ManageNFSSnapshotTest(base.BaseSharesAdminTest):
    """Tests manage/unmanage of share snapshots over NFS.

    The subclasses below re-run the same scenarios for other protocols by
    overriding ``protocol``.
    """

    # Share protocol exercised by this class (overridden by subclasses).
    protocol = 'nfs'

    # NOTE(vponomaryov): be careful running these tests using generic driver
    # because cinder volume snapshots won't be deleted.

    @classmethod
    @base.skip_if_microversion_lt("2.12")
    @testtools.skipIf(
        CONF.share.multitenancy_enabled,
        "Only for driver_handles_share_servers = False driver mode.")
    @testtools.skipUnless(
        CONF.share.run_manage_unmanage_snapshot_tests,
        "Manage/unmanage snapshot tests are disabled.")
    def resource_setup(cls):
        """Create the share type and the base share used by every test."""
        super(ManageNFSSnapshotTest, cls).resource_setup()
        if cls.protocol not in CONF.share.enable_protocols:
            message = "%s tests are disabled" % cls.protocol
            raise cls.skipException(message)
        # Create share type
        cls.st_name = data_utils.rand_name("tempest-manage-st-name")
        cls.extra_specs = {
            'storage_protocol': CONF.share.capability_storage_protocol,
            'driver_handles_share_servers': False,
            'snapshot_support': six.text_type(
                CONF.share.capability_snapshot_support),
            'create_share_from_snapshot_support': six.text_type(
                CONF.share.capability_create_share_from_snapshot_support)
        }
        cls.st = cls.create_share_type(
            name=cls.st_name,
            cleanup_in_class=True,
            extra_specs=cls.extra_specs)
        # Create the base share
        cls.share = cls.create_share(share_type_id=cls.st['share_type']['id'],
                                     share_protocol=cls.protocol)
        # Get updated data
        cls.share = cls.shares_v2_client.get_share(cls.share['id'])

    def _test_manage(self, snapshot, version=CONF.share.max_api_microversion):
        """Manage the given (unmanaged) snapshot, verify it, then delete it."""
        name = ("Name for 'managed' snapshot that had ID %s" %
                snapshot['id'])
        description = "Description for 'managed' snapshot"
        # Manage snapshot
        share_id = snapshot['share_id']
        snapshot = self.shares_v2_client.manage_snapshot(
            share_id,
            snapshot['provider_location'],
            name=name,
            description=description,
            # Some drivers require additional parameters passed as driver
            # options, as follows:
            # - size: Hitachi HNAS Driver
            driver_options={'size': snapshot['size']},
            version=version,
        )
        # Add managed snapshot to cleanup queue
        self.method_resources.insert(
            0, {'type': 'snapshot', 'id': snapshot['id'],
                'client': self.shares_v2_client})
        # Wait for success
        self.shares_v2_client.wait_for_snapshot_status(snapshot['id'],
                                                       'available')
        # Verify manage snapshot API response
        expected_keys = ["status", "links", "share_id", "name",
                         "share_proto", "created_at",
                         "description", "id", "share_size", "size",
                         "provider_location"]
        if utils.is_microversion_ge(version, '2.17'):
            # user_id/project_id appear in the response starting with 2.17.
            expected_keys.extend(["user_id", "project_id"])
        actual_keys = snapshot.keys()
        # Strict key check
        self.assertEqual(set(expected_keys), set(actual_keys))
        # Verify data of managed snapshot
        get_snapshot = self.shares_v2_client.get_snapshot(snapshot['id'])
        self.assertEqual(name, get_snapshot['name'])
        self.assertEqual(description, get_snapshot['description'])
        self.assertEqual(snapshot['share_id'], get_snapshot['share_id'])
        # Delete snapshot
        self.shares_v2_client.delete_snapshot(get_snapshot['id'])
        self.shares_client.wait_for_resource_deletion(
            snapshot_id=get_snapshot['id'])
        self.assertRaises(lib_exc.NotFound,
                          self.shares_v2_client.get_snapshot,
                          get_snapshot['id'])

    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
    @ddt.data('2.12', '2.16', CONF.share.max_api_microversion)
    def test_manage_different_versions(self, version):
        """Run snapshot manage test for multiple versions.

        This test is configured with ddt to run for the configured maximum
        version as well as versions 2.12 (when the API was introduced) and
        2.16.
        """
        # Skip in case specified version is not supported
        utils.skip_if_microversion_not_supported(version)
        snap_name = data_utils.rand_name("tempest-snapshot-name")
        snap_desc = data_utils.rand_name("tempest-snapshot-description")
        # Create snapshot
        snapshot = self.create_snapshot_wait_for_active(
            self.share['id'], snap_name, snap_desc)
        snapshot = self.shares_v2_client.get_snapshot(snapshot['id'])
        # Unmanage snapshot
        self.shares_v2_client.unmanage_snapshot(snapshot['id'],
                                                version=version)
        self.shares_client.wait_for_resource_deletion(
            snapshot_id=snapshot['id'])
        # Manage snapshot
        self._test_manage(snapshot=snapshot, version=version)
class ManageCIFSSnapshotTest(ManageNFSSnapshotTest):
    """Re-runs the snapshot manage/unmanage scenarios over CIFS."""
    protocol = 'cifs'
class ManageGLUSTERFSSnapshotTest(ManageNFSSnapshotTest):
    """Re-runs the snapshot manage/unmanage scenarios over GlusterFS."""
    protocol = 'glusterfs'
class ManageHDFSSnapshotTest(ManageNFSSnapshotTest):
    """Re-runs the snapshot manage/unmanage scenarios over HDFS."""
    protocol = 'hdfs'
class ManageMapRFSSnapshotTest(ManageNFSSnapshotTest):
    """Re-runs the snapshot manage/unmanage scenarios over MapR-FS."""
    protocol = 'maprfs'
|
{
"content_hash": "47fde86028ab642135b754cc9a0912f1",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 78,
"avg_line_length": 37.6258064516129,
"alnum_prop": 0.6195130315500685,
"repo_name": "vponomaryov/manila",
"id": "a93f737f0dc6a445a7b6408edcdaca6836066f85",
"size": "6464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila_tempest_tests/tests/api/admin/test_snapshot_manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "9697997"
},
{
"name": "Shell",
"bytes": "103800"
}
],
"symlink_target": ""
}
|
import importlib
from django.conf import settings
class DefaultSettings(object):
    """Fallback values for every setting the OIDC provider understands.

    Properties that return ``None`` are REQUIRED and must be overridden in
    the Django settings module; the rest are optional defaults.
    """

    @property
    def LOGIN_URL(self):
        """REQUIRED. Used to log the user in."""
        return None

    @property
    def SITE_URL(self):
        """REQUIRED. The OP server url."""
        return None

    @property
    def OIDC_AFTER_USERLOGIN_HOOK(self):
        """OPTIONAL. Hook invoked right after the user has logged in,
        typically used to run app-specific business logic."""
        def default_hook_func(request, user, client):
            return None

        return default_hook_func

    @property
    def OIDC_CODE_EXPIRE(self):
        """OPTIONAL. Authorization code lifetime, in seconds."""
        return 600

    @property
    def OIDC_EXTRA_SCOPE_CLAIMS(self):
        """OPTIONAL. Dotted path to the class adding app-specific scopes."""
        return 'oidc_provider.lib.claims.AbstractScopeClaims'

    @property
    def OIDC_IDTOKEN_EXPIRE(self):
        """OPTIONAL. Id token lifetime, in seconds."""
        return 600

    @property
    def OIDC_IDTOKEN_SUB_GENERATOR(self):
        """OPTIONAL. Generates the Subject Identifier: a locally unique,
        never reassigned identifier for the End-User, consumed by the
        Client."""
        def default_sub_generator(user):
            return str(user.id)

        return default_sub_generator

    @property
    def OIDC_RSA_KEY_FOLDER(self):
        """REQUIRED."""
        return None

    @property
    def OIDC_SKIP_CONSENT_ENABLE(self):
        """OPTIONAL. When enabled, the server remembers each consent a user
        grants to a client so the user is not prompted again for the same
        authorization."""
        return True

    @property
    def OIDC_SKIP_CONSENT_EXPIRE(self):
        """OPTIONAL. How long a granted user consent stays valid."""
        return 90

    @property
    def OIDC_TOKEN_EXPIRE(self):
        """OPTIONAL. Token object lifetime after creation, in seconds."""
        return 3600

    @property
    def REGISTRATION_ENDPOINT_ENABLED(self):
        """OPTIONAL. True if the dynamic client registration endpoint is
        enabled.
        https://openid.net/specs/openid-connect-registration-1_0.html#ClientRegistration
        """
        return True

    @property
    def REGISTRATION_ENDPOINT_REQ_TOKEN(self):
        """OPTIONAL. True if client registration requires a bearer (access)
        token; False if clients may register without authentication.
        http://tools.ietf.org/html/rfc6750#section-2.1
        """
        return True

    @property
    def OIDC_USERINFO(self):
        """OPTIONAL. Dotted path to the class adding app-specific userinfo
        claims."""
        return 'oidc_provider.lib.utils.common.DefaultUserInfo'


default_settings = DefaultSettings()
def import_from_str(value):
    """
    Attempt to import a class from a string representation.
    """
    # Split "pkg.module.ClassName" into module path and attribute name.
    module_path, _, class_name = value.rpartition('.')
    try:
        module = importlib.import_module(module_path)
        return getattr(module, class_name)
    except ImportError as e:
        msg = 'Could not import %s for settings. %s: %s.' % (value, e.__class__.__name__, e)
        raise ImportError(msg)
def get(name, import_str=False):
    """
    Helper function to use inside the package.

    Looks ``name`` up on the Django settings module first and falls back to
    the package defaults. Raises when a REQUIRED setting (one whose default
    is ``None``) is missing from the user's settings. When ``import_str`` is
    true the resulting string value is resolved to the object it names.
    """
    # BUG FIX: the original fetched the default and then unconditionally
    # overwrote it with getattr(settings, name), so a missing user setting
    # raised AttributeError before the fallback could apply, and `value`
    # could be unbound inside the except clause.
    try:
        value = getattr(settings, name)
    except AttributeError:
        value = getattr(default_settings, name)
        if value is None:
            # None defaults mark REQUIRED settings the user must define.
            raise Exception('You must set ' + name + ' in your settings.')
    value = import_from_str(value) if import_str else value
    return value
|
{
"content_hash": "e2c7cae480deaa343095781279db9e9d",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 92,
"avg_line_length": 26.84313725490196,
"alnum_prop": 0.5965424884343803,
"repo_name": "wayward710/django-oidc-provider",
"id": "26b5e94ec0e30093b9eac58dd5b212d910a00184",
"size": "4107",
"binary": false,
"copies": "1",
"ref": "refs/heads/v0.2.x",
"path": "oidc_provider/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "560"
},
{
"name": "HTML",
"bytes": "8018"
},
{
"name": "Python",
"bytes": "94512"
}
],
"symlink_target": ""
}
|
"""Unit tests for Superset"""
import json
from unittest.mock import patch
from superset import security_manager
from tests.integration_tests.base_tests import SupersetTestCase
meUri = "/api/v1/me/"
class TestCurrentUserApi(SupersetTestCase):
    """Integration tests for the current-user ("me") REST API."""

    def test_get_me_logged_in(self):
        # An authenticated user gets their own profile back.
        self.login(username="admin")
        resp = self.client.get(meUri)
        self.assertEqual(200, resp.status_code)
        payload = json.loads(resp.data.decode("utf-8"))
        result = payload["result"]
        self.assertEqual("admin", result["username"])
        self.assertEqual(True, result["is_active"])
        self.assertEqual(False, result["is_anonymous"])

    def test_get_me_with_roles(self):
        # The roles sub-endpoint reports the admin's role.
        self.login(username="admin")
        resp = self.client.get(meUri + "roles/")
        self.assertEqual(200, resp.status_code)
        payload = json.loads(resp.data.decode("utf-8"))
        role_names = list(payload["result"]["roles"].keys())
        self.assertEqual("Admin", role_names.pop())

    @patch("superset.security.manager.g")
    def test_get_my_roles_anonymous(self, mock_g):
        # Anonymous users must not be able to read roles.
        mock_g.user = security_manager.get_anonymous_user
        resp = self.client.get(meUri + "roles/")
        self.assertEqual(401, resp.status_code)

    def test_get_me_unauthorized(self):
        # Without a session the endpoint rejects the request.
        self.logout()
        resp = self.client.get(meUri)
        self.assertEqual(401, resp.status_code)

    @patch("superset.security.manager.g")
    def test_get_me_anonymous(self, mock_g):
        # Anonymous users must not be able to read the profile.
        mock_g.user = security_manager.get_anonymous_user
        resp = self.client.get(meUri)
        self.assertEqual(401, resp.status_code)
|
{
"content_hash": "c9d362e181019e60c3e525c4f71d4d9f",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 67,
"avg_line_length": 34.12765957446808,
"alnum_prop": 0.6514962593516209,
"repo_name": "airbnb/caravel",
"id": "f4c897b6a0ca1254c6fc4ef001c7462718a4a49f",
"size": "2405",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/integration_tests/users/api_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57416"
},
{
"name": "HTML",
"bytes": "112618"
},
{
"name": "JavaScript",
"bytes": "406496"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "588212"
},
{
"name": "Shell",
"bytes": "980"
}
],
"symlink_target": ""
}
|
import ctypes
import os
import subprocess
import time
import shlex
import signal as signal_module
from robot.utils import (ConnectionCache, abspath, encode_to_system,
decode_output, is_falsy, is_truthy, secs_to_timestr,
timestr_to_secs, IRONPYTHON, JYTHON)
from robot.version import get_version
from robot.api import logger
class Process(object):
"""Robot Framework test library for running processes.
This library utilizes Python's
[http://docs.python.org/2/library/subprocess.html|subprocess]
module and its
[http://docs.python.org/2/library/subprocess.html#subprocess.Popen|Popen]
class.
The library has following main usages:
- Running processes in system and waiting for their completion using
`Run Process` keyword.
- Starting processes on background using `Start Process`.
- Waiting started process to complete using `Wait For Process` or
stopping them with `Terminate Process` or `Terminate All Processes`.
This library is new in Robot Framework 2.8.
== Table of contents ==
- `Specifying command and arguments`
- `Process configuration`
- `Active process`
- `Result object`
- `Boolean arguments`
- `Using with OperatingSystem library`
- `Example`
- `Shortcuts`
- `Keywords`
= Specifying command and arguments =
Both `Run Process` and `Start Process` accept the command to execute and
all arguments passed to the command as separate arguments. This makes usage
convenient and also allows these keywords to automatically escape possible
spaces and other special characters in commands and arguments. Notice that
if a command accepts options that themselves accept values, these options
and their values must be given as separate arguments.
When `running processes in shell`, it is also possible to give the whole
command to execute as a single string. The command can then contain
multiple commands to be run together. When using this approach, the caller
is responsible on escaping.
Examples:
| `Run Process` | ${tools}${/}prog.py | argument | second arg with spaces |
| `Run Process` | java | -jar | ${jars}${/}example.jar | --option | value |
| `Run Process` | prog.py "one arg" && tool.sh | shell=yes | cwd=${tools} |
Starting from Robot Framework 2.8.6, possible non-string arguments are
converted to strings automatically.
= Process configuration =
`Run Process` and `Start Process` keywords can be configured using
optional ``**configuration`` keyword arguments. Configuration arguments
must be given after other arguments passed to these keywords and must
use syntax like ``name=value``. Available configuration arguments are
listed below and discussed further in sections afterwards.
| = Name = | = Explanation = |
| shell | Specifies whether to run the command in shell or not. |
| cwd | Specifies the working directory. |
| env | Specifies environment variables given to the process. |
| env:<name> | Overrides the named environment variable(s) only. |
| stdout | Path of a file where to write standard output. |
| stderr | Path of a file where to write standard error. |
| alias | Alias given to the process. |
Note that because ``**configuration`` is passed using ``name=value`` syntax,
possible equal signs in other arguments passed to `Run Process` and
`Start Process` must be escaped with a backslash like ``name\\=value``.
See `Run Process` for an example.
== Running processes in shell ==
The ``shell`` argument specifies whether to run the process in a shell or
not. By default shell is not used, which means that shell specific commands,
like ``copy`` and ``dir`` on Windows, are not available. You can, however,
run shell scripts and batch files without using a shell.
Giving the ``shell`` argument any non-false value, such as ``shell=True``,
changes the program to be executed in a shell. It allows using the shell
capabilities, but can also make the process invocation operating system
dependent. Having a shell between the actually started process and this
library can also interfere communication with the process such as stopping
it and reading its outputs. Because of these problems, it is recommended
to use the shell only when absolutely necessary.
When using a shell it is possible to give the whole command to execute
as a single string. See `Specifying command and arguments` section for
examples and more details in general.
== Current working directory ==
By default the child process will be executed in the same directory
as the parent process, the process running tests, is executed. This
can be changed by giving an alternative location using the ``cwd`` argument.
Forward slashes in the given path are automatically converted to
backslashes on Windows.
`Standard output and error streams`, when redirected to files,
are also relative to the current working directory possibly set using
the ``cwd`` argument.
Example:
| `Run Process` | prog.exe | cwd=${ROOT}/directory | stdout=stdout.txt |
== Environment variables ==
By default the child process will get a copy of the parent process's
environment variables. The ``env`` argument can be used to give the
child a custom environment as a Python dictionary. If there is a need
to specify only certain environment variable, it is possible to use the
``env:<name>=<value>`` format to set or override only that named variables.
It is also possible to use these two approaches together.
Examples:
| `Run Process` | program | env=${environ} |
| `Run Process` | program | env:http_proxy=10.144.1.10:8080 | env:PATH=%{PATH}${:}${PROGDIR} |
| `Run Process` | program | env=${environ} | env:EXTRA=value |
== Standard output and error streams ==
By default processes are run so that their standard output and standard
error streams are kept in the memory. This works fine normally,
but if there is a lot of output, the output buffers may get full and
the program can hang. Additionally on Jython, everything written to
these in-memory buffers can be lost if the process is terminated.
To avoid the above mentioned problems, it is possible to use ``stdout``
and ``stderr`` arguments to specify files on the file system where to
redirect the outputs. This can also be useful if other processes or
other keywords need to read or manipulate the outputs somehow.
Given ``stdout`` and ``stderr`` paths are relative to the `current working
directory`. Forward slashes in the given paths are automatically converted
to backslashes on Windows.
As a special feature, it is possible to redirect the standard error to
the standard output by using ``stderr=STDOUT``.
Regardless are outputs redirected to files or not, they are accessible
through the `result object` returned when the process ends.
Examples:
| ${result} = | `Run Process` | program | stdout=${TEMPDIR}/stdout.txt | stderr=${TEMPDIR}/stderr.txt |
| `Log Many` | stdout: ${result.stdout} | stderr: ${result.stderr} |
| ${result} = | `Run Process` | program | stderr=STDOUT |
| `Log` | all output: ${result.stdout} |
Note that the created output files are not automatically removed after
the test run. The user is responsible to remove them if needed.
== Alias ==
A custom name given to the process that can be used when selecting the
`active process`.
Examples:
| `Start Process` | program | alias=example |
| `Run Process` | python | -c | print 'hello' | alias=hello |
= Active process =
The test library keeps record which of the started processes is currently
active. By default it is latest process started with `Start Process`,
but `Switch Process` can be used to select a different one. Using
`Run Process` does not affect the active process.
The keywords that operate on started processes will use the active process
by default, but it is possible to explicitly select a different process
using the ``handle`` argument. The handle can be the identifier returned by
`Start Process` or an ``alias`` explicitly given to `Start Process` or
`Run Process`.
= Result object =
`Run Process`, `Wait For Process` and `Terminate Process` keywords return a
result object that contains information about the process execution as its
attributes. The same result object, or some of its attributes, can also
be get using `Get Process Result` keyword. Attributes available in the
object are documented in the table below.
| = Attribute = | = Explanation = |
| rc | Return code of the process as an integer. |
| stdout | Contents of the standard output stream. |
| stderr | Contents of the standard error stream. |
| stdout_path | Path where stdout was redirected or ``None`` if not redirected. |
| stderr_path | Path where stderr was redirected or ``None`` if not redirected. |
Example:
| ${result} = | `Run Process` | program |
| `Should Be Equal As Integers` | ${result.rc} | 0 |
| `Should Match` | ${result.stdout} | Some t?xt* |
| `Should Be Empty` | ${result.stderr} | |
| ${stdout} = | `Get File` | ${result.stdout_path} |
| `Should Be Equal` | ${stdout} | ${result.stdout} |
| `File Should Be Empty` | ${result.stderr_path} | |
= Boolean arguments =
Some keywords accept arguments that are handled as Boolean values true or
false. If such an argument is given as a string, it is considered false if
it is either empty or case-insensitively equal to ``false`` or ``no``.
Other strings are considered true regardless their value, and other
argument types are tested using same
[http://docs.python.org/2/library/stdtypes.html#truth-value-testing|rules
as in Python].
True examples:
| `Terminate Process` | kill=True | # Strings are generally true. |
| `Terminate Process` | kill=yes | # Same as the above. |
| `Terminate Process` | kill=${TRUE} | # Python ``True`` is true. |
| `Terminate Process` | kill=${42} | # Numbers other than 0 are true. |
False examples:
| `Terminate Process` | kill=False | # String ``false`` is false. |
| `Terminate Process` | kill=no | # Also string ``no`` is false. |
| `Terminate Process` | kill=${EMPTY} | # Empty string is false. |
| `Terminate Process` | kill=${FALSE} | # Python ``False`` is false. |
Note that prior to Robot Framework 2.8 all non-empty strings, including
``false``, were considered true. Additionally, ``no`` is considered false
only in Robot Framework 2.9 and newer.
= Using with OperatingSystem library =
The OperatingSystem library also contains keywords for running processes.
They are not as flexible as the keywords provided by this library, and
thus not recommended to be used anymore. They may eventually even be
deprecated.
There is a name collision because both of these libraries have
`Start Process` and `Switch Process` keywords. This is handled so that
if both libraries are imported, the keywords in the Process library are
used by default. If there is a need to use the OperatingSystem variants,
it is possible to use `OperatingSystem.Start Process` syntax or use
the BuiltIn keyword `Set Library Search Order` to change the priority.
Other keywords in the OperatingSystem library can be used freely with
keywords in the Process library.
= Example =
| ***** Settings *****
| Library Process
| Suite Teardown `Terminate All Processes` kill=True
|
| ***** Test Cases *****
| Example
| `Start Process` program arg1 arg2 alias=First
| ${handle} = `Start Process` command.sh arg | command2.sh shell=True cwd=/path
| ${result} = `Run Process` ${CURDIR}/script.py
| `Should Not Contain` ${result.stdout} FAIL
| `Terminate Process` ${handle}
| ${result} = `Wait For Process` First
| `Should Be Equal As Integers` ${result.rc} 0
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = get_version()
TERMINATE_TIMEOUT = 30
KILL_TIMEOUT = 10
def __init__(self):
    """Initialize bookkeeping for started processes and their results."""
    # Maps Popen objects to their ExecutionResult wrappers.
    self._results = {}
    # Tracks started processes and which one is currently active.
    self._processes = ConnectionCache('No active process.')
def run_process(self, command, *arguments, **configuration):
    """Runs a process and waits for it to complete.

    ``command`` and ``*arguments`` specify the command to execute and
    arguments passed to it. See `Specifying command and arguments` for
    more details.

    ``**configuration`` contains additional configuration related to
    starting processes and waiting for them to finish. See `Process
    configuration` for details about configuration related to starting
    processes. Configuration related to waiting consists of ``timeout``
    and ``on_timeout`` arguments that have same semantics as with
    `Wait For Process` keyword. By default there is no timeout, and if
    timeout is defined the default action on timeout is ``terminate``.

    Returns a `result object` containing information about the execution.

    Note that possible equal signs in ``*arguments`` must be escaped
    with a backslash (e.g. ``name\\=value``) to avoid them to be passed in
    as ``**configuration``.

    Examples:
    | ${result} = | Run Process | python | -c | print 'Hello, world!' |
    | Should Be Equal | ${result.stdout} | Hello, world! |
    | ${result} = | Run Process | ${command} | stderr=STDOUT | timeout=10s |
    | ${result} = | Run Process | ${command} | timeout=1min | on_timeout=continue |
    | ${result} = | Run Process | java -Dname\\=value Example | shell=True | cwd=${EXAMPLE} |

    This keyword does not change the `active process`.

    ``timeout`` and ``on_timeout`` arguments are new in Robot Framework
    2.8.4.
    """
    # Waiting-related options are consumed here; the rest goes to Start Process.
    timeout = configuration.pop('timeout', None)
    on_timeout = configuration.pop('on_timeout', 'terminate')
    previously_active = self._processes.current
    try:
        handle = self.start_process(command, *arguments, **configuration)
        return self.wait_for_process(handle, timeout, on_timeout)
    finally:
        # Restore whichever process was active before this keyword ran.
        self._processes.current = previously_active
    def start_process(self, command, *arguments, **configuration):
        """Starts a new process on background.

        See `Specifying command and arguments` and `Process configuration`
        for more information about the arguments, and `Run Process` keyword
        for related examples.

        Makes the started process new `active process`. Returns an identifier
        that can be used as a handle to activate the started process if needed.

        Starting from Robot Framework 2.8.5, processes are started so that
        they create a new process group. This allows sending signals to and
        terminating also possible child processes. This is not supported by
        Jython in general nor by Python versions prior to 2.7 on Windows.
        """
        config = ProcessConfig(**configuration)
        executable_command = self._cmd(command, arguments, config.shell)
        logger.info('Starting process:\n%s' % executable_command)
        logger.debug('Process configuration:\n%s' % config)
        process = subprocess.Popen(executable_command, **config.full_config)
        # Remember the result object so that outputs can be read later.
        self._results[process] = ExecutionResult(process,
                                                 config.stdout_stream,
                                                 config.stderr_stream)
        return self._processes.register(process, alias=config.alias)
def _cmd(self, command, args, use_shell):
command = [encode_to_system(item) for item in [command] + list(args)]
if not use_shell:
return command
if args:
return subprocess.list2cmdline(command)
return command[0]
    def is_process_running(self, handle=None):
        """Checks whether the process is running or not.

        If ``handle`` is not given, uses the current `active process`.

        Returns ``True`` if the process is still running and ``False`` otherwise.
        """
        # poll() returns None while the process is alive.
        return self._processes[handle].poll() is None
    def process_should_be_running(self, handle=None,
                                  error_message='Process is not running.'):
        """Verifies that the process is running.

        If ``handle`` is not given, uses the current `active process`.

        Fails if the process has stopped.
        """
        if not self.is_process_running(handle):
            raise AssertionError(error_message)
    def process_should_be_stopped(self, handle=None,
                                  error_message='Process is running.'):
        """Verifies that the process is not running.

        If ``handle`` is not given, uses the current `active process`.

        Fails if the process is still running.
        """
        if self.is_process_running(handle):
            raise AssertionError(error_message)
    def wait_for_process(self, handle=None, timeout=None, on_timeout='continue'):
        """Waits for the process to complete or to reach the given timeout.

        The process to wait for must have been started earlier with
        `Start Process`. If ``handle`` is not given, uses the current
        `active process`.

        ``timeout`` defines the maximum time to wait for the process. It can be
        given in
        [http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#time-format|
        various time formats] supported by Robot Framework, for example, ``42``,
        ``42 s``, or ``1 minute 30 seconds``.

        ``on_timeout`` defines what to do if the timeout occurs. Possible values
        and corresponding actions are explained in the table below. Notice
        that reaching the timeout never fails the test.

        | = Value = | = Action = |
        | continue  | The process is left running (default). |
        | terminate | The process is gracefully terminated. |
        | kill      | The process is forcefully stopped. |

        See `Terminate Process` keyword for more details how processes are
        terminated and killed.

        If the process ends before the timeout or it is terminated or killed,
        this keyword returns a `result object` containing information about
        the execution. If the process is left running, Python ``None`` is
        returned instead.

        Examples:
        | # Process ends cleanly | | |
        | ${result} = | Wait For Process | example |
        | Process Should Be Stopped | example | |
        | Should Be Equal As Integers | ${result.rc} | 0 |
        | # Process does not end | | |
        | ${result} = | Wait For Process | timeout=42 secs |
        | Process Should Be Running | | |
        | Should Be Equal | ${result} | ${NONE} |
        | # Kill non-ending process | | |
        | ${result} = | Wait For Process | timeout=1min 30s | on_timeout=kill |
        | Process Should Be Stopped | | |
        | Should Be Equal As Integers | ${result.rc} | -9 |

        ``timeout`` and ``on_timeout`` are new in Robot Framework 2.8.2.
        """
        process = self._processes[handle]
        logger.info('Waiting for process to complete.')
        if timeout:
            timeout = timestr_to_secs(timeout)
            if not self._process_is_stopped(process, timeout):
                logger.info('Process did not complete in %s.'
                            % secs_to_timestr(timeout))
                # Timeout reached: terminate, kill or leave running.
                return self._manage_process_timeout(handle, on_timeout.lower())
        return self._wait(process)
def _manage_process_timeout(self, handle, on_timeout):
if on_timeout == 'terminate':
return self.terminate_process(handle)
elif on_timeout == 'kill':
return self.terminate_process(handle, kill=True)
else:
logger.info('Leaving process intact.')
return None
    def _wait(self, process):
        # Block until the process exits and finalize its result object.
        result = self._results[process]
        # `or 0` normalizes a falsy return code (e.g. None on some
        # interpreters) to 0.
        result.rc = process.wait() or 0
        result.close_streams()
        logger.info('Process completed.')
        return result
    def terminate_process(self, handle=None, kill=False):
        """Stops the process gracefully or forcefully.

        If ``handle`` is not given, uses the current `active process`.

        By default first tries to stop the process gracefully. If the process
        does not stop in 30 seconds, or ``kill`` argument is given a true value,
        (see `Boolean arguments`) kills the process forcefully. Stops also all
        the child processes of the originally started process.

        Waits for the process to stop after terminating it. Returns a `result
        object` containing information about the execution similarly as `Wait
        For Process`.

        On Unix-like machines graceful termination is done using ``TERM (15)``
        signal and killing using ``KILL (9)``. Use `Send Signal To Process`
        instead if you just want to send either of these signals without
        waiting for the process to stop.

        On Windows graceful termination is done using ``CTRL_BREAK_EVENT``
        event and killing using Win32 API function ``TerminateProcess()``.

        Examples:
        | ${result} = | Terminate Process | |
        | Should Be Equal As Integers | ${result.rc} | -15 | # On Unixes |
        | Terminate Process | myproc | kill=true |

        Limitations:
        - Graceful termination is not supported on Windows by Jython nor by
          Python versions prior to 2.7. Process is killed instead.
        - Stopping the whole process group is not supported by Jython at all
          nor by Python versions prior to 2.7 on Windows.
        - On Windows forceful kill only stops the main process, not possible
          child processes.

        Automatically killing the process if termination fails as well as
        returning a result object are new features in Robot Framework 2.8.2.
        Terminating also possible child processes, including using
        ``CTRL_BREAK_EVENT`` on Windows, is new in Robot Framework 2.8.5.
        """
        process = self._processes[handle]
        # Popen.terminate() does not exist on very old interpreters.
        if not hasattr(process, 'terminate'):
            raise RuntimeError('Terminating processes is not supported '
                               'by this Python version.')
        terminator = self._kill if is_truthy(kill) else self._terminate
        try:
            terminator(process)
        except OSError:
            # The process may have died between the poll and the signal;
            # only re-raise if it is genuinely still alive.
            if not self._process_is_stopped(process, self.KILL_TIMEOUT):
                raise
            logger.debug('Ignored OSError because process was stopped.')
        return self._wait(process)
    def _kill(self, process):
        logger.info('Forcefully killing process.')
        # Kill the whole process group when the platform supports it so
        # that child processes die too.
        if hasattr(os, 'killpg'):
            os.killpg(process.pid, signal_module.SIGKILL)
        else:
            process.kill()
        if not self._process_is_stopped(process, self.KILL_TIMEOUT):
            raise RuntimeError('Failed to kill process.')
    def _terminate(self, process):
        logger.info('Gracefully terminating process.')
        # Sends signal to the whole process group both on POSIX and on Windows
        # if supported by the interpreter.
        if hasattr(os, 'killpg'):
            os.killpg(process.pid, signal_module.SIGTERM)
        elif hasattr(signal_module, 'CTRL_BREAK_EVENT'):
            if IRONPYTHON:
                # IronPython's send_signal is broken for this event:
                # https://ironpython.codeplex.com/workitem/35020
                ctypes.windll.kernel32.GenerateConsoleCtrlEvent(
                    signal_module.CTRL_BREAK_EVENT, process.pid)
            else:
                process.send_signal(signal_module.CTRL_BREAK_EVENT)
        else:
            process.terminate()
        # Fall back to a forceful kill if graceful termination stalls.
        if not self._process_is_stopped(process, self.TERMINATE_TIMEOUT):
            logger.info('Graceful termination failed.')
            self._kill(process)
    def terminate_all_processes(self, kill=False):
        """Terminates all still running processes started by this library.

        This keyword can be used in suite teardown or elsewhere to make
        sure that all processes are stopped.

        By default tries to terminate processes gracefully, but can be
        configured to forcefully kill them immediately. See `Terminate Process`
        that this keyword uses internally for more details.
        """
        # Handles are 1-based indices into the connection cache.
        for handle in range(1, len(self._processes) + 1):
            if self.is_process_running(handle):
                self.terminate_process(handle, kill=kill)
        # Re-running __init__ resets the process cache and stored results.
        self.__init__()
    def send_signal_to_process(self, signal, handle=None, group=False):
        """Sends the given ``signal`` to the specified process.

        If ``handle`` is not given, uses the current `active process`.

        Signal can be specified either as an integer or as a signal name. In the
        latter case it is possible to give the name both with or without ``SIG``
        prefix, but names are case-sensitive. For example, all the examples
        below send signal ``INT (2)``:

        | Send Signal To Process | 2      |        | # Send to active process |
        | Send Signal To Process | INT    |        | |
        | Send Signal To Process | SIGINT | myproc | # Send to named process |

        This keyword is only supported on Unix-like machines, not on Windows.
        What signals are supported depends on the system. For a list of
        existing signals on your system, see the Unix man pages related to
        signal handling (typically ``man signal`` or ``man 7 signal``).

        By default sends the signal only to the parent process, not to possible
        child processes started by it. Notice that when `running processes in
        shell`, the shell is the parent process and it depends on the system
        does the shell propagate the signal to the actual started process.

        To send the signal to the whole process group, ``group`` argument can
        be set to any true value (see `Boolean arguments`). This is not
        supported by Jython, however.

        New in Robot Framework 2.8.2. Support for ``group`` argument is new
        in Robot Framework 2.8.5.
        """
        # os.sep is a backslash only on Windows.
        if os.sep == '\\':
            raise RuntimeError('This keyword does not work on Windows.')
        process = self._processes[handle]
        signum = self._get_signal_number(signal)
        logger.info('Sending signal %s (%d).' % (signal, signum))
        if is_truthy(group) and hasattr(os, 'killpg'):
            os.killpg(process.pid, signum)
        elif hasattr(process, 'send_signal'):
            process.send_signal(signum)
        else:
            raise RuntimeError('Sending signals is not supported '
                               'by this Python version.')
def _get_signal_number(self, int_or_name):
try:
return int(int_or_name)
except ValueError:
return self._convert_signal_name_to_number(int_or_name)
def _convert_signal_name_to_number(self, name):
try:
return getattr(signal_module,
name if name.startswith('SIG') else 'SIG' + name)
except AttributeError:
raise RuntimeError("Unsupported signal '%s'." % name)
    def get_process_id(self, handle=None):
        """Returns the process ID (pid) of the process as an integer.

        If ``handle`` is not given, uses the current `active process`.

        Notice that the pid is not the same as the handle returned by
        `Start Process` that is used internally by this library.
        """
        return self._processes[handle].pid
    def get_process_object(self, handle=None):
        """Return the underlying ``subprocess.Popen`` object.

        If ``handle`` is not given, uses the current `active process`.
        """
        return self._processes[handle]
    def get_process_result(self, handle=None, rc=False, stdout=False,
                           stderr=False, stdout_path=False, stderr_path=False):
        """Returns the specified `result object` or some of its attributes.

        The given ``handle`` specifies the process whose results should be
        returned. If no ``handle`` is given, results of the current `active
        process` are returned. In either case, the process must have been
        finished before this keyword can be used. In practice this means
        that processes started with `Start Process` must be finished either
        with `Wait For Process` or `Terminate Process` before using this
        keyword.

        If no other arguments than the optional ``handle`` are given, a whole
        `result object` is returned. If one or more of the other arguments
        are given any true value, only the specified attributes of the
        `result object` are returned. These attributes are always returned
        in the same order as arguments are specified in the keyword signature.
        See `Boolean arguments` section for more details about true and false
        values.

        Examples:
        | Run Process | python | -c | print 'Hello, world!' | alias=myproc |
        | # Get result object | | |
        | ${result} = | Get Process Result | myproc |
        | Should Be Equal | ${result.rc} | ${0} |
        | Should Be Equal | ${result.stdout} | Hello, world! |
        | Should Be Empty | ${result.stderr} | |
        | # Get one attribute | | |
        | ${stdout} = | Get Process Result | myproc | stdout=true |
        | Should Be Equal | ${stdout} | Hello, world! |
        | # Multiple attributes | | |
        | ${stdout} | ${stderr} = | Get Process Result | myproc | stdout=yes | stderr=yes |
        | Should Be Equal | ${stdout} | Hello, world! |
        | Should Be Empty | ${stderr} | |

        Although getting results of a previously executed process can be handy
        in general, the main use case for this keyword is returning results
        over the remote library interface. The remote interface does not
        support returning the whole result object, but individual attributes
        can be returned without problems.

        New in Robot Framework 2.8.2.
        """
        result = self._results[self._processes[handle]]
        # rc is set only after the process has been waited for.
        if result.rc is None:
            raise RuntimeError('Getting results of unfinished processes '
                               'is not supported.')
        attributes = self._get_result_attributes(result, rc, stdout, stderr,
                                                 stdout_path, stderr_path)
        if not attributes:
            return result
        elif len(attributes) == 1:
            return attributes[0]
        return attributes
def _get_result_attributes(self, result, *includes):
attributes = (result.rc, result.stdout, result.stderr,
result.stdout_path, result.stderr_path)
includes = (is_truthy(incl) for incl in includes)
return tuple(attr for attr, incl in zip(attributes, includes) if incl)
    def switch_process(self, handle):
        """Makes the specified process the current `active process`.

        The handle can be an identifier returned by `Start Process` or
        the ``alias`` given to it explicitly.

        Example:
        | Start Process | prog1 | alias=process1 |
        | Start Process | prog2 | alias=process2 |
        | # currently active process is process2 |
        | Switch Process | process1 |
        | # now active process is process1 |
        """
        self._processes.switch(handle)
def _process_is_stopped(self, process, timeout):
stopped = lambda: process.poll() is not None
max_time = time.time() + timeout
while time.time() <= max_time and not stopped():
time.sleep(min(0.1, timeout))
return stopped()
    def command_line_to_list(self, args, escaping=False):
        """Split a command line string into a list of individual arguments.

        Uses shlex in POSIX mode; backslash escapes are honored only when
        ``escaping`` is true. Raises ValueError with a descriptive message
        if the command line cannot be parsed (e.g. unbalanced quotes).
        """
        # NOTE(review): encoding to UTF-8 bytes before lexing (and decoding
        # the tokens afterwards) is Python 2 specific — on Python 3 shlex
        # expects str. Confirm the target interpreter before changing.
        lexer = shlex.shlex(args.encode('UTF-8'), posix=True)
        if is_falsy(escaping):
            # Disable backslash escaping but keep quotes working.
            lexer.escape = ''
            lexer.escapedquotes = '"\''
        lexer.commenters = ''
        lexer.whitespace_split = True
        try:
            return [token.decode('UTF-8') for token in lexer]
        except ValueError as err:
            raise ValueError("Parsing '%s' failed: %s" % (args, err))
    def list_to_command_line(self, args):
        """Combine the given arguments into a single command line string
        using ``subprocess.list2cmdline`` quoting rules."""
        return subprocess.list2cmdline(args)
class ExecutionResult(object):
    """Represents the outcome of a finished process.

    Gives access to the return code and to the standard output and error
    streams, reading them lazily either from the pipe or from custom
    stream files.
    """

    def __init__(self, process, stdout, stderr, rc=None):
        self._process = process
        # Paths are set only when output was redirected to custom files.
        self.stdout_path = self._get_path(stdout)
        self.stderr_path = self._get_path(stderr)
        self.rc = rc
        # Outputs are read lazily and cached in these attributes.
        self._stdout = None
        self._stderr = None
        self._custom_streams = [stream for stream in (stdout, stderr)
                                if self._is_custom_stream(stream)]

    def _get_path(self, stream):
        # Custom streams are open file objects with a name; PIPE/STDOUT
        # are subprocess constants.
        return stream.name if self._is_custom_stream(stream) else None

    def _is_custom_stream(self, stream):
        return stream not in (subprocess.PIPE, subprocess.STDOUT)

    @property
    def stdout(self):
        # Read and cache standard output on first access.
        if self._stdout is None:
            self._read_stdout()
        return self._stdout

    @property
    def stderr(self):
        # Read and cache standard error on first access.
        if self._stderr is None:
            self._read_stderr()
        return self._stderr

    def _read_stdout(self):
        self._stdout = self._read_stream(self.stdout_path, self._process.stdout)

    def _read_stderr(self):
        self._stderr = self._read_stream(self.stderr_path, self._process.stderr)

    def _read_stream(self, stream_path, stream):
        # A custom file is re-opened for reading; a closed pipe has
        # nothing left to give.
        if stream_path:
            stream = open(stream_path, 'r')
        elif not self._is_open(stream):
            return ''
        try:
            return self._format_output(stream.read())
        except IOError:  # http://bugs.jython.org/issue2218
            return ''
        finally:
            if stream_path:
                stream.close()

    def _is_open(self, stream):
        return stream and not stream.closed

    def _format_output(self, output):
        # Strip a single trailing newline typically added by the process.
        if output.endswith('\n'):
            output = output[:-1]
        return decode_output(output, force=True)

    def close_streams(self):
        # Read remaining piped output before closing so it is not lost.
        standard_streams = self._get_and_read_standard_streams(self._process)
        for stream in standard_streams + self._custom_streams:
            if self._is_open(stream):
                stream.close()

    def _get_and_read_standard_streams(self, process):
        stdin, stdout, stderr = process.stdin, process.stdout, process.stderr
        if stdout:
            self._read_stdout()
        if stderr:
            self._read_stderr()
        return [stdin, stdout, stderr]

    def __str__(self):
        return '<result object with rc %d>' % self.rc
class ProcessConfig(object):
    """Parses ``**configuration`` options given to the keywords and builds
    the keyword arguments passed to ``subprocess.Popen``."""

    def __init__(self, cwd=None, shell=False, stdout=None, stderr=None,
                 alias=None, env=None, **rest):
        self.cwd = self._get_cwd(cwd)
        self.stdout_stream = self._new_stream(stdout)
        self.stderr_stream = self._get_stderr(stderr, stdout, self.stdout_stream)
        self.shell = is_truthy(shell)
        self.alias = alias
        # Any remaining options must be individual 'env:NAME' entries.
        self.env = self._construct_env(env, rest)

    def _get_cwd(self, cwd):
        # Forward slashes are accepted in the path regardless of the OS.
        if cwd:
            return cwd.replace('/', os.sep)
        return abspath('.')

    def _new_stream(self, name):
        # A given name redirects output to a file (relative to cwd);
        # otherwise the output is piped.
        if name:
            name = name.replace('/', os.sep)
            return open(os.path.join(self.cwd, name), 'w')
        return subprocess.PIPE

    def _get_stderr(self, stderr, stdout, stdout_stream):
        # 'STDOUT', or the same file name as stdout, redirects stderr
        # into the stdout stream.
        if stderr and stderr in ['STDOUT', stdout]:
            if stdout_stream != subprocess.PIPE:
                return stdout_stream
            return subprocess.STDOUT
        return self._new_stream(stderr)

    def _construct_env(self, env, extra):
        if env:
            env = dict((encode_to_system(k), encode_to_system(v))
                       for k, v in env.items())
        for key in extra:
            # Only 'env:NAME' keys are accepted here; anything else is an
            # unsupported keyword argument.
            if not key.startswith('env:'):
                raise RuntimeError("Keyword argument '%s' is not supported by "
                                   "this keyword." % key)
            if env is None:
                # Individual env:NAME values extend the current environment.
                env = os.environ.copy()
            env[encode_to_system(key[4:])] = encode_to_system(extra[key])
        return env

    @property
    def full_config(self):
        # Keyword arguments passed directly to subprocess.Popen.
        config = {'stdout': self.stdout_stream,
                  'stderr': self.stderr_stream,
                  'stdin': subprocess.PIPE,
                  'shell': self.shell,
                  'cwd': self.cwd,
                  'env': self.env,
                  'universal_newlines': True}
        if not JYTHON:
            # Start the process in its own process group so that child
            # processes can be signaled and terminated too.
            self._add_process_group_config(config)
        return config

    def _add_process_group_config(self, config):
        if hasattr(os, 'setsid'):
            config['preexec_fn'] = os.setsid
        if hasattr(subprocess, 'CREATE_NEW_PROCESS_GROUP'):
            config['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP

    def __str__(self):
        return encode_to_system("""\
cwd = %s
stdout_stream = %s
stderr_stream = %s
shell = %r
alias = %s
env = %r""" % (self.cwd, self.stdout_stream, self.stderr_stream,
               self.shell, self.alias, self.env))
|
{
"content_hash": "f7338c6f79b64cccc997ad04f69fafd8",
"timestamp": "",
"source": "github",
"line_count": 875,
"max_line_length": 111,
"avg_line_length": 44.374857142857145,
"alnum_prop": 0.6162305552693932,
"repo_name": "Colorfulstan/robotframework",
"id": "637680629088c9a20a59340666a8d34e78f11d5a",
"size": "39436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robot/libraries/Process.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "210"
},
{
"name": "CSS",
"bytes": "23490"
},
{
"name": "HTML",
"bytes": "140926"
},
{
"name": "Java",
"bytes": "60101"
},
{
"name": "JavaScript",
"bytes": "160787"
},
{
"name": "Python",
"bytes": "2169646"
},
{
"name": "RobotFramework",
"bytes": "2041199"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
}
|
"""
@version: python3.6
@author: ‘sj‘
@contact: songjie1217@hotmail.com
@file: boll-trend-2.0.py
@time: 12/11/17 3:26 PM
"""
from backtest.core.backteststrategy_test import *
from backtest.optimizer.optimizer import *
from backtest.tools.ta import *
from datetime import datetime as dtL
class BollStrategyTrend(BacktestStrategyTest):
    """Bollinger-band based backtest strategy on 15-minute bars.

    A break above the upper band arms a SHORT direction and a break below
    the lower band arms a LONG direction; positions are opened only when
    the bands are wide enough and closed on repeated opposite signals or
    on one of several stop-loss conditions.
    """

    def initialize(self):
        # Backtest run configuration.
        self.context.universe = ['rb1801']
        self.context.run_info.strategy_name = 'bolltrend2-rb_main-15m'
        self.context.run_info.feed_frequency = '15m'
        self.context.run_info.start_date = '2017-01-01'
        self.context.run_info.end_date = '2017-10-28'
        self.context.run_info.ip = localip
        self.context.run_info.main_contract = True
        self.context.init_cash = 1000000
        self.context.boll = Boll()
        self.context.cash_rate = 0.8  # cash usage
        self.context.slippage = 0
        # Per-trade bookkeeping state, reset after every close.
        self.context.direction = ''
        self.context.open_vol = 0
        self.context.open_flag = False
        self.context.close_flag = False
        self.context.reverse_flag = False
        self.context.close_count = 0
        self.context.reverse_count = 0
        self.context.open_price = 0
        self.context.open_bar_open = 0
        self.context.max_loss = 0
        self.context.max_gain = 0
        self.context.max_gain_t = 0
        self.context.max_loss_t = 0
        self.context.hold_flag = False
        self.context.temp_price = 0
        self.context.count_from_open = 0
        self.context.fill_count = 0
        self.context.order_count = 0
        self.context.stop_point = 30
        self.context.stop_coef = 0.006

    def handle_data(self, data):
        """Process one bar: update the indicator, arm a direction on band
        breakouts, run stop-loss checks and act on open/close signals."""
        boll = self.context.boll.compute(data)
        if boll is not None:
            # While flat, arm a trading direction from band breakouts:
            # close above the upper band arms SHORT, below the lower band
            # arms LONG.
            if not self.context.open_flag:
                if data.close > boll.up:
                    self.context.direction = SHORT
                elif data.close < boll.dn:
                    self.context.direction = LONG
            # prerequisites for opening positions: bands must be wide
            # enough relative to the middle band.
            preq_one = False
            if (boll.up - boll.dn)/boll.mb > 0.006:
                preq_one = True
            # Track the running max gain/loss of an open position.
            if self.context.direction == LONG and self.context.open_flag:
                self.context.max_loss = min(self.context.max_loss, data.low - self.context.open_price)
                self.context.max_gain = max(self.context.max_gain, data.high - self.context.open_price)
            if self.context.direction == SHORT and self.context.open_flag:
                self.context.max_loss = min(self.context.max_loss, self.context.open_price - data.high)
                self.context.max_gain = max(self.context.max_gain, self.context.open_price - data.low)
            if self.context.open_flag:
                self.context.count_from_open += 1
                self._stoploss_check(data, boll)
            # open long position
            if boll.mb < data.close and data.close > data.open and self.context.direction == LONG and not self.context.open_flag:
                self.context.order_count += 1
                if preq_one:
                    self._open(data,'signal open long')
                    self.context.fill_count += 1
            # open short position
            if data.close < boll.mb and data.close < data.open and self.context.direction == SHORT and not self.context.open_flag:
                self.context.order_count += 1
                if preq_one:
                    self._open(data, 'signal open short')
                    self.context.fill_count += 1
            # close long position, close count + 1
            if data.close < boll.mb and data.close < data.open and self.context.direction == LONG and preq_one and self.context.open_flag:
                self._close(data, 'signal close long')
            # close short position, close count + 1
            if data.close > boll.mb and data.close > data.open and self.context.direction == SHORT and preq_one and self.context.open_flag:
                self._close(data, 'signal close short')

    def _stoploss_check(self,data,boll):
        """Check the three stop-loss conditions for the open position:
        adverse price spread, hovering below/above the open price, and a
        touch of the opposite band."""
        # Threshold rounded down to a whole number of ticks.
        thres = int(int(self.context.stop_coef * data.open)/self.context.instmt_info['tick_size'])*self.context.instmt_info['tick_size']
        if self.context.direction == LONG:
            if (self.context.open_price - data.low) > thres:
                self._close(data,'stoploss close long price spread', thres)
            if data.open > self.context.open_price > data.low and self.context.count_from_open > 6:
                self._close(data,'stoploss long lower than open',thres = 0)
            if data.close < boll.dn:
                self._close(data, 'stoploss close long touch bot')
        if self.context.direction == SHORT:
            if (data.high - self.context.open_price) > thres:
                self._close(data,'stoploss close short price spread',thres)
            if data.open < self.context.open_price < data.high and self.context.count_from_open > 6:
                self._close(data,'stoploss short greater than open', thres = 0)
            if data.close > boll.up:
                self._close(data, 'stoploss close short touch top')

    def _open(self, bar, type = ''):
        """Open a position in the armed direction at the bar close price
        adjusted by slippage, sizing by available cash and margin."""
        # Default branch opens short.
        # NOTE(review): self.context.open_switch is not set in initialize();
        # presumably provided by the framework — confirm.
        if self.context.open_switch:
            open_price = bar.close - self.context.slippage
            if self.context.direction == LONG:
                # Opening long pays the slippage instead.
                open_price = bar.close + self.context.slippage
            self.context.open_price = open_price
            print(self.context.portfolio.avail_cash)
            # Volume from available cash, margin ratio and contract size.
            open_vol = int((self.context.portfolio.avail_cash * self.context.cash_rate) / (open_price * self.context.instmt_info['broker_margin'] * 0.01 * self.context.instmt_info['contract_size']))
            print('时间:%d %s 开:%s手 类型:%s' % (self.context.date, self.context.current_bar.end_time,open_vol,type))
            self.order(self.context.current_contract[0], self.context.direction, OPEN, open_vol, limit_price=open_price, type = type)
            self.context.open_flag = True
            self.context.open_vol = open_vol

    def _close(self, bar,type='',thres = 0):
        """Close the open position depending on the close reason.

        Signal closes require three consecutive signals; band-touch stops
        close immediately; price-spread stops exit at the open price offset
        by the threshold and slippage.
        """
        print('close type', type)
        if type == 'signal close long' or type == 'signal close short':
            print('signal close')
            # Require three consecutive close signals before acting.
            self.context.close_count += 1
            if self.context.close_count >= 3:
                self.__close_base(bar, type)
        elif type == 'stoploss close short touch top' or type == 'stoploss close long touch bot':
            self.__close_base(bar, type)
        else:
            # Price-spread stop-loss: exit relative to the open price.
            close_price = self.context.open_price + thres + self.context.slippage
            if self.context.direction == LONG:
                # Closing long exits below the open price.
                close_price = self.context.open_price - thres - self.context.slippage
            print('时间:%d %s 平:%s 手 类型:%s' % (self.context.date, self.context.current_bar.end_time, self.context.open_vol,type))
            self.order(self.context.current_contract[0], self.context.direction, CLOSE, self.context.open_vol,
                       limit_price=close_price, type=type, max_loss=self.context.max_loss,max_gain=self.context.max_gain)
            # Reset the per-trade state.
            self.context.open_flag = False
            self.context.open_vol = 0
            self.context.close_count = 0
            self.context.open_price = 0
            self.context.direction = ''
            self.context.max_loss = 0
            self.context.max_gain = 0
            self.context.count_from_open = 0

    def __close_base(self, bar,type=''):
        """Close the open position at the bar close price (minus slippage
        for longs, plus for shorts) and reset the per-trade state."""
        close_price = bar.close + self.context.slippage
        if self.context.direction == LONG:
            # Closing long exits below the bar close.
            close_price = bar.close - self.context.slippage
        if self.context.open_flag:
            print('时间:%d %s 平:%s 手 类型:%s' % (self.context.date, self.context.current_bar.end_time, self.context.open_vol,type))
            self.order(self.context.current_contract[0], self.context.direction, CLOSE, self.context.open_vol,
                       limit_price=close_price, type=type, max_loss=self.context.max_loss,max_gain=self.context.max_gain)
            # Reset the per-trade state.
            self.context.open_flag = False
            self.context.open_vol = 0
            self.context.close_count = 0
            self.context.open_price = 0
            self.context.direction = ''
            self.context.max_loss = 0
            self.context.max_gain = 0
            self.context.count_from_open = 0
if __name__ == '__main__':
    # Run the backtest when this module is executed directly.
    t = BollStrategyTrend()
    t.run()
|
{
"content_hash": "083c57ef1dfa0d09a405c068bdaaac78",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 198,
"avg_line_length": 45.62376237623762,
"alnum_prop": 0.5893012152777778,
"repo_name": "sjsj0101/backtestengine",
"id": "a5a3b90fa50ba94697b96a586c758e9d9e6a03c3",
"size": "9316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backtest/strategys/boll-trend/boll-trend-2.0.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "950"
},
{
"name": "Python",
"bytes": "1995524"
}
],
"symlink_target": ""
}
|
import logging,sys,multiprocessing,time,os,pwd,grp
logger = logging.getLogger(__name__)
from django.db import connections,DEFAULT_DB_ALIAS
from django.db.utils import load_backend
from django.conf import settings
from argo import models,QueueMessage
from common import db_tools
from common import MessageReceiver,Serializer
def CreateWorkingPath(job_id):
    """Create and return a per-job working directory under the configured
    ARGO work directory.

    os.makedirs raises OSError if the path already exists, so job ids are
    assumed to be unique.
    """
    path = os.path.join(settings.ARGO_WORK_DIRECTORY,str(job_id))
    os.makedirs(path)
    return path
class UserJobReceiver(MessageReceiver.MessageReceiver):
    ''' subscribes to the input user job queue and adds jobs to the database '''

    def __init__(self, process_queue=None):
        super(UserJobReceiver, self).__init__(
            settings.RABBITMQ_USER_JOB_QUEUE_NAME,
            settings.RABBITMQ_USER_JOB_ROUTING_KEY,
            settings.RABBITMQ_SERVER_NAME,
            settings.RABBITMQ_SERVER_PORT,
            settings.RABBITMQ_USER_EXCHANGE_NAME,
            settings.RABBITMQ_SSL_CERT,
            settings.RABBITMQ_SSL_KEY,
            settings.RABBITMQ_SSL_CA_CERTS,
        )
        # Queue used to hand newly created jobs over to the processing side.
        self.process_queue = process_queue

    # This is where the real processing of incoming messages happens.
    def consume_msg(self, channel, method_frame, header_frame, body):
        """Deserialize an incoming user job message, persist it as an
        ArgoJob with its subjobs, and notify the process queue.

        The message is always acknowledged — even on failure — so a bad
        message is not redelivered forever.
        """
        logger.debug('in consume_msg')
        if body is not None:
            logger.debug(' received message: ' + body)
            # Convert body text to an ArgoUserJob dictionary.
            try:
                userjob = Serializer.deserialize(body)
            except Exception as e:
                logger.error(' received exception while deserializing message to create ArgoUserJob, \nexception message: ' + str(e) + '\n message body: \n' + body + ' \n cannot continue with this job, ignoring it and moving on.')
                # acknowledge message
                channel.basic_ack(method_frame.delivery_tag)
                return
            # Create a unique, per-pid DB connection so concurrent workers
            # do not share one Django connection.
            try:
                db_connection_id = db_tools.get_db_connection_id(os.getpid())
                db_backend = load_backend(connections.databases[DEFAULT_DB_ALIAS]['ENGINE'])
                db_conn = db_backend.DatabaseWrapper(connections.databases[DEFAULT_DB_ALIAS], db_connection_id)
                connections[db_connection_id] = db_conn
            except Exception as e:
                logger.error(' received exception while creating DB connection, exception message: ' + str(e) + ' \n job id: ' + str(userjob['user_id']) + ' job user: ' + userjob['username'] + ' job description: ' + userjob['description'] + '\n cannot continue with this job, moving on.')
                # acknowledge message
                channel.basic_ack(method_frame.delivery_tag)
                return
            # Create the ArgoJob and initialize it from the user message.
            try:
                argojob = models.ArgoJob()
                argojob.job_id = models.ArgoJob.generate_job_id()
                logger.debug(' created ArgoJob with id: ' + str(argojob.job_id))
                argojob.working_directory = CreateWorkingPath(argojob.job_id)
                argojob.user_id = userjob['user_id']
                argojob.job_name = userjob['name']
                argojob.job_description = userjob['description']
                argojob.group_identifier = userjob['group_identifier']
                argojob.username = userjob['username']
                argojob.email = userjob['email']
                argojob.input_url = userjob['input_url']
                argojob.output_url = userjob['output_url']
                argojob.job_status_routing_key = userjob['job_status_routing_key']
                # If there are no subjobs, there isn't anything to do:
                # reject the job and report the status to the user.
                if len(userjob['subjobs']) == 0:
                    logger.error(' Job received with no subjobs, failing job and moving on.')
                    argojob.state_id = models.REJECTED.id
                    argojob.save()
                    message = 'Job rejected because there are no subjobs.'
                    # BUG FIX: previously passed the undefined name 'job'
                    # here, which raised a NameError on this path.
                    models.send_status_message(argojob, message)
                    # acknowledge message
                    channel.basic_ack(method_frame.delivery_tag)
                    del connections[db_connection_id]
                    return
                # Add the subjobs and remember their primary keys.
                subjob_pks = []
                for usersubjob in userjob['subjobs']:
                    argosubjob = models.ArgoSubJob()
                    argosubjob.site = usersubjob['site']
                    argosubjob.job_id = models.ArgoJob.generate_job_id()
                    argosubjob.name = usersubjob['name']
                    argosubjob.description = usersubjob['description']
                    argosubjob.argo_job_id = argojob.job_id
                    argosubjob.queue = usersubjob['queue']
                    argosubjob.project = usersubjob['project']
                    argosubjob.wall_time_minutes = usersubjob['wall_time_minutes']
                    argosubjob.num_nodes = usersubjob['num_nodes']
                    argosubjob.processes_per_node = usersubjob['processes_per_node']
                    argosubjob.scheduler_config = usersubjob['scheduler_config']
                    argosubjob.application = usersubjob['application']
                    argosubjob.config_file = usersubjob['config_file']
                    # Subjobs stage data in/out of the parent job's
                    # working directory via GridFTP.
                    argosubjob.input_url = (
                        settings.GRIDFTP_PROTOCOL +
                        settings.GRIDFTP_SERVER +
                        argojob.working_directory
                    )
                    argosubjob.output_url = argosubjob.input_url
                    argosubjob.save()
                    subjob_pks.append(argosubjob.pk)
                argojob.subjob_pk_list = Serializer.serialize(subjob_pks)
                argojob.save()
                self.process_queue.put(QueueMessage.QueueMessage(argojob.pk, 0, 'new job received'))
            except Exception as e:
                # NOTE(review): if the failure happened before argojob.job_id
                # was assigned, building this message raises again — confirm
                # that is acceptable here.
                message = 'received an exception while parsing the incoming user job. Exception: ' + str(e) + '; userjob id = ' + str(userjob['user_id']) + '; job_id = ' + str(argojob.job_id) + '; job_name = ' + userjob['name']
                logger.error(message)
            # Delete the per-message DB connection.
            del connections[db_connection_id]
            logger.debug('added user job')
        else:
            logger.error('received user job message with no body')
        # acknowledge message
        channel.basic_ack(method_frame.delivery_tag)
|
{
"content_hash": "8b21de0a2aad40d7a46e7827153e2ce8",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 284,
"avg_line_length": 50.40625,
"alnum_prop": 0.5829200247985121,
"repo_name": "hep-cce/hpc-edge-service",
"id": "4e47770eb741b8b755841a2d2cbb6219a4c76dc8",
"size": "6452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "argo/UserJobReceiver.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17780"
},
{
"name": "HTML",
"bytes": "13197"
},
{
"name": "JavaScript",
"bytes": "455848"
},
{
"name": "Python",
"bytes": "306077"
},
{
"name": "Shell",
"bytes": "2825"
}
],
"symlink_target": ""
}
|
import pytest
import jwe
from jwe import validation
from cryptography.exceptions import InvalidTag
class TestValidation:
    """Unit tests for jwe.validation.validate_header."""

    def test_correct_header(self):
        # A well-formed header must pass validation without raising.
        try:
            validation.validate_header({
                'alg': 'dir',
                'enc': 'A256GCM'
            })
        except Exception as e:
            # Bug fix: pytest.fail expects a string reason, not the
            # exception object itself.
            pytest.fail(str(e))

    def test_missing_keys(self):
        with pytest.raises(jwe.exceptions.MalformedHeader):
            validation.validate_header({})

    def test_missing_alg(self):
        # Header with only 'alg' is missing 'enc'.
        with pytest.raises(jwe.exceptions.MalformedHeader):
            validation.validate_header({'alg': 'dir'})

    def test_missing_enc(self):
        # Header with only 'enc' is missing 'alg'.
        with pytest.raises(jwe.exceptions.MalformedHeader):
            validation.validate_header({'enc': 'A256GCM'})

    def test_unsupported_alg(self):
        with pytest.raises(jwe.exceptions.UnsupportedAlgorithm):
            validation.validate_header({'alg': 'foo', 'enc': 'A256GCM'})
        with pytest.raises(jwe.exceptions.UnsupportedAlgorithm):
            validation.validate_header({'alg': 'bar', 'enc': 'A256GCM'})
        with pytest.raises(jwe.exceptions.UnsupportedAlgorithm):
            validation.validate_header({'alg': 'baz', 'enc': 'A256GCM'})

    def test_unsupported_enc(self):
        with pytest.raises(jwe.exceptions.UnsupportedEncryption):
            validation.validate_header({'alg': 'dir', 'enc': 'RSA'})
        with pytest.raises(jwe.exceptions.UnsupportedEncryption):
            validation.validate_header({'alg': 'dir', 'enc': 'A126'})
        with pytest.raises(jwe.exceptions.UnsupportedEncryption):
            validation.validate_header({'alg': 'dir', 'enc': 'Base64'})
class TestApi:
    """Round-trip tests for the top-level jwe encrypt/decrypt API."""

    def test_encrypt_decrypt(self):
        key = jwe.kdf(b'Testing', b'Pepper')
        data = b'Just some data'
        encrypted = jwe.encrypt(data, key)
        assert encrypted != data
        assert jwe.decrypt(encrypted, key) == data

    def test_improper_key(self):
        key = jwe.kdf(b'Testing', b'Pepper')
        data = b'Just some data'
        encrypted = jwe.encrypt(data, key)
        with pytest.raises(InvalidTag):
            # TODO make this a custom exception
            # Bug fix: the trailing "== data" comparison was a no-op whose
            # result was discarded; the point of this call is that it raises.
            jwe.decrypt(encrypted, jwe.kdf(b'somekey', b'Salt'))
class TestDecryption:
    """Failure-mode tests for jwe.decrypt."""

    def test_invalid_data(self):
        key = jwe.kdf(b'key', b'Salt')
        # Anything that is not a well-formed JWE token must be rejected.
        with pytest.raises(jwe.exceptions.MalformedData):
            jwe.decrypt(b'junkdata', key)

    def test_invalid_header_json(self):
        key = jwe.kdf(b'key', b'Salt')
        token = jwe.encrypt(b'Just Some Data', key)
        truncated = token[3:]  # Cut out some of the JSON
        with pytest.raises(jwe.exceptions.MalformedData) as excinfo:
            jwe.decrypt(truncated, key)
        assert excinfo.value.args[0] == 'Header is not valid JSON'

    def test_no_key_wrapping(self):
        key = jwe.kdf(b'key', b'Salt')
        segments = jwe.encrypt(b'Just Some Data', key).split(b'.')
        # Inject a non-empty encrypted-key segment (base64 of "rappa").
        segments[1] = b'cmFwcGE='
        with pytest.raises(jwe.exceptions.UnsupportedOption) as excinfo:
            jwe.decrypt(b'.'.join(segments), key)
        assert excinfo.value.args[0] == 'Key wrapping is currently not supported'
|
{
"content_hash": "bed2d4637df7173b359a55c1652a6573",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 83,
"avg_line_length": 34.191489361702125,
"alnum_prop": 0.6014312383322962,
"repo_name": "chrisseto/pyjwe",
"id": "67df6bfb38ec286b167f4776ef766bc461cd4103",
"size": "3214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pyjwe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8751"
}
],
"symlink_target": ""
}
|
import riffle
import time
import os
import sys
import random
import argparse
import json
import math
import struct
import binascii
# Command-line interface. NOTE: the module-level name "parser" is reused
# below for the loaded JSON config, shadowing the ArgumentParser.
parser = argparse.ArgumentParser(description="A script for spamming the Exis node backend with random data")
parser.add_argument('-p','--parser',default='../app/parser.json',help="Location of parser json file",metavar="parser")
# Bug fix: the help text claimed a default of 10, but the actual default is 1.
parser.add_argument('-f','--frequency_hz',default=1, help="Frequency in hertz of messages sent over the network. Defaults to 1",metavar="frequency_hz")
parser.add_argument('-b','--batch_size',default=5, help="Size of CAN message batches. Defaults to 5",metavar="batch_size")
parser.add_argument('-i','--can_interface',default="can1", help="can interface used to read message from the bus",metavar="can_interface")
parser.add_argument('-l','--backend_location',default="ws://192.168.1.99:9000", help="Location of backend. Defaults to ws://192.168.1.99:9000",metavar="backend")
# NOTE(review): any non-empty string (including "False") parses truthy here;
# consider action='store_true'. Left as-is to preserve the CLI interface.
parser.add_argument('-sb','--spam_bus',default=True, help="Boolean: spam CAN bus on the WCM. Defaults to True",metavar="spam_bus")
args = vars(parser.parse_args())

# Load the CAN message specification; "parser" now refers to this dict,
# which the functions below read as a module-level global.
with open(args['parser']) as parser_file:
    parser = json.load(parser_file)
def pick_message():
    """Return a random (msg_type, spec) pair from the parser's msg_type table."""
    # random.choice requires a sequence; dict.keys() is a non-indexable view
    # on Python 3, so materialise the keys first (works on Python 2 as well).
    rand_msg_type = random.choice(list(parser['msg_type']))
    return rand_msg_type, parser['msg_type'][rand_msg_type]
def generate_sid(module):
    """Build the 3-digit uppercase hex SID string for a module, or '000'."""
    # Bug fix: "is not" compares object identity, not value; with string
    # literals it only appeared to work via CPython interning. Use !=.
    if module != 'ALL' and 'from' in parser['SID'][module]:
        int_sid = parser['SID'][module]['from']
        # Zero-padded to 3 uppercase hex digits.
        sid = "{0:0{1}X}".format(int_sid,3)
        print(sid)
    else:
        sid = '000'
    return sid
def encode_data(data, bytes):
    """Mask *data* to the given byte width and return it as an even-length
    lowercase hex string (no '0x' prefix, padded to whole bytes).

    Generalised: any positive byte width is supported, replacing the original
    hard-coded 1-7 byte lookup table (which raised KeyError for width >= 8).
    """
    # (1 << 8*n) - 1 is the all-ones mask for n bytes.
    data = int(data & ((1 << (8 * bytes)) - 1))
    encoded = format(data, 'x')
    # Pad with a leading zero if the digit count is odd.
    length = len(encoded)
    encoded = encoded.zfill(length + length % 2)
    print("Data: %s Byte Size: %s length %s" % (encoded, bytes, len(encoded)))
    return encoded
def generate_message():
    """Build one random CAN-style message; returns (sid, msg_type, data_hex)."""
    print("Generating random message")
    data_hex = ""
    msg_type,msg_spec = pick_message()
    # Re-draw until a non-command message spec is picked.
    while 'cmd' in msg_spec:
        msg_type,msg_spec = pick_message()
    module = msg_spec['module']
    for val in msg_spec['values']:
        # Python 2 idiom: dict.keys()[0] is not indexable on Python 3.
        spec = val[val.keys()[0]]
        #print(val.keys()[0])
        # Only numeric fields with a complete spec get randomised data.
        # NOTE(review): spec['units'] is read unconditionally -- presumably
        # every spec carries a 'units' key; confirm against parser.json.
        if 'nominal_high' in spec and 'nominal_low' in spec and 'scalar' in spec and 'byte_size' in spec and str(spec['units']) != 'str':
            # Sample uniformly in the nominal range widened by 10% each side.
            off = spec['nominal_high'] * .1
            high = spec['nominal_high'] + off
            low = spec['nominal_low'] - off
            byte_size = spec['byte_size']
            scalar = spec['scalar']
            data = int(random.uniform(low,high)/scalar)
            data_hex = data_hex + encode_data(data,byte_size)
        else:
            # NOTE(review): this assigns instead of appending, discarding any
            # previously accumulated hex -- presumably should be "+="; confirm.
            data_hex = "00"
    sid = generate_sid(module)
    print("SID: %s Type: %s Data: %s " %(sid,msg_type,data_hex))
    return sid, msg_type, data_hex
class spammer(riffle.Domain):
    """Riffle domain that publishes random CAN-style messages forever."""

    def onJoin(self):
        print("Connected to Exis Node at %s" % args['backend_location'])
        # Seconds between sends; frequency_hz is a string when given on the CLI.
        sleep_time = math.pow(int(args['frequency_hz']), -1)
        while True:
            if args['spam_bus']:
                sid, msg_type, data = generate_message()
                self.publish("cmd", "%s#%s%s" % (sid, msg_type, data))
                #subprocess.call('cansend %s %s#%s%s'%(args['can_interface'],sid,msg_type,data) , shell=True)
            else:
                batch = []
                # int(): batch_size is a string when given on the command line.
                for i in range(int(args['batch_size'])):
                    sid, msg_type, data = generate_message()
                    # Bug fix: the original appended the undefined name
                    # 'mock_msg' (NameError); collect the generated message.
                    batch.append((sid, msg_type, data))
                #self.publish("can",[[str(current_milli_time()),"100","00","00 00"],[str(current_milli_time()),"100","00","00 00"]]) # Using for tests
            # The original also computed an unused per-iteration 'wait_time'
            # identical to sleep_time; removed as dead code.
            time.sleep(sleep_time)
def _main():
    """Connect to the configured Exis fabric and start the spammer."""
    print("Starting exis node spammer")
    print(args['backend_location'])
    riffle.SetFabric(args['backend_location'])
    spammer('xs.node').join()

if __name__ == '__main__':
    _main()
|
{
"content_hash": "b90a09f5b077001bd54e5751aa208bc3",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 162,
"avg_line_length": 39.628571428571426,
"alnum_prop": 0.596731554914684,
"repo_name": "jonahpelfrey/Badgerloop_Competition_Dashboard",
"id": "5e43c8dc2ef0e8cf16434f47e0a41d220b280de6",
"size": "4161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/spam_exis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "24139"
},
{
"name": "CSS",
"bytes": "72559"
},
{
"name": "HTML",
"bytes": "3713227"
},
{
"name": "JavaScript",
"bytes": "506485"
},
{
"name": "Python",
"bytes": "6157"
}
],
"symlink_target": ""
}
|
import os
import tempfile
import time

import pscheduler
from flask import request
from flask import send_file

from pschedulerapiserver import application

from .dbcursor import dbcursor_query
from .json import *
from .limitproc import *
from .log import log
from .response import *
from .tasks import task_exists
from .util import server_netloc
# These are used by schedule()
CLASS_LEVELS = {
"exclusive": 1,
"normal": 2,
"background": 3,
"background-multi": 4,
"nonstart": 5,
"preempted": 6
}
CHART_COLUMNS = [
"\\n",
"\\nExclusive",
"\\nNormal",
"Background\\nSingle-Result",
"Background\\nMulti-Result",
"Non\\nStarting",
"\\nPreempted"
]
CHART_COLUMNS_COUNT = len(CHART_COLUMNS) - 1
CHART_SCRIPT = """
reset
set terminal {term}
set output "{image_file}"
set timefmt "%Y-%m-%dT%H:%M:%S"
unset xtics
set x2range [0.5:{columns_count}.5]
set x2tics out scale 0 ( {x2tics} )
set ydata time
set ytics out nomirror
set format y "%Y-%m-%d\\n%H:%M:%S"
set yrange [{start}:{end}] reverse
set title "pScheduler Schedule for {host} at {time} / {runs} Runs"
set key off
set grid front noxtics ytics mytics linetype 0
set boxwidth 0.9
set style fill solid border lc rgb "#00e000"
plot "{data_file}" using 1:2:3:2:3 \\
with candlesticks \\
linewidth 1 \\
linecolor rgb "#00e000" \\
axes x2y1
"""
CHART_TERM = {
"png": "png notransparent truecolor size 800,1200 background rgb \"#ffffff\"",
"svg": "svg size 800,1200 dynamic",
# This doesn't get used and is there so the format is considered valid.
"json": "",
# These are for debug and should revert to SVG by default.
"data": "svg size 800,1200 dynamic",
"script": "svg size 800,1200 dynamic",
}
CHART_SCRIPT_X2TICS = ", ".join(
[ '"{0}" {1}'.format(pair[1], pair[0])
for pair in enumerate(CHART_COLUMNS, start=0) ]
)
# Schedule
@application.route("/schedule", methods=['GET'])
def schedule():
    """Return the schedule between 'start' and 'end'.

    Query args:
      start, end -- datetimes bounding the range (optional)
      task       -- restrict output to a single task UUID (optional)
      format     -- json (default), png, svg, data or script
    """
    try:
        range_start = arg_datetime('start')
        range_end = arg_datetime('end')
    except ValueError:
        return bad_request('Invalid start or end time')
    try:
        task = arg_uuid("task")
    except ValueError:
        return bad_request('Invalid task UUID')

    out_format = arg_string("format") or "json"

    if out_format not in CHART_TERM:
        return bad_request("Invalid format '%s'" % (out_format))

    query = ["""
            SELECT
                lower(times),
                upper(times),
                upper(times) - lower(times) AS run_duration,
                task,
                run,
                state_enum,
                state_display,
                task_json,
                task_cli,
                test_json,
                tool_json,
                errors,
                priority
            FROM schedule
            WHERE times && tstzrange(%s, %s, '[)')
            """]
    args = [range_start, range_end]

    if task is not None:
        query.append("AND task = %s")
        args.append(task)

    # Longest runs first so the chart's candlesticks stack sensibly.
    if out_format != "json":
        query.append("ORDER BY run_duration DESC")

    cursor = dbcursor_query(" ".join(query), args)
    runs = cursor.rowcount

    if out_format == "json":

        result = []
        base_url = pscheduler.api_url_hostport(server_netloc(), "tasks/")
        for row in cursor:

            task_href = base_url + row[3]
            run_href = "%s/runs/%s" % (task_href, row[4])

            # Indices follow the SELECT column order above.
            run = {
                "start-time": pscheduler.datetime_as_iso8601(row[0]),
                "end-time": pscheduler.datetime_as_iso8601(row[1]),
                "href": run_href,
                "result-href": "%s/result" % run_href,
                "state": row[5],
                "state-display": row[6],
                "task": row[7],
                "cli": row[8],
                "test": row[9],
                # Bug fix: tool/errors/priority were off by one -- "tool"
                # repeated test_json (row[9]) and shifted the rest.
                "tool": row[10],
                "errors": row[11],
                "priority": row[12]
            }

            run["task"]["href"] = task_href

            result.append(run)

        # This is sanitized because it contains data from multiple tasks
        return ok_json(result)

    else:

        try:

            data_path = None
            image_path = None

            (_, data_path) = tempfile.mkstemp()
            (_, image_path) = tempfile.mkstemp()

            # Data

            with open(data_path, "w") as data:
                for row in cursor:
                    # Non-starting runs keep their own class; everything
                    # else is classified by the test's scheduling class.
                    sched_class = row[5] if row[5] in [ "nonstart" ] \
                                  else row[9]["scheduling-class"]
                    line = "%d %s %s\n" % (
                        CLASS_LEVELS[sched_class],
                        row[0].isoformat(),
                        row[1].isoformat()
                    )
                    data.write(line)
                    # Demoted from log.error: per-line trace output is not
                    # an error condition.
                    log.debug("LINE %s", line)
                data.close()

            if out_format == "data":
                return send_file(data_path, cache_timeout=1)

            # Script

            script = CHART_SCRIPT.format(
                term=CHART_TERM[out_format],
                image_file=image_path,
                columns_count=CHART_COLUMNS_COUNT,
                x2tics=CHART_SCRIPT_X2TICS,
                start='"{}"'.format(range_start.isoformat()) if range_start is not None else "",
                end='"{}"'.format(range_end.isoformat()) if range_end is not None else "",
                # NOTE(review): server_hostname is not imported explicitly
                # here -- presumably supplied by one of the wildcard
                # imports; confirm.
                host=server_hostname(),
                time=pscheduler.time_now().strftime("%Y-%m-%d %T %Z"),
                runs=runs,
                data_file=data_path
            )

            if out_format == "script":
                return ok(script)

            status, out, err = pscheduler.run_program(["gnuplot"], stdin=script)
            if status != 0:
                log.error("GNUPlot failed: %s" % (err))
                return error("Failed to plot schedule; see system logs.")

            return send_file(image_path, cache_timeout=1)

        except Exception as ex:
            log.exception()
            return error("Unable to plot schedule; see system logs. %s" % (ex))

        finally:
            # Best-effort cleanup of the temporary files (os is imported at
            # the top of the file).
            for target in [ data_path, image_path ]:
                if target is not None:
                    os.unlink(target)

    raise RuntimeError("Reached code that should not be reached.")
# Schedule, tuned for use with the monitor
@application.route("/monitor", methods=['GET'])
def monitor():
    """Return schedule rows for the on-screen monitor, limited to a window."""
    try:
        window_size = arg_cardinal('window')
    except ValueError as ex:
        return bad_request(str(ex))

    try:
        cursor = dbcursor_query("""SELECT ppf, lower(times), upper(times), task, run,
                                   state_enum, state_display, task_json,
                                   task_cli, priority FROM schedule_monitor(%s)""",
                                [window_size])
    except Exception as ex:
        log.exception()
        return error(str(ex))

    result = []
    base_url = pscheduler.api_url_hostport(server_netloc())
    for row in cursor:

        # Bug fix: in the SELECT above task is column 3 and run is column 4;
        # the hrefs previously used columns 2 and 3 (upper(times) and task),
        # producing bogus URLs.
        task_href = "%s/tasks/%s" % (base_url, row[3])
        run_href = "%s/runs/%s" % (task_href, row[4])

        run = {
            "ppf": row[0],
            "start-time": pscheduler.datetime_as_iso8601(row[1]),
            "end-time": pscheduler.datetime_as_iso8601(row[2]),
            "href": run_href,
            "result-href": "%s/result" % run_href,
            "state": row[5],
            "state-display": row[6],
            "task": row[7],
            "cli": row[8],
            "priority": row[9]
        }

        run["task"]["href"] = task_href
        result.append(run)

    # This is sanitized because it contains data from multiple tasks
    return ok_json(result)
|
{
"content_hash": "03bebe934039907967775303f8b8b82e",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 96,
"avg_line_length": 27.835125448028673,
"alnum_prop": 0.5207313932526397,
"repo_name": "perfsonar/pscheduler",
"id": "c4548037a4ef4a06d3f447fb9081538133533f3b",
"size": "7796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pscheduler-server/pscheduler-server/api-server/pschedulerapiserver/schedule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2076"
},
{
"name": "Dockerfile",
"bytes": "2027"
},
{
"name": "Jinja",
"bytes": "586"
},
{
"name": "M4",
"bytes": "4638"
},
{
"name": "Makefile",
"bytes": "177025"
},
{
"name": "PLpgSQL",
"bytes": "184547"
},
{
"name": "Perl",
"bytes": "4575"
},
{
"name": "Python",
"bytes": "1866695"
},
{
"name": "Roff",
"bytes": "2379"
},
{
"name": "Shell",
"bytes": "138364"
},
{
"name": "jq",
"bytes": "2680"
},
{
"name": "sed",
"bytes": "27049"
}
],
"symlink_target": ""
}
|
import phoneme_features, utterance_load, os, prosody, pos
from error_messages import SiReError
class Utterance(object):
  """An object representing an utterance."""
  #Create an empty utt.
  #The proto utterance follows the following conventions:
  #It is a dictionary with two keys, id giving the name of the utt,
  #and utt containing the actual utt.
  #Utt is itself a list of proto_words.
  #A proto_word is a dict containing two keys, id giving the name of the word,
  #and syllables.
  #Syllables is itself a list of proto_syllables.
  #A proto_syllable is a dict containing three keys, id giving the name of the syllable,
  #stress giving its stress value and phonemes.
  #The id of a syllable is expected to be the id of all its contained
  #phonemes in order as a string.
  #Phonemes is itself a list of proto_phonemes.
  #A proto_phoneme is a dict containing 4 keys.
  #Id being the type of phoneme, must be a valid phoneme in phoneme set used.
  #Start - the start time in HTK style values where 10000 is 1ms.
  #End - the end time in HTK style values where 10000 is 1 ms.
  #Stress - the stress value of the phoneme - this may be None in case we do not have the information.
  def __init__(self, lab, args):
    #Dispatches on args.intype to build a proto utt, then wires up
    #Word/Syllable/Phoneme objects with mutual references.
    #Make a proto utt from the input
    if args.intype == "align_mlf":
      proto = utterance_load.proto_from_align_lab(lab)
      self.txtloaded = False
    elif args.intype == "state_align_mlf":
      proto = utterance_load.proto_from_state_align_lab(lab)
      self.txtloaded = False
    elif args.intype == "hts_mlf":
      proto = utterance_load.proto_from_hts_lab(lab, args.state_level)
      self.txtloaded = False
    elif args.intype == "sire_lab":
      #As we need additional information here we check if args contains it
      if not hasattr(args, "context_type"):
        raise SiReError("You're trying to create an utterance from a SiRe label but have not told what kind of positional context_type was used!")
      if not hasattr(args, "HHEd_fix"):
        raise SiReError("You're trying to create an utterance from a SiRe label but have not told if HHEd_fix was used to create the labels!")
      proto = utterance_load.proto_from_sire_lab(lab, args.context_type, args.HHEd_fix)
      self.txtloaded = False
    elif args.intype == "txt":
      #Check if args has all the necessary elements and insert defaults if not.
      if not hasattr(args, 'pron_reduced') or args.pron_reduced == False:
        args.pron_reduced = False
        args.lm_score_dir = None
        args.reduction_level = 1.0
      else:
        #If we are we need to check if we know enough to do it and fail if we don't.
        if not hasattr(args, 'lm_score_dir'):
          raise SiReError("You have asked to produce a reduced phonemisation but no path to a directory containing LM word probabilities to base the reduction on.")
        if not hasattr(args, 'reduction_level'):
          raise SiReError("You have asked to produce a reduced phonemisation but not specified to which degree the sentence should be reduced.")
      if not hasattr(args, 'general_sil_phoneme'):
        print "Warning! args does not tell if there is a standard silence phoneme! Using default... (\"sil\")"
        args.general_sil_phoneme = "sil"
      if not hasattr(args, 'comma_is_pause'):
        print "Warning! args does not tell if commas should be used as pauses! Using default... (no)"
        args.comma_is_pause = False
      if not hasattr(args, 'stanford_pcfg_parse'):
        print "Warning! args does not tell if we are using stanford parsing! Using default... (no)"
        args.stanford_pcfg_parse = False
      if args.stanford_pcfg_parse == False:
        args.pcfgdict = False
      proto = utterance_load.proto_from_txt(lab, args.dictionary, args.general_sil_phoneme, args.comma_is_pause, args.stanford_pcfg_parse, args.pcfgdict, args.pron_reduced, args.lm_score_dir, args.reduction_level)
      self.txtloaded = True
    else:
      raise SiReError("Don't know what to do with intype - {0}".format(args.intype))
    #Construct the utt from the proto utt
    self.id = proto["id"]
    self.phonemes = []
    self.syllables = []
    self.words = []
    #We need to know which phoneme features this utterance is created with.
    if hasattr(args, 'dictionary'):
      self.phoneme_features = args.dictionary.phoneme_feats
    elif hasattr(args, 'phoneme_features'):
      self.phoneme_features = args.phoneme_features
    else:
      raise SiReError("args does not contain either a dictionary or a phoneme featureset!")
    #Walk the proto structure, creating objects and tracking running
    #utterance-level positions for phonemes and syllables.
    s_utt_pos = 0
    p_utt_pos = 0
    for wi, proto_word in enumerate(proto["utt"]):
      p_word_pos = 0
      word = Word()
      word.load_from_proto(proto_word, wi, p_utt_pos, s_utt_pos, len(proto["utt"]), self)
      self.words.append(word)
      for si, proto_syll in enumerate(proto_word["syllables"]):
        syll = Syllable()
        syll.load_from_proto(proto_syll, p_utt_pos, si, s_utt_pos, word, self)
        self.syllables.append(syll)
        for pi, proto_phone in enumerate(proto_syll["phonemes"]):
          phoneme = Phoneme()
          phoneme.load_from_proto(proto_phone, pi, p_utt_pos, p_word_pos, syll, word, self)
          self.phonemes.append(phoneme)
          p_word_pos += 1
          p_utt_pos += 1
        s_utt_pos += 1
        #Resolve the syllable's stored positions into phoneme references.
        syll.add_phonemes()
      word.add_phonemes()
      word.add_syllables()
    #If we should use the stanford pcfg parse info
    if hasattr(args, 'stanford_pcfg_parse') and args.stanford_pcfg_parse:
      print "Loading stanford pcfg parse info to utt..."
      if args.intype != "txt":
        utterance_load.load_txt(self, os.path.join(args.txtdir, self.id+".txt"))
      utterance_load.load_stanford_pcfg_parse(self, args.pcfgdict[self.id], args.comma_is_pause)
    #If we should use the stanford dependency parse info
    if hasattr(args, 'stanford_dependency_parse') and args.stanford_dependency_parse:
      print "Loading stanford dependency parse info to utt..."
      if args.intype != "txt" and self.txtloaded == False:
        utterance_load.load_txt(self, os.path.join(args.txtdir, self.id+".txt"))
      utterance_load.load_stanford_dependency_parse(self, args.dependencydict[self.id])
    #If we output a Festival context set we should modify the UTT a bit further.
    #Right now we use the full festival features as standard, but some operations, like corpus analysis, does not rely on this and it is a nuisance to have the text a requirement so this is still just an option.
    if args.festival_features == True:
      #We need to know the words
      if args.intype != "txt" and self.txtloaded == False:
        # print "ID", self.id
        utterance_load.load_txt(self, os.path.join(args.txtdir, self.id+".txt"), args.emphasis)
      #If we have a pcfg parse we have a proper POS tag mechanism and they have already been added
      if not args.stanford_pcfg_parse:
        pos.simple_festival_pos_predict(self)
      prosody.simple_festival_accent_predict(self)
  # #Replacing UH - test!
  # if not self.txtloaded:
  #   utterance_load.load_txt(self, os.path.join(args.txtdir, self.id+".txt"))
  # for w in self.words:
  #   if w.id.lower() in ["uh", "uhu", "um", "uhum"]:
  #     for p in w.phonemes:
  #       if p.id in ["@", "V"]:
  #         p.id = "UHV"
  def num_phonemes(self):
    """Total number of phonemes, including silences."""
    return len(self.phonemes)
  def num_syllables(self):
    """Total number of syllables."""
    return len(self.syllables)
  def num_words(self):
    """Total number of words, including pause tokens."""
    return len(self.words)
  def num_words_no_pau(self, keep_comma=False):
    """Number of words excluding pauses (and commas unless keep_comma)."""
    return len(self.get_words_no_pau(keep_comma))
  def num_phonemes_no_pau(self):
    """Number of phonemes excluding silence phonemes."""
    return len(self.get_phonemes_no_pau())
  #Gets the words without pausing
  #Used when comparing to stanford parse etc.
  def get_words_no_pau(self, keep_comma=False):
    tmp = []
    ignore = self.phoneme_features.get_sil_phonemes()
    for word in self.words:
      #NOTE(review): "," is appended once per iteration, so ignore grows with
      #each word; harmless for membership tests but could be hoisted.
      if keep_comma == False:
        ignore += [","]
      if word.id not in ignore:
        tmp.append(word)
    return tmp
  def get_phonemes_no_pau(self):
    """Return phonemes with silence phonemes filtered out."""
    tmp = []
    ignore = self.phoneme_features.get_sil_phonemes()
    for p in self.phonemes:
      if p.id not in ignore:
        tmp.append(p)
    return tmp
  def num_emph_words(self):
    """Count words considered emphasised (fully upper-case ids)."""
    num = 0
    #xrange: this module targets Python 2.
    for i in xrange(0,len(self.words)):
      if self.words[i].id.isupper():
        num += 1
    # print "Number of emphasised words is:", num
    return num
class Phoneme:
  """A single phoneme, linked to its syllable, word and utterance."""
  def __init__(self, p_id=None):
    self.id = p_id

  def load_from_proto(self, proto_phone, p_syll_pos, p_utt_pos, p_word_pos, syll, word, utt):
    """Fill in id, timing, stress and parent links from a proto_phoneme dict."""
    self.id = proto_phone["id"]
    self.start = proto_phone["start"]
    self.end = proto_phone["end"]
    self.stress = proto_phone["stress"]
    #Parents are stored by reference, not copied.
    self.parent_syllable = syll
    self.parent_word = word
    self.parent_utt = utt
    if "states" in proto_phone:
      self.states = proto_phone["states"]

  #Position lookups walk the parent's phoneme list; linear, but keeps the
  #structure fully dynamic when parses are merged in later.
  def pos_in_syllable(self):
    """Index of this phoneme within its syllable."""
    for n, candidate in enumerate(self.parent_syllable.phonemes):
      if candidate == self:
        return n
    raise SiReError("Cannot find self {0} in syll {1}!".format(self.id, self.parent_syllable.id))

  def pos_in_word(self):
    """Index of this phoneme within its word."""
    for n, candidate in enumerate(self.parent_word.phonemes):
      if candidate == self:
        return n
    raise SiReError("Cannot find phoneme {0} in word {1}!".format(self.id, self.parent_word.id))

  def pos_in_utt(self):
    """Index of this phoneme within the whole utterance."""
    for n, candidate in enumerate(self.parent_utt.phonemes):
      if candidate == self:
        return n
    raise SiReError("Cannot find phoneme {0} in utt {1}!".format(self.id, self.parent_utt.id))

  def get_feats(self):
    """Feature list for this phoneme from the utterance's feature set."""
    return self.parent_utt.phoneme_features.get_phoneme_feats(self.id)

  def get_feats_dict(self):
    """Feature dict for this phoneme from the utterance's feature set."""
    return self.parent_utt.phoneme_features.get_phoneme_feats_dict(self.id)

  def get_left_phoneme(self):
    """Preceding phoneme in the utt, or a dummy "xx" phoneme at the start."""
    here = self.pos_in_utt()
    return self.parent_utt.phonemes[here-1] if here > 0 else Phoneme("xx")

  def get_left_left_phoneme(self):
    """Phoneme two to the left, or a dummy "xx" phoneme near the start."""
    here = self.pos_in_utt()
    return self.parent_utt.phonemes[here-2] if here > 1 else Phoneme("xx")

  def get_right_phoneme(self):
    """Following phoneme in the utt, or a dummy "xx" phoneme at the end."""
    here = self.pos_in_utt()
    if here == self.parent_utt.num_phonemes() - 1:
      return Phoneme("xx")
    return self.parent_utt.phonemes[here+1]

  def get_righ_right_phoneme(self):
    """Phoneme two to the right, or a dummy "xx" phoneme near the end."""
    here = self.pos_in_utt()
    if here >= self.parent_utt.num_phonemes() - 2:
      return Phoneme("xx")
    return self.parent_utt.phonemes[here+2]

  #Note that if the utterance was created from text then this duration is
  #phony and not valid.
  def get_duration(self):
    """Duration in HTK-style units (end - start)."""
    return int(self.end) - int(self.start)
class Syllable:
  """A class representing a syllable."""
  #An empty syll.
  #Using this can be dangerous if you don't add everything necessary later.
  def __init__(self, s_id=None):
    #Bug fix: s_id was previously ignored and self.id was always None,
    #unlike Phoneme.__init__ which honours its p_id argument.
    self.id = s_id
  def load_from_proto(self, proto_syll, current_phoneme_utt_pos, s_word_pos, s_utt_pos, word, utt):
    """Populate this syllable from a proto_syllable dict and attach parents."""
    self.id = proto_syll["id"]
    self.stress = proto_syll["stress"]
    #These work because they are objects and are so passed by reference value.
    #I.e. a copy of the reference to the object is what is stored not the full object.
    self.parent_word = word
    self.parent_utt = utt
    #We initially save the utt pos of each phoneme in the syll
    #instead of direct references because of the order we create these.
    #These are later added and this list removed.
    self.child_phoneme_utt_positions = []
    for i in range(len(proto_syll["phonemes"])):
      self.child_phoneme_utt_positions.append(i+current_phoneme_utt_pos)
    #This will be filled later
    self.phonemes = None
    self.vowel_id = self.find_vowel(proto_syll, utt.phoneme_features)
  #This works because we are passing by reference value.
  def add_phonemes(self):
    """Resolve stored utt positions into direct phoneme references."""
    self.phonemes = []
    for i in self.child_phoneme_utt_positions:
      self.phonemes.append(self.parent_utt.phonemes[i])
    del self.child_phoneme_utt_positions
  #If a syllable contains more than one vowel this return the first one.
  def find_vowel(self, proto_syll, phoneme_features):
    """Return the id of the first vowel phoneme, or "novowel"."""
    for p in proto_syll["phonemes"]:
      if phoneme_features.is_vowel(p["id"]):
        return p["id"]
    return "novowel"
  def pos_in_word(self):
    """Index of this syllable within its word."""
    for i, p in enumerate(self.parent_word.syllables):
      if p == self:
        return i
    raise SiReError("Cannot find syllable {0} in word {1}!".format(self.id, self.parent_word.id))
  def pos_in_utt(self):
    """Index of this syllable within the utterance."""
    for i, p in enumerate(self.parent_utt.syllables):
      if p == self:
        return i
    raise SiReError("Cannot find syllable {0} in utt {1}!".format(self.id, self.parent_utt.id))
  def num_phonemes(self):
    return len(self.phonemes)
  def start_time(self):
    return self.phonemes[0].start
  def end_time(self):
    return self.phonemes[-1].end
  #NOTE(review): Phoneme uses get_phoneme_feats/get_phoneme_feats_dict while
  #these call get_phone_feats/get_phone_feats_dict -- confirm which names the
  #feature set actually provides before unifying.
  def get_vowel_feats(self):
    return self.parent_utt.phoneme_features.get_phone_feats(self.vowel_id)
  def get_vowel_feats_dict(self):
    return self.parent_utt.phoneme_features.get_phone_feats_dict(self.vowel_id)
  def get_left_syllable(self):
    """Previous syllable in the utt, or "xx" at the start."""
    pos = self.pos_in_utt()
    if pos == 0:
      return "xx"
    else:
      return self.parent_utt.syllables[pos-1]
  def get_right_syllable(self):
    """Next syllable in the utt, or "xx" at the end."""
    pos = self.pos_in_utt()
    u_len = self.parent_utt.num_syllables() - 1
    if pos == u_len:
      return "xx"
    else:
      return self.parent_utt.syllables[pos+1]
class Word:
  """A class representing a word."""
  #An empty word with only a name.
  #This can go wrong if you don't add everything needed later.
  def __init__(self):
    self.id = None
  def load_from_proto(self, proto_word, word_utt_pos, current_phoneme_utt_pos, current_syll_utt_pos, proto_utt_len, utt):
    """Populate this word from a proto_word dict and attach it to utt."""
    self.id = proto_word["id"]
    self.parent_utt = utt
    #We initially save the utt pos of each syllable and phoneme in the word
    #instead of direct references because for the order we create these.
    #These can later be added, but should not be necessary
    #as each phoneme and syll already nows its parent word.
    self.child_syllable_utt_positions = []
    for i in range(len(proto_word["syllables"])):
      self.child_syllable_utt_positions.append(i+current_syll_utt_pos)
    self.phonemes = None
    self.child_phoneme_utt_positions = []
    #Total phoneme count across the word's syllables.
    tmp = 0
    for s in proto_word["syllables"]:
      tmp += len(s["phonemes"])
    for i in range(tmp):
      self.child_phoneme_utt_positions.append(i+current_phoneme_utt_pos)
    self.syllables = None
    # print proto_word["id"]
  #This works because we are passing by reference value.
  def add_syllables(self):
    """Resolve stored utt positions into direct syllable references."""
    self.syllables = []
    for i in self.child_syllable_utt_positions:
      self.syllables.append(self.parent_utt.syllables[i])
    del self.child_syllable_utt_positions
  #This works because we are passing by reference value.
  def add_phonemes(self):
    """Resolve stored utt positions into direct phoneme references."""
    self.phonemes = []
    for i in self.child_phoneme_utt_positions:
      self.phonemes.append(self.parent_utt.phonemes[i])
    del self.child_phoneme_utt_positions
  def pos_in_utt(self):
    """Index of this word within the utterance."""
    for i, p in enumerate(self.parent_utt.words):
      if p == self:
        return i
    raise SiReError("Cannot find word {0} in utt {1}!".format(self.id, self.parent_utt.id))
  def num_syllables(self):
    return len(self.syllables)
  def num_phonemes(self):
    return len(self.phonemes)
  def start_time(self):
    return self.phonemes[0].start
  def end_time(self):
    return self.phonemes[-1].end
  #Gets the durations of the word.
  #NOTE: This will be a phony if utt created from text.
  def get_duration(self):
    return int(self.phonemes[-1].end) - int(self.phonemes[0].start)
  #Returns the previous word in the utterance. Returns "xx" if this is the first word in the utt.
  def get_prev_word(self):
    if self.pos_in_utt() == 0:
      return "xx"
    else:
      return self.parent_utt.words[self.pos_in_utt() - 1]
  #Returns the next word in the utterance. Returns "xx" if this is the last word in the utt.
  def get_next_word(self):
    #Pos in utt starts from 0, len starts at 1, so add 1
    pos = self.pos_in_utt() + 1
    if pos == len(self.parent_utt.words):
      return "xx"
    else:
      #We want the next one and have already added one to pos.
      return self.parent_utt.words[pos]
  #Assigns emphasis to word if original text is capitalised
  #NOTE(review): returns True or implicitly None (never False) -- callers
  #presumably rely on truthiness; confirm before changing.
  def get_emph(self):
    if self.id.isupper():
      return True
  # Check if the next word is emphasised
  #NOTE(review): may return "xx", True or implicitly None.
  def forward_emph(self):
    pos = self.pos_in_utt() + 1
    if pos == len(self.parent_utt.words):
      return "xx"
    else:
      if self.parent_utt.words[pos].id.isupper():
        return True
  # Check if the previous word is emphasised
  #NOTE(review): may return "xx", True or implicitly None.
  def backward_emph(self):
    if self.pos_in_utt() <= 0:
      return "xx"
    else:
      pos = self.pos_in_utt() - 1
      if self.parent_utt.words[pos].id.isupper():
        return True
  # Count how many words until the next emphasised word
  def next_emph(self):
    pos = self.pos_in_utt()
    #xrange: this module targets Python 2.
    for i in xrange(pos+1, len(self.parent_utt.words)+1):
      if i == len(self.parent_utt.words):
        return "xx"
      elif self.parent_utt.words[i].id.isupper():
        num = i - pos
        return num
  # Count how many words until the previous emphasised word
  #NOTE(review): if the word at index 0 is neither lower- nor upper-case
  #(e.g. punctuation), the loop can fall through and return None -- confirm
  #callers tolerate that.
  def prev_emph(self):
    pos = self.pos_in_utt()
    for i in xrange(pos, -1, -1):
      if i <= 0 and self.parent_utt.words[i].id.islower():
        return "xx"
      elif self.parent_utt.words[i].id.isupper():
        num = pos - i
        if num <= 0:
          return "xx"
        else:
          return num
|
{
"content_hash": "2e997d15ef22289c76a08ab39075c0cb",
"timestamp": "",
"source": "github",
"line_count": 475,
"max_line_length": 213,
"avg_line_length": 37.98736842105263,
"alnum_prop": 0.6568942584792729,
"repo_name": "RasmusD/SiRe",
"id": "e4b79732bd9c252473d1a217670b6fbf719906d4",
"size": "19170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SiReCore/utterance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "264653"
},
{
"name": "Shell",
"bytes": "9614"
}
],
"symlink_target": ""
}
|
from tastypie.authentication import SessionAuthentication
from tastypie.authorization import DjangoAuthorization
from spa.api.v1.BaseResource import BaseResource
from spa.models.notification import Notification
from spa.models.userprofile import UserProfile
class NotificationResource(BaseResource):
    """Tastypie resource exposing a user's notifications."""

    class Meta:
        queryset = Notification.objects.order_by('-id')
        resource_name = 'notification'
        authentication = SessionAuthentication()
        authorization = DjangoAuthorization()
        always_return_data = True
        excludes = ['accepted_date']

    def authorized_read_list(self, object_list, bundle):
        # Only the recipient may list their own notifications.
        return object_list.filter(to_user=bundle.request.user)

    def dehydrate(self, bundle):
        """Attach sender display fields, falling back to defaults when absent."""
        sender = bundle.obj.from_user
        if sender is None:
            url = "#"
            image = UserProfile.get_default_avatar_image()
            name = UserProfile.get_default_moniker()
        else:
            url = sender.get_absolute_url()
            image = sender.get_sized_avatar_image(42, 42)
            name = sender.get_nice_name()
        bundle.data['user_url'] = url
        bundle.data['user_image'] = image
        bundle.data['user_name'] = name
        return bundle

    def alter_list_data_to_serialize(self, request, data):
        # Count of notifications not yet accepted by the requesting user.
        data['meta']['is_new'] = Notification.objects.filter(to_user=request.user, accepted_date__isnull=True).count()
        return data
|
{
"content_hash": "3f70f1c21a2200814a567b98a0971dd4",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 118,
"avg_line_length": 45,
"alnum_prop": 0.67003367003367,
"repo_name": "fergalmoran/dss",
"id": "5562e42ef4680889e675b8e9b387a79b6bf29557",
"size": "1485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spa/api/v1/NotificationResource.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1335630"
},
{
"name": "CoffeeScript",
"bytes": "91082"
},
{
"name": "JavaScript",
"bytes": "3576558"
},
{
"name": "Python",
"bytes": "1543569"
}
],
"symlink_target": ""
}
|
from itertools import chain
import unittest
import numpy as np
import scipy.sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.preprocessing.data import scale
from ParamSklearn.implementations.StandardScaler import StandardScaler
from ParamSklearn.util import get_dataset
# NOTE(review): module-level fixture that is not referenced by any test in
# this file's visible portion -- possibly left over from a removed test.
matrix1 = [[0, 1, 2],
           [0, 1, 2],
           [0, 1, 2]]
class TestStandardScaler(unittest.TestCase):
    """Tests for ParamSklearn's StandardScaler: dense 1d/2d inputs,
    sparse (CSR) matrices, and a sparse real-world dataset."""
    def test_scaler_1d(self):
        """Test scaling of dataset along single axis"""
        rng = np.random.RandomState(0)
        X = rng.randn(5)
        X_orig_copy = X.copy()
        scaler = StandardScaler()
        X_scaled = scaler.fit(X).transform(X, copy=False)
        # Standardized output must have zero mean and unit std.
        assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
        assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
        # check inverse transform
        X_scaled_back = scaler.inverse_transform(X_scaled)
        assert_array_almost_equal(X_scaled_back, X_orig_copy)
        # Test with 1D list
        X = [0., 1., 2, 0.4, 1.]
        scaler = StandardScaler()
        X_scaled = scaler.fit(X).transform(X, copy=False)
        assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
        assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
        # sklearn's scale() helper must agree on the same list input.
        X_scaled = scale(X)
        assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
        assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
        # Test with sparse list
        X = scipy.sparse.coo_matrix((np.random.random((10,)),
                                     ([i**2 for i in range(10)],
                                      [0 for i in range(10)])))
        X = X.tocsr()
        scaler = StandardScaler()
        X_scaled = scaler.fit(X).transform(X, copy=False)
        self.assertFalse(np.any(np.isnan(X_scaled.data)))
        self.assertAlmostEqual(X_scaled.mean(), 0)
        self.assertAlmostEqual(np.sqrt(X_scaled.data.var()), 1)
        # Check that X has not been copied
        # self.assertTrue(X_scaled is X)
        # Check that the matrix is still sparse
        self.assertEqual(len(X.indices), 10)
    def test_scaler_2d_arrays(self):
        """Test scaling of 2d array along first axis"""
        rng = np.random.RandomState(0)
        X = rng.randn(4, 5)
        X[:, 0] = 0.0  # first feature is always of zero
        scaler = StandardScaler()
        X_scaled = scaler.fit(X).transform(X, copy=True)
        self.assertFalse(np.any(np.isnan(X_scaled)))
        # Zero-variance column stays at std 0; all others become unit std.
        assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
        assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
        # Check that X has been copied
        self.assertTrue(X_scaled is not X)
        # check inverse transform
        X_scaled_back = scaler.inverse_transform(X_scaled)
        self.assertTrue(X_scaled_back is not X)
        self.assertTrue(X_scaled_back is not X_scaled)
        assert_array_almost_equal(X_scaled_back, X)
        # Row-wise scaling via the scale() helper (axis=1).
        X_scaled = scale(X, axis=1, with_std=False)
        self.assertFalse(np.any(np.isnan(X_scaled)))
        assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
        X_scaled = scale(X, axis=1, with_std=True)
        self.assertFalse(np.any(np.isnan(X_scaled)))
        assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
        assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
        # Check that the data hasn't been modified
        self.assertTrue(X_scaled is not X)
        X_scaled = scaler.fit(X).transform(X, copy=False)
        self.assertFalse(np.any(np.isnan(X_scaled)))
        assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
        assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
        # Check that X has not been copied
        self.assertTrue(X_scaled is X)
        X = rng.randn(4, 5)
        X[:, 0] = 1.0  # first feature is a constant, non zero feature
        scaler = StandardScaler()
        X_scaled = scaler.fit(X).transform(X, copy=True)
        self.assertFalse(np.any(np.isnan(X_scaled)))
        assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
        assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
        # Check that X has not been copied
        self.assertTrue(X_scaled is not X)
        # Same thing for sparse matrices...
        X = scipy.sparse.coo_matrix((np.random.random((12,)),
                                     ([i for i in range(12)],
                                      [int(i / 3) for i in range(12)])))
        X = X.tocsr()
        scaler = StandardScaler()
        X_scaled = scaler.fit(X).transform(X, copy=False)
        self.assertFalse(np.any(np.isnan(X_scaled.data)))
        # Per-column mean/variance check via the CSR index pointers.
        # NOTE(review): iterating indptr per column assumes the scaler
        # returned a CSC-like layout here -- see the comment below.
        assert_array_almost_equal(
            [X_scaled.data[X_scaled.indptr[i]:X_scaled.indptr[i + 1]].mean()
             for i in range(X_scaled.shape[1])],
            np.zeros((4, ), dtype=np.float64))
        assert_array_almost_equal(np.sqrt([
            X_scaled.data[X_scaled.indptr[i]:X_scaled.indptr[i + 1]].var()
            for i in range(X_scaled.shape[1])]),
            np.ones((4, ), dtype=np.float64))
        # Because we change the sparse format to csc, we cannot assert that
        # the matrix did not change!
        # self.assertTrue(X_scaled is X)
        # Check that the matrix is still sparse
        self.assertEqual(len(X.indices), 12)
    # TODO add more tests from scikit-learn here:
    # https://github.com/scikit-learn/scikit-learn/blob/0.15.X/sklearn/preprocessing/tests/test_data.py
    def test_standard_scaler_sparse_boston_data(self):
        """Fit/transform on the sparse boston dataset: every dimension is
        standardized and sparsity (number of stored values) is preserved."""
        X_train, Y_train, X_test, Y_test = get_dataset('boston',
                                                       make_sparse=True)
        num_data_points = len(X_train.data)
        scaler = StandardScaler()
        scaler.fit(X_train, Y_train)
        tr = scaler.transform(X_train)
        # Test this for every single dimension!
        means = np.array([tr.data[tr.indptr[i]:tr.indptr[i + 1]].mean()
                          for i in range(13)])
        vars = np.array([tr.data[tr.indptr[i]:tr.indptr[i + 1]].var()
                         for i in range(13)])
        # Dimension 3 is skipped: it is constant in this encoding (see below).
        for i in chain(range(1, 3), range(4, 13)):
            self.assertAlmostEqual(means[i], 0, 2)
            self.assertAlmostEqual(vars[i], 1, 2)
        self.assertAlmostEqual(means[3], 1)
        self.assertAlmostEqual(vars[3], 0)
        # Test that the matrix is still sparse
        self.assertTrue(scipy.sparse.issparse(tr))
        self.assertEqual(num_data_points, len(tr.data))
|
{
"content_hash": "7a608706c4ec59e9c50f0ba52a96f0cd",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 103,
"avg_line_length": 41.54430379746835,
"alnum_prop": 0.5825716026812919,
"repo_name": "automl/paramsklearn",
"id": "09f9d7fbd74ba398987ce60fb5cb29580c509441",
"size": "6564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/implementations/test_standard_scaler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6722"
},
{
"name": "Makefile",
"bytes": "6796"
},
{
"name": "Python",
"bytes": "560048"
}
],
"symlink_target": ""
}
|
"""Unit test for sysinfo.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import os
import sys
import unittest
import mock
import six
import treadmill
import treadmill.appenv
from treadmill import sysinfo
@unittest.skipUnless(sys.platform.startswith('linux'), 'Requires Linux')
class LinuxSysinfoTest(unittest.TestCase):
    """treadmill.sysinfo test."""
    # Canned /proc/meminfo content, used as the fixture for test_mem_info.
    PROC_MEMINFO = """
MemTotal: 7992596 kB
MemFree: 3572940 kB
Buffers: 202564 kB
Cached: 2371108 kB
SwapCached: 0 kB
Active: 2959388 kB
Inactive: 868476 kB
HighTotal: 0 kB
HighFree: 0 kB
LowTotal: 7992596 kB
LowFree: 3572940 kB
SwapTotal: 4064436 kB
SwapFree: 4064436 kB
Dirty: 240 kB
Writeback: 0 kB
AnonPages: 1254148 kB
Mapped: 104244 kB
Slab: 500152 kB
PageTables: 17180 kB
NFS_Unstable: 0 kB
Bounce: 0 kB
CommitLimit: 11257772 kB
Committed_AS: 2268028 kB
VmallocTotal: 34359738367 kB
VmallocUsed: 335508 kB
VmallocChunk: 34359375019 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
Hugepagesize: 2048 kB
"""
    # Canned /proc/cpuinfo content for a 4-core machine; the per-core
    # bogomips values are summed by test_bogomips.
    CPUINFO = """
processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 58
model name : Intel(R) Core(TM) i5-3470 CPU @ 3.20GHz
stepping : 9
cpu MHz : 1600.000
cache size : 6144 KB
physical id : 0
siblings : 4
core id : 0
cpu cores : 4
apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse
bogomips : 6385.66
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management: [8]
processor : 1
vendor_id : GenuineIntel
cpu family : 6
model : 58
model name : Intel(R) Core(TM) i5-3470 CPU @ 3.20GHz
stepping : 9
cpu MHz : 1600.000
cache size : 6144 KB
physical id : 0
siblings : 4
core id : 1
cpu cores : 4
apicid : 2
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse
bogomips : 6384.64
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management: [8]
processor : 2
vendor_id : GenuineIntel
cpu family : 6
model : 58
model name : Intel(R) Core(TM) i5-3470 CPU @ 3.20GHz
stepping : 9
cpu MHz : 1600.000
cache size : 6144 KB
physical id : 0
siblings : 4
core id : 2
cpu cores : 4
apicid : 4
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse
bogomips : 6385.26
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management: [8]
processor : 3
vendor_id : GenuineIntel
cpu family : 6
model : 58
model name : Intel(R) Core(TM) i5-3470 CPU @ 3.20GHz
stepping : 9
cpu MHz : 1600.000
cache size : 6144 KB
physical id : 0
siblings : 4
core id : 3
cpu cores : 4
apicid : 6
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse
bogomips : 6384.10
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management: [8]
"""
    def test_proc_info(self):
        """Proc info test."""
        proc_info = sysinfo.proc_info(os.getpid())
        self.assertEqual(os.getppid(), proc_info.ppid)
        # We do not check the starttime, but just verify that calling
        # proc_info twice returns same starttime, which can be used as part of
        # process signature.
        self.assertEqual(
            proc_info.starttime,
            sysinfo.proc_info(os.getpid()).starttime
        )
    @mock.patch('io.open', mock.mock_open(read_data=PROC_MEMINFO.strip()))
    def test_mem_info(self):
        """Mock test for mem info."""
        meminfo = sysinfo.mem_info()
        # Expected value is the MemTotal line of the PROC_MEMINFO fixture.
        self.assertEqual(7992596, meminfo.total)
    @mock.patch('os.statvfs', mock.Mock())
    def test_disk_usage(self):
        """Mock test for disk usage."""
        # f_frsize=4 * f_blocks=100 -> total 400; f_frsize=4 * f_bavail=20 -> free 80.
        os.statvfs.return_value = collections.namedtuple(
            'statvfs',
            'f_blocks f_bavail, f_frsize')(100, 20, 4)
        du = sysinfo.disk_usage('/var/tmp')
        os.statvfs.assert_called_with('/var/tmp')
        self.assertEqual(400, du.total)
        self.assertEqual(80, du.free)
    @mock.patch('treadmill.cgutils.get_cpuset_cores',
                mock.Mock(return_value=six.moves.range(0, 4)))
    @mock.patch('io.open', mock.mock_open(read_data=CPUINFO.strip()))
    def test_bogomips(self):
        """Mock test for mem info."""
        bogomips = sysinfo.total_bogomips()
        # bogomips : 6385.66
        # bogomips : 6384.64
        # bogomips : 6385.26
        # bogomips : 6384.10
        # -------------------
        # total : 25539.659999999996
        self.assertEqual(25539, bogomips)
    @mock.patch('time.time', mock.Mock(return_value=50))
    @mock.patch('treadmill.cgroups.get_value',
                mock.Mock(return_value=42 * 1024**2))
    @mock.patch('treadmill.cgutils.get_cpu_shares',
                mock.Mock(return_value=2))
    @mock.patch('treadmill.sysinfo.BMIPS_PER_CPU', 1)
    @mock.patch('psutil.boot_time', mock.Mock(return_value=8))
    def test_node_info(self):
        """Test node information report generation.
        """
        # Access protected members
        # pylint: disable=W0212
        mock_tm_env = mock.Mock(
            spec_set=treadmill.appenv.AppEnvironment,
            svc_cgroup=mock.Mock(
                spec_set=treadmill.services._base_service.ResourceService,
            ),
            svc_localdisk=mock.Mock(
                spec_set=treadmill.services._base_service.ResourceService,
            ),
            svc_network=mock.Mock(
                spec_set=treadmill.services._base_service.ResourceService,
            ),
        )
        mock_tm_env.svc_localdisk.status.return_value = {
            'size': 100 * 1024**2,
        }
        res = sysinfo.node_info(mock_tm_env, 'linux', 'treadmill')
        # Both resource services must be queried with a 30s timeout.
        mock_tm_env.svc_localdisk.status.assert_called_with(timeout=30)
        mock_tm_env.svc_cgroup.status.assert_called_with(timeout=30)
        self.assertEqual(
            res,
            {
                'cpu': '200%',  # 100% of 2 cores is available
                'memory': '42M',  # As read from cgroup
                'disk': '100M',  # As returned by localdisk service
                'up_since': 8,
                'network': mock_tm_env.svc_network.status.return_value,
                'localdisk': mock_tm_env.svc_localdisk.status.return_value,
            }
        )
    @mock.patch('socket.gethostname', mock.Mock(return_value='foo'))
    @mock.patch('socket.getaddrinfo', mock.Mock(return_value=[
        (None, None, None, 'foo.bar', None)
    ]))
    def test_hostname(self):
        """Test getting hostname of the server.
        """
        # getaddrinfo's canonical name takes precedence over gethostname.
        self.assertEqual(
            sysinfo.hostname(),
            'foo.bar'
        )
    @mock.patch.dict('os.environ', {'TREADMILL_HOSTNAME': 'foo.bar'})
    @mock.patch('socket.gethostname', mock.Mock())
    @mock.patch('socket.getaddrinfo', mock.Mock())
    def test_hostname_env(self):
        """Test getting hostname of the server (TREADMILL_HOSTNAME is set).
        """
        # The environment variable overrides any socket lookup.
        self.assertEqual(
            sysinfo.hostname(),
            'foo.bar'
        )
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "c47e0094c9308a7570ceb9bc4b15af22",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 78,
"avg_line_length": 27.697080291970803,
"alnum_prop": 0.594808275135064,
"repo_name": "Morgan-Stanley/treadmill",
"id": "ee36a1cfab0691349add378ad309e671cb4dfd41",
"size": "7589",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python/treadmill/tests/sysinfo_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3750"
},
{
"name": "Python",
"bytes": "3372983"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "51646"
}
],
"symlink_target": ""
}
|
import os
from world import world
from bigml.api import HTTP_OK
#@step(r'I get the evaluation "(.*)"')
def i_get_the_evaluation(step, evaluation):
    """Fetch the evaluation resource and stash it on the shared ``world``.

    Fails the step when the API does not answer with HTTP OK.
    """
    response = world.api.get_evaluation(evaluation)
    world.status = response['code']
    assert world.status == HTTP_OK
    world.evaluation = response['object']
|
{
"content_hash": "b4987c9a640aec8dea2cb644ffd2aa68",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 51,
"avg_line_length": 28.545454545454547,
"alnum_prop": 0.7070063694267515,
"repo_name": "xaowoodenfish/python-1",
"id": "36abb890763274a898264a2405f44a72a0476976",
"size": "937",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "bigml/tests/read_evaluation_steps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "691861"
}
],
"symlink_target": ""
}
|
# Titanic gender/class/fare-bracket survival model (Python 2 script):
# builds a survival lookup table from train.csv and applies it to test.csv.
import csv as csv
import numpy as np
#Open up the csv file in to a Python object
csv_file_object = csv.reader(open('data/train.csv', 'rb'))
header = csv_file_object.next()  # next() skips the first (header) line
data=[]                          # accumulates the raw CSV rows
for row in csv_file_object:      # Run through each row in the csv file
    data.append(row)             # adding each row to the data variable
data = np.array(data)            # convert from a list to an array;
                                 # every item is still a string at this point
# np.size counts the elements; np.sum adds them up.
number_passengers = np.size(data[0::,0].astype(np.float))
number_survived = np.sum(data[0::,1].astype(np.float))
print "number of passengers : " + str(number_passengers)
print "number of survived : " + str(number_survived)
proportion_survivors = number_survived / number_passengers
print "survived % : " + str(proportion_survivors)
fare_ceiling = 40
# Cap fares at the ceiling so they fall into the top bracket.
# NOTE(review): this reads column 9 but writes column 8 -- verify both
# indices against the train.csv layout (looks like one of them is off).
data[data[0::,9].astype(np.float) >= fare_ceiling, 8] = fare_ceiling-1.0
fare_bracket_size = 10
number_of_price_brackets = fare_ceiling / fare_bracket_size
number_of_classes = 3  # There were 1st, 2nd and 3rd classes on board
# Define the survival table: [gender, passenger class, fare bracket]
survival_table = np.zeros((2, number_of_classes, number_of_price_brackets))
for i in xrange(number_of_classes):             # search through each class
    for j in xrange(number_of_price_brackets):  # search through each price bracket
        women_only_stats = data[
                            (data[0::,4] == "female")
                            &(data[0::,2].astype(np.float)
                              == i+1)
                            &(data[0:,9].astype(np.float)
                              >= j*fare_bracket_size)
                            &(data[0:,9].astype(np.float)
                              < (j+1)*fare_bracket_size)
                            , 1]  # survival flag is in column 1
        men_only_stats = data[
                            (data[0::,4] != "female")
                            &(data[0::,2].astype(np.float)
                              == i+1)
                            &(data[0:,9].astype(np.float)
                              >= j*fare_bracket_size)
                            &(data[0:,9].astype(np.float)
                              < (j+1)*fare_bracket_size)
                            , 1]
        survival_table[0,i,j] = np.mean(women_only_stats.astype(np.float))  # Women stats
        survival_table[1,i,j] = np.mean(men_only_stats.astype(np.float))    # Men stats
# NaN != NaN, so this replaces empty-bin NaNs with 0.
survival_table[ survival_table != survival_table ] = 0
# Threshold the survival rates into a binary prediction.
survival_table[ survival_table < 0.5 ] = 0
survival_table[ survival_table >= 0.5 ] = 1
test_file_obect = csv.reader(open('data/test.csv', 'rb'))
fname = "data/genderclasspricebasedmodelpy.csv"
open_file_object = csv.writer(open(fname, "wb"))
header = test_file_obect.next()
# NOTE(review): if a passenger's fare is negative, no branch below assigns
# bin_fare and the value from the previous passenger leaks through.
for row in test_file_obect:                     # loop through each passenger
                                                # in the test set
    for j in xrange(number_of_price_brackets):  # for each passenger, loop
                                                # through each price bin
        try:                                    # Some passengers have no
                                                # price data so try to make
            row[8] = float(row[8])              # a float
        except:                                 # If fails: no data, so
            bin_fare = 3-float(row[1])          # bin the fare according class
            break                               # Break from the bin loop
        if row[8] > fare_ceiling:               # If there is data see if
                                                # it is greater than fare
                                                # ceiling we set earlier
            bin_fare = number_of_price_brackets-1  # If so set to highest bin
            break                               # And then break bin loop
        if row[8] >= j*fare_bracket_size and row[8] < (j+1)*fare_bracket_size:
                                                # If passed these tests
                                                # then loop through each
                                                # bin
            bin_fare = j                        # then assign index
            break
    # NOTE(review): leftover debug output -- spams one line per passenger.
    print bin_fare
    if row[3] == 'female':                      # If the passenger is female
        row.insert(0,                           # at element 0, insert
                   int(survival_table[0,float(row[1])-1,  # the prediction from
                       bin_fare]))              # the survival table
        open_file_object.writerow(row)          # And write out row
    else:
        row.insert(0,
                   int(survival_table[1,float(row[1])-1,
                       bin_fare]))
        open_file_object.writerow(row)
{
"content_hash": "7c260c2ea9d4179e17f5a8125301f9e4",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 106,
"avg_line_length": 53.38383838383838,
"alnum_prop": 0.47473982970671713,
"repo_name": "saran87/machine-learning",
"id": "0c86f43e83ce0dcdaa0283821aebdc145bc4223d",
"size": "5563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Titanic/predict_class.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "14137"
},
{
"name": "Python",
"bytes": "26958"
},
{
"name": "Shell",
"bytes": "5646"
}
],
"symlink_target": ""
}
|
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
import timezone_field.fields
# Auto-generated Django migration: initial schema for the conferences app.
# Creates lookup tables (AudienceLevel, Topic, Duration), the Conference
# model, generic Deadline windows and TicketFare rows tied to a Conference.
# Do not hand-edit field definitions -- regenerate with makemigrations.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='AudienceLevel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, unique=True)),
            ],
            options={
                'verbose_name': 'Audience Level',
                'verbose_name_plural': 'Audience Levels',
            },
        ),
        migrations.CreateModel(
            name='Conference',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('start', models.DateTimeField(blank=True, null=True, verbose_name='start')),
                ('end', models.DateTimeField(blank=True, null=True, verbose_name='end')),
                ('name', models.CharField(max_length=100, verbose_name='name')),
                ('code', models.CharField(max_length=10, unique=True, verbose_name='code')),
                ('timezone', timezone_field.fields.TimeZoneField()),
            ],
            options={
                'verbose_name': 'Conference',
                'verbose_name_plural': 'Conferences',
            },
        ),
        migrations.CreateModel(
            name='Deadline',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start', models.DateTimeField(blank=True, null=True, verbose_name='start')),
                ('end', models.DateTimeField(blank=True, null=True, verbose_name='end')),
                ('name', models.CharField(blank=True, default='', max_length=100, verbose_name='name')),
                ('type', models.CharField(choices=[('cfp', 'Call for proposal'), ('voting', 'Voting'), ('refund', 'Ticket refund'), ('custom', 'Custom deadline')], max_length=10, verbose_name='type')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Duration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='name')),
                ('duration', models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(1)], verbose_name='duration')),
                ('notes', models.TextField(blank=True, verbose_name='notes')),
            ],
            options={
                'verbose_name': 'Duration',
                'verbose_name_plural': 'Durations',
            },
        ),
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, unique=True)),
            ],
            options={
                'verbose_name': 'Topic',
                'verbose_name_plural': 'Topics',
            },
        ),
        migrations.CreateModel(
            name='TicketFare',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('start', models.DateTimeField(blank=True, null=True, verbose_name='start')),
                ('end', models.DateTimeField(blank=True, null=True, verbose_name='end')),
                ('code', models.CharField(max_length=10, verbose_name='code')),
                ('name', models.CharField(max_length=100, verbose_name='name')),
                ('description', models.TextField(verbose_name='description')),
                ('price', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='price')),
                ('conference', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ticket_fares', to='conferences.Conference', verbose_name='conference')),
            ],
            options={
                'verbose_name': 'Ticket Fare',
                'verbose_name_plural': 'Ticket fares',
            },
        ),
    ]
|
{
"content_hash": "cc84e88c15f6b555aae63347cd4c2c1e",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 201,
"avg_line_length": 50.01980198019802,
"alnum_prop": 0.5671021377672208,
"repo_name": "patrick91/pycon",
"id": "fb7882f4b9dfe485d3463f3fbbad6efdf59f4cd4",
"size": "5101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/conferences/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1456"
},
{
"name": "Python",
"bytes": "13911"
}
],
"symlink_target": ""
}
|
import os
import re
import socket
import sys
import time
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django_extensions.management.utils import setup_logger, RedirectHandler
from django_extensions.management.technical_response import null_technical_500_response
# Detect, once at import time, which static-files handler is available:
# django.contrib.staticfiles takes precedence over the standalone
# `staticfiles` app; if neither is installed (or imports fail), static
# serving is disabled.
try:
    if 'django.contrib.staticfiles' in settings.INSTALLED_APPS:
        from django.contrib.staticfiles.handlers import StaticFilesHandler
        USE_STATICFILES = True
    elif 'staticfiles' in settings.INSTALLED_APPS:
        from staticfiles.handlers import StaticFilesHandler  # noqa
        USE_STATICFILES = True
    else:
        USE_STATICFILES = False
except ImportError:
    USE_STATICFILES = False
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
DEFAULT_PORT = "8000"
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """runserver_plus management command: serves the project with Werkzeug's
    development server (interactive debugger, optional SSL/threading)."""
    # Base runserver-like options; static-file flags are appended below
    # only when a StaticFilesHandler implementation was importable.
    option_list = BaseCommand.option_list + (
        make_option('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
                    help='Tells Django to use a IPv6 address.'),
        make_option('--noreload', action='store_false', dest='use_reloader', default=True,
                    help='Tells Django to NOT use the auto-reloader.'),
        make_option('--browser', action='store_true', dest='open_browser',
                    help='Tells Django to open a browser.'),
        make_option('--adminmedia', dest='admin_media_path', default='',
                    help='Specifies the directory from which to serve admin media.'),
        make_option('--threaded', action='store_true', dest='threaded',
                    help='Run in multithreaded mode.'),
        make_option('--output', dest='output_file', default=None,
                    help='Specifies an output file to send a copy of all messages (not flushed immediately).'),
        make_option('--print-sql', action='store_true', default=False,
                    help="Print SQL queries as they're executed"),
        make_option('--cert', dest='cert_path', action="store", type="string",
                    help='To use SSL, specify certificate path.'),
    )
    if USE_STATICFILES:
        option_list += (
            make_option('--nostatic', action="store_false", dest='use_static_handler', default=True,
                        help='Tells Django to NOT automatically serve static files at STATIC_URL.'),
            make_option('--insecure', action="store_true", dest='insecure_serving', default=False,
                        help='Allows serving static files even if DEBUG is False.'),
        )
    help = "Starts a lightweight Web server for development."
    args = '[optional port number, or ipaddr:port]'
    # Validation is called explicitly each time the server is reloaded.
    requires_model_validation = False
    def handle(self, addrport='', *args, **options):
        """Parse the address/port, configure logging and SQL echoing, then
        start Werkzeug's run_simple server via the inner_run closure."""
        import django
        setup_logger(logger, self.stderr, filename=options.get('output_file', None))  # , fmt="[%(name)s] %(message)s")
        logredirect = RedirectHandler(__name__)
        # Redirect werkzeug log items
        werklogger = logging.getLogger('werkzeug')
        werklogger.setLevel(logging.INFO)
        werklogger.addHandler(logredirect)
        werklogger.propagate = False
        if options.get("print_sql", False):
            try:
                # Django 1.7 onwards
                from django.db.backends import utils
            except ImportError:
                # Django 1.6 below
                from django.db.backends import util as utils
            try:
                import sqlparse
            except ImportError:
                sqlparse = None  # noqa
            # Wraps the debug cursor so every executed statement is logged
            # (pretty-printed when sqlparse is installed) with its timing.
            class PrintQueryWrapper(utils.CursorDebugWrapper):
                def execute(self, sql, params=()):
                    starttime = time.time()
                    try:
                        return self.cursor.execute(sql, params)
                    finally:
                        raw_sql = self.db.ops.last_executed_query(self.cursor, sql, params)
                        execution_time = time.time() - starttime
                        therest = ' -- [Execution time: %.6fs] [Database: %s]' % (execution_time, self.db.alias)
                        if sqlparse:
                            logger.info(sqlparse.format(raw_sql, reindent=True) + therest)
                        else:
                            logger.info(raw_sql + therest)
            utils.CursorDebugWrapper = PrintQueryWrapper
        # AdminMediaHandler was removed in newer Django versions.
        try:
            from django.core.servers.basehttp import AdminMediaHandler
            USE_ADMINMEDIAHANDLER = True
        except ImportError:
            USE_ADMINMEDIAHANDLER = False
        try:
            from django.core.servers.basehttp import get_internal_wsgi_application as WSGIHandler
        except ImportError:
            from django.core.handlers.wsgi import WSGIHandler  # noqa
        try:
            from werkzeug import run_simple, DebuggedApplication
            # Set colored output
            if settings.DEBUG:
                try:
                    set_werkzeug_log_color()
                # NOTE(review): bare except is deliberate best-effort here,
                # but it also swallows KeyboardInterrupt/SystemExit.
                except:  # We are dealing with some internals, anything could go wrong
                    print("Wrapping internal werkzeug logger for color highlighting has failed!")
                    pass
        except ImportError:
            raise CommandError("Werkzeug is required to use runserver_plus. Please visit http://werkzeug.pocoo.org/ or install via pip. (pip install Werkzeug)")
        # usurp django's handler
        from django.views import debug
        debug.technical_500_response = null_technical_500_response
        self.use_ipv6 = options.get('use_ipv6')
        if self.use_ipv6 and not socket.has_ipv6:
            raise CommandError('Your Python does not support IPv6.')
        self._raw_ipv6 = False
        # Address resolution order: CLI argument, then the
        # RUNSERVERPLUS_SERVER_ADDRESS_PORT setting, then the defaults.
        if not addrport:
            try:
                addrport = settings.RUNSERVERPLUS_SERVER_ADDRESS_PORT
            except AttributeError:
                pass
        if not addrport:
            self.addr = ''
            self.port = DEFAULT_PORT
        else:
            m = re.match(naiveip_re, addrport)
            if m is None:
                raise CommandError('"%s" is not a valid port number '
                                   'or address:port pair.' % addrport)
            self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
            if not self.port.isdigit():
                raise CommandError("%r is not a valid port number." %
                                   self.port)
            if self.addr:
                if _ipv6:
                    # Strip the surrounding brackets from the IPv6 literal.
                    self.addr = self.addr[1:-1]
                    self.use_ipv6 = True
                    self._raw_ipv6 = True
                elif self.use_ipv6 and not _fqdn:
                    raise CommandError('"%s" is not a valid IPv6 address.'
                                       % self.addr)
        if not self.addr:
            self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
        threaded = options.get('threaded', False)
        use_reloader = options.get('use_reloader', True)
        open_browser = options.get('open_browser', False)
        cert_path = options.get("cert_path")
        quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
        bind_url = "http://%s:%s/" % (
            self.addr if not self._raw_ipv6 else '[%s]' % self.addr, self.port)
        def inner_run():
            # Validate models, build the WSGI handler chain (admin media /
            # static files), optionally open a browser and set up SSL, then
            # hand off to Werkzeug's run_simple.
            print("Validating models...")
            self.validate(display_num_errors=True)
            print("\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE))
            print("Development server is running at %s" % (bind_url,))
            print("Using the Werkzeug debugger (http://werkzeug.pocoo.org/)")
            print("Quit the server with %s." % quit_command)
            path = options.get('admin_media_path', '')
            if not path:
                admin_media_path = os.path.join(django.__path__[0], 'contrib/admin/static/admin')
                if os.path.isdir(admin_media_path):
                    path = admin_media_path
                else:
                    path = os.path.join(django.__path__[0], 'contrib/admin/media')
            handler = WSGIHandler()
            if USE_ADMINMEDIAHANDLER:
                handler = AdminMediaHandler(handler, path)
            if USE_STATICFILES:
                use_static_handler = options.get('use_static_handler', True)
                insecure_serving = options.get('insecure_serving', False)
                if use_static_handler and (settings.DEBUG or insecure_serving):
                    handler = StaticFilesHandler(handler)
            if open_browser:
                import webbrowser
                webbrowser.open(bind_url)
            if cert_path:
                """
                OpenSSL is needed for SSL support.
                This will make flakes8 throw warning since OpenSSL is not used
                directly, alas, this is the only way to show meaningful error
                messages. See:
                http://lucumr.pocoo.org/2011/9/21/python-import-blackbox/
                for more information on python imports.
                """
                try:
                    import OpenSSL  # NOQA
                except ImportError:
                    raise CommandError("Python OpenSSL Library is "
                                       "required to use runserver_plus with ssl support. "
                                       "Install via pip (pip install pyOpenSSL).")
                dir_path, cert_file = os.path.split(cert_path)
                if not dir_path:
                    dir_path = os.getcwd()
                root, ext = os.path.splitext(cert_file)
                certfile = os.path.join(dir_path, root + ".crt")
                keyfile = os.path.join(dir_path, root + ".key")
                try:
                    from werkzeug.serving import make_ssl_devcert
                    if os.path.exists(certfile) and \
                            os.path.exists(keyfile):
                        ssl_context = (certfile, keyfile)
                    else:  # Create cert, key files ourselves.
                        ssl_context = make_ssl_devcert(
                            os.path.join(dir_path, root), host='localhost')
                except ImportError:
                    print("Werkzeug version is less than 0.9, trying adhoc certificate.")
                    ssl_context = "adhoc"
            else:
                ssl_context = None
            run_simple(
                self.addr,
                int(self.port),
                DebuggedApplication(handler, True),
                use_reloader=use_reloader,
                use_debugger=True,
                threaded=threaded,
                ssl_context=ssl_context
            )
        inner_run()
def set_werkzeug_log_color():
    """Try to set color to the werkzeug log.

    Monkey-patches ``WSGIRequestHandler.log`` so request lines are colorized
    with Django's terminal color styles according to their HTTP status code.
    Any failure while building the message falls back to werkzeug's original
    log implementation.
    """
    from django.core.management.color import color_style
    from werkzeug.serving import WSGIRequestHandler
    from werkzeug._internal import _log
    _style = color_style()
    _orig_log = WSGIRequestHandler.log
    def werk_log(self, type, message, *args):
        try:
            msg = '%s - - [%s] %s' % (
                self.address_string(),
                self.log_date_time_string(),
                message % args,
            )
            http_code = str(args[1])
        except Exception:
            # Not a standard request line; delegate to the original handler.
            # BUG FIX: the original called _orig_log(type, message, *args)
            # without `self`, so the log type was passed where the handler
            # instance was expected and the fallback raised/logged garbage.
            return _orig_log(self, type, message, *args)
        # Utilize terminal colors, if available
        if http_code[0] == '2':
            # Put 2XX first, since it should be the common case
            msg = _style.HTTP_SUCCESS(msg)
        elif http_code[0] == '1':
            msg = _style.HTTP_INFO(msg)
        elif http_code == '304':
            msg = _style.HTTP_NOT_MODIFIED(msg)
        elif http_code[0] == '3':
            msg = _style.HTTP_REDIRECT(msg)
        elif http_code == '404':
            msg = _style.HTTP_NOT_FOUND(msg)
        elif http_code[0] == '4':
            msg = _style.HTTP_BAD_REQUEST(msg)
        else:
            # Any 5XX, or any other response
            msg = _style.HTTP_SERVER_ERROR(msg)
        _log(type, msg)
    WSGIRequestHandler.log = werk_log
|
{
"content_hash": "cd44c0312acd50eb9626ba182f8445c7",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 161,
"avg_line_length": 41.91638795986622,
"alnum_prop": 0.5466368786403893,
"repo_name": "WillisXChen/django-oscar",
"id": "b00bd95e729a77186a0147ddbcf96a20f8d81dfa",
"size": "12533",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "oscar/lib/python2.7/site-packages/django_extensions/management/commands/runserver_plus.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "78"
},
{
"name": "C",
"bytes": "5979"
},
{
"name": "C++",
"bytes": "572"
},
{
"name": "CSS",
"bytes": "694578"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "Groff",
"bytes": "21346"
},
{
"name": "HTML",
"bytes": "708061"
},
{
"name": "JavaScript",
"bytes": "1433937"
},
{
"name": "Jupyter Notebook",
"bytes": "189877"
},
{
"name": "Makefile",
"bytes": "6656"
},
{
"name": "Python",
"bytes": "47548581"
},
{
"name": "Shell",
"bytes": "6790"
},
{
"name": "Smarty",
"bytes": "21023"
},
{
"name": "TeX",
"bytes": "56837"
},
{
"name": "XSLT",
"bytes": "24882"
}
],
"symlink_target": ""
}
|
from collections import Sequence, Iterator
class PagedIterator(Iterator):
    """Iterator over a PagedList (or any sized, indexable parent).

    The parent's length is sampled once at construction; iteration stops
    when that length is reached or when indexing raises IndexError.
    """
    def __init__(self, parent):
        self._parent = parent
        self._index = -1
        self._len = len(parent)
    def __iter__(self):
        return self
    def next(self):
        self._index += 1
        if self._index == self._len:
            raise StopIteration
        try:
            return self._parent[self._index]
        except IndexError:
            # Parent shrank (or lied about its length): stop cleanly.
            raise StopIteration
    # Python 3 spells the iterator protocol __next__; aliasing keeps the
    # original Python 2 `.next()` API while satisfying the Iterator ABC.
    __next__ = next
class UnpagedData(object):
    """Placeholder standing in for a record that has not been fetched yet."""
    def copy(self):
        # Fresh instance of whatever (sub)class this is.
        return type(self)()
    def __mul__(self, count):
        # `placeholder * n` yields n independent placeholders lazily.
        return (self.copy() for _ in range(count))
    # `n * placeholder` behaves identically.
    __rmul__ = __mul__
class PagedList(Sequence):
    """
    List-like object, with support for automatically grabbing
    additional pages from a data source.

    Subclasses implement _getpage(page) to return/yield the items of the
    1-based page `page`, and may set self._len to report the total size
    before all pages have been fetched.
    """
    _iter_class = None
    def __iter__(self):
        # Build (once per instance) a dedicated PagedIterator subclass so
        # each PagedList subclass iterates via a distinctly named type.
        if self._iter_class is None:
            self._iter_class = type(self.__class__.__name__ + 'Iterator',
                                    (PagedIterator,), {})
        return self._iter_class(self)
    def __len__(self):
        try:
            # _len may be set by a subclass before all data is fetched.
            return self._len
        except AttributeError:
            # Bug fix: was a bare `except:`, which swallowed everything.
            return len(self._data)
    def __init__(self, iterable, pagesize=20):
        self._data = list(iterable)
        self._pagesize = pagesize
    def __getitem__(self, index):
        if isinstance(index, slice):
            return [self[x] for x in range(*index.indices(len(self)))]
        if index >= len(self):
            raise IndexError("list index outside range")
        if (index >= len(self._data)) \
                or isinstance(self._data[index], UnpagedData):
            # Floor division: pages are 1-based blocks of _pagesize items.
            # (The original `/` relied on Python 2 integer division.)
            self._populatepage(index // self._pagesize + 1)
        return self._data[index]
    def __setitem__(self, index, value):
        raise NotImplementedError
    def __delitem__(self, index):
        raise NotImplementedError
    def __contains__(self, item):
        raise NotImplementedError
    def _populatepage(self, page):
        pagestart = (page - 1) * self._pagesize
        # Pad with placeholders if earlier pages were skipped entirely.
        if len(self._data) < pagestart:
            self._data.extend(UnpagedData() * (pagestart - len(self._data)))
        if len(self._data) == pagestart:
            # Appending a brand-new page at the end.
            self._data.extend(self._getpage(page))
        else:
            # Overwriting placeholder entries in place.
            for data in self._getpage(page):
                self._data[pagestart] = data
                pagestart += 1
    def _getpage(self, page):
        raise NotImplementedError("PagedList._getpage() must be provided " +
                                  "by subclass")
class PagedRequest(PagedList):
    """
    PagedList specialization backed by a paged search request: each page
    is fetched lazily by re-issuing the request with a new page number.
    """
    def __init__(self, request, handler=None):
        self._request = request
        if handler:
            self._handler = handler
        # Fetch page one eagerly; the base class materializes it, which
        # also lets _getpage record the total result count in self._len.
        super(PagedRequest, self).__init__(self._getpage(1), 20)
    def _getpage(self, page):
        response = self._request.new(page=page).readJSON()
        self._len = response['total_results']
        for entry in response['results']:
            yield None if entry is None else self._handler(entry)
|
{
"content_hash": "0af5d9a423229785ef6c616f884b8ed0",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 76,
"avg_line_length": 29.17117117117117,
"alnum_prop": 0.5546633724521309,
"repo_name": "naveenvhegde/pytmdb3",
"id": "3d99010063facfce4df778fd6e0c68be60d973de",
"size": "3453",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tmdb3/pager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "111996"
}
],
"symlink_target": ""
}
|
import sys
import os
import argparse
import inspect
sys.path.append( os.environ['CMSSW_BASE']+"/src/" )
from CMSSW_Utils.Utils_Types.pluginStdVector import * ## For stl containers
from CMSSW_Utils.Utils_Types.CmdParser import * ## For advanced cmd parser class
from CMSSW_Utils.Utils_Functions.utils_string import * ## For additional string parsing
from TstarAnalysis.CombineAnalysis.pluginCombineMgr import * ## For self written C++ objects
class CombineStatus():
    # Ordered progress markers for the interactive session.  CombineCmd
    # compares them with "<", so the integer ordering is meaningful:
    # each stage requires all lower-numbered stages to be complete.
    NONE = 0
    INIT_PLOT = 1
    INIT_SAMPLE = 2
    INIT_CHANNEL = 3
    MADE_BASE_PLOTS = 4
class CombineCmd( CmdParser ):
prompt = 'tstar> '
intro = 'Session for running the tstar combination analysis'
def __init__(self):
self.my_Combine = CombineMgr()
self.recommended_mode = ['Asymptotic']
self.state = CombineStatus.NONE
#-------------------------------------------------------------------------------
# Initialization commands
#-------------------------------------------------------------------------------
def InitFileOpts(self):
initfile_opts = argparse.ArgumentParser( description='Read single initialization file')
initfile_opts.add_argument('mode', type=str, choices = self.CompleteCombineFunc('Init'),
help = "Setting to initialize" )
initfile_opts.add_argument('initfile', type=file,
help='Initialization file to use')
return initfile_opts
def do_Initialize(self,line):
try:
options = self.InitFileOpts().parse_args(line.split())
except:
print "Error parsing arguments!", line
return CmdExecStatus.OPTION_ERROR
if options.mode == 'Samples' and self.state < CombineStatus.INIT_PLOT :
print "Error! Plot has not yet been initialized!"
return CmdExecStatus.EXECUTE_FAIL
elif options.mode == 'Channels' and self.state < CombineStatus.INIT_SAMPLE :
print "Error! Samples has not yet been initialized!"
return CmdExecStatus.EXECUTE_FAIL
attr = getattr( self.my_Combine , "Init" + options.mode );
attr(options.initfile.name)
if options.mode == 'Plots':
self.state = CombineStatus.INIT_PLOT
elif options.mode == 'Samples':
self.state = CombineStatus.INIT_SAMPLE
else:
self.state = CombineStatus.INIT_CHANNEL
print "Done!"
return CmdExecStatus.EXECUTE_SUCESS
def help_Initialize(self):
self.InitFileOpts().print_help()
pass
def complete_Initialize(self,text,line,begidx,endidx):
wordidx = WordPositionAtIndex(line,begidx)
if wordidx == 1 :
return self.CompleteCombineFunc('Init',text)
else:
return self.PathComplete(line,begidx,endidx)
#-------------------------------------------------------------------------------
# Channel initialization commands
#-------------------------------------------------------------------------------
def ChannelInitOpts(self):
channel_init_opts = argparse.ArgumentParser( description = 'Passing single input file to channel for initialization' )
channel_init_opts.add_argument('Mode' , type=str , choices = self.CompleteCombineFunc('SetChannel'),
help = "Initialization mode" )
channel_init_opts.add_argument('Channel' , type=str , choices = self.CompleteChannel(),
help = "Channel to initialize" )
channel_init_opts.add_argument('file' , type=file , help = 'Input file')
return channel_init_opts
def do_SetChannel(self,line):
if self.state < CombineStatus.INIT_CHANNEL:
print "Error! You need to complete initialize the channels first!"
return CmdExecStatus.EXECUTE_SUCESS
try:
options = self.ChannelInitOpts().parse_args( line.split() )
except:
print "Error parsing arguments!", line
return CmdExecStatus.OPTION_ERROR
attr = getattr( self.my_Combine , "SetChannel" + options.Mode )
attr( options.Channel, options.file.name )
return CmdExecStatus.EXECUTE_SUCESS
def help_SetChannel(self):
print "Defining aspects of a channel from a input file"
self.ChannelInitOpts().print_help()
pass
def complete_SetChannel(self,text,line,begidx,endidx):
wordidx = WordPositionAtIndex(line, begidx )
if wordidx == 1 :
return self.CompleteCombineFunc( "SetChannel" , text )
elif wordidx == 2 :
return self.CompleteChannel( text )
else:
return self.PathComplete(line,begidx,endidx)
#-------------------------------------------------------------------------------
# Validation plots making commands
#-------------------------------------------------------------------------------
def do_MakeBasicPlots( self, line ):
if self.state < CombineStatus.INIT_CHANNEL:
print "Error! You need to initialize the channels first!"
return CmdExecStatus.EXECUTE_FAIL
self.my_Combine.MakeBasicPlots()
self.state = CombineStatus.MADE_BASE_PLOTS
return CmdExecStatus.EXECUTE_SUCESS
def DataBgOpts(self):
databg_opts = argparse.ArgumentParser(description = 'Making a single data/MC background comparison plot' )
databg_opts.add_argument( "channel" , type=str, choices = self.my_Combine.AvailableChannels() ,
help='Channel to run' )
databg_opts.add_argument( "plot" , type=str, choices = self.my_Combine.AvailablePlots(),
help='Variable to plot' )
return databg_opts
def do_MakeDataBGPlot( self, line ):
if self.state < CombineStatus.MADE_BASE_PLOTS :
print "Error! You need to run MakeBasicPlots first!"
return CmdExecStatus.EXECUTE_FAIL
try:
options = self.DataBgOpts().parse_args( line.split() )
except:
return CmdExecStatus.OPTION_ERROR
self.my_Combine.MakeDataBGPlot( options.channel , options.plot )
return CmdExecStatus.EXECUTE_SUCESS
def help_MakeDataBGPlot(self):
self.DataBgOpts().print_help()
pass
def complete_MakeDataBGPlot(self,text,line,begidx,endidx):
wordidx = WordPositionAtIndex(line,begidx)
if wordidx == 1 :
return self.CompleteChannel(text)
elif wordidx == 2:
return self.CompletePlot(text)
else:
return []
#-------------------------------------------------------------------------------
# Higgs Combine commands
#-------------------------------------------------------------------------------
def SingleChannelOpts(self):
sigch_opts = argparse.ArgumentParser(description = 'Receives a single channel as argument' )
sigch_opts.add_argument('channel', type=str, choices = self.my_Combine.AvailableChannels() )
return sigch_opts
def do_HC_MakeRequirements(self,line):
if self.state < CombineStatus.MADE_BASE_PLOTS:
print "Error! You need to run MakeBasicPlots first!"
return CmdExecStatus.EXECUTE_FAIL
try:
options = self.SingleChannelOpts().parse_args(line.split())
except:
return CmdExecStatus.OPTION_ERROR
self.my_Combine.HC_MakeRequirements( options.channel )
return CmdExecStatus.EXECUTE_SUCESS
def help_HC_MakeRequirements(self):
self.SingleChannelOpts().print_help()
pass
def complete_HC_MakeRequirements(self,text,line,begidx,endidx):
wordidx = WordPositionAtIndex(line,begidx)
if wordidx == 1 :
return self.CompleteChannel(text)
else:
return []
def RunCombineOpts(self):
runcomb_opts = argparse.ArgumentParser(description = 'Receives channel and higg combine run options' )
runcomb_opts.add_argument('channel',type=str, choices = self.my_Combine.AvailableChannels() ,
help = 'Channel to run with' )
runcomb_opts.add_argument('mode', type=str, choices = self.recommended_mode ,
help = 'Allow modes for running higgs combine package' )
return runcomb_opts
def do_HC_RunCombine(self,line):
if self.state < CombineStatus.INIT_CHANNEL :
print "Error! You need to initialized the channels first!"
return CmdExecStatus.EXECUTE_FAIL
try:
options = self.RunCombineOpts().parse_args( line.split() )
except:
print "Error parsing errors!"
return CmdExecStatus.OPTION_ERROR
self.my_Combine.HC_RunCombine( options.channel , options.mode )
return CmdExecStatus.OPTION_ERROR
def help_HC_RunCombine(self):
self.RunCombineOpts().print_help()
pass
def complete_HC_RunCombine(self,text,line,begidx,endidx):
wordidx = WordPositionAtIndex(line,begidx)
if wordidx == 1 :
return self.CompleteChannel(text)
elif wordidx == 2:
ans = []
for method in self.recommended_mode:
if method.startswith(method):
ans.append(method)
return ans;
else:
return []
def do_HC_PlotLimit(self,line):
if self.state < CombineStatus.INIT_CHANNEL :
print "Error! You need to initialized the channels first!"
return CmdExecStatus.EXECUTE_FAIL
try:
options = self.RunCombineOpts().parse_args( line.split() )
except:
return CmdExecStatus.OPTION_ERROR
self.my_Combine.HC_PlotLimit( options.channel , options.mode )
return CmdExecStatus.OPTION_ERROR
def help_HC_PlotLimit(self):
self.RunCombineOpts().print_help()
pass
def complete_HC_PlotLimit(self,text,line,begidx,endidx):
return self.complete_HC_RunCombine(text,line,begidx,endidx)
#-------------------------------------------------------------------------------
# Listing commands
#-------------------------------------------------------------------------------
def do_ListAvailablePlots(self,line):
for plot in self.my_Combine.AvailablePlots():
print plot
return CmdExecStatus.EXECUTE_SUCESS
def do_ListAvailableSamples(self,line):
for sample in self.my_Combine.AvailableSamples():
print sample
return CmdExecStatus.EXECUTE_SUCESS
def do_ListAvailableChannels(self,line):
for channel in self.my_Combine.AvailableChannels():
print channel
return CmdExecStatus.EXECUTE_SUCESS
#-------------------------------------------------------------------------------
# Completion helper functions
#-------------------------------------------------------------------------------
def CompleteCombineFunc( self, prefix , text='' ):
ans = []
mytext = text.lstrip().rstrip()
for name,stuff in inspect.getmembers( CombineMgr , predicate=inspect.ismethod ):
if name.startswith( prefix+mytext ):
ans.append(name[len(prefix):])
return ans;
def CompleteChannel( self, text='' ):
ans = []
for channel in self.my_Combine.AvailableChannels() :
if channel.startswith(text):
ans.append(channel)
return ans;
def CompletePlot( self, text='' ):
ans = []
for plot in self.my_Combine.AvailablePlots() :
if plot.startswith(text):
ans.append(plot)
return ans;
if __name__ == "__main__":
    # Start the interactive combine-analysis shell.
    CombineCmd().cmdloop()
|
{
"content_hash": "d629a04d109484b015d0e4bce85811c6",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 124,
"avg_line_length": 38.62244897959184,
"alnum_prop": 0.5971818582122413,
"repo_name": "enochnotsocool/TstarAnalysis_in_CMS",
"id": "12a064d904b2e2e8c5517222612d37ffbaeba0ba",
"size": "11671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CombineAnalysis/python/CombineCmd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AGS Script",
"bytes": "6931"
},
{
"name": "C",
"bytes": "479"
},
{
"name": "C++",
"bytes": "198119"
},
{
"name": "Python",
"bytes": "54090"
},
{
"name": "Shell",
"bytes": "22301"
},
{
"name": "TeX",
"bytes": "5287"
}
],
"symlink_target": ""
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
    """Style of choropleth markers while *not* part of a selection."""

    # Location of this node in the trace property tree.
    _parent_path_str = "choropleth.unselected"
    _path_str = "choropleth.unselected.marker"
    _valid_props = {"opacity"}

    @property
    def opacity(self):
        """
        Sets the marker opacity of unselected points, applied only when
        a selection exists.

        The 'opacity' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        int|float
        """
        return self["opacity"]

    @opacity.setter
    def opacity(self, val):
        self["opacity"] = val

    @property
    def _prop_descriptions(self):
        return """\
        opacity
            Sets the marker opacity of unselected points, applied
            only when a selection exists.
        """

    def __init__(self, arg=None, opacity=None, **kwargs):
        """
        Construct a new Marker object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.choropleth.unselected.Marker`
        opacity
            Sets the marker opacity of unselected points, applied
            only when a selection exists.

        Returns
        -------
        Marker
        """
        super(Marker, self).__init__("marker")

        # Internal construction path: adopt the parent and skip the
        # normal validation/population machinery entirely.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize `arg` into a private dict we may mutate below.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.choropleth.unselected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choropleth.unselected.Marker`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Explicit keyword argument wins over the value carried in `arg`.
        opacity_value = arg.pop("opacity", None)
        if opacity is not None:
            opacity_value = opacity
        if opacity_value is not None:
            self["opacity"] = opacity_value

        # Remaining entries of `arg` plus unknown kwargs go through the
        # base class's generic handling.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
|
{
"content_hash": "4631855ffa3ec2c33b5ef22c49a4db8e",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 82,
"avg_line_length": 27.95098039215686,
"alnum_prop": 0.5222728867064188,
"repo_name": "plotly/plotly.py",
"id": "5b1f9b9c4c327df08cc6830198a9017bf845ed82",
"size": "2851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/choropleth/unselected/_marker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from scrapy.spider import Spider
from scrapy.selector import Selector
from my_settings import name_file, test_mode, difference_days
from datetime import datetime, timedelta
print "Run spider NewenglandFilm"
file_output = open(name_file, 'a')
email_current_session = []
email_in_file = open(name_file, 'r').readlines()
if test_mode:
current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%m/%d/%Y')
else:
current_date = datetime.today().strftime('%m/%d/%Y')
class NewenglandFilm(Spider):
name = 'newenglandfilm'
allowed_domains = ["newenglandfilm.com"]
start_urls = ["http://newenglandfilm.com/jobs.htm"]
def parse(self, response):
sel = Selector(response)
for num_div in xrange(1, 31):
date = sel.xpath('//*[@id="mainContent"]/div[{0}]/span/text()'.format(str(num_div))).re('(\d{1,2}\/\d{1,2}\/\d{4})')[0]
email = sel.xpath('//*[@id="mainContent"]/div[{0}]/div/text()'.format(str(num_div))).re('(\w+@[a-zA-Z0-9_]+?\.[a-zA-Z]{2,6})')
if current_date == date:
for address in email:
if address + "\n" not in email_in_file and address not in email_current_session:
file_output.write(address + "\n")
email_current_session.append(address)
print "Spider: NewenglandFilm. Email {0} added to file".format(address)
else:
print "Spider: NewenglandFilm. Email {0} already in the file".format(address)
|
{
"content_hash": "56cfb76e1500b95b398cecb449646b58",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 138,
"avg_line_length": 43.916666666666664,
"alnum_prop": 0.5863377609108159,
"repo_name": "dcondrey/scrapy-spiders",
"id": "773895fc34e701280227c2f7d83dcbb9ec6231fb",
"size": "1625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dist/spiders/newenglandfilm.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44855"
}
],
"symlink_target": ""
}
|
# Copyright 2020 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
"""
An example of configuring SNMP alert for HPE iLO systems
Usage: python <script>.py <ilo_addr> <ilo_user> <ilo_pass>
"""
import sys
import json
from redfish import RedfishClient
from redfish.rest.v1 import ServerDownOrUnreachableError
from ilorest_util import get_resource_directory
from ilorest_util import get_gen
def configure_snmp(_redfishobj, read_communities, snmp_alertdestinations, DISABLE_RESOURCE_DIR):
    """PATCH the SNMP alert destinations of an iLO via HpeiLOSnmpService.

    Parameters
    ----------
    _redfishobj : logged-in Redfish client
    read_communities : SNMP read community strings -- currently accepted
        but unused; only AlertDestinations is sent (see commented body).
    snmp_alertdestinations : list of alert destination addresses to set
    DISABLE_RESOURCE_DIR : when True (or when no resource directory is
        available), walk Managers/ manually to find the SNMP service URI
        instead of consulting the resource directory.
    """
    snmp_service_uri = None
    resource_instances = get_resource_directory(_redfishobj)
    if DISABLE_RESOURCE_DIR or not resource_instances:
        #if we do not have a resource directory or want to force it's non use to find the
        #relevant URI
        managers_uri = _redfishobj.root.obj['Managers']['@odata.id']
        managers_response = _redfishobj.get(managers_uri)
        # Only the first manager member is considered.
        managers_members_uri = next(iter(managers_response.obj['Members']))['@odata.id']
        managers_members_response = _redfishobj.get(managers_members_uri)
        snmp_service_uri = managers_members_response.obj.Oem.Hpe.Links['Snmp']['@odata.id']
    else:
        for instance in resource_instances:
            #Use Resource directory to find the relevant URI
            if '#HpeiLOSnmpService.' in instance['@odata.type']:
                snmp_service_uri = instance['@odata.id']
    if snmp_service_uri:
        #body = {"AlertsEnabled": snmp_alerts, "ReadCommunities": read_communities}
        body = {"AlertDestinations": snmp_alertdestinations}
        resp = _redfishobj.patch(snmp_service_uri, body)
        # If iLO responds with something outside of 200 then check the iLO
        # extended info error message to see what went wrong.
        # NOTE(review): the original comment said "outside of 200 or 201",
        # but the elif below treats 201 as an error -- confirm intent.
        if resp.status == 400:
            try:
                print(json.dumps(resp.obj['error']['@Message.ExtendedInfo'], indent=4, \
                    sort_keys=True))
            except Exception as excp:
                sys.stderr.write("A response error occurred, unable to access iLO Extended "\
                    "Message Info...")
        elif resp.status != 200:
            sys.stderr.write("An http response of \'%s\' was returned.\n" % resp.status)
        else:
            print("Success!\n")
            print(json.dumps(resp.dict, indent=4, sort_keys=True))
def set_snmp_alert_destination(_redfishobj, snmp_service_uri, alert_destination_list):
    """Replace the AlertDestinations list of an iLO SNMP service resource.

    Fetches `snmp_service_uri`, PATCHes the new destination list, and
    prints the outcome (iLO extended info on a 400, the updated list on
    success).

    Raises
    ------
    Exception
        If the resource does not expose an AlertDestinations property.
    """
    data = _redfishobj.get(snmp_service_uri)
    # Bug fix: presence check instead of truthiness -- an existing but
    # *empty* AlertDestinations list is still modifiable; the original
    # falsy check wrongly reported it as unavailable.
    if data.dict.get("AlertDestinations") is not None:
        resp = _redfishobj.patch(snmp_service_uri, {'AlertDestinations': alert_destination_list})
    else:
        raise Exception("\'AlertDestinations\' property is not available/modifyable.\n")
    # If iLO responds with something outside of 200 then check the iLO
    # extended info error message to see what went wrong.
    if resp.status == 400:
        try:
            print(json.dumps(resp.obj['error']['@Message.ExtendedInfo'], indent=4, sort_keys=True))
        except Exception:
            sys.stderr.write("A response error occurred, unable to access iLO Extended " "Message Info...")
    elif resp.status != 200:
        sys.stderr.write("An http response of \'%s\' was returned.\n" % resp.status)
    else:
        print("\nPatch operation successful!\n\nResponse:")
        print(json.dumps(resp.dict, indent=4, sort_keys=True))
        # Re-read the resource to show the list the iLO actually stored.
        snmp_service_response = _redfishobj.get(snmp_service_uri).dict.get('AlertDestinations')
        print("\n\nPrinting updated SNMP alert destination:\n")
        print(json.dumps(snmp_service_response, indent=4, sort_keys=True))
if __name__ == "__main__":
    # Remotely, connect using the secured (https://) address, account
    # name, and password, e.g. "https://10.0.0.100" or
    # "https://ilo.hostname".  (When running locally on the server these
    # could instead be set to None.)
    SYSTEM_URL = sys.argv[1]
    LOGIN_ACCOUNT = sys.argv[2]
    LOGIN_PASSWORD = sys.argv[3]

    # SNMP read communities (unused entries left blank).
    READ_COMMUNITIES = ["public", "", ""]
    # Alert destinations used on the iLO 5 path.
    ALERTS_DESTINATION = ["1.1.1.1", "2.2.2.2"]
    # Force-disable the resource directory lookup (HPE-specific feature).
    DISABLE_RESOURCE_DIR = False

    snmp_service_uri = "/redfish/v1/Managers/1/SNMPService/"
    # iLO 4 path; note iLO4 and iLO5 support different maximum numbers
    # of alert destinations.
    alert_destination_list = ["ILOCN771702NJ", "15.146.46.55", "15.146.46.58"]

    try:
        # Create and log in a Redfish client.
        REDFISHOBJ = RedfishClient(base_url=SYSTEM_URL, username=LOGIN_ACCOUNT,
                                   password=LOGIN_PASSWORD)
        REDFISHOBJ.login()
    except ServerDownOrUnreachableError:
        sys.stderr.write("ERROR: server not reachable or does not support RedFish.\n")
        sys.exit()

    (ilogen, _) = get_gen(REDFISHOBJ)
    print ("Generation is ", ilogen)
    if int(ilogen) == 5:
        configure_snmp(REDFISHOBJ, READ_COMMUNITIES, ALERTS_DESTINATION, DISABLE_RESOURCE_DIR)
    else:
        set_snmp_alert_destination(REDFISHOBJ, snmp_service_uri, alert_destination_list)
    REDFISHOBJ.logout()
|
{
"content_hash": "8ddb6d979fc4440a1f4ff12be2e41015",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 164,
"avg_line_length": 45.80434782608695,
"alnum_prop": 0.646733111849391,
"repo_name": "HewlettPackard/python-ilorest-library",
"id": "1b584257c4f061fad0065603165f6c19b90803e9",
"size": "6321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/Redfish/set_snmp_alert.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "160"
},
{
"name": "Makefile",
"bytes": "13995"
},
{
"name": "PowerShell",
"bytes": "3135"
},
{
"name": "Python",
"bytes": "352505"
},
{
"name": "Shell",
"bytes": "119"
}
],
"symlink_target": ""
}
|
from django.template import loader
class BaseRenderer:
    """Render a list of intermediate embed items into a single text body.

    Subclasses supply the *_TEMPLATE attributes naming the template used
    for each embed type.
    """

    # Maps an intermediate item key to the name of the template attribute
    # that renders it ("text" is handled inline in render_item).
    _TEMPLATE_ATTR_BY_KEY = {
        "betty": "BETTY_TEMPLATE",
        "facebook": "FACEBOOK_TEMPLATE",
        "imgur": "IMGUR_TEMPLATE",
        "instagram": "INSTAGRAM_TEMPLATE",
        "onion_video": "ONION_VIDEO_TEMPLATE",
        "soundcloud": "SOUNDCLOUD_TEMPLATE",
        "twitter": "TWITTER_TEMPLATE",
        "vimeo": "VIMEO_TEMPLATE",
        "youtube": "YOUTUBE_TEMPLATE",
    }

    def generate_body(self, intermediate):
        """Render every item and join the stripped results with newlines."""
        rendered = [
            self.render_item(key, values).strip()
            for item in intermediate
            for key, values in item.items()
        ]
        return '\n'.join(rendered)

    def render_item(self, key, body):
        """Render one item; raw text passes through untouched."""
        if key == "text":
            return body["raw"]
        try:
            template = getattr(self, self._TEMPLATE_ATTR_BY_KEY[key])
        except KeyError:
            raise Exception("Key not implemented")
        return self.render(template, body)

    def render(self, template, body):
        """Render `template` with `body` as its context."""
        return loader.render_to_string(template, body)
class InstantArticleRenderer(BaseRenderer):
    # Facebook Instant Article template for each supported embed type;
    # consumed by BaseRenderer.render_item via these *_TEMPLATE names.
    BETTY_TEMPLATE = "instant_article/embeds/_ia_betty_embed.html"
    FACEBOOK_TEMPLATE = "instant_article/embeds/_ia_facebook_embed.html"
    IMGUR_TEMPLATE = "instant_article/embeds/_ia_imgur_embed.html"
    INSTAGRAM_TEMPLATE = "instant_article/embeds/_ia_instagram_embed.html"
    ONION_VIDEO_TEMPLATE = "instant_article/embeds/_ia_onion_video_embed.html"
    SOUNDCLOUD_TEMPLATE = "instant_article/embeds/_ia_soundcloud_embed.html"
    TWITTER_TEMPLATE = "instant_article/embeds/_ia_twitter_embed.html"
    VIMEO_TEMPLATE = "instant_article/embeds/_ia_vimeo_embed.html"
    YOUTUBE_TEMPLATE = "instant_article/embeds/_ia_youtube_embed.html"
|
{
"content_hash": "a46c4c49f1706c1b8b7344c1d1739942",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 78,
"avg_line_length": 39,
"alnum_prop": 0.630579297245964,
"repo_name": "theonion/django-bulbs",
"id": "27fb11aaa6b6a822b071f67e4173e7b4c0444223",
"size": "2106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bulbs/instant_articles/renderer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36651"
},
{
"name": "HTML",
"bytes": "73968"
},
{
"name": "JavaScript",
"bytes": "57288"
},
{
"name": "Python",
"bytes": "1055540"
},
{
"name": "Ruby",
"bytes": "397"
},
{
"name": "Shell",
"bytes": "1629"
}
],
"symlink_target": ""
}
|
"""The command-line entry point for combining images into a single image."""
from framer import combine_vertical_from_iterable
from PIL import Image
import argparse
import os
def main():
    """Combine images vertically given file names on the command line.

    Parses the CLI arguments, stitches the listed images top-to-bottom,
    saves the result, and optionally deletes the source images.
    """
    parser = argparse.ArgumentParser(description='''Make a vertical image from
    a series of images. They better have the same dimensions...''')
    parser.add_argument('images', nargs='+',
                        help='''The file paths of the images to combine.''')
    # Bug fix: the help string used four quotes (""""The...), leaking a
    # stray leading double quote into the displayed help text.
    parser.add_argument('-n', '--name',
                        help="""The resultant image's file name. Defaults to
                        the name of the first image plus '_vertical'""")
    parser.add_argument('-d', '--delete', action='store_true',
                        help='Set this flag to delete the component images.')
    parser.add_argument('-t', '--type', choices=['png', 'jpg'], default='png',
                        help='The file type of the snapshots. ')
    args = parser.parse_args()

    result = combine_vertical_from_iterable(args.images)
    if args.delete:
        for image in args.images:
            os.remove(image)

    # --type always has a value ('png' by default), so the old fallback
    # that sniffed the format from the first image via PIL was dead code.
    fmt = args.type
    name = args.name if args.name is not None \
        else os.path.splitext(args.images[0])[0] + \
        '_vertical.' + fmt
    result.save(name, format=fmt)
if __name__ == '__main__':
    # Run the CLI only when executed directly, not on import.
    main()
|
{
"content_hash": "fcb19de5c8cb40a88c5b4c080ef65a36",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 79,
"avg_line_length": 42.44444444444444,
"alnum_prop": 0.5909685863874345,
"repo_name": "vr2262/framer",
"id": "19ef126d17e1daea8ae2d5d9f3e3a8b86d25f207",
"size": "1551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/make_vertical.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "786105"
}
],
"symlink_target": ""
}
|
"""Convert raw grayscale (or packed planar RGB) image dumps to PNG files."""
import argparse
import os
from math import sqrt

from PIL import Image
import numpy as np

parser = argparse.ArgumentParser(
    description='converts gray scale raw image to png image',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('infile', nargs='+', help='input filename')
parser.add_argument('--width', type=int)
parser.add_argument('--height', type=int)
args = parser.parse_args()

for path in args.infile:  # renamed from 'file' (shadowed the builtin)
    with open(path, 'rb') as fd:
        raw_data = fd.read()
    if args.width is not None:
        width = args.width
        # BUG FIX: height was copied from --height unconditionally, so giving
        # only --width left height=None and crashed at 'height * width'.
        # Fall back to a square image when --height is omitted.
        height = args.height if args.height is not None else args.width
    else:
        # No dimensions given: assume a square image.
        height = width = int(sqrt(len(raw_data)))
    if height * width == len(raw_data):
        # One byte per pixel: 8-bit grayscale.
        img = Image.frombytes('L', (width, height), raw_data)
    else:
        # Size doesn't match grayscale: assume 3 bytes/pixel, planar, square.
        height = width = int(sqrt(len(raw_data) / 3))
        assert height * width * 3 == len(raw_data), 'unexpected raw data size'
        raw_data = np.frombuffer(raw_data, dtype='ubyte')
        # NOTE(review): reshape uses (3, width, height); equivalent to
        # (3, height, width) only because the image is square here.
        raw_data = np.reshape(raw_data, (3, width, height))
        img = Image.fromarray(np.transpose(raw_data, (1, 2, 0)), 'RGB')
    filename, ext = os.path.splitext(path)
    img.save(filename + '.png')
|
{
"content_hash": "9ddff35fd62f0d1cfb8af1db1116a537",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 71,
"avg_line_length": 31.72222222222222,
"alnum_prop": 0.6409807355516638,
"repo_name": "silverneko/HWs",
"id": "43c71a7c9ed45e03ff6419c8fb776e3bf7a70ace",
"size": "1166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "digital-image-processing/hw4/convert_raw.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "11314045"
},
{
"name": "Batchfile",
"bytes": "16268"
},
{
"name": "C",
"bytes": "20037807"
},
{
"name": "C++",
"bytes": "14668867"
},
{
"name": "CMake",
"bytes": "259362"
},
{
"name": "CSS",
"bytes": "3339"
},
{
"name": "Cuda",
"bytes": "26137"
},
{
"name": "Emacs Lisp",
"bytes": "9440"
},
{
"name": "Go",
"bytes": "133381"
},
{
"name": "Haxe",
"bytes": "140982"
},
{
"name": "LLVM",
"bytes": "42152280"
},
{
"name": "Lex",
"bytes": "17360"
},
{
"name": "M4",
"bytes": "102755"
},
{
"name": "Makefile",
"bytes": "289041"
},
{
"name": "Matlab",
"bytes": "3289"
},
{
"name": "OCaml",
"bytes": "392667"
},
{
"name": "Objective-C",
"bytes": "145449"
},
{
"name": "Perl",
"bytes": "43307"
},
{
"name": "Python",
"bytes": "596622"
},
{
"name": "Roff",
"bytes": "18916"
},
{
"name": "Scilab",
"bytes": "105758"
},
{
"name": "Shell",
"bytes": "211792"
},
{
"name": "Vim script",
"bytes": "16523"
},
{
"name": "Yacc",
"bytes": "102681"
}
],
"symlink_target": ""
}
|
import threading
import logging
import websocket
class WebsocketListener:
    """Runs a websocket client on a background thread.

    Incoming messages are forwarded to ``on_message_callback``; errors and
    the close event are reported through ``logging``.
    """

    def __init__(self, websocket_link, on_message_callback, notifications=None):
        self.websocket_link = websocket_link            # websocket URL
        self.on_message_callback = on_message_callback  # called per message
        self.notifications = notifications
        self.closed = True  # no connection until start() is called

    def on_error(self, ws, error):
        logging.error("A websocket error occurred on websocket '{0}':".format(
            self.websocket_link))
        logging.error(error)

    def on_close(self, ws, *args):
        # *args absorbs the (close_status_code, close_msg) arguments passed
        # by newer websocket-client versions, which used to crash this hook.
        self.closed = True
        # FIX: log via logging for consistency with on_error (was print()).
        logging.info("The websocket with link '{0}' was closed.".format(
            self.websocket_link))

    def start(self):
        """Open the websocket and pump it on a background thread."""
        self.closed = False
        websocket.enableTrace(True)
        ws = websocket.WebSocketApp(
            self.websocket_link,
            on_error=self.on_error,
            on_close=self.on_close,
            on_message=self.on_message_callback)
        ws_thread = threading.Thread(target=ws.run_forever)
        ws_thread.start()
        self.ws = ws
        self.ws_thread = ws_thread

    def stop(self):
        """Ask the websocket run loop to exit."""
        self.closed = True
        self.ws.keep_running = False
|
{
"content_hash": "f0532c3dcccfbe7f2878c465c7ab4f77",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 80,
"avg_line_length": 29.974358974358974,
"alnum_prop": 0.613344739093242,
"repo_name": "Fortunate-MAN/PulseMonitor",
"id": "99e959c8c078a9c12ba9af7db93826757d81628b",
"size": "1311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Source/WebsocketListener.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "567"
},
{
"name": "Python",
"bytes": "11359"
},
{
"name": "Shell",
"bytes": "264"
}
],
"symlink_target": ""
}
|
"""
Summary
=======
Walks through your blog root figuring out all the available monthly
archives in your blogs. It generates html with this information and
stores it in the ``$(archivelinks)`` variable which you can use in
your head and foot templates.
Install
=======
This plugin comes with douglas. To install, do the following:
1. Add ``douglas.plugins.pyarchives`` to the ``load_plugins`` list
in your ``config.py`` file.
2. Configure using the following configuration variables.
``archive_template``
Lets you change the format of the output for an archive link.
For example::
py['archive_template'] = ('<li><a href="%(base_url)s/%(Y)s/%(b)s">'
'%(m)s/%(y)s</a></li>')
This displays the archives as list items, with a month number,
then a slash, then the year number.
The formatting variables available in the ``archive_template``
are::
b 'Jun'
m '6'
Y '1978'
y '78'
These work the same as ``time.strftime`` in python.
Additionally, you can use variables from config and data.
.. Note::
The syntax used here is the Python string formatting
syntax---not the douglas template rendering syntax!
Usage
=====
Add ``$(archivelinks)`` to your head and/or foot templates.
"""
__description__ = "Builds month/year-based archives listing."
__category__ = "archives"
__license__ = "MIT"
from douglas import tools
from douglas.memcache import memcache_decorator
from douglas.tools import pwrap
import time
def verify_installation(cfg):
    """Check the plugin configuration, warning about missing optional keys.

    Always returns True: ``archive_template`` is optional, so its absence
    only produces a warning via ``pwrap``.
    """
    # FIX: idiomatic membership test ('not in' instead of 'not ... in').
    if "archive_template" not in cfg:
        pwrap(
            "missing optional config property 'archive_template' which "
            "allows you to specify how the archive links are created. "
            "refer to pyarchive plugin documentation for more details.")
    return True
class PyblArchives(object):
    """Lazily renders the ``$(archivelinks)`` HTML for a request.

    The HTML is generated on first ``str()`` and cached on the instance
    (and in memcache via ``memcache_decorator``).
    """

    def __init__(self, request):
        self._request = request
        self._archives = None  # built lazily by __str__

    @memcache_decorator('pyarchives', True)
    def __str__(self):
        # FIX: identity comparison with None (was '== None').
        if self._archives is None:
            self.gen_linear_archive()
        return self._archives

    def gen_linear_archive(self):
        """Walk the datadir and build one archive link per (year, month)."""
        config = self._request.get_configuration()
        data = self._request.get_data()
        root = config["datadir"]
        archives = {}
        archive_list = tools.get_entries(config, root)
        fulldict = {}
        fulldict.update(config)
        fulldict.update(data)
        template = config.get('archive_template',
                              '<a href="%(base_url)s/%(Y)s/%(b)s">%(Y)s-%(b)s</a><br />')
        for mem in archive_list:
            timetuple = tools.filestat(self._request, mem)
            timedict = {}
            for x in ["B", "b", "m", "Y", "y"]:
                timedict[x] = time.strftime("%" + x, timetuple)
            fulldict.update(timedict)
            # FIX: 'not in' idiom. Only one link per year+month bucket.
            if (timedict['Y'] + timedict['m']) not in archives:
                archives[timedict['Y'] + timedict['m']] = (template % fulldict)
        result = []
        # Newest month first.
        for key in sorted(archives.keys(), reverse=True):
            result.append(archives[key])
        self._archives = '\n'.join(result)
def cb_prepare(args):
    """douglas callback: expose the archive links as ``$(archivelinks)``."""
    request = args["request"]
    request.get_data()["archivelinks"] = PyblArchives(request)
|
{
"content_hash": "9a65131ebab82f3d5198138ad2ed79e3",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 28,
"alnum_prop": 0.6014880952380952,
"repo_name": "willkg/douglas",
"id": "f3ee560ec91531f68a21ecca52d4b362c8dc95de",
"size": "3360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "douglas/plugins/archives.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24"
},
{
"name": "HTML",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "239841"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class ExponentformatValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``layout.scene.xaxis.exponentformat``."""

    def __init__(
        self, plotly_name="exponentformat", parent_name="layout.scene.xaxis", **kwargs
    ):
        # Callers may override edit_type/values; otherwise use the defaults.
        edit_type = kwargs.pop("edit_type", "plot")
        allowed_values = kwargs.pop("values", ["none", "e", "E", "power", "SI", "B"])
        super(ExponentformatValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=allowed_values,
            **kwargs,
        )
|
{
"content_hash": "3075d391b151e43c91d8d57fac51967a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 86,
"avg_line_length": 37.714285714285715,
"alnum_prop": 0.5984848484848485,
"repo_name": "plotly/plotly.py",
"id": "52ef25e76dddfa433db7519a77941cb5c28301af",
"size": "528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/scene/xaxis/_exponentformat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""
Django settings for demo_project project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'axe8g-m^k08=t0!7f+awtew$0vgto&_5r7659bjg#)y7lprz5('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
# django autocomplete light
'dal',
'dal_select2',
'dal_admin_filters',
# django stuff
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# project apps
'notes',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'demo_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'demo_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
DEFAULT_AUTO_FIELD='django.db.models.AutoField'
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
{
"content_hash": "2ce1dac13c17047e54813d88799292ff",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 91,
"avg_line_length": 25.527131782945737,
"alnum_prop": 0.6838748861220771,
"repo_name": "shamanu4/dal_admin_filters",
"id": "94eebb675eff6b59cafc0f99628ecfcfcc33c9a4",
"size": "3293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo_project/demo_project/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67"
},
{
"name": "HTML",
"bytes": "649"
},
{
"name": "JavaScript",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "12663"
}
],
"symlink_target": ""
}
|
import code
import cpp_util
from model import Platforms
from schema_util import CapitalizeFirstLetter
from schema_util import JsFunctionNameToClassName
import json
import os
import re
def _RemoveDescriptions(node):
"""Returns a copy of |schema| with "description" fields removed.
"""
if isinstance(node, dict):
result = {}
for key, value in node.items():
# Some schemas actually have properties called "description", so only
# remove descriptions that have string values.
if key == 'description' and isinstance(value, basestring):
continue
result[key] = _RemoveDescriptions(value)
return result
if isinstance(node, list):
return [_RemoveDescriptions(v) for v in node]
return node
class CppBundleGenerator(object):
  """This class contains methods to generate code based on multiple schemas.
  """

  def __init__(self,
               root,
               model,
               api_defs,
               cpp_type_generator,
               cpp_namespace,
               source_file_dir,
               impl_dir):
    # Stash the inputs; the four generator objects below each render one of
    # the bundle's output files (api/schemas x .h/.cc) from this shared state.
    self._root = root
    self._model = model
    self._api_defs = api_defs
    self._cpp_type_generator = cpp_type_generator
    self._cpp_namespace = cpp_namespace
    self._source_file_dir = source_file_dir
    self._impl_dir = impl_dir
    self.api_cc_generator = _APICCGenerator(self)
    self.api_h_generator = _APIHGenerator(self)
    self.schemas_cc_generator = _SchemasCCGenerator(self)
    self.schemas_h_generator = _SchemasHGenerator(self)

  def _GenerateHeader(self, file_base, body_code):
    """Generates a code.Code object for a header file

    Parameters:
    - |file_base| - the base of the filename, e.g. 'foo' (for 'foo.h')
    - |body_code| - the code to put in between the multiple inclusion guards"""
    c = code.Code()
    c.Append(cpp_util.CHROMIUM_LICENSE)
    c.Append()
    c.Append(cpp_util.GENERATED_BUNDLE_FILE_MESSAGE % self._source_file_dir)
    ifndef_name = cpp_util.GenerateIfndefName(self._source_file_dir, file_base)
    c.Append()
    c.Append('#ifndef %s' % ifndef_name)
    c.Append('#define %s' % ifndef_name)
    c.Append()
    c.Concat(body_code)
    c.Append()
    c.Append('#endif // %s' % ifndef_name)
    c.Append()
    return c

  def _GetPlatformIfdefs(self, model_object):
    """Generates the "defined" conditional for an #if check if |model_object|
    has platform restrictions. Returns None if there are no restrictions.
    """
    if model_object.platforms is None:
      return None
    ifdefs = []
    for platform in model_object.platforms:
      if platform == Platforms.CHROMEOS:
        ifdefs.append('defined(OS_CHROMEOS)')
      elif platform == Platforms.LINUX:
        ifdefs.append('defined(OS_LINUX)')
      elif platform == Platforms.MAC:
        ifdefs.append('defined(OS_MACOSX)')
      elif platform == Platforms.WIN:
        ifdefs.append('defined(OS_WIN)')
      else:
        raise ValueError("Unsupported platform ifdef: %s" % platform.name)
    # A function/namespace is compiled when ANY of its platforms matches.
    return ' || '.join(ifdefs)

  def _GenerateRegisterFunctions(self, namespace_name, function):
    # Emits one RegisterFunction<...>() call for |function|, wrapped in
    # platform #if/#endif guards when the function is platform-restricted.
    c = code.Code()
    function_ifdefs = self._GetPlatformIfdefs(function)
    if function_ifdefs is not None:
      c.Append("#if %s" % function_ifdefs, indent_level=0)
    function_name = JsFunctionNameToClassName(namespace_name, function.name)
    c.Append("registry->RegisterFunction<%sFunction>();" % (
        function_name))
    if function_ifdefs is not None:
      c.Append("#endif // %s" % function_ifdefs, indent_level=0)
    return c

  def _GenerateFunctionRegistryRegisterAll(self):
    # Builds the body of GeneratedFunctionRegistry::RegisterAll(): registers
    # every non-nocompile function in every namespace, including functions
    # declared on the namespace's nested types.
    c = code.Code()
    c.Append('// static')
    c.Sblock('void GeneratedFunctionRegistry::RegisterAll('
             'ExtensionFunctionRegistry* registry) {')
    for namespace in self._model.namespaces.values():
      namespace_ifdefs = self._GetPlatformIfdefs(namespace)
      if namespace_ifdefs is not None:
        c.Append("#if %s" % namespace_ifdefs, indent_level=0)
      namespace_name = CapitalizeFirstLetter(namespace.name.replace(
          "experimental.", ""))
      for function in namespace.functions.values():
        if function.nocompile:
          continue
        c.Concat(self._GenerateRegisterFunctions(namespace.name, function))
      for type_ in namespace.types.values():
        for function in type_.functions.values():
          if function.nocompile:
            continue
          namespace_types_name = JsFunctionNameToClassName(
              namespace.name, type_.name)
          c.Concat(self._GenerateRegisterFunctions(namespace_types_name,
                                                   function))
      if namespace_ifdefs is not None:
        c.Append("#endif // %s" % namespace_ifdefs, indent_level=0)
    c.Eblock("}")
    return c
class _APIHGenerator(object):
  """Generates the header for API registration / declaration"""

  def __init__(self, cpp_bundle):
    self._bundle = cpp_bundle

  def Generate(self, namespace):
    # NOTE: |namespace| is unused -- the generated header declares the
    # registry for the entire bundle, not a single namespace.
    c = code.Code()
    c.Append('#include <string>')
    c.Append()
    c.Append('#include "base/basictypes.h"')
    c.Append()
    c.Append("class ExtensionFunctionRegistry;")
    c.Append()
    c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
    c.Append()
    c.Append('class GeneratedFunctionRegistry {')
    c.Sblock(' public:')
    c.Append('static void RegisterAll('
             'ExtensionFunctionRegistry* registry);')
    c.Eblock('};')
    c.Append()
    c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
    return self._bundle._GenerateHeader('generated_api', c)
class _APICCGenerator(object):
  """Generates a code.Code object for the generated API .cc file"""

  def __init__(self, cpp_bundle):
    self._bundle = cpp_bundle

  def Generate(self, namespace):
    c = code.Code()
    c.Append(cpp_util.CHROMIUM_LICENSE)
    c.Append()
    c.Append('#include "%s"' % (os.path.join(self._bundle._source_file_dir,
                                             'generated_api.h')))
    c.Append()
    for namespace in self._bundle._model.namespaces.values():
      namespace_name = namespace.unix_name.replace("experimental_", "")
      # Prefer the explicitly configured header; otherwise derive the
      # conventional <impl_dir>/<name>/<name>_api.h path.
      implementation_header = namespace.compiler_options.get(
          "implemented_in",
          "%s/%s/%s_api.h" % (self._bundle._impl_dir,
                              namespace_name,
                              namespace_name))
      if not os.path.exists(
          os.path.join(self._bundle._root,
                       os.path.normpath(implementation_header))):
        # A missing *derived* header just means there is no native impl for
        # this namespace; a missing *configured* header is a hard error.
        if "implemented_in" in namespace.compiler_options:
          raise ValueError('Header file for namespace "%s" specified in '
                           'compiler_options not found: %s' %
                           (namespace.unix_name, implementation_header))
        continue
      ifdefs = self._bundle._GetPlatformIfdefs(namespace)
      if ifdefs is not None:
        c.Append("#if %s" % ifdefs, indent_level=0)
      c.Append('#include "%s"' % implementation_header)
      if ifdefs is not None:
        c.Append("#endif // %s" % ifdefs, indent_level=0)
    c.Append()
    c.Append('#include '
             '"extensions/browser/extension_function_registry.h"')
    c.Append()
    c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
    c.Append()
    c.Concat(self._bundle._GenerateFunctionRegistryRegisterAll())
    c.Append()
    c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
    c.Append()
    return c
class _SchemasHGenerator(object):
  """Generates a code.Code object for the generated schemas .h file"""

  def __init__(self, cpp_bundle):
    self._bundle = cpp_bundle

  def Generate(self, namespace):
    # NOTE: |namespace| is unused -- the header covers the whole bundle.
    c = code.Code()
    c.Append('#include <map>')
    c.Append('#include <string>')
    c.Append()
    c.Append('#include "base/strings/string_piece.h"')
    c.Append()
    c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
    c.Append()
    c.Append('class GeneratedSchemas {')
    c.Sblock(' public:')
    c.Append('// Determines if schema named |name| is generated.')
    c.Append('static bool IsGenerated(std::string name);')
    c.Append()
    c.Append('// Gets the API schema named |name|.')
    c.Append('static base::StringPiece Get(const std::string& name);')
    c.Eblock('};')
    c.Append()
    c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
    return self._bundle._GenerateHeader('generated_schemas', c)
def _FormatNameAsConstant(name):
"""Formats a name to be a C++ constant of the form kConstantName"""
name = '%s%s' % (name[0].upper(), name[1:])
return 'k%s' % re.sub('_[a-z]',
lambda m: m.group(0)[1].upper(),
name.replace('.', '_'))
class _SchemasCCGenerator(object):
  """Generates a code.Code object for the generated schemas .cc file"""

  def __init__(self, cpp_bundle):
    self._bundle = cpp_bundle

  def Generate(self, namespace):
    # NOTE: |namespace| is unused -- the .cc file embeds every API schema in
    # the bundle as a C string constant. (xrange: this file is Python 2.)
    c = code.Code()
    c.Append(cpp_util.CHROMIUM_LICENSE)
    c.Append()
    c.Append('#include "%s"' % (os.path.join(self._bundle._source_file_dir,
                                             'generated_schemas.h')))
    c.Append()
    c.Append('#include "base/lazy_instance.h"')
    c.Append()
    c.Append('namespace {')
    for api in self._bundle._api_defs:
      namespace = self._bundle._model.namespaces[api.get('namespace')]
      # JSON parsing code expects lists of schemas, so dump a singleton list.
      json_content = json.dumps([_RemoveDescriptions(api)],
                                separators=(',', ':'))
      # Escape all double-quotes and backslashes. For this to output a valid
      # JSON C string, we need to escape \ and ". Note that some schemas are
      # too large to compile on windows. Split the JSON up into several
      # strings, since apparently that helps.
      max_length = 8192
      segments = [json_content[i:i + max_length].replace('\\', '\\\\')
                      .replace('"', '\\"')
                  for i in xrange(0, len(json_content), max_length)]
      c.Append('const char %s[] = "%s";' %
               (_FormatNameAsConstant(namespace.name), '" "'.join(segments)))
    c.Append('}')
    c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
    c.Append()
    # Static holds the name -> schema-string map, built once lazily below.
    c.Sblock('struct Static {')
    c.Sblock('Static() {')
    for api in self._bundle._api_defs:
      namespace = self._bundle._model.namespaces[api.get('namespace')]
      c.Append('schemas["%s"] = %s;' % (namespace.name,
                                        _FormatNameAsConstant(namespace.name)))
    c.Eblock('}')
    c.Append()
    c.Append('std::map<std::string, const char*> schemas;')
    c.Eblock('};')
    c.Append()
    c.Append('base::LazyInstance<Static> g_lazy_instance;')
    c.Append()
    c.Append('// static')
    c.Sblock('base::StringPiece GeneratedSchemas::Get('
             'const std::string& name) {')
    c.Append('return IsGenerated(name) ? '
             'g_lazy_instance.Get().schemas[name] : "";')
    c.Eblock('}')
    c.Append()
    c.Append('// static')
    c.Sblock('bool GeneratedSchemas::IsGenerated(std::string name) {')
    c.Append('return g_lazy_instance.Get().schemas.count(name) > 0;')
    c.Eblock('}')
    c.Append()
    c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
    c.Append()
    return c
|
{
"content_hash": "0de5c41f932c12eccc96105310264188",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 79,
"avg_line_length": 36.36538461538461,
"alnum_prop": 0.6178388859509959,
"repo_name": "chromium2014/src",
"id": "91ae920c32d79b31b49423f983c92c2078b5ba34",
"size": "11513",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tools/json_schema_compiler/cpp_bundle_generator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1889381"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "39993418"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "220757674"
},
{
"name": "CSS",
"bytes": "973910"
},
{
"name": "Java",
"bytes": "6583410"
},
{
"name": "JavaScript",
"bytes": "20967999"
},
{
"name": "Mercury",
"bytes": "9480"
},
{
"name": "Objective-C",
"bytes": "943237"
},
{
"name": "Objective-C++",
"bytes": "7190130"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "674461"
},
{
"name": "Python",
"bytes": "10430892"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1337040"
},
{
"name": "Standard ML",
"bytes": "3705"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class CartelisandsConfig(AppConfig):
    # Django app configuration for the 'cartelisands' application.
    name = 'cartelisands'
|
{
"content_hash": "c08cd23d569af8fbfe9b8b6049015b6a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 36,
"avg_line_length": 19.8,
"alnum_prop": 0.7777777777777778,
"repo_name": "austing/cartels",
"id": "dc25ea432f0df38d88dc3c0dea73ffdad9bb3d5d",
"size": "99",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cartelisands/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1"
},
{
"name": "HTML",
"bytes": "176010"
},
{
"name": "Python",
"bytes": "14976"
},
{
"name": "Ruby",
"bytes": "7281"
},
{
"name": "Shell",
"bytes": "350"
}
],
"symlink_target": ""
}
|
from thefuck.utils import for_app
# Only consider `man` invocations that have at least one argument.
@for_app('man', at_least=1)
def match(command):
    # Always matches once the for_app filter has selected a `man` command.
    return True
def get_new_command(command):
    """Suggest `man` lookups in the other manual section (2 <-> 3).

    If the failed command already mentions section 2 or 3, swap it;
    otherwise offer both `man 3 <page>` and `man 2 <page>` variants.
    """
    if '3' in command.script:
        return command.script.replace("3", "2")
    if '2' in command.script:
        return command.script.replace("2", "3")
    # BUG FIX: copy script_parts before mutating. The original inserted into
    # command.script_parts itself, corrupting the command's shared state for
    # any rule evaluated afterwards.
    split_cmd2 = command.script_parts[:]
    split_cmd3 = split_cmd2[:]
    split_cmd2.insert(1, ' 2 ')
    split_cmd3.insert(1, ' 3 ')
    return ["".join(split_cmd3), "".join(split_cmd2)]
|
{
"content_hash": "53758578d867a2d8e4decbf9f164a9da",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 53,
"avg_line_length": 22.761904761904763,
"alnum_prop": 0.6192468619246861,
"repo_name": "PLNech/thefuck",
"id": "ead1361b088c45cd270a722cc9efd68198e67aab",
"size": "478",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "thefuck/rules/man.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "267150"
},
{
"name": "Shell",
"bytes": "1525"
}
],
"symlink_target": ""
}
|
import time
import mock
import unittest
from nose.tools import * # noqa
import webtest
import furl
import itsdangerous
from modularodm import storage
from framework.auth import signing
from framework.auth.core import Auth
from framework.exceptions import HTTPError
from framework.sessions.model import Session
from framework.mongo import set_up_storage
from website import settings
from website.util import api_url_for, rubeus
from website.addons.base import exceptions, GuidFile
from website.project import new_private_link
from website.project.views.node import _view_project as serialize_node
from website.addons.base import AddonConfig, AddonNodeSettingsBase, views
from website.addons.github.model import AddonGitHubOauthSettings
from tests.base import OsfTestCase
from tests.factories import AuthUserFactory, ProjectFactory
from website.addons.github.exceptions import ApiError
class DummyGuidFile(GuidFile):
    # Minimal GuidFile stub with hard-coded values for the tests in this
    # module; every abstract hook returns a fixed dummy value.
    file_name = 'foo.md'
    name = 'bar.md'

    @property
    def provider(self):
        return 'dummy'

    @property
    def version_identifier(self):
        return 'versionidentifier'

    @property
    def unique_identifier(self):
        return 'dummyid'

    @property
    def waterbutler_path(self):
        return '/path/to/file/'

    def enrich(self):
        # No-op: nothing to fetch for the dummy file.
        pass
class TestAddonConfig(unittest.TestCase):
    """Unit tests for AddonConfig URL helpers and settings defaults."""

    def setUp(self):
        # Minimal AddonConfig; only the fields read by the tests matter.
        self.addon_config = AddonConfig(
            short_name='test', full_name='test', owners=['node'],
            added_to={'node': False}, categories=[],
            settings_model=AddonNodeSettingsBase,
        )

    def test_static_url_relative(self):
        # Relative paths are namespaced under the addon's static directory.
        url = self.addon_config._static_url('foo')
        assert_equal(
            url,
            '/static/addons/test/foo'
        )

    def test_deleted_defaults_to_false(self):
        class MyAddonSettings(AddonNodeSettingsBase):
            pass
        config = MyAddonSettings()
        assert_is(config.deleted, False)

    def test_static_url_absolute(self):
        # Absolute paths are returned unchanged.
        url = self.addon_config._static_url('/foo')
        assert_equal(
            url,
            '/foo'
        )
class SetEnvironMiddleware(object):
    """WSGI middleware that injects fixed keys into every request environ."""

    def __init__(self, app, **kwargs):
        self.app = app
        self.kwargs = kwargs

    def __call__(self, environ, start_response):
        # Merge the configured overrides, then delegate to the wrapped app.
        for key, value in self.kwargs.items():
            environ[key] = value
        return self.app(environ, start_response)
class TestAddonAuth(OsfTestCase):
    """Integration tests for the waterbutler `get_auth` endpoint."""

    def setUp(self):
        super(TestAddonAuth, self).setUp()
        # Pin the client IP so the endpoint's IP check passes by default.
        self.flask_app = SetEnvironMiddleware(self.app.app, REMOTE_ADDR='127.0.0.1')
        self.test_app = webtest.TestApp(self.flask_app)
        self.user = AuthUserFactory()
        self.auth_obj = Auth(user=self.user)
        self.node = ProjectFactory(creator=self.user)
        # Signed session cookie identifying self.user.
        self.session = Session(data={'auth_user_id': self.user._id})
        self.session.save()
        self.cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(self.session._id)
        self.configure_addon()

    def configure_addon(self):
        # Wire up a GitHub addon on both the user and the node so the auth
        # endpoint has credentials and settings to serialize.
        self.user.add_addon('github')
        self.user_addon = self.user.get_addon('github')
        self.oauth_settings = AddonGitHubOauthSettings(github_user_id='john')
        self.oauth_settings.save()
        self.user_addon.oauth_settings = self.oauth_settings
        self.user_addon.oauth_access_token = 'secret'
        self.user_addon.save()
        self.node.add_addon('github', self.auth_obj)
        self.node_addon = self.node.get_addon('github')
        self.node_addon.user = 'john'
        self.node_addon.repo = 'youre-my-best-friend'
        self.node_addon.user_settings = self.user_addon
        self.node_addon.save()

    def build_url(self, **kwargs):
        # Default query args for get_auth; kwargs override per-test.
        options = dict(
            action='download',
            cookie=self.cookie,
            nid=self.node._id,
            provider=self.node_addon.config.short_name,
        )
        options.update(kwargs)
        return api_url_for('get_auth', **options)

    def test_auth_download(self):
        url = self.build_url()
        res = self.test_app.get(url)
        assert_equal(res.json['auth'], views.make_auth(self.user))
        assert_equal(res.json['credentials'], self.node_addon.serialize_waterbutler_credentials())
        assert_equal(res.json['settings'], self.node_addon.serialize_waterbutler_settings())
        # Normalize the port before comparing callback URLs.
        expected_url = furl.furl(self.node.api_url_for('create_waterbutler_log', _absolute=True))
        observed_url = furl.furl(res.json['callback_url'])
        observed_url.port = expected_url.port
        assert_equal(expected_url, observed_url)

    def test_auth_missing_args(self):
        # No cookie -> unauthenticated.
        url = self.build_url(cookie=None)
        res = self.test_app.get(url, expect_errors=True)
        assert_equal(res.status_code, 401)

    def test_auth_bad_cookie(self):
        # Reversed cookie fails signature verification.
        url = self.build_url(cookie=self.cookie[::-1])
        res = self.test_app.get(url, expect_errors=True)
        assert_equal(res.status_code, 401)

    def test_auth_missing_addon(self):
        # Unknown provider name -> bad request.
        url = self.build_url(provider='queenhub')
        res = self.test_app.get(url, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_auth_bad_ip(self):
        # Requests from a non-whitelisted IP are forbidden.
        flask_app = SetEnvironMiddleware(self.app.app, REMOTE_ADDR='192.168.1.1')
        test_app = webtest.TestApp(flask_app)
        url = self.build_url()
        res = test_app.get(url, expect_errors=True)
        assert_equal(res.status_code, 403)
class TestAddonLogs(OsfTestCase):
    """Integration tests for the `create_waterbutler_log` endpoint."""

    def setUp(self):
        super(TestAddonLogs, self).setUp()
        self.flask_app = SetEnvironMiddleware(self.app.app, REMOTE_ADDR='127.0.0.1')
        self.test_app = webtest.TestApp(self.flask_app)
        self.user = AuthUserFactory()
        self.auth_obj = Auth(user=self.user)
        self.node = ProjectFactory(creator=self.user)
        self.session = Session(data={'auth_user_id': self.user._id})
        self.session.save()
        self.cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(self.session._id)
        self.configure_addon()

    def configure_addon(self):
        # Same GitHub addon fixture as TestAddonAuth above.
        self.user.add_addon('github')
        self.user_addon = self.user.get_addon('github')
        self.oauth_settings = AddonGitHubOauthSettings(github_user_id='john')
        self.oauth_settings.save()
        self.user_addon.oauth_settings = self.oauth_settings
        self.user_addon.oauth_access_token = 'secret'
        self.user_addon.save()
        self.node.add_addon('github', self.auth_obj)
        self.node_addon = self.node.get_addon('github')
        self.node_addon.user = 'john'
        self.node_addon.repo = 'youre-my-best-friend'
        self.node_addon.user_settings = self.user_addon
        self.node_addon.save()

    def build_payload(self, metadata, **kwargs):
        # Builds a signed waterbutler payload; kwargs override the defaults,
        # and keys explicitly set to None are stripped to simulate omission.
        options = dict(
            auth={'id': self.user._id},
            action='create',
            provider=self.node_addon.config.short_name,
            metadata=metadata,
            time=time.time() + 1000,
        )
        options.update(kwargs)
        # NOTE: iteritems() -- this module is Python 2.
        options = {
            key: value
            for key, value in options.iteritems()
            if value is not None
        }
        message, signature = signing.default_signer.sign_payload(options)
        return {
            'payload': message,
            'signature': signature,
        }

    def test_add_log(self):
        path = 'pizza'
        url = self.node.api_url_for('create_waterbutler_log')
        payload = self.build_payload(metadata={'path': path})
        nlogs = len(self.node.logs)
        self.test_app.put_json(url, payload, headers={'Content-Type': 'application/json'})
        self.node.reload()
        # Exactly one new log entry should have been recorded.
        assert_equal(len(self.node.logs), nlogs + 1)

    def test_add_log_missing_args(self):
        # Omitting auth entirely is rejected and no log is written.
        path = 'pizza'
        url = self.node.api_url_for('create_waterbutler_log')
        payload = self.build_payload(metadata={'path': path}, auth=None)
        nlogs = len(self.node.logs)
        res = self.test_app.put_json(
            url,
            payload,
            headers={'Content-Type': 'application/json'},
            expect_errors=True,
        )
        assert_equal(res.status_code, 400)
        self.node.reload()
        assert_equal(len(self.node.logs), nlogs)

    def test_add_log_no_user(self):
        # auth present but with a null user id is rejected.
        path = 'pizza'
        url = self.node.api_url_for('create_waterbutler_log')
        payload = self.build_payload(metadata={'path': path}, auth={'id': None})
        nlogs = len(self.node.logs)
        res = self.test_app.put_json(
            url,
            payload,
            headers={'Content-Type': 'application/json'},
            expect_errors=True,
        )
        assert_equal(res.status_code, 400)
        self.node.reload()
        assert_equal(len(self.node.logs), nlogs)

    def test_add_log_no_addon(self):
        # Target node has no github addon configured -> rejected.
        path = 'pizza'
        node = ProjectFactory(creator=self.user)
        url = node.api_url_for('create_waterbutler_log')
        payload = self.build_payload(metadata={'path': path})
        nlogs = len(node.logs)
        res = self.test_app.put_json(
            url,
            payload,
            headers={'Content-Type': 'application/json'},
            expect_errors=True,
        )
        assert_equal(res.status_code, 400)
        self.node.reload()
        assert_equal(len(node.logs), nlogs)

    def test_add_log_bad_action(self):
        # Unknown action name -> rejected.
        path = 'pizza'
        url = self.node.api_url_for('create_waterbutler_log')
        payload = self.build_payload(metadata={'path': path}, action='dance')
        nlogs = len(self.node.logs)
        res = self.test_app.put_json(
            url,
            payload,
            headers={'Content-Type': 'application/json'},
            expect_errors=True,
        )
        assert_equal(res.status_code, 400)
        self.node.reload()
        assert_equal(len(self.node.logs), nlogs)
class TestCheckAuth(OsfTestCase):
    """Permission checks performed by views.check_access.

    check_access returns a truthy value when access is granted and raises
    HTTPError when it is denied (see the assert_raises tests below).
    """

    def setUp(self):
        super(TestCheckAuth, self).setUp()
        self.user = AuthUserFactory()
        self.node = ProjectFactory(creator=self.user)

    def test_has_permission(self):
        # The creator may upload to their own project.
        res = views.check_access(self.node, self.user, 'upload')
        assert_true(res)

    def test_not_has_permission_read_public(self):
        # Anonymous download is allowed once the project is public.
        self.node.is_public = True
        self.node.save()
        res = views.check_access(self.node, None, 'download')
        # Bug fix: the result was computed but never asserted.
        assert_true(res)

    def test_not_has_permission_read_has_link(self):
        # A valid private link key grants anonymous download access.
        link = new_private_link('red-special', self.user, [self.node], anonymous=False)
        res = views.check_access(self.node, None, 'download', key=link.key)
        # Bug fix: the result was computed but never asserted.
        assert_true(res)

    def test_not_has_permission_logged_in(self):
        # An unrelated logged-in user is forbidden (403).
        user2 = AuthUserFactory()
        with assert_raises(HTTPError) as exc_info:
            views.check_access(self.node, user2, 'download')
        assert_equal(exc_info.exception.code, 403)

    def test_not_has_permission_not_logged_in(self):
        # An anonymous user on a private project must authenticate (401).
        with assert_raises(HTTPError) as exc_info:
            views.check_access(self.node, None, 'download')
        assert_equal(exc_info.exception.code, 401)

    def test_has_permission_on_parent_node_copyto_pass_if_registration(self):
        # 'copyto' on a registration succeeds via the parent-node permission.
        component_admin = AuthUserFactory()
        component = ProjectFactory(creator=component_admin, parent=self.node)
        component.is_registration = True
        assert_false(component.has_permission(self.user, 'write'))
        res = views.check_access(component, self.user, 'copyto')
        assert_true(res)

    def test_has_permission_on_parent_node_copyto_fail_if_not_registration(self):
        # 'copyto' on a non-registration without write permission is denied.
        component_admin = AuthUserFactory()
        component = ProjectFactory(creator=component_admin, parent=self.node)
        assert_false(component.has_permission(self.user, 'write'))
        with assert_raises(HTTPError):
            views.check_access(component, self.user, 'copyto')

    def test_has_permission_on_parent_node_copyfrom(self):
        # 'copyfrom' succeeds via the parent-node permission.
        component_admin = AuthUserFactory()
        component = ProjectFactory(creator=component_admin, public=False, parent=self.node)
        assert_false(component.has_permission(self.user, 'write'))
        res = views.check_access(component, self.user, 'copyfrom')
        assert_true(res)
class OsfFileTestCase(OsfTestCase):
    """Test case that registers DummyGuidFile with modular-odm storage."""

    @classmethod
    def setUpClass(cls):
        # Bug fix: the original called super(OsfTestCase, cls), which names
        # the *parent* class and therefore skips OsfTestCase.setUpClass
        # entirely; name this class so the full setUpClass chain runs.
        super(OsfFileTestCase, cls).setUpClass()
        set_up_storage([DummyGuidFile], storage.MongoStorage)
class TestAddonFileViewHelpers(OsfFileTestCase):
    """Attribute behavior of bare GuidFile subclasses without metadata."""

    def test_key_error_raises_attr_error_for_name(self):
        class BareGuidFile(GuidFile):
            pass
        # Without cached metadata, accessing .name surfaces AttributeError.
        with assert_raises(AttributeError):
            BareGuidFile().name

    def test_getattrname_catches(self):
        class BareGuidFile(GuidFile):
            pass
        # getattr's default kicks in when the name lookup fails.
        assert_equals(getattr(BareGuidFile(), 'name', 'foo'), 'foo')

    def test_getattrname(self):
        class BareGuidFile(GuidFile):
            pass
        record = BareGuidFile()
        record._metadata_cache = {'name': 'test'}
        # With cached metadata present, getattr's fallback is not used.
        assert_equals(getattr(record, 'name', 'foo'), 'test')
def assert_urls_equal(url1, url2):
    """Assert two URLs are equal, ignoring scheme, host, and port."""
    parsed = [furl.furl(url1), furl.furl(url2)]
    for attr in ('scheme', 'host', 'port'):
        for parsed_url in parsed:
            setattr(parsed_url, attr, None)
    assert_equal(parsed[0], parsed[1])
@mock.patch('website.addons.github.model.GitHub.repo', mock.Mock(side_effect=ApiError))
class TestAddonFileViews(OsfTestCase):
    """View tests for addon_view_or_download_file, using the github addon.

    The class-level patch makes every GitHub.repo call raise ApiError, so
    no test here touches the network.
    """

    def setUp(self):
        super(TestAddonFileViews, self).setUp()
        self.user = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user)
        # Attach the github addon to both the user and the project, then
        # wire fake oauth credentials into the node's addon settings.
        self.user.add_addon('github')
        self.project.add_addon('github', auth=Auth(self.user))
        self.user_addon = self.user.get_addon('github')
        self.node_addon = self.project.get_addon('github')
        self.oauth = AddonGitHubOauthSettings(
            github_user_id='denbarell',
            oauth_access_token='Truthy'
        )
        self.oauth.save()
        self.user_addon.oauth_settings = self.oauth
        self.user_addon.save()
        self.node_addon.user_settings = self.user_addon
        self.node_addon.repo = 'Truth'
        self.node_addon.user = 'E'
        self.node_addon.save()
        # self.node_addon.user_settings = 'Truthy'
        # setattr(self.node_addon, 'has_auth', True)

    def get_mako_return(self):
        # Minimal serialized-node context accepted by the file-view template;
        # used below as the mocked return value of addon_view_file.
        ret = serialize_node(self.project, Auth(self.user), primary=True)
        ret.update({
            'error': '',
            'provider': '',
            'file_path': '',
            'sharejs_uuid': '',
            'private': '',
            'urls': {
                'files': '',
                'render': '',
                'sharejs': '',
                'mfr': '',
                'gravatar': '',
                'external': '',
            },
            'size': '',
            'extra': '',
            'file_name': '',
            'materialized_path': '',
        })
        ret.update(rubeus.collect_addon_assets(self.project))
        return ret

    def test_redirects_to_guid(self):
        # Hitting the path-based URL redirects (302) to the file's guid URL.
        path = 'bigdata'
        guid, _ = self.node_addon.find_or_create_file_guid('/' + path)
        resp = self.app.get(
            self.project.web_url_for(
                'addon_view_or_download_file',
                path=path,
                provider='github'
            ),
            auth=self.user.auth
        )
        assert_equals(resp.status_code, 302)
        assert_equals(resp.headers['Location'], 'http://localhost:80{}'.format(guid.guid_url))

    def test_action_download_redirects_to_download(self):
        # ?action=download is forwarded onto the download URL.
        path = 'cloudfiles'
        guid, _ = self.node_addon.find_or_create_file_guid('/' + path)
        resp = self.app.get(guid.guid_url + '?action=download', auth=self.user.auth)
        assert_equals(resp.status_code, 302)
        assert_equals(resp.headers['Location'], guid.download_url + '&action=download')

    @mock.patch('website.addons.base.request')
    def test_mfr_public_download_url_includes_view_only(self, mock_request):
        # The view_only key from the current request must be propagated into
        # the public MFR download URL.
        view_only = 'justworkplease'
        mock_request.args = {
            'view_only': view_only
        }
        path = 'cloudfiles'
        guid, _ = self.node_addon.find_or_create_file_guid('/' + path)
        assert_in('view_only={}'.format(view_only), guid.mfr_public_download_url)
        assert_in('accept_url=false', guid.mfr_public_download_url)

    @mock.patch('website.addons.base.request')
    def test_mfr_render_url(self, mock_request):
        view_only = 'justworkplease'
        mock_request.args = {
            'view_only': view_only
        }
        path = 'cloudfiles'
        guid, _ = self.node_addon.find_or_create_file_guid('/' + path)
        # Render URL points at the MFR server's /render endpoint with the
        # target encoded in the ?url= query parameter.
        assert_in(settings.MFR_SERVER_URL + '/render', guid.mfr_render_url)
        assert_in('?url=', guid.mfr_render_url)

    @mock.patch('website.addons.base.views.addon_view_file')
    def test_action_view_calls_view_file(self, mock_view_file):
        self.user.reload()
        self.project.reload()
        path = 'cloudfiles'
        mock_view_file.return_value = self.get_mako_return()
        guid, _ = self.node_addon.find_or_create_file_guid('/' + path)
        self.app.get(guid.guid_url + '?action=view', auth=self.user.auth)
        # Per the assertions below, addon_view_file is called positionally
        # as (auth, project, node_addon, ..., extras-dict).
        args, kwargs = mock_view_file.call_args
        assert_equals(kwargs, {})
        assert_equals(args[-1], {'action': 'view'})
        assert_equals(args[1], self.project)
        assert_equals(args[0].user, self.user)
        assert_equals(args[2], self.node_addon)

    @mock.patch('website.addons.base.views.addon_view_file')
    def test_no_action_calls_view_file(self, mock_view_file):
        # With no ?action, viewing is the default; extras dict is empty.
        self.user.reload()
        self.project.reload()
        path = 'cloudfiles'
        mock_view_file.return_value = self.get_mako_return()
        guid, _ = self.node_addon.find_or_create_file_guid('/' + path)
        self.app.get(guid.guid_url, auth=self.user.auth)
        args, kwargs = mock_view_file.call_args
        assert_equals(kwargs, {})
        assert_equals(args[-1], {})
        assert_equals(args[1], self.project)
        assert_equals(args[0].user, self.user)
        assert_equals(args[2], self.node_addon)

    def test_download_create_guid(self):
        # Downloading a path creates (or reuses) the guid for it.
        path = 'cloudfiles'
        self.app.get(
            self.project.web_url_for(
                'addon_view_or_download_file',
                path=path,
                provider='github',
                action='download'
            ),
            auth=self.user.auth
        )
        # created is False because the GET above already made the guid.
        guid, created = self.node_addon.find_or_create_file_guid('/' + path)
        assert_true(guid)
        assert_false(created)
        assert_equals(guid.waterbutler_path, '/' + path)

    def test_unauthorized_addons_raise(self):
        # An addon with no user_settings (no credentials) yields 401.
        path = 'cloudfiles'
        self.node_addon.user_settings = None
        self.node_addon.save()
        resp = self.app.get(
            self.project.web_url_for(
                'addon_view_or_download_file',
                path=path,
                provider='github',
                action='download'
            ),
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equals(resp.status_code, 401)

    def test_nonstorage_addons_raise(self):
        # Addons that are not storage providers (e.g. wiki) yield 400.
        resp = self.app.get(
            self.project.web_url_for(
                'addon_view_or_download_file',
                path='sillywiki',
                provider='wiki',
                action='download'
            ),
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equals(resp.status_code, 400)

    def test_head_returns_url(self):
        # HEAD responds with the download location (accept_url disabled).
        path = 'the little engine that couldnt'
        guid, _ = self.node_addon.find_or_create_file_guid('/' + path)
        download_url = furl.furl(guid.download_url)
        download_url.args['accept_url'] = 'false'
        resp = self.app.head(guid.guid_url, auth=self.user.auth)
        assert_urls_equal(resp.headers['Location'], download_url.url)

    def test_nonexistent_addons_raise(self):
        # A provider that was removed from the project yields 400.
        path = 'cloudfiles'
        self.project.delete_addon('github', Auth(self.user))
        self.project.save()
        resp = self.app.get(
            self.project.web_url_for(
                'addon_view_or_download_file',
                path=path,
                provider='github',
                action='download'
            ),
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equals(resp.status_code, 400)

    def test_unauth_addons_raise(self):
        # NOTE(review): this duplicates test_unauthorized_addons_raise above
        # line-for-line; consider removing one of the two.
        path = 'cloudfiles'
        self.node_addon.user_settings = None
        self.node_addon.save()
        resp = self.app.get(
            self.project.web_url_for(
                'addon_view_or_download_file',
                path=path,
                provider='github',
                action='download'
            ),
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equals(resp.status_code, 401)
class TestLegacyViews(OsfTestCase):
    """Legacy file-URL routes must 301-redirect to the unified addon view."""

    def setUp(self):
        super(TestLegacyViews, self).setUp()
        self.path = 'mercury.png'
        self.user = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user)
        self.node_addon = self.project.get_addon('osfstorage')
        file_record = self.node_addon.root_node.append_file(self.path)
        self.expected_path = file_record._id
        self.node_addon.save()
        file_record.save()

    def _assert_redirect(self, url, **expected_kwargs):
        # Shared helper: GET the legacy URL and verify the permanent (301)
        # redirect targets addon_view_or_download_file with these kwargs.
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, 301)
        expected_url = self.project.web_url_for(
            'addon_view_or_download_file',
            **expected_kwargs
        )
        assert_urls_equal(res.location, expected_url)

    def test_view_file_redirect(self):
        self._assert_redirect(
            '/{0}/osffiles/{1}/'.format(self.project._id, self.path),
            action='view',
            path=self.expected_path,
            provider='osfstorage',
        )

    def test_download_file_redirect(self):
        self._assert_redirect(
            '/{0}/osffiles/{1}/download/'.format(self.project._id, self.path),
            path=self.expected_path,
            action='download',
            provider='osfstorage',
        )

    def test_download_file_version_redirect(self):
        self._assert_redirect(
            '/{0}/osffiles/{1}/version/3/download/'.format(
                self.project._id,
                self.path,
            ),
            version=3,
            path=self.expected_path,
            action='download',
            provider='osfstorage',
        )

    def test_api_download_file_redirect(self):
        self._assert_redirect(
            '/api/v1/project/{0}/osffiles/{1}/'.format(self.project._id, self.path),
            path=self.expected_path,
            action='download',
            provider='osfstorage',
        )

    def test_api_download_file_version_redirect(self):
        self._assert_redirect(
            '/api/v1/project/{0}/osffiles/{1}/version/3/'.format(
                self.project._id,
                self.path,
            ),
            version=3,
            path=self.expected_path,
            action='download',
            provider='osfstorage',
        )

    def test_no_provider_name(self):
        # Provider-less legacy routes default to osfstorage.
        self._assert_redirect(
            '/{0}/files/{1}'.format(self.project._id, self.path),
            action='view',
            path=self.expected_path,
            provider='osfstorage',
        )

    def test_action_as_param(self):
        # A query-string action survives the redirect as a kwarg.
        self._assert_redirect(
            '/{}/osfstorage/files/{}/?action=download'.format(
                self.project._id,
                self.path,
            ),
            path=self.expected_path,
            action='download',
            provider='osfstorage',
        )

    def test_other_addon_redirect(self):
        # Non-osfstorage providers keep the raw path (no guid translation).
        self._assert_redirect(
            '/project/{0}/mycooladdon/files/{1}/'.format(
                self.project._id,
                self.path,
            ),
            action='view',
            path=self.path,
            provider='mycooladdon',
        )

    def test_other_addon_redirect_download(self):
        self._assert_redirect(
            '/project/{0}/mycooladdon/files/{1}/download/'.format(
                self.project._id,
                self.path,
            ),
            path=self.path,
            action='download',
            provider='mycooladdon',
        )
|
{
"content_hash": "ad68ca778315844918c35c6378ea5cbc",
"timestamp": "",
"source": "github",
"line_count": 767,
"max_line_length": 98,
"avg_line_length": 33.84354628422425,
"alnum_prop": 0.5924955697665459,
"repo_name": "lyndsysimon/osf.io",
"id": "438b52b98a598719e1e889684f07931371f3bb78",
"size": "25983",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "tests/test_addons.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "117596"
},
{
"name": "HTML",
"bytes": "23737"
},
{
"name": "JavaScript",
"bytes": "1209184"
},
{
"name": "Mako",
"bytes": "545105"
},
{
"name": "Python",
"bytes": "3356763"
},
{
"name": "Shell",
"bytes": "1679"
}
],
"symlink_target": ""
}
|
"""Array operations for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sort_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged import segment_id_ops
from tensorflow.python.util.tf_export import tf_export
#===============================================================================
# Masking
#===============================================================================
@tf_export('ragged.boolean_mask')
def boolean_mask(data, mask, name=None):
  """Applies a boolean mask to `data` without flattening the mask dimensions.

  Returns a potentially ragged tensor that is formed by retaining the elements
  in `data` where the corresponding value in `mask` is `True`.
  * `output[a1...aA, i, b1...bB] = data[a1...aA, j, b1...bB]`
  Where `j` is the `i`th `True` entry of `mask[a1...aA]`.
  Note that `output` preserves the mask dimensions `a1...aA`; this differs
  from `tf.boolean_mask`, which flattens those dimensions.

  Args:
    data: A potentially ragged tensor.
    mask: A potentially ragged boolean tensor. `mask`'s shape must be a prefix
      of `data`'s shape. `rank(mask)` must be known statically.
    name: A name prefix for the returned tensor (optional).

  Returns:
    A potentially ragged tensor that is formed by retaining the elements in
    `data` where the corresponding value in `mask` is `True`.
    * `rank(output) = rank(data)`.
    * `output.ragged_rank = max(data.ragged_rank, rank(mask) - 1)`.

  Raises:
    ValueError: if `rank(mask)` is not known statically; or if `mask.shape` is
      not a prefix of `data.shape`.

  #### Examples:
  >>> # Aliases for True & False so data and mask line up.
  >>> T, F = (True, False)
  >>> tf.ragged.boolean_mask(  # Mask a 2D Tensor.
  ...     data=[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
  ...     mask=[[T, F, T], [F, F, F], [T, F, F]]).to_list()
  [[1, 3], [], [7]]
  >>> tf.ragged.boolean_mask(  # Mask a 2D RaggedTensor.
  ...     tf.ragged.constant([[1, 2, 3], [4], [5, 6]]),
  ...     tf.ragged.constant([[F, F, T], [F], [T, T]])).to_list()
  [[3], [], [5, 6]]
  >>> tf.ragged.boolean_mask(  # Mask rows of a 2D RaggedTensor.
  ...     tf.ragged.constant([[1, 2, 3], [4], [5, 6]]),
  ...     tf.ragged.constant([True, False, True])).to_list()
  [[1, 2, 3], [5, 6]]
  """
  with ops.name_scope(name, 'RaggedMask', [data, mask]):
    # Convert inputs to tensors.
    data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data')
    mask = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        mask, dtypes.bool, name='mask')
    # Use one common row-splits dtype for both inputs.
    row_splits_dtype, (data, mask) = ragged_tensor.match_row_splits_dtypes(
        data, mask, return_dtype=True)
    # Get static rank of mask.
    if mask.shape.ndims is None:
      raise ValueError('mask.shape.ndims must be known statically.')
    elif mask.shape.ndims == 0:
      raise ValueError('mask cannot be scalar.')
    # If mask is ragged, then recurse with a non-ragged mask.
    if ragged_tensor.is_ragged(mask):
      if not ragged_tensor.is_ragged(data):
        # Promote dense data so its splits can be compared with the mask's.
        data = ragged_tensor.RaggedTensor.from_tensor(
            data, ragged_rank=mask.ragged_rank,
            row_splits_dtype=mask.row_splits.dtype)
      # Check that mask.nested_row_splits is a prefix of
      # data.nested_row_splits.
      splits_list = [
          mask.nested_row_splits, data.nested_row_splits[:mask.ragged_rank]
      ]
      with ops.control_dependencies(
          ragged_util.assert_splits_match(splits_list)):
        # Strip off ragged `splits` until `mask` is non-ragged.  Keep the splits
        # that we strip off in `splits`, so we can add them back on after
        # we recursively mask the non-ragged data.
        splits = []
        while ragged_tensor.is_ragged(mask):
          if mask.shape.ndims > 2:
            splits.append(mask.row_splits)
          else:
            # Count the number of True mask values in each row to find the
            # lengths of the filtered rows; then convert to splits.
            int_mask = ragged_functional_ops.map_flat_values(
                math_ops.cast, mask, dtype=row_splits_dtype)
            masked_row_lengths = ragged_math_ops.reduce_sum(int_mask, axis=1)
            splits.append(ragged_util.lengths_to_splits(masked_row_lengths))
          mask = mask.values
          data = data.values
        # Recursively apply the nested non-ragged mask to the nested data.
        masked_values = boolean_mask(data, mask)
        # Add the ragged `splits` back to the result.
        masked_values = ragged_tensor.RaggedTensor.from_nested_row_splits(
            masked_values, splits, validate=False)
        return masked_values
    # If mask is non-ragged and has rank 1, and data is ragged, then build a
    # ragged tensor with the indicated rows.
    elif ragged_tensor.is_ragged(data) and mask.shape.ndims == 1:
      # Get the masked splits: first get the length of each row, then filter
      # out the rows that we are deleting, and convert that filtered set of
      # masks back to a splits tensor.
      lengths = data.row_lengths()
      masked_lengths = array_ops.boolean_mask(lengths, mask)
      masked_splits = ragged_util.lengths_to_splits(masked_lengths)
      # Get the masked values: first get row ids corresponding to each
      # value, then use tf.gather to build a boolean mask that's false for
      # values that come from rows that we are deleting, and use that mask to
      # construct the masked values tensor.
      segment_ids = segment_id_ops.row_splits_to_segment_ids(data.row_splits)
      segment_mask = array_ops.gather(mask, segment_ids)
      masked_values = boolean_mask(data.values, segment_mask)
      return ragged_tensor.RaggedTensor.from_row_splits(masked_values,
                                                        masked_splits,
                                                        validate=False)
    # If mask is non-ragged and has rank>1, then convert it to be ragged,
    # with a ragged rank matching data.
    if ragged_tensor.is_ragged(data):
      mask = ragged_tensor.RaggedTensor.from_tensor(
          mask, ragged_rank=min(data.ragged_rank, mask.shape.ndims - 1),
          row_splits_dtype=data.row_splits.dtype)
      return boolean_mask(data, mask)
    # Otherwise, data and mask are both `Tensor`s.
    else:
      # Apply `boolean_mask` to get the masked values.
      masked_values = array_ops.boolean_mask(data, mask)
      if mask.shape.ndims >= 2:
        # Add the innermost ragged dimension.  For each innermost cell, get the
        # number of values it contains.  Then flatten that to get a list of
        # cell lengths, and convert it to splits.  Finally, combine the splits
        # and values to get the innermost ragged tensor.
        masked_lengths = math_ops.count_nonzero(mask, axis=-1,
                                                dtype=row_splits_dtype)
        flattened_masked_lengths = array_ops.reshape(masked_lengths, [-1])
        masked_values = ragged_tensor.RaggedTensor.from_row_lengths(
            masked_values, flattened_masked_lengths, validate=False)
        # Wrap remaining ragged dimensions.
        if mask.shape.ndims > 2:
          mask_shape = array_ops.shape(mask, out_type=row_splits_dtype)
          split_size = math_ops.cumprod(mask_shape) + 1
          for dim in range(mask.shape.ndims - 3, -1, -1):
            elt_size = mask_shape[dim + 1]
            masked_splits = math_ops.range(split_size[dim]) * elt_size
            masked_values = ragged_tensor.RaggedTensor.from_row_splits(
                masked_values, masked_splits, validate=False)
      return masked_values
#===============================================================================
# Tiling
#===============================================================================
def tile(input, multiples, name=None):  # pylint: disable=redefined-builtin
  """Constructs a `RaggedTensor` by tiling a given `RaggedTensor`.

  The values of `input` are replicated `multiples[i]` times along the
  `i`th dimension (for each dimension `i`).  For every dimension `axis` in
  `input`, the length of each output element in that dimension is the
  length of corresponding input element multiplied by `multiples[axis]`.

  Args:
    input: A `RaggedTensor`.
    multiples: A 1-D integer `Tensor`.  Length must be the same as the number of
      dimensions in `input`.
    name: A name for the operation (optional).

  Returns:
    A `RaggedTensor` with the same type, rank, and ragged_rank as `input`.

  #### Example:
  >>> rt = tf.ragged.constant([[1, 2], [3]])
  >>> tf.tile(rt, [3, 2]).to_list()
  [[1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3]]
  """
  with ops.name_scope(name, 'RaggedTile', [input, multiples]):
    input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        input, name='input')
    # Dense inputs delegate directly to tf.tile.
    if not ragged_tensor.is_ragged(input):
      return array_ops.tile(input, multiples, name)
    multiples = ragged_util.convert_to_int_tensor(
        multiples, name='multiples', dtype=input.row_splits.dtype)
    multiples.shape.assert_has_rank(1)
    # If the constant value of `multiples` is available, then we can use it
    # to skip tiling dimensions where `multiples=1`.
    const_multiples = tensor_util.constant_value(multiples)
    # Values and splits are tiled by the two helpers below, then reassembled.
    return ragged_tensor.RaggedTensor.from_nested_row_splits(
        _tile_ragged_values(input, multiples, const_multiples),
        _tile_ragged_splits(input, multiples, const_multiples),
        validate=False)
def _tile_ragged_values(rt_input, multiples, const_multiples=None):
  """Builds flat_values tensor for a tiled `RaggedTensor`.

  Returns a tensor that repeats the values in `rt_input.flat_values` in the
  appropriate pattern to construct a `RaggedTensor` that tiles `rt_input` as
  specified by `multiples`.

  Args:
    rt_input: The `RaggedTensor` whose values should be repeated.
    multiples: A 1-D integer `tensor`, indicating how many times each dimension
      should be repeated.
    const_multiples: Optional constant value for multiples.  Used to skip tiling
      dimensions where `multiples=1`.

  Returns:
    A `Tensor` with the same type and rank as `rt_input.flat_values`.

  #### Example:
  >>> rt = tf.ragged.constant([[1, 2], [3]])
  >>> _tile_ragged_values(rt, tf.constant([3, 2])).numpy()
  array([1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3], dtype=int32)
  """
  ragged_rank = rt_input.ragged_rank
  nested_splits = rt_input.nested_row_splits
  # Pointers to the values in `rt_input.flat_values`.
  inner_value_ids = math_ops.range(nested_splits[-1][-1])
  # For each ragged dimension (working from the innermost to outermost),
  # expand `inner_value_ids` as necessary to tile that dimension.
  prev_splits = None
  for axis in range(ragged_rank, 0, -1):
    # Ragged splits for this dimension.
    splits = nested_splits[axis - 1]
    # Adjust splits so they point into `inner_value_ids` (instead of just
    # pointing into the next dimension's values).
    if prev_splits is not None:  # Not the first pass through the loop.
      splits = array_ops.gather(prev_splits * multiples[axis + 1], splits)
    # Repeat each element in this ragged dimension `multiples[axis]` times.
    if const_multiples is None or const_multiples[axis] != 1:
      inner_value_ids = ragged_util.repeat_ranges(inner_value_ids, splits,
                                                  multiples[axis])
    prev_splits = splits
  # Gather the tiled inner values.
  ragged_tiled_values = array_ops.gather(rt_input.flat_values, inner_value_ids)
  # Tile the flat_values for the uniform dimensions (i.e., for `axis=0` plus
  # `axis=range(ragged_rank, rank)`).
  inner_repeats = array_ops.concat([multiples[:1], multiples[ragged_rank + 1:]],
                                   axis=0)
  return array_ops.tile(ragged_tiled_values, inner_repeats)
def _tile_ragged_splits(rt_input, multiples, const_multiples=None):
  """Builds nested_split tensors for a tiled `RaggedTensor`.

  Returns a list of split tensors that can be used to construct the
  `RaggedTensor` that tiles `rt_input` as specified by `multiples`.

  Args:
    rt_input: The `RaggedTensor` that is being tiled.
    multiples: A 1-D integer `tensor`, indicating how many times each dimension
      should be repeated.
    const_multiples: Optional constant value for multiples.  Used to skip tiling
      dimensions where `multiples=1`.

  Returns:
    A list of 1-D integer `Tensor`s (one for each ragged dimension in
    `rt_input`).

  #### Example:
  >>> rt = tf.ragged.constant([[1, 2], [3]])
  >>> _tile_ragged_splits(rt, [3, 2])
  [<tf.Tensor: ..., numpy=array([ 0, 4, 6, 10, 12, 16, 18])>]
  """
  ragged_rank = rt_input.ragged_rank
  nested_splits = rt_input.nested_row_splits
  # projected_splits[src_axis, dst_axis] contains the split points that divide
  # the rows from src_axis in the list of dst_axis values.  E.g.,
  # projected_splits[i, i] = nested_splits[i], and
  # projected_splits[i, i+1] = gather(nested_splits[i+1], nested_splits[i]).
  projected_splits = [{i: nested_splits[i]} for i in range(ragged_rank)]
  for src_axis in range(ragged_rank):
    for dst_axis in range(src_axis + 1, ragged_rank - 1):
      projected_splits[src_axis][dst_axis] = array_ops.gather(
          nested_splits[dst_axis],
          projected_splits[src_axis][dst_axis - 1])
  # For each ragged dimension: nested_splits[axis] -> result_splits[axis].
  result_splits = []
  for axis in range(ragged_rank):
    # Get the length of each row for the input tensor for this dimension.
    input_lengths = nested_splits[axis][1:] - nested_splits[axis][:-1]
    # Multiply those lengths by the `multiples` of dimension axis+1, since
    # each value will be repeated that number of times.
    output_lengths = input_lengths * multiples[axis + 1]
    # Repeat ranges of the row lengths as necessary for them to be tiled in
    # each ragged dimension `d < axis`.  (Start with dimension d=axis-1, and
    # work our way up to dimension d=0.)
    repeats = 1
    for d in range(axis - 1, -1, -1):
      if const_multiples is None or const_multiples[d + 1] != 1:
        splits = projected_splits[d][axis - 1] * repeats
        output_lengths = ragged_util.repeat_ranges(output_lengths, splits,
                                                   multiples[d + 1])
      repeats *= multiples[d + 1]
    # Tile splits for the outermost (uniform) dimension.
    output_lengths = array_ops.tile(output_lengths, multiples[:1])
    # Convert to splits.
    result_splits.append(ragged_util.lengths_to_splits(output_lengths))
  return result_splits
#===============================================================================
# Reshaping
#===============================================================================
def expand_dims(input, axis, name=None):  # pylint: disable=redefined-builtin
  """Inserts a dimension with shape 1 into a potentially ragged tensor's shape.

  Given a potentially ragged tensor `input`, this operation inserts a
  dimension with size 1 at the dimension `axis` of `input`'s shape.

  * If `input` is a `Tensor`, then this is equivalent to `tf.expand_dims`.
  * If `input` is ragged, and `axis=0`, then the new dimension will be
    uniform; but the previously outermost dimension will become ragged.
  * If `input` is ragged, and `0 < axis < input.ragged_rank`, then the
    new dimension will be ragged.
  * If `input` is ragged, and `axis >= input.ragged_rank`, then the new
    dimension will be uniform.

  Args:
    input: The potentially ragged tensor that should be expanded with a new
      dimension.
    axis: An integer constant indicating where the new dimension should be
      inserted.
    name: A name for the operation (optional).

  Returns:
    A tensor with the same values as `input`, with an added dimension of
    size 1 at `axis`.
  """
  with ops.name_scope(name, 'RaggedExpandDims', [input]):
    input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        input, name='input')
    # Dense inputs delegate directly to tf.expand_dims.
    if not ragged_tensor.is_ragged(input):
      return array_ops.expand_dims(input, axis)
    # Normalize `axis` against the rank of the *result* (one more than the
    # input's rank, when that rank is statically known).
    result_ndims = None if input.shape.ndims is None else input.shape.ndims + 1
    axis = ragged_util.get_positive_axis(axis, result_ndims)
    if axis == 0:
      # New outermost uniform dimension: one row spanning all existing rows.
      new_values = input
      new_splits = array_ops.stack([0, input.nrows()])
    elif axis == 1:
      # New ragged dimension holding exactly one element per existing row.
      new_values = input
      new_splits = math_ops.range(input.nrows() + 1)
    else:
      # Recurse into the values; the outermost splits stay unchanged.
      new_values = expand_dims(input.values, axis - 1)
      new_splits = input.row_splits
    return ragged_tensor.RaggedTensor.from_row_splits(
        new_values, new_splits, validate=False)
#===============================================================================
# RaggedTensor Size
#===============================================================================
def size(input, out_type=dtypes.int32, name=None):  # pylint: disable=redefined-builtin
  """Returns the size of a potentially ragged tensor.

  The size of a ragged tensor is the size of its inner values (i.e. the
  number of scalar elements it contains).

  Args:
    input: A potentially ragged `Tensor`.
    out_type: The numeric output type for the operation.
    name: A name for the operation (optional).

  Returns:
    A Tensor of type `out_type`.
  """
  # For ragged inputs, count the flat (innermost) values; dense inputs are
  # measured directly.
  target = input.flat_values if ragged_tensor.is_ragged(input) else input
  return array_ops.size(target, out_type=out_type, name=name)
#===============================================================================
# ragged.rank
#===============================================================================
def rank(input, name=None):  # pylint: disable=redefined-builtin
  """Returns the rank of a RaggedTensor.

  Returns a 0-D `int32` `Tensor` representing the rank of `input`.

  Args:
    input: A `RaggedTensor`
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  with ops.name_scope(name, 'RaggedRank', [input]) as name:
    if ragged_tensor.is_ragged(input):
      # Total rank = number of ragged dimensions plus the (dynamic) rank of
      # the dense innermost values.
      return input.ragged_rank + array_ops.rank(input.flat_values)
    return array_ops.rank(input, name)
#===============================================================================
# ragged.one_hot
#===============================================================================
def ragged_one_hot(indices,
                   depth,
                   on_value=None,
                   off_value=None,
                   axis=None,
                   dtype=None,
                   name=None):
  """Applies tf.one_hot along the values of a RaggedTensor.

  The one-hot encoding is applied to `indices.flat_values`; the ragged
  structure of `indices` is preserved in the result.
  """
  with ops.name_scope(name, 'RaggedOneHot', [indices]):
    indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        indices, name='indices')
    if axis is not None:
      axis = ragged_util.get_positive_axis(axis, indices.shape.ndims)
      # one_hot can only insert the new dimension inside the dense part.
      if axis < indices.ragged_rank:
        raise ValueError('axis may not be less than indices.ragged_rank.')
    encoded = array_ops.one_hot(indices.flat_values, depth, on_value, off_value,
                                axis, dtype, name)
    return indices.with_flat_values(encoded)
#===============================================================================
# ragged.stack_dynamic_partitions
#===============================================================================
@tf_export('ragged.stack_dynamic_partitions')
def stack_dynamic_partitions(data, partitions, num_partitions, name=None):
  """Stacks dynamic partitions of a Tensor or RaggedTensor.
  Returns a RaggedTensor `output` with `num_partitions` rows, where the row
  `output[i]` is formed by stacking all slices `data[j1...jN]` such that
  `partitions[j1...jN] = i`. Slices of `data` are stacked in row-major
  order.
  If `num_partitions` is an `int` (not a `Tensor`), then this is equivalent to
  `tf.ragged.stack(tf.dynamic_partition(data, partitions, num_partitions))`.
  #### Example:
  >>> data           = ['a', 'b', 'c', 'd', 'e']
  >>> partitions     = [  3,   0,   2,   2,   3]
  >>> num_partitions = 5
  >>> tf.ragged.stack_dynamic_partitions(data, partitions, num_partitions)
  <tf.RaggedTensor [[b'b'], [], [b'c', b'd'], [b'a', b'e'], []]>
  Args:
    data: A `Tensor` or `RaggedTensor` containing the values to stack.
    partitions: An `int32` or `int64` `Tensor` or `RaggedTensor` specifying the
      partition that each slice of `data` should be added to.
      `partitions.shape` must be a prefix of `data.shape`.  Values must be
      greater than or equal to zero, and less than `num_partitions`.
      `partitions` is not required to be sorted.
    num_partitions: An `int32` or `int64` scalar specifying the number of
      partitions to output.  This determines the number of rows in `output`.
    name: A name prefix for the returned tensor (optional).
  Returns:
    A `RaggedTensor` containing the stacked partitions.  The returned tensor
    has the same dtype as `data`, and its shape is
    `[num_partitions, (D)] + data.shape[partitions.rank:]`, where `(D)` is a
    ragged dimension whose length is the number of data slices stacked for
    each `partition`.
  """
  with ops.name_scope(name, 'SegmentStack', [data, partitions, num_partitions]):
    # Convert inputs to tensors.
    data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data')
    # If `data` is ragged, reuse its row-splits dtype for `partitions` and
    # `num_partitions` so the three tensors agree on integer width.
    row_splits_dtype = (
        data.row_splits.dtype
        if isinstance(data, ragged_tensor.RaggedTensor) else None)
    partitions = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        partitions, name='partitions', preferred_dtype=row_splits_dtype)
    num_partitions = ops.convert_to_tensor(
        num_partitions, name='num_partitions', preferred_dtype=partitions.dtype)
    if row_splits_dtype is not None:
      partitions = math_ops.cast(partitions, row_splits_dtype)
    num_partitions = math_ops.cast(num_partitions, partitions.dtype)
    # Sanity-checks for shapes.
    partitions_rank = partitions.shape.ndims
    if partitions_rank is None:
      raise ValueError('partitions must have known rank.')
    num_partitions.shape.assert_has_rank(0)
    partitions.shape.assert_is_compatible_with(data.shape[:partitions_rank])
    if partitions_rank == 0:
      # If partitions is a scalar, then just create a RaggedTensor containing
      # that single the complete `data` value in the specified row.
      return ragged_tensor.RaggedTensor.from_value_rowids(
          values=array_ops.stack([data]),
          value_rowids=array_ops.stack([partitions]),
          nrows=num_partitions,
          validate=False)
    elif partitions_rank == 1:
      # If partitions is a vector (the typical case): we can just use data and
      # partitions as the `values` and `value_rowids` for `from_value_rowids`,
      # as long as we sort them first.
      permutation = sort_ops.argsort(partitions, stable=True)
      value_rowids = array_ops.gather(partitions, permutation)
      values = array_ops.gather(data, permutation)
      # Only the largest rowid needs checking: the sort above guarantees it
      # bounds all of the others.
      check = check_ops.assert_less(
          value_rowids[-1:],
          num_partitions,
          message='partitions must be less than num_partitions')
      with ops.control_dependencies([check]):
        return ragged_tensor.RaggedTensor.from_value_rowids(
            values, value_rowids, nrows=num_partitions, validate=False)
    else:
      # Handle higher-dimensional partitions via recursion.
      # Wrap both inputs as RaggedTensors with matching outer splits, then
      # recurse on their values, peeling off one dimension per call until the
      # rank-1 case above applies.
      if not isinstance(data, ragged_tensor.RaggedTensor):
        data = ragged_tensor.RaggedTensor.from_tensor(
            data, row_splits_dtype=partitions.dtype, ragged_rank=1)
      if not isinstance(partitions, ragged_tensor.RaggedTensor):
        partitions = ragged_tensor.RaggedTensor.from_tensor(
            partitions,
            row_splits_dtype=partitions.dtype,
            ragged_rank=max(data.ragged_rank, partitions_rank - 1))
      check = check_ops.assert_equal(
          data.row_splits,
          partitions.row_splits,
          message='data and partitions have incompatible ragged shapes')
      with ops.control_dependencies([check]):
        return stack_dynamic_partitions(data.values, partitions.values,
                                        num_partitions)
|
{
"content_hash": "a4c5f7a4bfd2d8bcf23399a36baf9877",
"timestamp": "",
"source": "github",
"line_count": 631,
"max_line_length": 87,
"avg_line_length": 41.608557844690964,
"alnum_prop": 0.6174442963245096,
"repo_name": "chemelnucfin/tensorflow",
"id": "af9e7c278a21f2dba632e7c2681e07759c0ab3a4",
"size": "26944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/ragged/ragged_array_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4913"
},
{
"name": "Batchfile",
"bytes": "16146"
},
{
"name": "C",
"bytes": "825231"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "75313939"
},
{
"name": "CMake",
"bytes": "207856"
},
{
"name": "Dockerfile",
"bytes": "80130"
},
{
"name": "Go",
"bytes": "1670422"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "881711"
},
{
"name": "Jupyter Notebook",
"bytes": "1113647"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "853297"
},
{
"name": "Makefile",
"bytes": "109340"
},
{
"name": "Objective-C",
"bytes": "105235"
},
{
"name": "Objective-C++",
"bytes": "258793"
},
{
"name": "PHP",
"bytes": "38007"
},
{
"name": "Pascal",
"bytes": "3741"
},
{
"name": "Pawn",
"bytes": "14380"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "50825074"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4706"
},
{
"name": "Shell",
"bytes": "532610"
},
{
"name": "Smarty",
"bytes": "31460"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
import warnings
from typing import Dict, List, Optional, Sequence, Set, Tuple
from flask import current_app, g
from flask_appbuilder.security.sqla import models as sqla_models
from flask_appbuilder.security.sqla.manager import SecurityManager
from flask_appbuilder.security.sqla.models import Permission, PermissionView, Role, User, ViewMenu
from sqlalchemy import or_
from sqlalchemy.orm import joinedload
from airflow.exceptions import AirflowException
from airflow.models import DagBag, DagModel
from airflow.security import permissions
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.www.utils import CustomSQLAInterface
from airflow.www.views import (
CustomPermissionModelView,
CustomPermissionViewModelView,
CustomResetMyPasswordView,
CustomResetPasswordView,
CustomRoleModelView,
CustomUserDBModelView,
CustomUserInfoEditView,
CustomUserLDAPModelView,
CustomUserOAuthModelView,
CustomUserOIDModelView,
CustomUserRemoteUserModelView,
CustomUserStatsChartView,
CustomViewMenuModelView,
)
# Names of the roles that ship with Airflow out of the box; any other role
# found in the DB is treated as user-defined (see
# add_homepage_access_to_custom_roles).
EXISTING_ROLES = {
    'Admin',
    'Viewer',
    'User',
    'Op',
    'Public',
}
class AirflowSecurityManager(SecurityManager, LoggingMixin): # pylint: disable=too-many-public-methods
"""Custom security manager, which introduces a permission model adapted to Airflow"""
###########################################################################
# PERMISSIONS
###########################################################################
# [START security_viewer_perms]
VIEWER_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_DEPENDENCIES),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PASSWORD),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PASSWORD),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PROFILE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PROFILE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_INSTANCE),
]
# [END security_viewer_perms]
# [START security_user_perms]
USER_PERMISSIONS = [
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),
]
# [END security_user_perms]
# [START security_op_perms]
OP_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_ADMIN_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM),
]
# [END security_op_perms]
ADMIN_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_RESCHEDULE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_RESCHEDULE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PASSWORD),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_PASSWORD),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_ROLE),
]
# global view-menu for dag-level access
DAG_VMS = {permissions.RESOURCE_DAG}
READ_DAG_PERMS = {permissions.ACTION_CAN_READ}
DAG_PERMS = permissions.DAG_PERMS
###########################################################################
# DEFAULT ROLE CONFIGURATIONS
###########################################################################
ROLE_CONFIGS = [
{'role': 'Public', 'perms': []},
{'role': 'Viewer', 'perms': VIEWER_PERMISSIONS},
{
'role': 'User',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS,
},
{
'role': 'Op',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS,
},
{
'role': 'Admin',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS + ADMIN_PERMISSIONS,
},
]
permissionmodelview = CustomPermissionModelView
permissionviewmodelview = CustomPermissionViewModelView
rolemodelview = CustomRoleModelView
viewmenumodelview = CustomViewMenuModelView
userdbmodelview = CustomUserDBModelView
resetmypasswordview = CustomResetMyPasswordView
resetpasswordview = CustomResetPasswordView
userinfoeditview = CustomUserInfoEditView
userldapmodelview = CustomUserLDAPModelView
useroauthmodelview = CustomUserOAuthModelView
userremoteusermodelview = CustomUserRemoteUserModelView
useroidmodelview = CustomUserOIDModelView
userstatschartview = CustomUserStatsChartView
    def __init__(self, appbuilder):
        """Initialize the security manager and patch all `*view` attributes so
        their datamodel uses :class:`CustomSQLAInterface`."""
        super().__init__(appbuilder)

        # Go and fix up the SQLAInterface used from the stock one to our subclass.
        # This is needed to support the "hack" where we had to edit
        # FieldConverter.conversion_table in place in airflow.www.utils
        for attr in dir(self):
            if not attr.endswith('view'):
                continue
            view = getattr(self, attr, None)
            if not view or not getattr(view, 'datamodel', None):
                continue
            view.datamodel = CustomSQLAInterface(view.datamodel.obj)
        # Cached (action, resource) set for the current user; populated lazily
        # by _get_and_cache_perms().
        self.perms = None
def init_role(self, role_name, perms):
"""
Initialize the role with the permissions and related view-menus.
:param role_name:
:param perms:
:return:
"""
warnings.warn(
"`init_role` has been deprecated. Please use `bulk_sync_roles` instead.",
DeprecationWarning,
stacklevel=2,
)
self.bulk_sync_roles([{'role': role_name, 'perms': perms}])
def bulk_sync_roles(self, roles):
"""Sync the provided roles and permissions."""
existing_roles = self._get_all_roles_with_permissions()
non_dag_perms = self._get_all_non_dag_permissionviews()
for config in roles:
role_name = config['role']
perms = config['perms']
role = existing_roles.get(role_name) or self.add_role(role_name)
for perm_name, view_name in perms:
perm_view = non_dag_perms.get((perm_name, view_name)) or self.create_permission(
perm_name, view_name
)
if perm_view not in role.permissions:
self.add_permission_to_role(role, perm_view)
def add_permissions(self, role, perms):
"""Adds resource permissions to a given role."""
for action_name, resource_name in perms:
permission = self.create_permission(action_name, resource_name)
self.add_permission_to_role(role, permission)
    # The methods below are thin adapters that expose Airflow's
    # action/resource vocabulary on top of FAB's permission/view-menu API
    # (FAB "ViewMenu" == Airflow "resource", FAB "Permission" == "action",
    # FAB "PermissionView" == an (action, resource) pair).
    def get_resource(self, name: str) -> ViewMenu:
        """
        Returns a resource record by name, if it exists.
        :param name: Name of resource
        :type name: str
        :return: Resource record
        :rtype: ViewMenu
        """
        return self.find_view_menu(name)
    def get_all_resources(self) -> List[ViewMenu]:
        """
        Gets all existing resource records.
        :return: List of all resources
        :rtype: List[ViewMenu]
        """
        return self.get_all_view_menu()
    def get_action(self, name: str) -> Permission:
        """
        Gets an existing action record.
        :param name: name
        :type name: str
        :return: Action record, if it exists
        :rtype: Permission
        """
        return self.find_permission(name)
    def get_permission(self, action_name: str, resource_name: str) -> PermissionView:
        """
        Gets a permission made with the given action->resource pair, if the permission already exists.
        :param action_name: Name of action
        :type action_name: str
        :param resource_name: Name of resource
        :type resource_name: str
        :return: The existing permission
        :rtype: PermissionView
        """
        return self.find_permission_view_menu(action_name, resource_name)
    def create_permission(self, action_name: str, resource_name: str) -> PermissionView:
        """
        Creates a permission linking an action and resource.
        :param action_name: Name of existing action
        :type action_name: str
        :param resource_name: Name of existing resource
        :type resource_name: str
        :return: Resource created
        :rtype: PermissionView
        """
        return self.add_permission_view_menu(action_name, resource_name)
    def delete_permission(self, action_name: str, resource_name: str) -> None:
        """
        Deletes the permission linking an action->resource pair. Doesn't delete the
        underlying action or resource.
        :param action_name: Name of existing action
        :type action_name: str
        :param resource_name: Name of existing resource
        :type resource_name: str
        :return: None
        :rtype: None
        """
        self.del_permission_view_menu(action_name, resource_name)
def delete_role(self, role_name):
"""
Delete the given Role
:param role_name: the name of a role in the ab_role table
"""
session = self.get_session
role = session.query(sqla_models.Role).filter(sqla_models.Role.name == role_name).first()
if role:
self.log.info("Deleting role '%s'", role_name)
session.delete(role)
session.commit()
else:
raise AirflowException(f"Role named '{role_name}' does not exist")
@staticmethod
def get_user_roles(user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
if user.is_anonymous:
public_role = current_app.appbuilder.get_app.config["AUTH_ROLE_PUBLIC"]
return [current_app.appbuilder.sm.find_role(public_role)] if public_role else []
return user.roles
def get_current_user_permissions(self):
"""Returns permissions for logged in user as a set of tuples with the action and resource name"""
perms = set()
for role in self.get_user_roles():
perms.update({(perm.permission.name, perm.view_menu.name) for perm in role.permissions})
return perms
    # Convenience accessors over get_accessible_dags() for the common
    # read-only / edit / either cases.
    def get_readable_dags(self, user):
        """Gets the DAGs readable by authenticated user."""
        return self.get_accessible_dags([permissions.ACTION_CAN_READ], user)
    def get_editable_dags(self, user):
        """Gets the DAGs editable by authenticated user."""
        return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user)
    def get_readable_dag_ids(self, user) -> Set[str]:
        """Gets the DAG IDs readable by authenticated user."""
        return {dag.dag_id for dag in self.get_readable_dags(user)}
    def get_editable_dag_ids(self, user) -> Set[str]:
        """Gets the DAG IDs editable by authenticated user."""
        return {dag.dag_id for dag in self.get_editable_dags(user)}
    def get_accessible_dag_ids(self, user) -> Set[str]:
        """Gets the DAG IDs editable or readable by authenticated user."""
        accessible_dags = self.get_accessible_dags(
            [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ], user
        )
        return {dag.dag_id for dag in accessible_dags}
    @provide_session
    def get_accessible_dags(self, user_actions, user, session=None):
        """Generic function to get readable or writable DAGs for user.

        Returns a DagModel query filtered to the DAGs on which ``user`` holds
        any of ``user_actions``.
        """
        if user.is_anonymous:
            roles = self.get_user_roles(user)
        else:
            # Eager-load roles -> permissions -> (permission, view_menu) in one
            # query so the nested loops below do no extra DB round-trips.
            user_query = (
                session.query(User)
                .options(
                    joinedload(User.roles)
                    .subqueryload(Role.permissions)
                    .options(joinedload(PermissionView.permission), joinedload(PermissionView.view_menu))
                )
                .filter(User.id == user.id)
                .first()
            )
            roles = user_query.roles
        resources = set()
        for role in roles:
            for permission in role.permissions:
                action = permission.permission.name
                if action not in user_actions:
                    continue
                resource = permission.view_menu.name
                if resource == permissions.RESOURCE_DAG:
                    # Global all-DAGs permission: short-circuit with every DAG.
                    return session.query(DagModel)
                if resource.startswith(permissions.RESOURCE_DAG_PREFIX):
                    # Per-DAG resource: strip the prefix to recover the dag_id.
                    resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])
                else:
                    resources.add(resource)
        return session.query(DagModel).filter(DagModel.dag_id.in_(resources))
def can_access_some_dags(self, action: str, dag_id: Optional[str] = None) -> bool:
"""Checks if user has read or write access to some dags."""
if dag_id and dag_id != '~':
return self.has_access(action, permissions.resource_name_for_dag(dag_id))
user = g.user
if action == permissions.ACTION_CAN_READ:
return any(self.get_readable_dags(user))
return any(self.get_editable_dags(user))
def can_read_dag(self, dag_id, user=None) -> bool:
"""Determines whether a user has DAG read access."""
if not user:
user = g.user
dag_resource_name = permissions.resource_name_for_dag(dag_id)
return self._has_access(
user, permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG
) or self._has_access(user, permissions.ACTION_CAN_READ, dag_resource_name)
def can_edit_dag(self, dag_id, user=None) -> bool:
"""Determines whether a user has DAG edit access."""
if not user:
user = g.user
dag_resource_name = permissions.resource_name_for_dag(dag_id)
return self._has_access(
user, permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG
) or self._has_access(user, permissions.ACTION_CAN_EDIT, dag_resource_name)
def prefixed_dag_id(self, dag_id):
"""Returns the permission name for a DAG id."""
warnings.warn(
"`prefixed_dag_id` has been deprecated. "
"Please use `airflow.security.permissions.resource_name_for_dag` instead.",
DeprecationWarning,
stacklevel=2,
)
return permissions.resource_name_for_dag(dag_id)
def is_dag_resource(self, resource_name):
"""Determines if a resource belongs to a DAG or all DAGs."""
if resource_name == permissions.RESOURCE_DAG:
return True
return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)
def _has_view_access(self, user, action, resource) -> bool:
"""
Overriding the method to ensure that it always returns a bool
_has_view_access can return NoneType which gives us
issues later on, this fixes that.
"""
return bool(super()._has_view_access(user, action, resource))
    def has_access(self, action_name, resource_name, user=None) -> bool:
        """
        Verify whether a given user could perform a certain action
        (e.g can_read, can_write) on the given resource.
        :param action_name: action_name on resource (e.g can_read, can_edit).
        :type action_name: str
        :param resource_name: name of view-menu or resource.
        :type resource_name: str
        :param user: user name
        :type user: str
        :return: Whether user could perform certain action on the resource.
        :rtype bool
        """
        if not user:
            user = g.user
        if user.is_anonymous:
            # NOTE: mutates the anonymous user object so the FAB lookup below
            # sees the configured public role.
            user.roles = self.get_user_roles(user)
        has_access = self._has_access(user, action_name, resource_name)
        # FAB built-in view access method. Won't work for AllDag access.
        if self.is_dag_resource(resource_name):
            # DAG resources can also be granted per-DAG, so OR in the
            # DAG-specific checks on top of the FAB result.
            if action_name == permissions.ACTION_CAN_READ:
                has_access |= self.can_read_dag(resource_name, user)
            elif action_name == permissions.ACTION_CAN_EDIT:
                has_access |= self.can_edit_dag(resource_name, user)
        return has_access
    def _has_access(self, user: User, action_name: str, resource_name: str) -> bool:
        """
        Wraps the FAB built-in view access method. Won't work for AllDag access.
        :param user: user object
        :type user: User
        :param action_name: action_name on resource (e.g can_read, can_edit).
        :type action_name: str
        :param resource_name: name of resource.
        :type resource_name: str
        :return: a bool whether user could perform certain action on the resource.
        :rtype bool
        """
        return bool(self._has_view_access(user, action_name, resource_name))
    def _get_and_cache_perms(self):
        """Cache permissions"""
        # Rebuilds self.perms from the current user's roles; called lazily by
        # _has_perm() on a cache miss.
        self.perms = self.get_current_user_permissions()
def _has_role(self, role_name_or_list):
"""Whether the user has this role name"""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(r.name in role_name_or_list for r in self.get_user_roles())
    def _has_perm(self, action_name, resource_name):
        """Whether the user has this perm"""
        if hasattr(self, 'perms') and self.perms is not None:
            if (action_name, resource_name) in self.perms:
                return True
        # rebuild the permissions set
        # A miss may just mean the cache is stale (e.g. a permission granted
        # after it was built), so refresh once and re-check before denying.
        self._get_and_cache_perms()
        return (action_name, resource_name) in self.perms
def has_all_dags_access(self):
"""
Has all the dag access in any of the 3 cases:
1. Role needs to be in (Admin, Viewer, User, Op).
2. Has can_read action on dags resource.
3. Has can_edit action on dags resource.
"""
return (
self._has_role(['Admin', 'Viewer', 'Op', 'User'])
or self._has_perm(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)
or self._has_perm(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG)
)
def clean_perms(self):
"""FAB leaves faulty permissions that need to be cleaned up"""
self.log.debug('Cleaning faulty perms')
sesh = self.get_session
perms = sesh.query(sqla_models.PermissionView).filter(
or_(
sqla_models.PermissionView.permission == None, # noqa pylint: disable=singleton-comparison
sqla_models.PermissionView.view_menu == None, # noqa pylint: disable=singleton-comparison
)
)
# Since FAB doesn't define ON DELETE CASCADE on these tables, we need
# to delete the _object_ so that SQLA knows to delete the many-to-many
# relationship object too. :(
deleted_count = 0
for perm in perms:
sesh.delete(perm)
deleted_count += 1
sesh.commit()
if deleted_count:
self.log.info('Deleted %s faulty permissions', deleted_count)
def _merge_perm(self, action_name, resource_name):
"""
Add the new (permission, resource) to assoc_permissionview_role if it doesn't exist.
It will add the related entry to ab_permission
and ab_view_menu two meta tables as well.
:param action_name: Name of the action
:type action_name: str
:param resource_name: Name of the resource
:type resource_name: str
:return:
"""
action = self.get_action(action_name)
resource = self.get_resource(resource_name)
perm = None
if action and resource:
perm = (
self.get_session.query(self.permissionview_model)
.filter_by(permission=action, view_menu=resource)
.first()
)
if not perm and action_name and resource_name:
self.create_permission(action_name, resource_name)
def add_homepage_access_to_custom_roles(self):
"""
Add Website.can_read access to all custom roles.
:return: None.
"""
website_permission = self.create_permission(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)
custom_roles = [role for role in self.get_all_roles() if role.name not in EXISTING_ROLES]
for role in custom_roles:
self.add_permission_to_role(role, website_permission)
self.get_session.commit()
    # Thin adapters delegating to FAB's role/permission primitives
    # (add_permission_role / del_permission_role / del_permission) under
    # Airflow's action/resource naming.
    def add_permission_to_role(self, role: Role, permission: PermissionView) -> None:
        """
        Add an existing permission pair to a role.
        :param role: The role about to get a new permission.
        :type role: Role
        :param permission: The permission pair to add to a role.
        :type permission: PermissionView
        :return: None
        :rtype: None
        """
        self.add_permission_role(role, permission)
    def remove_permission_from_role(self, role: Role, permission: PermissionView) -> None:
        """
        Remove a permission pair from a role.
        :param role: User role containing permissions.
        :type role: Role
        :param permission: Object representing resource-> action pair
        :type permission: PermissionView
        """
        self.del_permission_role(role, permission)
    def delete_action(self, name: str) -> bool:
        """
        Deletes a permission action.
        :param name: Name of action to delete (e.g. can_read).
        :type name: str
        :return: Whether or not delete was successful.
        :rtype: bool
        """
        return self.del_permission(name)
def get_all_permissions(self) -> Set[Tuple[str, str]]:
"""Returns all permissions as a set of tuples with the action and resource names"""
return set(
self.get_session.query(self.permissionview_model)
.join(self.permission_model)
.join(self.viewmenu_model)
.with_entities(self.permission_model.name, self.viewmenu_model.name)
.all()
)
    def _get_all_non_dag_permissionviews(self) -> Dict[Tuple[str, str], PermissionView]:
        """
        Returns a dict with a key of (action_name, resource_name) and value of permission
        with all permissions except those that are for specific DAGs.
        """
        return {
            (action_name, resource_name): viewmodel
            for action_name, resource_name, viewmodel in (
                self.get_session.query(self.permissionview_model)
                .join(self.permission_model)
                .join(self.viewmenu_model)
                # Per-DAG resources are excluded by their "DAG:" name prefix.
                .filter(~self.viewmenu_model.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
                .with_entities(
                    self.permission_model.name, self.viewmenu_model.name, self.permissionview_model
                )
                .all()
            )
        }
    def _get_all_roles_with_permissions(self) -> Dict[str, Role]:
        """Returns a dict with a key of role name and value of role with eagerly loaded permissions"""
        return {
            r.name: r
            for r in (
                self.get_session.query(self.role_model).options(joinedload(self.role_model.permissions)).all()
            )
        }
def create_dag_specific_permissions(self) -> None:
"""
Creates 'can_read' and 'can_edit' permissions for all DAGs,
along with any `access_control` permissions provided in them.
This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`
if you only need to sync a single DAG.
:return: None.
"""
perms = self.get_all_permissions()
dagbag = DagBag(read_dags_from_db=True)
dagbag.collect_dags_from_db()
dags = dagbag.dags.values()
for dag in dags:
dag_resource_name = permissions.resource_name_for_dag(dag.dag_id)
for action_name in self.DAG_PERMS:
if (action_name, dag_resource_name) not in perms:
self._merge_perm(action_name, dag_resource_name)
if dag.access_control:
self._sync_dag_view_permissions(dag_resource_name, dag.access_control)
def update_admin_perm_view(self):
"""
Admin should have all the permissions, except the dag permissions.
because Admin already has Dags permission.
Add the missing ones to the table for admin.
:return: None.
"""
dag_resources = (
self.get_session.query(sqla_models.ViewMenu)
.filter(sqla_models.ViewMenu.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
.all()
)
resource_ids = [resource.id for resource in dag_resources]
perms = (
self.get_session.query(sqla_models.PermissionView)
.filter(~sqla_models.PermissionView.view_menu_id.in_(resource_ids))
.all()
)
perms = [p for p in perms if p.permission and p.view_menu]
admin = self.find_role('Admin')
admin.permissions = list(set(admin.permissions) | set(perms))
self.get_session.commit()
def sync_roles(self):
"""
1. Init the default role(Admin, Viewer, User, Op, public)
with related permissions.
2. Init the custom role(dag-user) with related permissions.
:return: None.
"""
# Create global all-dag permissions
self.create_perm_vm_for_all_dag()
# Sync the default roles (Admin, Viewer, User, Op, public) with related permissions
self.bulk_sync_roles(self.ROLE_CONFIGS)
self.add_homepage_access_to_custom_roles()
# init existing roles, the rest role could be created through UI.
self.update_admin_perm_view()
self.clean_perms()
def sync_resource_permissions(self, perms=None):
"""Populates resource-based permissions."""
if not perms:
return
for action_name, resource_name in perms:
self.create_resource(resource_name)
self.create_permission(action_name, resource_name)
def sync_perm_for_dag(self, dag_id, access_control=None):
"""
Sync permissions for given dag id. The dag id surely exists in our dag bag
as only / refresh button or DagBag will call this function
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: str
:param access_control: a dict where each key is a rolename and
each value is a set() of action names (e.g.,
{'can_read'}
:type access_control: dict
:return:
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
for action_name in self.DAG_PERMS:
self.create_permission(action_name, dag_resource_name)
if access_control:
self._sync_dag_view_permissions(dag_resource_name, access_control)
def get_resource_permissions(self, resource: ViewMenu) -> PermissionView:
"""
Retrieve permission pairs associated with a specific resource object.
:param resource: Object representing a single resource.
:type resource: ViewMenu
:return: Permission objects representing resource->action pair
:rtype: PermissionView
"""
return self.find_permissions_view_menu(resource)
    def _sync_dag_view_permissions(self, dag_id, access_control):
        """
        Set the access policy on the given DAG's ViewModel.

        :param dag_id: the ID of the DAG whose permissions should be updated
        :type dag_id: str
        :param access_control: a dict where each key is a rolename and
            each value is a set() of action names (e.g. {'can_read'})
        :type access_control: dict
        """
        # NOTE(review): callers (e.g. sync_perm_for_dag) appear to pass an
        # already-prefixed resource name here, so resource_name_for_dag is
        # presumably a no-op in that case -- confirm.
        dag_resource_name = permissions.resource_name_for_dag(dag_id)

        def _get_or_create_dag_permission(action_name):
            # Reuse an existing permission record if present; otherwise create it.
            perm = self.get_permission(action_name, dag_resource_name)
            if not perm:
                self.log.info("Creating new action '%s' on resource '%s'", action_name, dag_resource_name)
                perm = self.create_permission(action_name, dag_resource_name)
            return perm

        def _revoke_stale_permissions(resource):
            # Strip permissions no longer listed in access_control from every
            # role except Admin, which is always left untouched.
            existing_dag_perms = self.get_resource_permissions(resource)
            for perm in existing_dag_perms:
                non_admin_roles = [role for role in perm.role if role.name != 'Admin']
                for role in non_admin_roles:
                    target_perms_for_role = access_control.get(role.name, {})
                    if perm.permission.name not in target_perms_for_role:
                        self.log.info(
                            "Revoking '%s' on DAG '%s' for role '%s'",
                            perm.permission,
                            dag_resource_name,
                            role.name,
                        )
                        self.remove_permission_from_role(role, perm)

        # Revoke stale grants first so the grants below start from a clean slate.
        resource = self.get_resource(dag_resource_name)
        if resource:
            _revoke_stale_permissions(resource)
        for rolename, perms in access_control.items():
            role = self.find_role(rolename)
            if not role:
                raise AirflowException(
                    "The access_control mapping for DAG '{}' includes a role "
                    "named '{}', but that role does not exist".format(dag_id, rolename)
                )
            perms = set(perms)
            # Reject action names outside the supported DAG-level set.
            invalid_perms = perms - self.DAG_PERMS
            if invalid_perms:
                raise AirflowException(
                    "The access_control map for DAG '{}' includes the following "
                    "invalid permissions: {}; The set of valid permissions "
                    "is: {}".format(dag_resource_name, invalid_perms, self.DAG_PERMS)
                )
            for action_name in perms:
                dag_perm = _get_or_create_dag_permission(action_name)
                self.add_permission_to_role(role, dag_perm)
def create_resource(self, name: str) -> ViewMenu:
"""
Create a resource with the given name.
:param name: The name of the resource to create created.
:type name: str
:return: The FAB resource created.
:rtype: ViewMenu
"""
return self.add_view_menu(name)
def create_perm_vm_for_all_dag(self):
"""Create perm-vm if not exist and insert into FAB security model for all-dags."""
# create perm for global logical dag
for resource_name in self.DAG_VMS:
for action_name in self.DAG_PERMS:
self._merge_perm(action_name, resource_name)
def check_authorization(
self, perms: Optional[Sequence[Tuple[str, str]]] = None, dag_id: Optional[str] = None
) -> bool:
"""Checks that the logged in user has the specified permissions."""
if not perms:
return True
for perm in perms:
if perm in (
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
):
can_access_all_dags = self.has_access(*perm)
if can_access_all_dags:
continue
action = perm[0]
if self.can_access_some_dags(action, dag_id):
continue
return False
elif not self.has_access(*perm):
return False
return True
    def reset_all_permissions(self) -> None:
        """
        Deletes all permission records, removes them from roles,
        then re-syncs them from scratch.

        :return: None
        :rtype: None
        """
        session = self.get_session
        # Detach every permission from every role first so the bulk deletes
        # below do not leave dangling role->permission associations.
        for role in self.get_all_roles():
            role.permissions = []
        session.commit()
        # Delete association rows (PermissionView) before the ViewMenu and
        # Permission rows they reference.
        session.query(PermissionView).delete()
        session.query(ViewMenu).delete()
        session.query(Permission).delete()
        session.commit()
        # Rebuild the default roles and permissions.
        self.sync_roles()
class ApplessAirflowSecurityManager(AirflowSecurityManager):
    """Security Manager that doesn't need the whole flask app."""

    def __init__(self, session=None):  # pylint: disable=super-init-not-called
        # Deliberately skip the parent __init__, which wires up Flask app
        # state; only a SQLAlchemy session is stored.
        self.session = session

    @property
    def get_session(self):
        # Expose the injected session under the attribute name the parent
        # class expects.
        return self.session
|
{
"content_hash": "dfbf5012e7968b2b40adc20f5a7896c7",
"timestamp": "",
"source": "github",
"line_count": 893,
"max_line_length": 110,
"avg_line_length": 40.06494960806271,
"alnum_prop": 0.6143160601486948,
"repo_name": "sekikn/incubator-airflow",
"id": "0dfc0b133e8a9a9abb2958ca21e6a48ffc1ec237",
"size": "36568",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/www/security.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
import gym
from gym.spaces import Box, Dict, Discrete
import numpy as np
import random
class ParametricActionsCartPole(gym.Env):
    """Parametric-action variant of CartPole.

    Only two actions are ever valid, but the environment pretends there are
    up to `max_avail_actions` actions, hiding the two real ones at random
    positions. Each step emits a dict with:
      - the underlying cart observation
      - a mask of valid actions (e.g. [0, 0, 1, 0, 0, 1] for 6 max avail)
      - per-action embeddings, zeroed for invalid slots (e.g.
        [[0, 0],
         [0, 0],
         [-0.2322, -0.2569],
         [0, 0],
         [0, 0],
         [0.7878, 1.2297]] for max_avail_actions=6)
    A real environment would use larger embeddings and a variable number of
    valid actions per step instead of always [LEFT, RIGHT].
    """

    def __init__(self, max_avail_actions):
        # Simple random 2-unit action embeddings for [LEFT, RIGHT].
        self.left_action_embed = np.random.randn(2)
        self.right_action_embed = np.random.randn(2)
        self.action_space = Discrete(max_avail_actions)
        self.wrapped = gym.make("CartPole-v0")
        self.observation_space = Dict({
            "action_mask": Box(0, 1, shape=(max_avail_actions, )),
            "avail_actions": Box(-10, 10, shape=(max_avail_actions, 2)),
            "cart": self.wrapped.observation_space,
        })

    def update_avail_actions(self):
        """Re-hide the two real actions at fresh random slots."""
        num_slots = self.action_space.n
        self.action_assignments = np.zeros((num_slots, 2))
        self.action_mask = np.zeros(num_slots)
        self.left_idx, self.right_idx = random.sample(range(num_slots), 2)
        self.action_assignments[self.left_idx] = self.left_action_embed
        self.action_assignments[self.right_idx] = self.right_action_embed
        self.action_mask[self.left_idx] = 1
        self.action_mask[self.right_idx] = 1

    def reset(self):
        self.update_avail_actions()
        return {
            "action_mask": self.action_mask,
            "avail_actions": self.action_assignments,
            "cart": self.wrapped.reset(),
        }

    def step(self, action):
        # Translate the masked slot index back to the wrapped env's action.
        if action == self.left_idx:
            actual_action = 0
        elif action == self.right_idx:
            actual_action = 1
        else:
            raise ValueError(
                "Chosen action was not one of the non-zero action embeddings",
                action, self.action_assignments, self.action_mask,
                self.left_idx, self.right_idx)
        orig_obs, rew, done, info = self.wrapped.step(actual_action)
        self.update_avail_actions()
        wrapped_obs = {
            "action_mask": self.action_mask,
            "avail_actions": self.action_assignments,
            "cart": orig_obs,
        }
        return wrapped_obs, rew, done, info
|
{
"content_hash": "16934f4f01ab49f67d881b0de211a1c0",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 39.02597402597402,
"alnum_prop": 0.5930116472545757,
"repo_name": "pcmoritz/ray-1",
"id": "ec98b78e2bc92ea1524b7dd0b73e105d24ffba8c",
"size": "3005",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rllib/examples/env/parametric_actions_cartpole.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
}
|
import functools
import six
from essential import funcutils
from essential import test
class FuncutilsTestCase(test.BaseTestCase):
    """Tests for funcutils.getcallargs and funcutils.get_wrapped_function."""

    def _test_func(self, instance, red=None, blue=None):
        """Fixture function whose computed call arguments the tests inspect."""
        pass

    def _assert_callargs(self, callargs, blue=4):
        """Check the getcallargs result shared by every test below.

        Pass ``blue=None`` when the parameter default is expected.
        """
        # implicit self counts as an arg
        self.assertEqual(4, len(callargs))
        self.assertTrue('instance' in callargs)
        self.assertEqual({'uuid': 1}, callargs['instance'])
        self.assertTrue('red' in callargs)
        self.assertEqual(3, callargs['red'])
        self.assertTrue('blue' in callargs)
        if blue is None:
            self.assertIsNone(callargs['blue'])
        else:
            self.assertEqual(blue, callargs['blue'])

    def test_all_kwargs(self):
        kwargs = {'instance': {'uuid': 1}, 'red': 3, 'blue': 4}
        callargs = funcutils.getcallargs(self._test_func, **kwargs)
        self._assert_callargs(callargs)

    def test_all_args(self):
        args = ({'uuid': 1}, 3, 4)
        callargs = funcutils.getcallargs(self._test_func, *args)
        self._assert_callargs(callargs)

    def test_mixed_args(self):
        args = ({'uuid': 1}, 3)
        callargs = funcutils.getcallargs(self._test_func, *args, blue=4)
        self._assert_callargs(callargs)

    def test_partial_kwargs(self):
        kwargs = {'instance': {'uuid': 1}, 'red': 3}
        callargs = funcutils.getcallargs(self._test_func, **kwargs)
        self._assert_callargs(callargs, blue=None)

    def test_partial_args(self):
        args = ({'uuid': 1}, 3)
        callargs = funcutils.getcallargs(self._test_func, *args)
        self._assert_callargs(callargs, blue=None)

    def test_partial_mixed_args(self):
        callargs = funcutils.getcallargs(self._test_func, 3, instance={'uuid': 1})
        self._assert_callargs(callargs, blue=None)

    def _wrapper(self, function):
        """Return ``function`` wrapped once, preserving metadata via functools.wraps."""
        @functools.wraps(function)
        def decorated_function(self, *args, **kwargs):
            function(self, *args, **kwargs)
        return decorated_function

    def test_wrapped_X(self):
        def wrapped(self, instance, red=None, blue=None):
            pass
        old_wrapped = wrapped
        # Wrap it many times and ensure get_wrapped_function still unwinds
        # all the way back to the original callable.
        for _i in range(10):
            wrapped = self._wrapper(wrapped)
        func = funcutils.get_wrapped_function(wrapped)
        func_code = six.get_function_code(func)
        self.assertEqual(4, len(func_code.co_varnames))
        self.assertTrue('self' in func_code.co_varnames)
        self.assertTrue('instance' in func_code.co_varnames)
        self.assertTrue('red' in func_code.co_varnames)
        self.assertTrue('blue' in func_code.co_varnames)
        self.assertEqual(old_wrapped, func)
|
{
"content_hash": "c68f23eb7d82a8484fcd36e1eacffd30",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 74,
"avg_line_length": 36.12396694214876,
"alnum_prop": 0.606726149622512,
"repo_name": "gaolichuang/py-essential",
"id": "93589fee35f20d9345e541476e39b0983b03baf3",
"size": "5048",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_funcutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "814"
},
{
"name": "Python",
"bytes": "1440964"
}
],
"symlink_target": ""
}
|
from pyface.image_list import *
|
{
"content_hash": "a6d78f0f129d2d71d7fd620578ba57ed",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 32,
"alnum_prop": 0.78125,
"repo_name": "enthought/etsproxy",
"id": "ac8cf468acd8580718478adf21d618edcb7fdaa0",
"size": "47",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/pyface/image_list.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
}
|
import os
import tempfile
from os import getcwd
from os.path import join
from nose.tools import raises
from parameterized import param, parameterized
from rsmtool import run_summary
from rsmtool.configuration_parser import Configuration
from rsmtool.test_utils import (check_run_summary,
copy_data_files,
do_run_summary)
# allow test directory to be set via an environment variable
# which is needed for package testing
TEST_DIR = os.environ.get('TESTDIR', None)
if TEST_DIR:
    rsmtool_test_dir = TEST_DIR
else:
    # fall back to the test directory bundled with the rsmtool package
    from rsmtool.test_utils import rsmtool_test_dir
# Each param names a test-data experiment directory under tests/data/experiments;
# extra keyword args are forwarded to check_run_summary.
@parameterized([
    param('lr-self-summary'),
    param('linearsvr-self-summary'),
    param('lr-self-eval-summary'),
    param('lr-self-summary-with-custom-sections'),
    param('lr-self-summary-with-tsv-inputs'),
    param('lr-self-summary-with-tsv-output', file_format='tsv'),
    param('lr-self-summary-with-xlsx-output', file_format='xlsx'),
    param('lr-self-summary-no-scaling'),
    param('lr-self-summary-with-h2'),
    param('summary-with-custom-names'),
    param('lr-self-summary-no-trim')
])
def test_run_experiment_parameterized(*args, **kwargs):
    """Run rsmsummarize on each parameterized experiment and check its output."""
    if TEST_DIR:
        kwargs['given_test_dir'] = TEST_DIR
    check_run_summary(*args, **kwargs)
def test_run_experiment_lr_summary_with_object():
    """Test rsmsummarize using a Configuration object rather than a file."""
    source = 'lr-self-summary-object'

    # Point the configuration directory at the test directory so the results
    # are identical to running this test with a configuration file instead.
    configdir = join(rsmtool_test_dir, 'data', 'experiments', source)

    config_dict = {
        "summary_id": "model_comparison",
        "experiment_dirs": ["lr-subgroups", "lr-subgroups", "lr-subgroups"],
        "description": "Comparison of rsmtool experiment with itself.",
    }
    config_obj = Configuration(config_dict,
                               context='rsmsummarize',
                               configdir=configdir)

    check_run_summary(source, config_obj_or_dict=config_obj)
def test_run_experiment_lr_summary_dictionary():
    """Test rsmsummarize using a plain dictionary rather than a file."""
    source = 'lr-self-summary-dict'

    # Set up a temporary directory since we will be using getcwd.
    temp_dir = tempfile.TemporaryDirectory(prefix=getcwd())

    copied = copy_data_files(
        temp_dir.name,
        {'experiment_dir': 'data/experiments/lr-self-summary-dict/lr-subgroups'},
        rsmtool_test_dir)

    experiment_dir = copied['experiment_dir']
    config_dict = {
        "summary_id": "model_comparison",
        "experiment_dirs": [experiment_dir, experiment_dir, experiment_dir],
        "description": "Comparison of rsmtool experiment with itself.",
    }

    check_run_summary(source, config_obj_or_dict=config_dict)
@raises(ValueError)
def test_run_summary_wrong_input_format():
    """rsmsummarize should reject configuration input that is neither a dict nor a Configuration."""
    bad_config = [('experiment_id', 'AAAA'),
                  ('train_file', 'some_path')]
    with tempfile.TemporaryDirectory() as temp_dir:
        run_summary(bad_config, temp_dir)
@raises(FileNotFoundError)
def test_run_experiment_summary_wrong_directory():
    """rsmsummarize experiment where the specified directory does not exist."""
    source = 'summary-wrong-directory'
    config_file = join(rsmtool_test_dir, 'data', 'experiments',
                       source, 'rsmsummarize.json')
    do_run_summary(source, config_file)
@raises(FileNotFoundError)
def test_run_experiment_summary_no_csv_directory():
    """rsmsummarize experiment where the specified directory contains no rsmtool experiments."""
    source = 'summary-no-output-dir'
    config_file = join(rsmtool_test_dir, 'data', 'experiments',
                       source, 'rsmsummarize.json')
    do_run_summary(source, config_file)
@raises(FileNotFoundError)
def test_run_experiment_summary_no_json():
    """rsmsummarize experiment where the specified directory contains no json files."""
    source = 'summary-no-json-file'
    config_file = join(rsmtool_test_dir, 'data', 'experiments',
                       source, 'rsmsummarize.json')
    do_run_summary(source, config_file)
@raises(ValueError)
def test_run_experiment_summary_too_many_jsons():
    """rsmsummarize experiment where the directory contains several json files and the user specified experiment names."""
    source = 'summary-too-many-jsons'
    config_file = join(rsmtool_test_dir, 'data', 'experiments',
                       source, 'rsmsummarize.json')
    do_run_summary(source, config_file)
|
{
"content_hash": "45cb740aa602be8dbd9fe111eeed1d0a",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 92,
"avg_line_length": 33.21383647798742,
"alnum_prop": 0.6076500662753267,
"repo_name": "EducationalTestingService/rsmtool",
"id": "9551ee680bd3a0ace3a1850142dd677225ec970c",
"size": "5281",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_experiment_rsmsummarize.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "226889"
},
{
"name": "PowerShell",
"bytes": "3198"
},
{
"name": "Python",
"bytes": "1143343"
},
{
"name": "Smarty",
"bytes": "123"
},
{
"name": "TeX",
"bytes": "1402"
}
],
"symlink_target": ""
}
|
"""
Some utilities and tools for using RBNWorld with other libraries, e.g. PyAChemKit
"""
import AChemKit
import AChemKit.utils
import AChemKit.utils.utils
from AChemKit.utils.utils import long_subseq
import reaction
def sufficiently_similar(mols, proportion=0.5):
    """
    Determine whether a collection of molecules is sufficiently similar.

    Each molecule's string form is stripped of digits and square brackets,
    then the length of the longest common subsequence over the distinct
    simplified strings is compared against the longest simplified string.

    :param mols: iterable of molecule objects (compared via ``str``)
    :param proportion: minimum shared fraction required (default 0.5)
    :return: True if the common subsequence is at least ``proportion`` of
        the longest simplified string, False otherwise
    """
    def simplify(mol):
        # Strip digits and bracket characters from the string form so only
        # the remaining symbol skeleton is compared.
        for char in "123456789[]":
            mol = mol.replace(char, "")
        return mol

    simplified = [simplify(str(mol)) for mol in sorted(set(mols))]
    shared_len = len(long_subseq(simplified))
    longest = max(len(s) for s in simplified)
    # Return the comparison directly instead of an if/else returning booleans.
    return shared_len >= proportion * longest
class RBNWorldAChem(object):
    """Minimal AChem-style interface around the :mod:`reaction` module
    (see this module's docstring for context)."""

    # Number of reactant molecules consumed per reaction.
    noreactants = 2

    def __init__(self):
        pass

    def react(self, reactants):
        # Delegate entirely to the reaction module; semantics defined there.
        return reaction.react(reactants)
|
{
"content_hash": "1d2bd3df28725b0238f4c7ed524c0d7a",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 82,
"avg_line_length": 22.21951219512195,
"alnum_prop": 0.6114160263446762,
"repo_name": "afaulconbridge/PyRBNWorld",
"id": "19fd95151de28cc8b61cea5f84ca634fff80dade",
"size": "911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rbnworld/tool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "67685"
}
],
"symlink_target": ""
}
|
"""Various text used throughout the website, e.g. status messages, errors, etc.
"""
# Status Messages
#################
# NOTE: in status messages, newlines are not preserved, so triple-quotes strings
# are ok
# Status message shown at settings page on first login
# (upon clicking primary email confirmation link)
WELCOME_MESSAGE = '''
<h1>Welcome to the OSF!</h1>
<p>Visit our <a href="http://help.osf.io/" target="_blank" rel="noreferrer">Guides</a> to learn about creating a project, or get inspiration from <a href="https://osf.io/explore/activity/#popularPublicProjects">popular public projects</a>.</p>
'''
REGISTRATION_SUCCESS = '''Registration successful. Please check {email} to confirm your email address.'''
EXTERNAL_LOGIN_EMAIL_CREATE_SUCCESS = '''A new OSF account has been created with your {external_id_provider} profile. Please check {email} to confirm your email address.'''
EXTERNAL_LOGIN_EMAIL_LINK_SUCCESS = '''Your OSF account has been linked with your {external_id_provider}. Please check {email} to confirm this action.'''
# Shown if registration is turned off in website.settings
REGISTRATION_UNAVAILABLE = 'Registration currently unavailable.'
ALREADY_REGISTERED = u'The email {email} has already been registered.'
AFTER_SUBMIT_FOR_REVIEW = 'Your submission has been received. You will be notified within two business days regarding the status of your submission. If you have questions you may contact us at prereg@cos.io.'
# Shown if user tries to login with an email that is not yet confirmed
UNCONFIRMED = ('This login email has been registered but not confirmed. Please check your email (and spam folder).'
' <a href="/resend/">Click here</a> to resend your confirmation email.')
# Shown if the user's account is disabled
DISABLED = '''
Log-in failed: Deactivated account.
'''

# Shown on incorrect password attempt
LOGIN_FAILED = '''
Log-in failed. Please try again or reset your password.
'''

# Shown at login page if user tries to access a resource that requires auth
MUST_LOGIN = '''
You must log in or create a new account to claim the contributor-ship.
'''

# Shown on logout
LOGOUT = '''
You have successfully logged out.
'''

# Shown when the supplied email does not match any account on record
EMAIL_NOT_FOUND = u'''
{email} was not found in our records.
'''

# Shown after an unregistered user claims an account and is redirected to the
# settings page
CLAIMED_CONTRIBUTOR = ('<strong>Welcome to the OSF!</strong> Edit your display name below and then check your '
                       '<a href="/dashboard/">dashboard</a> to see projects to which you have been added as a '
                       'contributor by someone else.')
# Error Pages
# ###########

# Search-related errors
SEARCH_QUERY_HELP = ('Please check our help (the question mark beside the search box) for more information '
                     'on advanced search queries.')

# Shown at error page if an expired/revoked email confirmation link is clicked
EXPIRED_EMAIL_CONFIRM_TOKEN = 'This confirmation link has expired. Please <a href="/login/">log in</a> to continue.'

INVALID_EMAIL_CONFIRM_TOKEN = 'This confirmation link is invalid. Please <a href="/login/">log in</a> to continue.'

CANNOT_MERGE_ACCOUNTS_SHORT = 'Cannot Merge Accounts'

CANNOT_MERGE_ACCOUNTS_LONG = (
    'Accounts cannot be merged due to a possible conflict with add-ons. '
    'Before you continue, please <a href="/settings/addons/"> deactivate '
    'any add-ons</a> to be merged into your primary account.'
)

MERGE_COMPLETE = 'Accounts successfully merged.'

MERGE_CONFIRMATION_REQUIRED_SHORT = 'Confirmation Required: Merge Accounts'

# ``{src_user}`` / ``{dest_user}`` are interpolated by the caller.
MERGE_CONFIRMATION_REQUIRED_LONG = (
    u'<p>This email is confirmed to another account. '
    u'Would you like to merge <em>{src_user}</em> with the account '
    u'<em>{dest_user}</em>?<p>'
    u'<a class="btn btn-primary" href="?confirm_merge">Confirm merge</a> '
)
# Node Actions
AFTER_REGISTER_ARCHIVING = (
'Files are being copied to the newly created registration, and you will receive an email '
'notification when the copying is finished.'
)
BEFORE_REGISTER_HAS_POINTERS = (
u'This {category} contains links to other projects. Links will be copied '
u'into your registration, but the projects that they link to will not be '
u'registered. If you wish to register the linked projects, you must fork '
u'them from the original project before registering.'
)
BEFORE_FORK_HAS_POINTERS = (
u'This {category} contains links to other projects. Links will be copied '
u'into your fork, but the projects that they link to will not be forked. '
u'If you wish to fork the linked projects, they need to be forked from the '
u'original project.'
)
REGISTRATION_INFO = '''
<p>Registration creates a frozen version of the project that can never be
edited or deleted but can be withdrawn. You can register your project by
selecting a registration form, entering information about your project, and
then confirming. You will be able to continue editing the original project,
however, and the frozen version with timestamps will always be linked to
the original. Withdrawing a registration will leave behind metadata about
when the registration was created and withdrawn but removes the contents
of the registration.</p>
<ul>
<li>A registration can be made public immediately or entered into
an embargo period of up to four years. At the end of the embargo period,
the registration will automatically become public.</li>
<li>Before initiating a registration, make sure that the project is
in the state that you wish to freeze. Consider turning links into
forks.</li>
<li>Start by selecting a registration form from the list below. You can hit
your browser's back button if the selected form is not appropriate for
your use.</li>
</ul>
'''
REGISTRATION_EMBARGO_INFO = '''
You can choose whether to make your registration public immediately or
embargo it for up to four years. At the end of the embargo period the registration
is automatically made public. After becoming public, the only way to remove a
registration is to withdraw it. Withdrawn registrations show only the registration title,
contributors, and description to indicate that a registration was made and
later withdrawn.
<br /><br />
If you choose to embargo your registration, a notification will be sent to
all other project contributors. Other administrators will have 48 hours to
approve or cancel creating the registration. If any other administrator rejects the
registration, it will be canceled. If all other administrators approve or do
nothing, the registration will be confirmed and enter its embargo period.
'''
BEFORE_REGISTRATION_INFO = '''
Registration cannot be undone, and the archived content and files cannot be
deleted after registration. Please be sure the project is complete and
comprehensive for what you wish to register.
'''
# Nodes: forking, templating, linking

LINK_ACTION = 'Link to this Project'
LINK_DESCRIPTION = """
<p>Linking to this project will reference it in another project, without
creating a copy. The link will always point to the most up-to-date version.</p>
"""

TEMPLATE_ACTION = 'Duplicate template'
TEMPLATE_DESCRIPTION = """
<p>This option will create a new project, using this project as a template.
The new project will be structured in the same way, but contain no data.</p>
"""

FORK_ACTION = 'Fork this Project'
FORK_DESCRIPTION = """
<p>Fork this project if you plan to build upon it in your own work.
The new project will be an exact duplicate of this project's current state,
with you as the only contributor.</p>
"""

TEMPLATE_DROPDOWN_HELP = """Start typing to search. Selecting project as
template will duplicate its structure in the new project without importing the
content of that project."""

TEMPLATED_FROM_PREFIX = 'Templated from '

# MFR Error handling
ERROR_PREFIX = "Unable to render. <a href='?action=download'>Download</a> file to view it."
SUPPORT = u"Contact support@osf.io for further assistance."

# Custom Error Messages w/ support  # TODO: Where are these used? See [#OSF-6101]
STATA_VERSION_ERROR = u'Version of given Stata file is not 104, 105, 108, 113 (Stata 8/9), 114 (Stata 10/11) or 115 (Stata 12)<p>{0}</p>'.format(SUPPORT)
BLANK_OR_CORRUPT_TABLE_ERROR = u'Is this a valid instance of this file type?<p>{0}</p>'.format(SUPPORT)
# Disk saving mode: shown while write-heavy features are disabled for a
# server upgrade. (Fixes a duplicated word: "OSF Storage uploads are".)
DISK_SAVING_MODE = 'Forks, registrations, and uploads to OSF Storage are temporarily disabled while we are undergoing a server upgrade. These features will return shortly.'
# User must log out and revisit the link to confirm alternate emails
CONFIRM_ALTERNATE_EMAIL_ERROR = 'The email address has <b>NOT</b> been added to your account. Please log out and revisit the link in your email. Thank you.'
|
{
"content_hash": "5552aea7e434480b2f43efbe50828711",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 243,
"avg_line_length": 43.32178217821782,
"alnum_prop": 0.7420866186721518,
"repo_name": "Nesiehr/osf.io",
"id": "ad698038b0a65fe2e3624f5dc96ff5d785224378",
"size": "8775",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "website/language.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "144027"
},
{
"name": "HTML",
"bytes": "215077"
},
{
"name": "JavaScript",
"bytes": "1699002"
},
{
"name": "Mako",
"bytes": "650031"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "7928034"
}
],
"symlink_target": ""
}
|
from south.db import db
from django.db import models
from myblog.comments.models import *
class Migration:
    def forwards(self, orm):
        """Apply the migration: create the ``comments_comment`` table."""
        # Adding model 'Comment' -- one column tuple per model field, with the
        # column definitions resolved through South's frozen ORM.
        db.create_table('comments_comment', (
            ('id', orm['comments.Comment:id']),
            ('content_type', orm['comments.Comment:content_type']),
            ('object_pk', orm['comments.Comment:object_pk']),
            ('site', orm['comments.Comment:site']),
            ('user', orm['comments.Comment:user']),
            ('user_name', orm['comments.Comment:user_name']),
            ('user_email', orm['comments.Comment:user_email']),
            ('user_url', orm['comments.Comment:user_url']),
            ('content', orm['comments.Comment:content']),
            ('parent', orm['comments.Comment:parent']),
            ('mail_notify', orm['comments.Comment:mail_notify']),
            ('date', orm['comments.Comment:date']),
            ('ip_address', orm['comments.Comment:ip_address']),
            ('is_public', orm['comments.Comment:is_public']),
            ('is_removed', orm['comments.Comment:is_removed']),
        ))
        # Notify listeners that the model's table has been created.
        db.send_create_signal('comments', ['Comment'])
def backwards(self, orm):
# Deleting model 'Comment'
db.delete_table('comments_comment')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'comments.comment': {
'content': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'mail_notify': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_pk': ('django.db.models.fields.PositiveIntegerField', [], {}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'children'", 'blank': 'True', 'null': 'True', 'to': "orm['comments.Comment']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comment for comment'", 'to': "orm['sites.Site']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['comments']
|
{
"content_hash": "176b46161f6e4a2c99ea257a65871f16",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 190,
"avg_line_length": 64.4795918367347,
"alnum_prop": 0.5415413831302421,
"repo_name": "wahaha02/myblog",
"id": "a72fe36e5ba18fd8dac1f1ef8c4df3ce34729f2b",
"size": "6320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "comments/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from distutils.core import setup
from Cython.Build import cythonize
# Cython build configuration: compile the core sources under ./src plus the
# example model implementations into extension modules.
pyx_sources = [
    "./src/*.pyx",
    "./examples/sv_model_nsmc.pyx",
    "./examples/lgss_model_nsmc.pyx",
    "./examples/nlss_model*.pyx",
]
setup(ext_modules=cythonize(pyx_sources))
|
{
"content_hash": "809025f6dd3516c2618958991434e69a",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 137,
"avg_line_length": 31,
"alnum_prop": 0.7096774193548387,
"repo_name": "can-cs/smcpy",
"id": "e7e69d2fce4c284f4af4a764c1b345aaec6f7443",
"size": "217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27518"
}
],
"symlink_target": ""
}
|
import os
import sys
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')
import unittest
import meterbus
from meterbus.exceptions import *
class TestSequenceFunctions(unittest.TestCase):
    """Tests for the bit-flag properties of meterbus.ValueInformationBlock."""
    def setUp(self):
        # One VIB with no parts at all, and one built from bytes 0xFD 0x1B.
        self.vib_empty = meterbus.ValueInformationBlock()
        self.vib0 = meterbus.ValueInformationBlock([0xFD, 0x1B])
    def test_empty_vib_has_extension_bit(self):
        self.assertEqual(self.vib_empty.has_extension_bit, False)
    def test_empty_vib_without_extension_bit(self):
        self.assertEqual(self.vib_empty.without_extension_bit, False)
    def test_empty_vib_has_lvar_bit(self):
        self.assertEqual(self.vib_empty.has_lvar_bit, False)
    def test_vib0_has_extension_bit(self):
        # 0xFD has its top bit set, yet has_extension_bit is expected False here;
        # presumably the property inspects the *last* VIF byte (0x1B) -- confirm
        # against ValueInformationBlock's implementation.
        self.assertEqual(self.vib0.has_extension_bit, False)
    def test_vib0_without_extension_bit(self):
        self.assertEqual(self.vib0.without_extension_bit, False)
    def test_vib0_has_lvar_bit(self):
        self.assertEqual(self.vib0.has_lvar_bit, False)
    def test_custom_vib_setter_getter(self):
        # customVIF must round-trip unchanged through its setter/getter.
        vib = meterbus.ValueInformationBlock()
        vib.customVIF = [0x65]
        self.assertEqual(vib.customVIF, [0x65])
if __name__ == '__main__':
    # Allow running this test module directly: python tests/test_vib.py
    unittest.main()
|
{
"content_hash": "9a0400980426b15e3f043c3daa6b694e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 69,
"avg_line_length": 30.414634146341463,
"alnum_prop": 0.6896551724137931,
"repo_name": "ganehag/pyMeterBus",
"id": "5e502e118d78ac9773a8691f0f16b6e70d959a3e",
"size": "1272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_vib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "206826"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, unicode_literals
from voodoo.dbutil import get_table_kwargs
from sqlalchemy import Column, Integer, String, Index
from weblab.core.coordinator.sql.model import Base, RESERVATION_ID_SIZE
TABLE_KWARGS = get_table_kwargs()

# Table-name prefix shared by all iLab batch scheduler tables.
SUFFIX = 'ILAB_BATCH_'

class ILabBatchReservation(Base):
    """Maps a local WebLab reservation onto a remote iLab batch experiment.

    One row per reservation forwarded to a remote lab server; the pair
    (local_reservation_id, lab_server_url) is indexed for lookup below.
    """
    __tablename__ = SUFFIX + 'ILabBatchReservation'
    __table_args__ = TABLE_KWARGS
    id = Column(Integer, primary_key=True)
    local_reservation_id = Column(String(RESERVATION_ID_SIZE))
    lab_server_url = Column(String(255))
    remote_experiment_id = Column(Integer)
    def __init__(self, local_reservation_id, lab_server_url, remote_experiment_id):
        self.local_reservation_id = local_reservation_id
        self.lab_server_url = lab_server_url
        self.remote_experiment_id = remote_experiment_id
    def __repr__(self):
        # Fixed: the format string had four %r placeholders for three values,
        # so calling repr() raised "TypeError: not enough arguments".
        return SUFFIX + "ILabBatchReservation(%r, %r, %r)" % (
            self.local_reservation_id,
            self.lab_server_url,
            self.remote_experiment_id,
        )

Index('ix_ilabbatch_rese_lab', ILabBatchReservation.local_reservation_id, ILabBatchReservation.lab_server_url)
|
{
"content_hash": "34a7105f53206a40dec3b993bc3b3a8c",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 110,
"avg_line_length": 35.611111111111114,
"alnum_prop": 0.6310452418096724,
"repo_name": "porduna/weblabdeusto",
"id": "9d3357dc70e712c37b9bc4b2ccd9cd1758c0b775",
"size": "1669",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "server/src/weblab/core/coordinator/sql/externals/ilab_batch_scheduler_model.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "4785"
},
{
"name": "ActionScript",
"bytes": "8508"
},
{
"name": "Batchfile",
"bytes": "7753"
},
{
"name": "C",
"bytes": "19456"
},
{
"name": "C#",
"bytes": "315160"
},
{
"name": "C++",
"bytes": "9547"
},
{
"name": "CSS",
"bytes": "203478"
},
{
"name": "CoffeeScript",
"bytes": "39146"
},
{
"name": "Go",
"bytes": "7076"
},
{
"name": "HTML",
"bytes": "610251"
},
{
"name": "Java",
"bytes": "856300"
},
{
"name": "JavaScript",
"bytes": "1538963"
},
{
"name": "Makefile",
"bytes": "24995"
},
{
"name": "Mako",
"bytes": "1236"
},
{
"name": "PHP",
"bytes": "159985"
},
{
"name": "Python",
"bytes": "3780070"
},
{
"name": "Shell",
"bytes": "7880"
},
{
"name": "Smarty",
"bytes": "40320"
},
{
"name": "TSQL",
"bytes": "717"
},
{
"name": "VHDL",
"bytes": "5874"
}
],
"symlink_target": ""
}
|
import os
import gslevels
from . import gsocr
from process import process
class ckbroke(process):
    """Pipeline step: OCR each encoded file looking for the word INVALID.

    For every upload format of an episode, runs gsocr (gocr restricted to
    A-Z) over the encoded file; if "INVALID" is detected the episode is
    flagged (name prefixed, state set to -1) instead of being advanced.
    """
    ready_state = 3
    def process_ep(self, ep):
        # Check each encoded output format for the baked-in INVALID marker.
        exts = self.options.upload_formats
        for ext in exts:
            src_pathname = os.path.join( self.show_dir, ext,
                "%s.%s"%(ep.slug,ext))
            p=gsocr.Main(src_pathname)
            # gocr -s 40 -C A-Z ~/shot0001.png INVALID
            p.gocr_cmd = ['gocr', '-', '-s', '40', '-C', 'A-Z']
            dictionary = ["INVALID"]
            p.dictionaries=[dictionary]
            # p.frame=30*5 # start 5 seconds into it (past the title)
            p.seek_sec = 1
            if self.options.verbose: print("checking ", ext)
            # Runs the GTK main loop; gsocr accumulates OCR hits in p.words.
            gsocr.gtk.main()
            print(p.words)
            if p.words: ## ["INVALID"] is kinda the only thing it can be
                # Marker found: flag the episode and hold it back.
                print(ep.id, ep.name)
                print(p.words)
                ep.name = "INVALID " + ep.name
                ep.state = -1
                ep.save()
                ret=False
            else:
                # return True to bump state
                # assuming we are not --force-ing the check
                ret=self.options.push
        # NOTE(review): only the last ext's result is returned -- confirm
        # whether a failure in an earlier format should short-circuit.
        return ret
    def add_more_options(self, parser):
        # NOTE(review): process_ep uses self.options.push as a boolean flag,
        # but optparse's default action is 'store' (requires a value); this
        # probably wants action="store_true" -- confirm against the process
        # base class before changing.
        parser.add_option('--push',
            help="Push episode past review step if it passes check.")
if __name__ == '__main__':
    # Run this check standalone; process.main() provides option parsing.
    p=ckbroke()
    p.main()
|
{
"content_hash": "0ce9de30c694fd84a40c962db3b4b7e0",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 72,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.49480968858131485,
"repo_name": "yoe/veyepar",
"id": "db968e7661974935eb8ff2b30c936897e6d9d151",
"size": "1688",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "dj/scripts/ck_invalid.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6107"
},
{
"name": "HTML",
"bytes": "76370"
},
{
"name": "JavaScript",
"bytes": "76640"
},
{
"name": "Python",
"bytes": "713606"
},
{
"name": "Ruby",
"bytes": "3503"
},
{
"name": "Shell",
"bytes": "80571"
}
],
"symlink_target": ""
}
|
"""
django-helpdesk - A Django powered ticket tracker for small enterprise.
(c) Copyright 2008 Jutda. All Rights Reserved. See LICENSE for details.
models.py - Model (and hence database) definitions. This is the core of the
helpdesk structure.
"""
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
from django.contrib.auth.models import User
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _, ugettext
try:
from django.utils import timezone
except ImportError:
from datetime import datetime as timezone
class Queue(models.Model):
    """
    A queue is a collection of tickets into what would generally be business
    areas or departments.
    For example, a company may have a queue for each Product they provide, or
    a queue for each of Accounts, Pre-Sales, and Support.
    """
    title = models.CharField(
        _('Title'),
        max_length=100,
    )
    slug = models.SlugField(
        _('Slug'),
        help_text=_('This slug is used when building ticket ID\'s. Once set, '
            'try not to change it or e-mailing may get messy.'),
    )
    email_address = models.EmailField(
        _('E-Mail Address'),
        blank=True,
        null=True,
        help_text=_('All outgoing e-mails for this queue will use this e-mail '
            'address. If you use IMAP or POP3, this should be the e-mail '
            'address for that mailbox.'),
    )
    locale = models.CharField(
        _('Locale'),
        max_length=10,
        blank=True,
        null=True,
        help_text=_('Locale of this queue. All correspondence in this queue will be in this language.'),
    )
    allow_public_submission = models.BooleanField(
        _('Allow Public Submission?'),
        blank=True,
        default=False,
        help_text=_('Should this queue be listed on the public submission '
            'form?'),
    )
    allow_email_submission = models.BooleanField(
        _('Allow E-Mail Submission?'),
        blank=True,
        default=False,
        help_text=_('Do you want to poll the e-mail box below for new '
            'tickets?'),
    )
    escalate_days = models.IntegerField(
        _('Escalation Days'),
        blank=True,
        null=True,
        help_text=_('For tickets which are not held, how often do you wish to '
            'increase their priority? Set to 0 for no escalation.'),
    )
    new_ticket_cc = models.CharField(
        _('New Ticket CC Address'),
        blank=True,
        null=True,
        max_length=200,
        help_text=_('If an e-mail address is entered here, then it will '
            'receive notification of all new tickets created for this queue. '
            'Enter a comma between multiple e-mail addresses.'),
    )
    updated_ticket_cc = models.CharField(
        _('Updated Ticket CC Address'),
        blank=True,
        null=True,
        max_length=200,
        help_text=_('If an e-mail address is entered here, then it will '
            'receive notification of all activity (new tickets, closed '
            'tickets, updates, reassignments, etc) for this queue. Separate '
            'multiple addresses with a comma.'),
    )
    email_box_type = models.CharField(
        _('E-Mail Box Type'),
        max_length=5,
        choices=(('pop3', _('POP 3')), ('imap', _('IMAP'))),
        blank=True,
        null=True,
        help_text=_('E-Mail server type for creating tickets automatically '
            'from a mailbox - both POP3 and IMAP are supported.'),
    )
    email_box_host = models.CharField(
        _('E-Mail Hostname'),
        max_length=200,
        blank=True,
        null=True,
        help_text=_('Your e-mail server address - either the domain name or '
            'IP address. May be "localhost".'),
    )
    email_box_port = models.IntegerField(
        _('E-Mail Port'),
        blank=True,
        null=True,
        help_text=_('Port number to use for accessing e-mail. Default for '
            'POP3 is "110", and for IMAP is "143". This may differ on some '
            'servers. Leave it blank to use the defaults.'),
    )
    email_box_ssl = models.BooleanField(
        _('Use SSL for E-Mail?'),
        blank=True,
        default=False,
        help_text=_('Whether to use SSL for IMAP or POP3 - the default ports '
            'when using SSL are 993 for IMAP and 995 for POP3.'),
    )
    email_box_user = models.CharField(
        _('E-Mail Username'),
        max_length=200,
        blank=True,
        null=True,
        help_text=_('Username for accessing this mailbox.'),
    )
    email_box_pass = models.CharField(
        _('E-Mail Password'),
        max_length=200,
        blank=True,
        null=True,
        help_text=_('Password for the above username'),
    )
    email_box_imap_folder = models.CharField(
        _('IMAP Folder'),
        max_length=100,
        blank=True,
        null=True,
        help_text=_('If using IMAP, what folder do you wish to fetch messages '
            'from? This allows you to use one IMAP account for multiple '
            'queues, by filtering messages on your IMAP server into separate '
            'folders. Default: INBOX.'),
    )
    email_box_interval = models.IntegerField(
        _('E-Mail Check Interval'),
        help_text=_('How often do you wish to check this mailbox? (in Minutes)'),
        blank=True,
        null=True,
        # Fixed: the default was the string '5'; on an IntegerField that left
        # unsaved instances holding a str where callers expect an int.
        default=5,
    )
    email_box_last_check = models.DateTimeField(
        blank=True,
        null=True,
        editable=False,
        # This is updated by management/commands/get_mail.py.
    )
    def __unicode__(self):
        return u"%s" % self.title
    class Meta:
        ordering = ('title',)
    def _from_address(self):
        """
        Short property to provide a sender address in SMTP format,
        eg 'Name <email>'. We do this so we can put a simple error message
        in the sender name field, so hopefully the admin can see and fix it.
        """
        if not self.email_address:
            return u'NO QUEUE EMAIL ADDRESS DEFINED <%s>' % settings.DEFAULT_FROM_EMAIL
        else:
            return u'%s <%s>' % (self.title, self.email_address)
    from_address = property(_from_address)
    def save(self, *args, **kwargs):
        """Fill in mailbox defaults (IMAP folder, standard ports) before saving."""
        if self.email_box_type == 'imap' and not self.email_box_imap_folder:
            self.email_box_imap_folder = 'INBOX'
        if not self.email_box_port:
            # Standard ports: IMAP 143 / IMAPS 993, POP3 110 / POP3S 995.
            if self.email_box_type == 'imap' and self.email_box_ssl:
                self.email_box_port = 993
            elif self.email_box_type == 'imap' and not self.email_box_ssl:
                self.email_box_port = 143
            elif self.email_box_type == 'pop3' and self.email_box_ssl:
                self.email_box_port = 995
            elif self.email_box_type == 'pop3' and not self.email_box_ssl:
                self.email_box_port = 110
        super(Queue, self).save(*args, **kwargs)
class Ticket(models.Model):
    """
    To allow a ticket to be entered as quickly as possible, only the
    bare minimum fields are required. These basically allow us to
    sort and manage the ticket. The user can always go back and
    enter more information later.
    A good example of this is when a customer is on the phone, and
    you want to give them a ticket ID as quickly as possible. You can
    enter some basic info, save the ticket, give the customer the ID
    and get off the phone, then add in further detail at a later time
    (once the customer is not on the line).
    Note that assigned_to is optional - unassigned tickets are displayed on
    the dashboard to prompt users to take ownership of them.
    """
    OPEN_STATUS = 1
    REOPENED_STATUS = 2
    RESOLVED_STATUS = 3
    CLOSED_STATUS = 4
    DUPLICATE_STATUS = 5
    STATUS_CHOICES = (
        (OPEN_STATUS, _('Open')),
        (REOPENED_STATUS, _('Reopened')),
        (RESOLVED_STATUS, _('Resolved')),
        (CLOSED_STATUS, _('Closed')),
        (DUPLICATE_STATUS, _('Duplicate')),
    )
    PRIORITY_CHOICES = (
        (1, _('1. Critical')),
        (2, _('2. High')),
        (3, _('3. Normal')),
        (4, _('4. Low')),
        (5, _('5. Very Low')),
    )
    title = models.CharField(
        _('Title'),
        max_length=200,
    )
    queue = models.ForeignKey(
        Queue,
        verbose_name=_('Queue'),
    )
    created = models.DateTimeField(
        _('Created'),
        blank=True,
        help_text=_('Date this ticket was first created'),
    )
    modified = models.DateTimeField(
        _('Modified'),
        blank=True,
        help_text=_('Date this ticket was most recently changed.'),
    )
    submitter_email = models.EmailField(
        _('Submitter E-Mail'),
        blank=True,
        null=True,
        help_text=_('The submitter will receive an email for all public '
            'follow-ups left for this task.'),
    )
    assigned_to = models.ForeignKey(
        User,
        related_name='assigned_to',
        blank=True,
        null=True,
        verbose_name=_('Assigned to'),
    )
    status = models.IntegerField(
        _('Status'),
        choices=STATUS_CHOICES,
        default=OPEN_STATUS,
    )
    on_hold = models.BooleanField(
        _('On Hold'),
        blank=True,
        default=False,
        help_text=_('If a ticket is on hold, it will not automatically be '
            'escalated.'),
    )
    description = models.TextField(
        _('Description'),
        blank=True,
        null=True,
        help_text=_('The content of the customers query.'),
    )
    resolution = models.TextField(
        _('Resolution'),
        blank=True,
        null=True,
        help_text=_('The resolution provided to the customer by our staff.'),
    )
    priority = models.IntegerField(
        _('Priority'),
        choices=PRIORITY_CHOICES,
        default=3,
        # Fixed: was blank=3, a typo that only behaved because 3 is truthy.
        blank=True,
        help_text=_('1 = Highest Priority, 5 = Low Priority'),
    )
    due_date = models.DateTimeField(
        _('Due on'),
        blank=True,
        null=True,
    )
    last_escalation = models.DateTimeField(
        blank=True,
        null=True,
        editable=False,
        help_text=_('The date this ticket was last escalated - updated '
            'automatically by management/commands/escalate_tickets.py.'),
    )
    def _get_assigned_to(self):
        """ Custom property to allow us to easily print 'Unassigned' if a
        ticket has no owner, or the users name if it's assigned. If the user
        has a full name configured, we use that, otherwise their username. """
        if not self.assigned_to:
            return _('Unassigned')
        else:
            if self.assigned_to.get_full_name():
                return self.assigned_to.get_full_name()
            else:
                return self.assigned_to.username
    get_assigned_to = property(_get_assigned_to)
    def _get_ticket(self):
        """ A user-friendly ticket ID, which is a combination of ticket ID
        and queue slug. This is generally used in e-mail subjects. """
        return u"[%s]" % (self.ticket_for_url)
    ticket = property(_get_ticket)
    def _get_ticket_for_url(self):
        """ A URL-friendly ticket ID, used in links. """
        return u"%s-%s" % (self.queue.slug, self.id)
    ticket_for_url = property(_get_ticket_for_url)
    def _get_priority_img(self):
        """ Image-based representation of the priority """
        from django.conf import settings
        return u"%shelpdesk/priorities/priority%s.png" % (settings.MEDIA_URL, self.priority)
    get_priority_img = property(_get_priority_img)
    def _get_priority_span(self):
        """
        A HTML <span> providing a CSS_styled representation of the priority.
        """
        from django.utils.safestring import mark_safe
        return mark_safe(u"<span class='priority%s'>%s</span>" % (self.priority, self.priority))
    get_priority_span = property(_get_priority_span)
    def _get_status(self):
        """
        Displays the ticket status, with an "On Hold" message if needed.
        """
        held_msg = ''
        if self.on_hold: held_msg = _(' - On Hold')
        dep_msg = ''
        if self.can_be_resolved == False: dep_msg = _(' - Open dependencies')
        return u'%s%s%s' % (self.get_status_display(), held_msg, dep_msg)
    get_status = property(_get_status)
    def _get_ticket_url(self):
        """
        Returns a publicly-viewable URL for this ticket, used when giving
        a URL to the submitter of a ticket.
        """
        from django.contrib.sites.models import Site
        from django.core.urlresolvers import reverse
        try:
            site = Site.objects.get_current()
        except Exception:
            # Sites framework not configured: fall back to a placeholder
            # domain (was a bare except:, which also swallowed SystemExit).
            site = Site(domain='configure-django-sites.com')
        return u"http://%s%s?ticket=%s&email=%s" % (
            site.domain,
            reverse('helpdesk_public_view'),
            self.ticket_for_url,
            self.submitter_email
        )
    ticket_url = property(_get_ticket_url)
    def _get_staff_url(self):
        """
        Returns a staff-only URL for this ticket, used when giving a URL to
        a staff member (in emails etc)
        """
        from django.contrib.sites.models import Site
        from django.core.urlresolvers import reverse
        try:
            site = Site.objects.get_current()
        except Exception:
            # Same placeholder fallback as _get_ticket_url above.
            site = Site(domain='configure-django-sites.com')
        return u"http://%s%s" % (
            site.domain,
            reverse('helpdesk_view',
                args=[self.id])
        )
    staff_url = property(_get_staff_url)
    def _can_be_resolved(self):
        """
        Returns a boolean.
        True = any dependencies are resolved
        False = There are non-resolved dependencies
        """
        OPEN_STATUSES = (Ticket.OPEN_STATUS, Ticket.REOPENED_STATUS)
        return TicketDependency.objects.filter(ticket=self).filter(depends_on__status__in=OPEN_STATUSES).count() == 0
    can_be_resolved = property(_can_be_resolved)
    class Meta:
        get_latest_by = "created"
        ordering = ('id',)
    def __unicode__(self):
        return u'%s %s' % (self.id, self.title)
    def get_absolute_url(self):
        return ('helpdesk_view', (self.id,))
    get_absolute_url = models.permalink(get_absolute_url)
    def save(self, *args, **kwargs):
        """Stamp created/modified times and default the priority on first save."""
        if not self.id:
            # This is a new ticket as no ID yet exists.
            self.created = timezone.now()
        if not self.priority:
            self.priority = 3
        self.modified = timezone.now()
        super(Ticket, self).save(*args, **kwargs)
class FollowUpManager(models.Manager):
    """Convenience filters over FollowUp.public."""
    def private_followups(self):
        """Follow-ups visible to staff only (public=False)."""
        return self.filter(public=False)
    def public_followups(self):
        """Follow-ups also visible to the ticket submitter (public=True)."""
        return self.filter(public=True)
class FollowUp(models.Model):
    """
    A FollowUp is a comment and/or change to a ticket. We keep a simple
    title, the comment entered by the user, and the new status of a ticket
    to enable easy flagging of details on the view-ticket page.
    The title is automatically generated at save-time, based on what action
    the user took.
    Tickets that aren't public are never shown to or e-mailed to the submitter,
    although all staff can see them.
    """
    ticket = models.ForeignKey(
        Ticket,
        verbose_name=_('Ticket'),
    )
    date = models.DateTimeField(
        _('Date'),
        # Fixed: was default=timezone.now() -- the call was evaluated once at
        # import time, stamping every FollowUp with the process start time.
        # Passing the callable makes Django evaluate it per row.
        default=timezone.now
    )
    title = models.CharField(
        _('Title'),
        max_length=200,
        blank=True,
        null=True,
    )
    comment = models.TextField(
        _('Comment'),
        blank=True,
        null=True,
    )
    public = models.BooleanField(
        _('Public'),
        blank=True,
        default=False,
        help_text=_('Public tickets are viewable by the submitter and all '
            'staff, but non-public tickets can only be seen by staff.'),
    )
    user = models.ForeignKey(
        User,
        blank=True,
        null=True,
        verbose_name=_('User'),
    )
    new_status = models.IntegerField(
        _('New Status'),
        choices=Ticket.STATUS_CHOICES,
        blank=True,
        null=True,
        help_text=_('If the status was changed, what was it changed to?'),
    )
    objects = FollowUpManager()
    class Meta:
        ordering = ['date']
    def __unicode__(self):
        return u'%s' % self.title
    def get_absolute_url(self):
        return u"%s#followup%s" % (self.ticket.get_absolute_url(), self.id)
    def save(self, *args, **kwargs):
        """Touch the parent ticket's modified timestamp whenever a follow-up is saved."""
        t = self.ticket
        t.modified = timezone.now()
        t.save()
        super(FollowUp, self).save(*args, **kwargs)
class TicketChange(models.Model):
    """
    For each FollowUp, any changes to the parent ticket (eg Title, Priority,
    etc) are tracked here for display purposes.
    """
    followup = models.ForeignKey(
        FollowUp,
        verbose_name=_('Follow-up'),
    )
    field = models.CharField(
        _('Field'),
        max_length=100,
    )
    old_value = models.TextField(
        _('Old Value'),
        blank=True,
        null=True,
    )
    new_value = models.TextField(
        _('New Value'),
        blank=True,
        null=True,
    )
    def __unicode__(self):
        # Accumulator renamed from 'str', which shadowed the builtin.
        out = u'%s ' % self.field
        if not self.new_value:
            out += ugettext('removed')
        elif not self.old_value:
            out += ugettext('set to %s') % self.new_value
        else:
            out += ugettext('changed from "%(old_value)s" to "%(new_value)s"') % {
                'old_value': self.old_value,
                'new_value': self.new_value
            }
        return out
def attachment_path(instance, filename):
    """
    Provide a file path that will help prevent files being overwritten, by
    putting attachments in a folder off attachments for ticket/followup_id/.
    """
    import os
    from django.conf import settings
    # NOTE(review): this permanently changes the process-wide umask so the
    # makedirs mode below takes effect exactly; it is never restored.
    os.umask(0)
    path = 'helpdesk/attachments/%s/%s' % (instance.followup.ticket.ticket_for_url, instance.followup.id )
    att_path = os.path.join(settings.MEDIA_ROOT, path)
    # Only pre-create the directory for local filesystem storage.
    if settings.DEFAULT_FILE_STORAGE == "django.core.files.storage.FileSystemStorage":
        if not os.path.exists(att_path):
            # 0777 is Python 2 octal syntax (this module targets Python 2:
            # u'' literals, __unicode__, cPickle elsewhere in the file).
            os.makedirs(att_path, 0777)
    return os.path.join(path, filename)
class Attachment(models.Model):
    """
    Represents a file attached to a follow-up. This could come from an e-mail
    attachment, or it could be uploaded via the web interface.
    """
    followup = models.ForeignKey(
        FollowUp,
        verbose_name=_('Follow-up'),
    )
    # Stored via attachment_path() above, under ticket/followup_id/.
    file = models.FileField(
        _('File'),
        upload_to=attachment_path,
    )
    filename = models.CharField(
        _('Filename'),
        max_length=100,
    )
    mime_type = models.CharField(
        _('MIME Type'),
        max_length=255,
    )
    size = models.IntegerField(
        _('Size'),
        help_text=_('Size of this file in bytes'),
    )
    def get_upload_to(self, field_attname):
        """ Get upload_to path specific to this item """
        # Unsaved attachments have no id yet, so no stable path exists.
        if not self.id:
            return u''
        return u'helpdesk/attachments/%s/%s' % (
            self.followup.ticket.ticket_for_url,
            self.followup.id
        )
    def __unicode__(self):
        return u'%s' % self.filename
    class Meta:
        ordering = ['filename',]
class PreSetReply(models.Model):
    """
    We can allow the admin to define a number of pre-set replies, used to
    simplify the sending of updates and resolutions. These are basically Django
    templates with a limited context - however if you wanted to get crafty it would
    be easy to write a reply that displays ALL updates in hierarchical order etc
    with use of for loops over {{ ticket.followup_set.all }} and friends.
    When replying to a ticket, the user can select any reply set for the current
    queue, and the body text is fetched via AJAX.
    """
    # Empty = usable in every queue; otherwise restricted to the chosen ones.
    queues = models.ManyToManyField(
        Queue,
        blank=True,
        null=True,
        help_text=_('Leave blank to allow this reply to be used for all '
            'queues, or select those queues you wish to limit this reply to.'),
    )
    name = models.CharField(
        _('Name'),
        max_length=100,
        help_text=_('Only used to assist users with selecting a reply - not '
            'shown to the user.'),
    )
    body = models.TextField(
        _('Body'),
        help_text=_('Context available: {{ ticket }} - ticket object (eg '
            '{{ ticket.title }}); {{ queue }} - The queue; and {{ user }} '
            '- the current user.'),
    )
    class Meta:
        ordering = ['name',]
    def __unicode__(self):
        return u'%s' % self.name
class EscalationExclusion(models.Model):
    """
    An 'EscalationExclusion' lets us define a date on which escalation should
    not happen, for example a weekend or public holiday.
    You may also have a queue that is only used on one day per week.
    To create these on a regular basis, check out the README file for an
    example cronjob that runs 'create_escalation_exclusions.py'.
    """
    # Empty = applies to every queue; otherwise restricted to the chosen ones.
    queues = models.ManyToManyField(
        Queue,
        blank=True,
        null=True,
        help_text=_('Leave blank for this exclusion to be applied to all '
            'queues, or select those queues you wish to exclude with this '
            'entry.'),
    )
    name = models.CharField(
        _('Name'),
        max_length=100,
    )
    date = models.DateField(
        _('Date'),
        help_text=_('Date on which escalation should not happen'),
    )
    def __unicode__(self):
        return u'%s' % self.name
class EmailTemplate(models.Model):
    """
    Since these are more likely to be changed than other templates, we store
    them in the database.
    This means that an admin can change email templates without having to have
    access to the filesystem.
    """
    template_name = models.CharField(
        _('Template Name'),
        max_length=100,
    )
    subject = models.CharField(
        _('Subject'),
        max_length=100,
        # Fixed misplaced quote in the example: '(Updated")' -> '"(Updated)"'.
        help_text=_('This will be prefixed with "[ticket.ticket] ticket.title"'
            '. We recommend something simple such as "(Updated)" or "(Closed)"'
            ' - the same context is available as in plain_text, below.'),
    )
    heading = models.CharField(
        _('Heading'),
        max_length=100,
        help_text=_('In HTML e-mails, this will be the heading at the top of '
            'the email - the same context is available as in plain_text, '
            'below.'),
    )
    plain_text = models.TextField(
        _('Plain Text'),
        help_text=_('The context available to you includes {{ ticket }}, '
            '{{ queue }}, and depending on the time of the call: '
            '{{ resolution }} or {{ comment }}.'),
    )
    html = models.TextField(
        _('HTML'),
        help_text=_('The same context is available here as in plain_text, '
            'above.'),
    )
    locale = models.CharField(
        _('Locale'),
        max_length=10,
        blank=True,
        null=True,
        help_text=_('Locale of this template.'),
    )
    def __unicode__(self):
        return u'%s' % self.template_name
    class Meta:
        ordering = ['template_name', 'locale']
class KBCategory(models.Model):
    """
    Lets help users help themselves: the Knowledge Base is a categorised
    listing of questions & answers.
    """
    title = models.CharField(
        _('Title'),
        max_length=100,
    )
    slug = models.SlugField(
        _('Slug'),
    )
    description = models.TextField(
        _('Description'),
    )
    def __unicode__(self):
        return u'%s' % self.title
    class Meta:
        ordering = ['title',]
    def get_absolute_url(self):
        return ('helpdesk_kb_category', (), {'slug': self.slug})
    get_absolute_url = models.permalink(get_absolute_url)
class KBItem(models.Model):
    """
    An item within the knowledgebase. Very straightforward question/answer
    style system.
    """
    category = models.ForeignKey(
        KBCategory,
        verbose_name=_('Category'),
    )
    title = models.CharField(
        _('Title'),
        max_length=100,
    )
    question = models.TextField(
        _('Question'),
    )
    answer = models.TextField(
        _('Answer'),
    )
    votes = models.IntegerField(
        _('Votes'),
        help_text=_('Total number of votes cast for this item'),
        default=0,
    )
    recommendations = models.IntegerField(
        _('Positive Votes'),
        help_text=_('Number of votes for this item which were POSITIVE.'),
        default=0,
    )
    last_updated = models.DateTimeField(
        _('Last Updated'),
        help_text=_('The date on which this question was most recently '
            'changed.'),
        blank=True,
    )
    def save(self, *args, **kwargs):
        # Backfill last_updated on first save; never overwritten afterwards.
        if not self.last_updated:
            self.last_updated = timezone.now()
        return super(KBItem, self).save(*args, **kwargs)
    def _score(self):
        # NOTE(review): with Python 2 ints this is floor division, so the
        # score is 0 unless every vote was positive (then 1). Possibly meant
        # to be a percentage -- confirm intent before changing.
        if self.votes > 0:
            return int(self.recommendations / self.votes)
        else:
            return _('Unrated')
    score = property(_score)
    def __unicode__(self):
        return u'%s' % self.title
    class Meta:
        ordering = ['title',]
    def get_absolute_url(self):
        return ('helpdesk_kb_item', (self.id,))
    get_absolute_url = models.permalink(get_absolute_url)
class SavedSearch(models.Model):
    """
    Allow a user to save a ticket search, eg their filtering and sorting
    options, and optionally share it with other users. This lets people
    easily create a set of commonly-used filters, such as:

    * My tickets waiting on me
    * My tickets waiting on submitter
    * My tickets in 'Priority Support' queue with priority of 1
    * All tickets containing the word 'billing'.
    etc...
    """

    # Owner of the saved search.
    user = models.ForeignKey(
        User,
        verbose_name=_('User'),
        )

    title = models.CharField(
        _('Query Name'),
        max_length=100,
        help_text=_('User-provided name for this query'),
        )

    # When True, other users can see and use this search.
    shared = models.BooleanField(
        _('Shared With Other Users?'),
        blank=True,
        default=False,
        help_text=_('Should other users see this query?'),
        )

    # NOTE(review): stores a pickled query object — only safe while the
    # database contents are trusted (unpickling can execute arbitrary code).
    query = models.TextField(
        _('Search Query'),
        help_text=_('Pickled query object. Be wary changing this.'),
        )

    def __unicode__(self):
        # Shared searches are marked with an asterisk.
        if self.shared:
            return u'%s (*)' % self.title
        else:
            return u'%s' % self.title
class UserSettings(models.Model):
    """
    A bunch of user-specific settings that we want to be able to define, such
    as notification preferences and other things that should probably be
    configurable.

    We should always refer to user.usersettings.settings['setting_name'].
    """

    user = models.OneToOneField(User)

    # Base64-encoded pickled dict; read/write via the `settings` property.
    settings_pickled = models.TextField(
        _('Settings Dictionary'),
        help_text=_('This is a base64-encoded representation of a pickled Python dictionary. Do not change this field via the admin.'),
        blank=True,
        null=True,
        )

    def _set_settings(self, data):
        # data should always be a Python dictionary.
        import cPickle
        from helpdesk.lib import b64encode
        self.settings_pickled = b64encode(cPickle.dumps(data))

    def _get_settings(self):
        # return a python dictionary representing the pickled data.
        # NOTE(review): cPickle.loads on stored data is only safe while the
        # database contents are trusted — pickle can execute arbitrary code.
        import cPickle
        from helpdesk.lib import b64decode
        try:
            return cPickle.loads(b64decode(str(self.settings_pickled)))
        except cPickle.UnpicklingError:
            # Corrupt or empty data degrades to an empty settings dict.
            return {}

    settings = property(_get_settings, _set_settings)

    def __unicode__(self):
        return u'Preferences for %s' % self.user

    class Meta:
        verbose_name = 'User Settings'
        verbose_name_plural = 'User Settings'
def create_usersettings(sender, created_models=(), instance=None, created=False, **kwargs):
    """
    Helper function to create UserSettings instances as
    required, eg when we first create the UserSettings database
    table via 'syncdb' or when we save a new user.

    If we end up with users with no UserSettings, then we get horrible
    'DoesNotExist: UserSettings matching query does not exist.' errors.

    Connected to both post_syncdb (which supplies `created_models`) and
    post_save for User (which supplies `sender`/`instance`/`created`).

    NOTE: the `created_models` default is now an immutable tuple rather than
    a mutable list literal (it is only ever membership-tested, so this is
    fully backward-compatible).
    """
    from helpdesk.settings import DEFAULT_USER_SETTINGS
    if sender == User and created:
        # This is a new user, so lets create their settings entry.
        s, created = UserSettings.objects.get_or_create(
            user=instance, defaults={'settings': DEFAULT_USER_SETTINGS})
        s.save()
    elif UserSettings in created_models:
        # We just created the UserSettings model, lets create a UserSettings
        # entry for each existing user. This will only happen once (at install
        # time, or at upgrade) when the UserSettings model doesn't already
        # exist.
        for u in User.objects.all():
            try:
                s = UserSettings.objects.get(user=u)
            except UserSettings.DoesNotExist:
                s = UserSettings(user=u, settings=DEFAULT_USER_SETTINGS)
                s.save()
# Ensure every user has a UserSettings row: populate at initial syncdb, and
# create one for each newly saved User thereafter.
models.signals.post_syncdb.connect(create_usersettings)
models.signals.post_save.connect(create_usersettings, sender=User)
class IgnoreEmail(models.Model):
    """
    This model lets us easily ignore e-mails from certain senders when
    processing IMAP and POP3 mailboxes, eg mails from postmaster or from
    known trouble-makers.
    """

    # Queues this rule applies to; empty means "ignore on all queues".
    queues = models.ManyToManyField(
        Queue,
        blank=True,
        null=True,
        help_text=_('Leave blank for this e-mail to be ignored on all '
            'queues, or select those queues you wish to ignore this e-mail '
            'for.'),
        )

    name = models.CharField(
        _('Name'),
        max_length=100,
        )

    # Auto-filled on first save (see save()); hidden from forms.
    date = models.DateField(
        _('Date'),
        help_text=_('Date on which this e-mail address was added'),
        blank=True,
        editable=False
        )

    # May contain '*' wildcards for the local part and/or the domain.
    email_address = models.CharField(
        _('E-Mail Address'),
        max_length=150,
        help_text=_('Enter a full e-mail address, or portions with '
            'wildcards, eg *@domain.com or postmaster@*.'),
        )

    keep_in_mailbox = models.BooleanField(
        _('Save Emails in Mailbox?'),
        blank=True,
        default=False,
        help_text=_('Do you want to save emails from this address in the '
            'mailbox? If this is unticked, emails from this address will '
            'be deleted.'),
        )

    def __unicode__(self):
        return u'%s' % self.name

    def save(self, *args, **kwargs):
        # Default `date` to "now" on first save.
        if not self.date:
            self.date = timezone.now()
        return super(IgnoreEmail, self).save(*args, **kwargs)

    def test(self, email):
        """
        Return True when this ignore rule matches `email`.

        Possible situations:
        1. Username & Domain both match
        2. Username is wildcard, domain matches
        3. Username matches, domain is wildcard
        4. username & domain are both wildcards
        5. Other (no match)

        1-4 return True, 5 returns False.
        """
        own_parts = self.email_address.split("@")
        email_parts = email.split("@")
        # BUGFIX: addresses without exactly one '@' (on either side) used to
        # raise IndexError when parts[1] was accessed below. Fall back to an
        # exact string comparison for malformed input.
        if len(own_parts) != 2 or len(email_parts) != 2:
            return self.email_address == email
        return (self.email_address == email
                or (own_parts[0] == "*" and own_parts[1] == email_parts[1])
                or (own_parts[1] == "*" and own_parts[0] == email_parts[0])
                or (own_parts[0] == "*" and own_parts[1] == "*"))
class TicketCC(models.Model):
    """
    Often, there are people who wish to follow a ticket who aren't the
    person who originally submitted it. This model provides a way for those
    people to follow a ticket.

    In this circumstance, a 'person' could be either an e-mail address or
    an existing system user.
    """

    ticket = models.ForeignKey(
        Ticket,
        verbose_name=_('Ticket'),
        )

    # Exactly one of `user` / `email` is expected to be filled in.
    user = models.ForeignKey(
        User,
        blank=True,
        null=True,
        help_text=_('User who wishes to receive updates for this ticket.'),
        verbose_name=_('User'),
        )

    email = models.EmailField(
        _('E-Mail Address'),
        blank=True,
        null=True,
        help_text=_('For non-user followers, enter their e-mail address'),
        )

    can_view = models.BooleanField(
        _('Can View Ticket?'),
        blank=True,
        help_text=_('Can this CC login to view the ticket details?'),
        )

    can_update = models.BooleanField(
        _('Can Update Ticket?'),
        blank=True,
        help_text=_('Can this CC login and update the ticket?'),
        )

    def _email_address(self):
        # Prefer the linked user's address; fall back to the raw email field.
        # NOTE(review): a linked user with an empty-string email still wins
        # here (only None is excluded) — confirm that is intended.
        if self.user and self.user.email is not None:
            return self.user.email
        else:
            return self.email
    email_address = property(_email_address)

    def _display(self):
        # Human-friendly identity: the User object if linked, else the email.
        if self.user:
            return self.user
        else:
            return self.email
    display = property(_display)

    def __unicode__(self):
        return u'%s for %s' % (self.display, self.ticket.title)
class CustomFieldManager(models.Manager):
    """Default manager returning CustomFields sorted by their `ordering` column."""

    def get_query_set(self):
        base_qs = super(CustomFieldManager, self).get_query_set()
        return base_qs.order_by('ordering')
class CustomField(models.Model):
    """
    Definitions for custom fields that are glued onto each ticket.

    Values for a given ticket live in TicketCustomFieldValue; this model only
    describes the field (name, label, data type, validation constraints).
    """

    # Machine name, used in the database and behind the scenes.
    name = models.SlugField(
        _('Field Name'),
        help_text=_('As used in the database and behind the scenes. Must be unique and consist of only lowercase letters with no punctuation.'),
        unique=True,
        )

    label = models.CharField(
        _('Label'),
        # BUGFIX: max_length must be an integer; it was previously the
        # string '30', which Django does not reliably accept.
        max_length=30,
        help_text=_('The display label for this field'),
        )

    help_text = models.TextField(
        _('Help Text'),
        help_text=_('Shown to the user when editing the ticket'),
        blank=True,
        null=True
        )

    DATA_TYPE_CHOICES = (
        ('varchar', _('Character (single line)')),
        ('text', _('Text (multi-line)')),
        ('integer', _('Integer')),
        ('decimal', _('Decimal')),
        ('list', _('List')),
        ('boolean', _('Boolean (checkbox yes/no)')),
        ('date', _('Date')),
        ('time', _('Time')),
        ('datetime', _('Date & Time')),
        ('email', _('E-Mail Address')),
        ('url', _('URL')),
        ('ipaddress', _('IP Address')),
        ('slug', _('Slug')),
        )

    data_type = models.CharField(
        _('Data Type'),
        max_length=100,
        help_text=_('Allows you to restrict the data entered into this field'),
        choices=DATA_TYPE_CHOICES,
        )

    max_length = models.IntegerField(
        _('Maximum Length (characters)'),
        blank=True,
        null=True,
        )

    decimal_places = models.IntegerField(
        _('Decimal Places'),
        help_text=_('Only used for decimal fields'),
        blank=True,
        null=True,
        )

    empty_selection_list = models.BooleanField(
        _('Add empty first choice to List?'),
        default=False,
        help_text=_('Only for List: adds an empty first entry to the choices list, which enforces that the user makes an active choice.'),
        )

    # Raw newline-separated options for 'list' fields; parsed by
    # choices_as_array below.
    list_values = models.TextField(
        _('List Values'),
        help_text=_('For list fields only. Enter one option per line.'),
        blank=True,
        null=True,
        )

    ordering = models.IntegerField(
        _('Ordering'),
        help_text=_('Lower numbers are displayed first; higher numbers are listed later'),
        blank=True,
        null=True,
        )

    def _choices_as_array(self):
        """Parse `list_values` (one option per line) into [value, label] pairs."""
        from StringIO import StringIO
        # BUGFIX: `list_values` is nullable; guard against None so fields of
        # other data types do not blow up when this property is read.
        valuebuffer = StringIO(self.list_values or '')
        choices = [[item.strip(), item.strip()] for item in valuebuffer.readlines()]
        valuebuffer.close()
        return choices
    choices_as_array = property(_choices_as_array)

    required = models.BooleanField(
        _('Required?'),
        help_text=_('Does the user have to enter a value for this field?'),
        )

    staff_only = models.BooleanField(
        _('Staff Only?'),
        help_text=_('If this is ticked, then the public submission form will NOT show this field'),
        )

    objects = CustomFieldManager()

    def __unicode__(self):
        return '%s' % (self.name)
class TicketCustomFieldValue(models.Model):
    """Stores the value of one CustomField for one Ticket."""

    ticket = models.ForeignKey(
        Ticket,
        verbose_name=_('Ticket'),
        )

    field = models.ForeignKey(
        CustomField,
        verbose_name=_('Field'),
        )

    # Raw text value; interpretation depends on field.data_type.
    value = models.TextField(blank=True, null=True)

    def __unicode__(self):
        return '%s / %s' % (self.ticket, self.field)

    class Meta:
        # At most one value per (ticket, field) pair.
        unique_together = ('ticket', 'field'),
class TicketDependency(models.Model):
    """
    The ticket identified by `ticket` cannot be resolved until the ticket in `depends_on` has been resolved.
    To help enforce this, a helper function `can_be_resolved` on each Ticket instance checks that
    these have all been resolved.
    """

    # The blocked ticket.
    ticket = models.ForeignKey(
        Ticket,
        verbose_name=_('Ticket'),
        related_name='ticketdependency',
        )

    # The ticket it is waiting on.
    depends_on = models.ForeignKey(
        Ticket,
        verbose_name=_('Depends On Ticket'),
        related_name='depends_on',
        )

    def __unicode__(self):
        return '%s / %s' % (self.ticket, self.depends_on)

    class Meta:
        # Each dependency pair may only be recorded once.
        unique_together = ('ticket', 'depends_on')
|
{
"content_hash": "be5c4c356a412f76eac089da36c053f1",
"timestamp": "",
"source": "github",
"line_count": 1281,
"max_line_length": 144,
"avg_line_length": 29.87431693989071,
"alnum_prop": 0.5762627714337976,
"repo_name": "gjedeer/django-helpdesk-issue-164",
"id": "e0c8e6c8988715c85cc4a1f14a77b1f6a22e0828",
"size": "38269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helpdesk/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5692"
},
{
"name": "JavaScript",
"bytes": "46236"
},
{
"name": "Python",
"bytes": "356886"
},
{
"name": "Shell",
"bytes": "708"
}
],
"symlink_target": ""
}
|
"""
Copyright 2010 Rusty Klophaus <rusty@basho.com>
Copyright 2010 Justin Sheehy <justin@basho.com>
Copyright 2009 Jay Baird <jay@mochimedia.com>
This file is provided to you under the Apache License,
Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from __future__ import with_statement
import urllib, re, csv
from cStringIO import StringIO
import httplib
import socket
import errno
try:
import json
except ImportError:
import simplejson as json
from transport import RiakTransport
from riak.metadata import *
from riak.mapreduce import RiakLink
from riak import RiakError
from riak.riak_index_entry import RiakIndexEntry
from riak.multidict import MultiDict
from connection import HTTPConnectionManager
import riak.util
MAX_LINK_HEADER_SIZE = 8192 - 8 # subtract length of "Link: " header string and newline
class RiakHttpTransport(RiakTransport) :
"""
The RiakHttpTransport object holds information necessary to connect to
Riak. The Riak API uses HTTP, so there is no persistent
connection, and the RiakClient object is extremely lightweight.
"""
# We're using the new RiakTransport API
api = 2
# The ConnectionManager class that this transport prefers.
default_cm = HTTPConnectionManager
# How many times to retry a request
RETRY_COUNT = 3
def __init__(self, cm,
prefix='riak', mapred_prefix='mapred', client_id=None,
**unused_options):
"""
Construct a new RiakClient object.
@param string host - Hostname or IP address (default '127.0.0.1')
@param int port - Port number (default 8098)
@param string prefix - Interface prefix (default 'riak')
@param string mapred_prefix - MapReduce prefix (default 'mapred')
@param string client_id - client id to use for vector clocks
"""
super(RiakHttpTransport, self).__init__()
self._conns = cm
self._prefix = prefix
self._mapred_prefix = mapred_prefix
self._client_id = client_id
if not self._client_id:
self._client_id = self.make_random_client_id()
def __copy__(self):
### not implemented right now
raise Exception('not implemented')
### we don't have _host and _port. will fix after some refactoring...
return RiakHttpTransport(self._host, self._port, self._prefix,
self._mapred_prefix)
def set_client_id(self, client_id):
self._client_id = client_id
def get_client_id(self):
return self._client_id
def ping(self) :
"""
Check server is alive over HTTP
"""
response = self.http_request('GET', '/ping')
return(response is not None) and (response[1] == 'OK')
def get(self, robj, r, vtag = None) :
"""
Get a bucket/key from the server
"""
params = {'r' : r}
if vtag is not None:
params['vtag'] = vtag
url = self.build_rest_path(robj.get_bucket(), robj.get_key(),
params=params)
response = self.http_request('GET', url)
return self.parse_body(response, [200, 300, 404])
def put(self, robj, w = None, dw = None, return_body = True):
"""
Serialize put request and deserialize response
"""
# Construct the URL...
params = {'returnbody' : str(return_body).lower(), 'w' : w, 'dw' : dw}
url = self.build_rest_path(bucket=robj.get_bucket(), key=robj.get_key(),
params=params)
headers = self.build_put_headers(robj)
content = robj.get_encoded_data()
return self.do_put(url, headers, content, return_body, key=robj.get_key())
def do_put(self, url, headers, content, return_body=False, key=None):
if key is None:
response = self.http_request('POST', url, headers, content)
else:
response = self.http_request('PUT', url, headers, content)
if return_body:
return self.parse_body(response, [200, 201, 300])
else:
self.check_http_code(response, [204])
return None
def put_new(self, robj, w=None, dw=None, return_meta=True):
"""Put a new object into the Riak store, returning its (new) key."""
# Construct the URL...
params = {'returnbody' : str(return_meta).lower(), 'w' : w, 'dw' : dw}
url = self.build_rest_path(bucket=robj.get_bucket(), params=params)
headers = self.build_put_headers(robj)
content = robj.get_encoded_data()
response = self.http_request('POST', url, headers, content)
location = response[0]['location']
idx = location.rindex('/')
key = location[idx+1:]
if return_meta:
vclock, [(metadata, data)] = self.parse_body(response, [201])
return key, vclock, metadata
else:
self.check_http_code(response, [201])
return key, None, None
def delete(self, robj, rw):
# Construct the URL...
params = {'rw' : rw}
url = self.build_rest_path(robj.get_bucket(), robj.get_key(),
params=params)
# Run the operation..
response = self.http_request('DELETE', url)
self.check_http_code(response, [204, 404])
return self
def get_keys(self, bucket):
params = {'props' : 'True', 'keys' : 'true'}
url = self.build_rest_path(bucket, params=params)
response = self.http_request('GET', url)
headers, encoded_props = response[0:2]
if headers['http_code'] == 200:
props = json.loads(encoded_props)
return props['keys']
else:
raise Exception('Error getting bucket properties.')
def get_buckets(self):
params = {'buckets': 'true'}
url = self.build_rest_path(None, params=params)
response = self.http_request('GET', url)
headers, encoded_props = response[0:2]
if headers['http_code'] == 200:
props = json.loads(encoded_props)
return props['buckets']
else:
raise Exception('Error getting buckets.')
def get_bucket_props(self, bucket):
# Run the request...
params = {'props' : 'True', 'keys' : 'False'}
url = self.build_rest_path(bucket, params=params)
response = self.http_request('GET', url)
headers = response[0]
encoded_props = response[1]
if headers['http_code'] == 200:
props = json.loads(encoded_props)
return props['props']
else:
raise Exception('Error getting bucket properties.')
def set_bucket_props(self, bucket, props):
"""
Set the properties on the bucket object given
"""
url = self.build_rest_path(bucket)
headers = {'Content-Type' : 'application/json'}
content = json.dumps({'props' : props})
# Run the request...
response = self.http_request('PUT', url, headers, content)
# Handle the response...
if response is None:
raise Exception('Error setting bucket properties.')
# Check the response value...
status = response[0]['http_code']
if status != 204:
raise Exception('Error setting bucket properties.')
return True
def mapred(self, inputs, query, timeout=None):
# Construct the job, optionally set the timeout...
job = {'inputs':inputs, 'query':query}
if timeout is not None:
job['timeout'] = timeout
content = json.dumps(job)
# Do the request...
url = "/" + self._mapred_prefix
response = self.http_request('POST', url, {}, content)
# Make sure the expected status code came back...
status = response[0]['http_code']
if status != 200:
raise Exception('Error running MapReduce operation. Status: ' + str(status) + ' : ' + response[1])
result = json.loads(response[1])
return result
def check_http_code(self, response, expected_statuses):
status = response[0]['http_code']
if not status in expected_statuses:
m = 'Expected status ' + str(expected_statuses) + ', received ' + str(status)
raise Exception(m)
def parse_body(self, response, expected_statuses):
"""
Given the output of RiakUtils.http_request and a list of
statuses, populate the object. Only for use by the Riak client
library.
@return self
"""
# If no response given, then return.
if response is None:
return self
# Make sure expected code came back
self.check_http_code(response, expected_statuses)
# Update the object...
headers = response[0]
data = response[1]
status = headers['http_code']
# Check if the server is down(status==0)
if not status:
### we need the host/port that was used.
m = 'Could not contact Riak Server: http://$HOST:$PORT !'
raise RiakError(m)
# Verify that we got one of the expected statuses. Otherwise, raise an exception.
if not status in expected_statuses:
m = 'Expected status ' + str(expected_statuses) + ', received ' + str(status)
raise RiakError(m)
# If 404(Not Found), then clear the object.
if status == 404:
return None
# If 300(Siblings), then return the list of siblings
elif status == 300:
# Parse and get rid of 'Siblings:' string in element 0
siblings = data.strip().split('\n')
siblings.pop(0)
return siblings
# Parse the headers...
vclock = None
metadata = {MD_USERMETA: {}, MD_INDEX: []}
links = []
for header, value in headers.iteritems():
if header == 'content-type':
metadata[MD_CTYPE] = value
elif header == 'charset':
metadata[MD_CHARSET] = value
elif header == 'content-encoding':
metadata[MD_CTYPE] = value
elif header == 'etag':
metadata[MD_VTAG] = value
elif header =='link':
self.parse_links(links, headers['link'])
elif header == 'last-modified':
metadata[MD_LASTMOD] = value
elif header.startswith('x-riak-meta-'):
metadata[MD_USERMETA][header.replace('x-riak-meta-', '')] = value
elif header.startswith('x-riak-index-'):
field = header.replace('x-riak-index-', '')
reader = csv.reader([value], skipinitialspace=True)
for line in reader:
for token in line:
rie = RiakIndexEntry(field, token)
metadata[MD_INDEX].append(rie)
elif header == 'x-riak-vclock':
vclock = value
if links:
metadata[MD_LINKS] = links
return vclock, [(metadata, data)]
def to_link_header(self, link):
"""
Convert this RiakLink object to a link header string. Used internally.
"""
header = ''
header += '</'
header += self._prefix + '/'
header += urllib.quote_plus(link.get_bucket()) + '/'
header += urllib.quote_plus(link.get_key()) + '>; riaktag="'
header += urllib.quote_plus(link.get_tag()) + '"'
return header
def parse_links(self, links, linkHeaders):
"""
Private.
@return self
"""
for linkHeader in linkHeaders.strip().split(','):
linkHeader = linkHeader.strip()
matches = re.match("</([^/]+)/([^/]+)/([^/]+)>; ?riaktag=\"([^\']+)\"", linkHeader)
if matches is not None:
link = RiakLink(matches.group(2), matches.group(3), matches.group(4))
links.append(link)
return self
def add_links_for_riak_object(self, robject, headers):
links = robject.get_links()
if links:
current_header = ''
for link in links:
header = self.to_link_header(link)
if len(current_header + header) > MAX_LINK_HEADER_SIZE:
headers.add('Link', current_header)
current_header = ''
if current_header != '': header = ', ' + header
current_header += header
headers.add('Link', current_header)
return headers
def get_request(self, uri=None, params=None):
url = self.build_rest_path(bucket=None, params=params, prefix=uri)
return self.http_request('GET', url)
def store_file(self, key, content_type="application/octet-stream", content=None):
url = self.build_rest_path(prefix='luwak', key=key)
headers = {'Content-Type' : content_type,
'X-Riak-ClientId' : self._client_id}
return self.do_put(url, headers, content, key=key)
def get_file(self, key):
url = self.build_rest_path(prefix='luwak', key=key)
response = self.http_request('GET', url)
result = self.parse_body(response, [200, 300, 404])
if result is not None:
(vclock, data) = result
(headers, body) = data.pop()
return body
def delete_file(self, key):
url = self.build_rest_path(prefix='luwak', key=key)
response = self.http_request('DELETE', url)
self.parse_body(response, [204, 404])
def post_request(self, uri=None, body=None, params=None, content_type="application/json"):
uri = self.build_rest_path(prefix=uri, params=params)
return self.http_request('POST', uri, {'Content-Type': content_type}, body)
# Utility functions used by Riak library.
def build_rest_path(self, bucket=None, key=None, params=None, prefix=None) :
"""
Given a RiakClient, RiakBucket, Key, LinkSpec, and Params,
construct and return a URL.
"""
# Build 'http://hostname:port/prefix/bucket'
path = ''
path += '/' + (prefix or self._prefix)
# Add '.../bucket'
if bucket is not None:
path += '/' + urllib.quote_plus(bucket._name)
# Add '.../key'
if key is not None:
path += '/' + urllib.quote_plus(key)
# Add query parameters.
if params is not None:
s = ''
for key in params.keys():
if s != '': s += '&'
s += urllib.quote_plus(key) + '=' + urllib.quote_plus(str(params[key]))
path += '?' + s
# Return.
return path
def build_put_headers(self, robj):
"""Build the headers for a POST/PUT request."""
# Construct the headers...
headers = MultiDict({'Accept' : 'text/plain, */*; q=0.5',
'Content-Type' : robj.get_content_type(),
'X-Riak-ClientId' : self._client_id})
# Add the vclock if it exists...
if robj.vclock() is not None:
headers['X-Riak-Vclock'] = robj.vclock()
# Create the header from metadata
links = self.add_links_for_riak_object(robj, headers)
for key, value in robj.get_usermeta().iteritems():
headers['X-Riak-Meta-%s' % key] = value
for rie in robj.get_indexes():
key = 'X-Riak-Index-%s' % rie.get_field()
if key in headers:
headers[key] += ", " + rie.get_value()
else:
headers[key] = rie.get_value()
return headers
def http_request(self, method, uri, headers=None, body='') :
"""
Given a Method, URL, Headers, and Body, perform and HTTP request,
and return a 2-tuple containing a dictionary of response headers
and the response body.
"""
if headers is None:
headers = {}
# Run the request...
for retry in range(self.RETRY_COUNT):
with self._conns.withconn() as conn:
### should probably build this try/except into a custom
### contextmanager for the connection.
try:
conn.request(method, uri, body, headers)
response = conn.getresponse()
try:
# Get the response headers...
response_headers = {'http_code': response.status}
for (key, value) in response.getheaders():
response_headers[key.lower()] = value
# Get the body...
response_body = response.read()
finally:
response.close()
return response_headers, response_body
except socket.error, e:
conn.close()
if e[0] == errno.ECONNRESET:
# Grab another connection and try again.
continue
# Don't know how to handle this.
raise
except httplib.HTTPException:
# Just close the connection and try again.
conn.close()
continue
# No luck, even with retrying.
raise RiakError("could not get a response")
@classmethod
def build_headers(cls, headers):
return ['%s: %s' % (header, value) for header, value in headers.iteritems()]
@classmethod
def parse_http_headers(cls, headers) :
"""
Parse an HTTP Header string into an asssociative array of
response headers.
"""
retVal = {}
fields = headers.split("\n")
for field in fields:
matches = re.match("([^:]+):(.+)", field)
if matches is None: continue
key = matches.group(1).lower()
value = matches.group(2).strip()
if key in retVal.keys():
if isinstance(retVal[key], list):
retVal[key].append(value)
else:
retVal[key] = [retVal[key]].append(value)
else:
retVal[key] = value
return retVal
|
{
"content_hash": "c26d6bc2edd13e8b5644cc11fc79d265",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 110,
"avg_line_length": 36.24568138195777,
"alnum_prop": 0.5565028595636518,
"repo_name": "richleland/riak-python-client",
"id": "92d534b4e300ee801d830efa4f1b91664cfee4bc",
"size": "18884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "riak/transports/http.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import ggrc.builder
from blinker import Namespace
from flask import redirect, request, render_template, current_app
from ggrc.rbac import permissions
from ggrc.services.common import \
ModelView, as_json, inclusion_filter, filter_resource
from ggrc.utils import view_url_for, benchmark
from werkzeug.exceptions import Forbidden
class BaseObjectView(ModelView):
    """Flask view that renders the HTML page for a single model instance.

    Template resolution tries the model-specific template first, then falls
    back to the generic base template. Only GET of a single object is
    supported (see dispatch_request).
    """

    model_template = '{model_plural}/show.haml'
    base_template = 'base_objects/show.haml'

    signals = Namespace()
    extension_contributions = signals.signal('View Extension Contributions',
        """
        Gathers any extension contributions to be included into a template.
        Receiver functions must expect the following arguments:
        :sender: The model class of the object being rendered.
        :obj: the model instance being rendered.
        :context: A context for extensions to use in rendering the their
        contribution.
        """,
        )

    def dispatch_request(self, *args, **kwargs):
        # Only GET with a primary key is implemented; every other case falls
        # through to NotImplementedError.
        method = request.method.lower()
        if method == 'get':
            if self.pk in kwargs and kwargs[self.pk] is not None:
                return self.get(*args, **kwargs)
            else:
                # No `pk` given; fallthrough for now
                pass
        else:
            # Method not supported; fallthrough for now
            pass
        raise NotImplementedError()

    def get_context_for_object(self, obj):
        # `instance_json` is a lambda so the JSON is only built if a template
        # actually uses it.
        return {
            'instance': obj,
            'controller': self,
            'instance_json':
                lambda: self.get_object_json(obj)
        }

    def get_object_json(self, obj):
        """Returns object json"""
        with benchmark("Get object JSON"):
            return as_json({
                self.model._inflector.table_singular:
                    filter_resource(
                        ggrc.builder.json.publish_representation(
                            ggrc.builder.json.publish(obj, (), inclusion_filter)))
            })

    def get_model_template_paths_for_object(self, obj):
        # Generate lookup paths for templates based on inheritance
        return [
            self.model_template.format(model_plural=model._inflector.table_plural)
            for model in self.model.mro() if hasattr(model, '__table__')]

    def render_template_for_object(self, obj):
        context = self.get_context_for_object(obj)
        # Model-specific templates first, generic base template last.
        template_paths = \
            self.get_model_template_paths_for_object(obj) + [self.base_template]
        return render_template(template_paths, **context)

    def extension_content(self, obj):
        # Collect (receiver, template) pairs from signal subscribers and keep
        # only non-empty template contributions.
        contributions = self.extension_contributions.send(
            obj.__class__,
            obj=obj,
            context=self.get_context_for_object(obj),
        )
        return [template for func, template in contributions if template]

    def get(self, id):
        with benchmark("Query for object"):
            obj = self.get_object(id)
        if obj is None:
            return self.not_found_response()
        # 406 when the client explicitly refuses text/html.
        if 'Accept' in self.request.headers and \
                'text/html' not in self.request.headers['Accept']:
            return current_app.make_response((
                'text/html', 406, [('Content-Type', 'text/plain')]))
        # Both the row-level read permission and the page-level view
        # permission must hold.
        if not permissions.is_allowed_read(self.model.__name__, obj.id,
                                           obj.context_id):
            raise Forbidden()
        if not permissions.is_allowed_view_object_page_for(obj):
            raise Forbidden()
        with benchmark("Render"):
            rendered_template = self.render_template_for_object(obj)
        # FIXME: Etag based on rendered output, or object itself?
        # if 'If-None-Match' in self.request.headers and \
        #     self.request.headers['If-None-Match'] == self.etag(object_for_json):
        #   return current_app.make_response((
        #     '', 304, [('Etag', self.etag(object_for_json))]))
        return rendered_template

    @classmethod
    def add_to(cls, app, url, model_class=None, decorators=()):
        # Build a model-specific subclass (registered on ggrc.views) so each
        # model gets its own endpoint name and base URL; otherwise register
        # this class directly.
        if model_class:
            cls_name = '{0}ObjectView'.format(model_class.__name__)
            view_class = type(
                cls_name,
                (cls,),
                {
                    '_model': model_class,
                    'base_url_for': classmethod(lambda cls: url),
                })
            import ggrc.views
            setattr(ggrc.views, model_class.__name__, view_class)
        else:
            view_class = cls
        view_func = view_class.as_view(view_class.endpoint_name())
        view_func = cls.decorate_view_func(view_func, decorators)
        view_route = '{url}/<{type}:{pk}>'.format(
            url=url, type=cls.pk_type, pk=cls.pk)
        app.add_url_rule(view_route, view_class.endpoint_name(),
                         view_func=view_func,
                         methods=['GET'])
class RedirectedPolymorphView(BaseObjectView):
    """Out of paranoia, redirect any direct link to a Directive view to the
    appropriate view for one of its polymorphic representations.
    """

    def get(self, id):
        obj = self.get_object(id)
        if obj is None:
            return self.not_found_response()
        # Reject clients that sent an Accept header excluding text/html.
        accept = self.request.headers.get('Accept')
        if accept is not None and 'text/html' not in accept:
            return current_app.make_response((
                'text/html', 406, [('Content-Type', 'text/plain')]))
        allowed = permissions.is_allowed_read(
            self.model.__name__, obj.id, obj.context_id)
        if not allowed:
            raise Forbidden()
        # Hand off to the canonical URL for the concrete representation.
        return redirect(view_url_for(obj))
|
{
"content_hash": "8c1387f409e29cbd99dbaeb18e943976",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 78,
"avg_line_length": 35.38513513513514,
"alnum_prop": 0.6257399274393737,
"repo_name": "VinnieJohns/ggrc-core",
"id": "268dd2dbc582e01a87772eda2fb09a9179a13267",
"size": "5350",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "src/ggrc/views/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "226950"
},
{
"name": "HTML",
"bytes": "1060386"
},
{
"name": "JavaScript",
"bytes": "1927277"
},
{
"name": "Makefile",
"bytes": "7044"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2762348"
},
{
"name": "Shell",
"bytes": "31100"
}
],
"symlink_target": ""
}
|
import numpy as np
__ver__ = '1.0'
class dust_wrapper(object):
    """dust_wrapper class. EzGal wraps this class around the dust function.

    Remembers an optional tuple of extra arguments given at construction and
    forwards them (when present) every time the wrapped dust function is
    invoked with (time, ls), hiding the argument-passing details.
    """

    func = ''          # wrapped dust function
    args = ()          # extra positional arguments forwarded on call
    has_args = False   # True when a non-empty tuple of extras was supplied

    def __init__(self, function, args):
        self.func = function
        if type(args) is tuple and len(args) > 0:
            self.has_args = True
            self.args = args

    def __call__(self, time, ls):
        if not self.has_args:
            return self.func(time, ls)
        return self.func(time, ls, *self.args)
class charlot_fall(object):
    """ callable-object implementation of the Charlot and Fall (2000) dust law """

    tau1 = 0.0
    tau2 = 0.0
    tbreak = 0.0

    def __init__(self, tau1=1.0, tau2=0.5, tbreak=0.01):
        """ dust_obj = charlot_fall(tau1=1.0, tau2=0.5, tbreak=0.01)

        Return a callable object for returning the dimming factor as a function of age
        for a Charlot and Fall (2000) dust law.  The dimming is:

        np.exp(-1*Tau(t)*(lambda/5500angstroms)**-0.7)

        Where Tau(t) = `tau1` for t < `tbreak` (in gyrs) and `tau2` otherwise. """
        self.tau1 = tau1
        self.tau2 = tau2
        self.tbreak = tbreak

    def __call__(self, ts, ls):
        """Return dimming factors of shape (ls.size, ts.size).

        `ts` are ages in Gyr, `ls` wavelengths in angstroms.

        BUGFIX: the previous implementation assigned `.shape` in place on the
        result of np.asarray, which silently reshaped caller-owned ndarrays
        as a side effect; reshape() is used instead so inputs are untouched.
        """
        ls = np.asarray(ls).reshape(-1, 1)   # column: one row per wavelength
        ts = np.asarray(ts).reshape(1, -1)   # row: one column per age
        # Optical depth per age: tau1 before the break, tau2 after.
        taus = np.where(ts > self.tbreak, self.tau2, self.tau1)
        return np.exp(-1.0 * taus * (ls / 5500.0) ** -0.7)
|
{
"content_hash": "8cc33009e9b2513facc600d03635b62f",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 100,
"avg_line_length": 30.526315789473685,
"alnum_prop": 0.5614942528735632,
"repo_name": "dpgettings/ezgal",
"id": "150b566fe245daff4547214a60b6764b5f1795e9",
"size": "1740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ezgal/dusts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "279047"
},
{
"name": "TeX",
"bytes": "1037"
}
],
"symlink_target": ""
}
|
"""
Utilities for using the Online Learning Library for Next.Discovery
author: Kevin Jamieson, kevin.g.jamieson@gmail.com
last updated: 2/17/2015
######################################
Serves as a library of utilities for all the adaptive and online learning applications on next.discovery.
There exist a few distinct sections:
- Learning Library Utilities, utilities for app and alg including lists of implemented algs and how to get alg objects
- Namespace Utilities, utilities for interacting with namespaces used in the database
- Time Utilities, utilities dealing with timing code and timestamps
Usage: ::\n
import next.utils as utils
app_id_list = utils.supportedApps()
for app_id in app_id_list:
print app_id
alg_id_list = utils.supportedAlgs(app_id)
for alg_id in alg_id_list:
print "\t-> "+alg_id
which outputs: ::\n
StochasticBanditsPureExploration
-> LilUCB
-> LUCB
-> SuccElim
DuelingBanditsPureExploration
-> BR_LilUCB
StochasticLinearBanditsExploreExploit
-> OFUL
-> UE
PoolBasedTripletMDS
-> UncertaintySampling
-> RandomSampling
"""
"""
Learning Library Utilities
#########################
"""
def get_supported_apps():
    """
    Returns a list of strings corresponding to the app_id's that are fully
    operational in the learning library.

    Usage: ::\n
        app_id_list = utils.get_supported_apps()
        print app_id_list
        >>> ['StochasticBanditsPureExploration', 'DuelingBanditsPureExploration', 'StochasticLinearBanditsExploreExploit', 'PoolBasedTripletMDS']
    """
    next_path = 'next.apps'
    # fromlist=[''] makes __import__ return the leaf module, not the package root
    app_module = __import__(next_path, fromlist=[''])
    return app_module.implemented_apps


def get_app_about(app_id):
    """
    Returns a string with a high-level description of the app.

    Usage: ::\n
        about = utils.get_app_about('PoolBasedTripletMDS')
        print about
    """
    app_id = str(app_id)  # sometimes input is unicode formatted which causes error
    next_path = 'next.apps.'
    app_module = __import__(next_path + app_id, fromlist=[''])
    return app_module.get_about()


def get_app_info_object(app_id):
    """
    Returns the app's info object, as produced by the app module's
    get_info_object().

    Usage: ::\n
        info = utils.get_app_info_object('PoolBasedTripletMDS')
        print info
    """
    app_id = str(app_id)  # sometimes input is unicode formatted which causes error
    next_path = 'next.apps.'
    app_module = __import__(next_path + app_id, fromlist=[''])
    return app_module.get_info_object()


def get_app_default_instructions(app_id):
    """
    Returns a string with default instructions for the app (can be
    overwritten on initExp).

    Usage: ::\n
        about = utils.get_app_default_instructions('PoolBasedTripletMDS')
        print about
    """
    app_id = str(app_id)  # sometimes input is unicode formatted which causes error
    next_path = 'next.apps.'
    app_module = __import__(next_path + app_id, fromlist=[''])
    return app_module.get_default_instructions()


def get_app_default_debrief(app_id):
    """
    Returns a string with default debrief for the app (can be overwritten
    on initExp).

    Usage: ::\n
        about = utils.get_app_default_debrief('PoolBasedTripletMDS')
        print about
    """
    app_id = str(app_id)  # sometimes input is unicode formatted which causes error
    next_path = 'next.apps.'
    app_module = __import__(next_path + app_id, fromlist=[''])
    return app_module.get_default_debrief()


def get_app_default_num_tries(app_id):
    """
    Returns an int with default num_tries for the app (can be overwritten
    on initExp).

    Usage: ::\n
        about = utils.get_app_default_num_tries('PoolBasedTripletMDS')
        print about
    """
    app_id = str(app_id)  # sometimes input is unicode formatted which causes error
    next_path = 'next.apps.'
    app_module = __import__(next_path + app_id, fromlist=[''])
    return app_module.get_default_num_tries()


def get_app(app_id):
    """
    Returns an object corresponding to the app_id that contains methods
    like initExp, getQuery, etc.

    Usage: ::\n
        app = utils.get_app(app_id)
        print app
        >>> <next.apps.StochasticBanditsPureExploration.StochasticBanditsPureExploration.StochasticBanditsPureExploration object at 0x103c9dcd0>
    """
    app_id = str(app_id)  # sometimes input is unicode formatted which causes error
    next_path = 'next.apps.'
    app_module = __import__(next_path + app_id, fromlist=[''])
    # each app module exposes a class named after the app itself
    app_class = getattr(app_module, app_id)
    return app_class()


def get_app_alg(app_id, alg_id):
    """
    Returns an object corresponding to the alg_id that contains methods
    like initExp, getQuery, etc.  Note that each algorithm (with an alg_id)
    is a child of an app (with an app_id), hence the app_id input.

    Usage: ::\n
        alg = utils.get_app_alg(app_id, alg_id)
        print alg
        >>> <next.apps.PoolBasedTripletMDS.RandomSampling.RandomSampling.RandomSampling object at 0x103cb7e10>
    """
    app_id = str(app_id)  # sometimes input is unicode formatted which causes error
    alg_id = str(alg_id)  # sometimes input is unicode formatted which causes error
    next_path = 'next.apps.' + app_id + '.algs.'
    alg_module = __import__(next_path + alg_id, fromlist=[''])
    # each alg module exposes a class named after the alg itself
    alg_class = getattr(alg_module, alg_id)
    return alg_class()


def get_app_supported_algs(app_id):
    """
    Returns a list of strings corresponding to the alg_id's that are fully
    operational in the learning library for the given app_id.

    Usage: ::\n
        alg_id_list = utils.get_app_supported_algs('StochasticBanditsPureExploration')
        print alg_id_list
        >>> ['LilUCB', 'LUCB', 'SuccElim']
    """
    app_id = str(app_id)  # sometimes input is unicode formatted which causes error
    next_path = 'next.apps.'
    app_module = __import__(next_path + app_id, fromlist=[''])
    return app_module.get_implemented_algs()


def get_app_supported_stats(app_id):
    """
    Returns a list of dicts describing the stats available for the app and
    what are the necessary inputs.

    Usage: ::\n
        stat_list = utils.get_app_supported_stats('PoolBasedTripletMDS')
    """
    app_id = str(app_id)  # sometimes input is unicode formatted which causes error
    next_path = 'next.apps.' + app_id + '.'
    dashboard_module = __import__(next_path + 'Dashboard', fromlist=[''])
    # each app exposes an '<AppId>Dashboard' class inside its Dashboard module
    dashboard_class = getattr(dashboard_module, app_id + 'Dashboard')
    dashboard = dashboard_class()
    return dashboard.get_app_supported_stats()


def get_app_default_alg_list(app_id):
    """
    The NEXT system was designed with evaluation in mind, meaning that users
    would upload their own algorithms or compare existing algorithms on their
    use cases.  However, a number of users just want to use NEXT as a system
    to adaptively collect data or just organize their data collection task.
    For this purpose, we have a set of defaults for the algorithms and input
    parameters.

    This script is primarily used for the internals of the system but may be
    of interest to those wondering what an example alg_list looks like.

    Usage: ::\n
        alg_list = utils.get_app_default_alg_list('PoolBasedTripletMDS')
        print json.dumps(alg_list, indent=2)
        [
          {
            "alg_label": "Test",
            "alg_id": "RandomSampling",
            "proportion": 0.1,
            "test_alg_label": "Test",
            "params": {}
          },
          {
            "alg_label": "Random",
            "alg_id": "RandomSampling",
            "proportion": 0.45,
            "test_alg_label": "Test",
            "params": {}
          },
          {
            "alg_label": "Uncertainty Sampling",
            "alg_id": "UncertaintySampling",
            "proportion": 0.45,
            "test_alg_label": "Test",
            "params": {}
          }
        ]
    """
    app_id = str(app_id)  # sometimes input is unicode formatted which causes error
    next_path = 'next.apps.'
    app_module = __import__(next_path + app_id, fromlist=[''])
    return app_module.get_default_alg_list()
"""
Namespace Utilities
#########################
"""
def getDocUID(exp_uid, alg_uid=None):
    """
    Each instance of an app (with an (app_id, exp_uid) pair) and an algorithm
    (with an (app_id, exp_uid, alg_id, alg_uid) tuple) gets its own namespace.
    This method defines that namespace given the exp_uid, or (exp_uid, alg_uid).

    Usage::\n
        print utils.getDocUID(exp_uid)
        >>> 'eee9d58c61d580029113ba593446d23a'
        print utils.getDocUID(exp_uid, alg_uid)
        >>> 'eee9d58c61d580029113ba593446d23a-f081d374abac6c009f5a74877f8b9f3c'
    """
    # idiomatic None test: ``is None`` instead of ``== None``
    if alg_uid is None:
        return exp_uid
    return exp_uid + "-" + alg_uid
import os
def getNewUID():
    """
    Returns a length-32 string of random hex generated from OS entropy
    (os.urandom) - good enough for cryptography.
    Probability of collision is 1 in 340282366920938463463374607431768211456.
    Used for unique identifiers all over the system.
    """
    # BUG FIX: bytes.encode('hex') exists only on Python 2 and raises on
    # Python 3.  binascii.hexlify works on both; decode the bytes result
    # so a native str is always returned.
    import binascii
    uid = binascii.hexlify(os.urandom(16))
    if not isinstance(uid, str):  # Python 3: hexlify returns bytes
        uid = uid.decode('ascii')
    return uid
"""
Time Utilities
#########################
"""
from datetime import datetime
def datetimeNow(format='datetime'):
    """
    Return the current datetime in the format used throughout the system.

    For consistency, one should ALWAYS call this method; do not make your
    own call to datetime.

    Usage: ::\n
        utils.datetimeNow()
        >>> datetime.datetime(2015, 2, 17, 11, 5, 56, 27822)
    """
    now = datetime.now()
    return datetime2str(now) if format == 'string' else now
def datetime2filename(obj_datetime):
    """
    Formats a datetime object as a string suitable for embedding in
    filenames, using the pattern '%Y-%m-%d_%H:%M:%S'.
    (Docstring corrected: the original was a copy-paste of datetime2str's.)

    Usage: ::\n
        date = utils.datetimeNow()
        name = utils.datetime2filename(date)
        print name
        >>> '2015-02-17_11:11:07'
    """
    return obj_datetime.strftime("%Y-%m-%d_%H:%M:%S")


def datetime2str(obj_datetime):
    """
    Converts a datetime object into the string format used in the system.
    For consistency, one should never use their own method of converting to
    string; always use this method.

    Usage: ::\n
        date = utils.datetimeNow()
        date_str = utils.datetime2str(date)
        print date_str
        >>> '2015-02-17 11:11:07.489925'
    """
    return str(obj_datetime)
def str2datetime(str_time):
    """
    Parses a datetime string (as produced by datetime2str) back into a
    datetime object.  For consistency, one should never use their own method
    of parsing; always use this method.

    Usage: ::\n
        date = utils.datetimeNow()
        date_str = utils.datetime2str(date)
        utils.str2datetime(date_str)
    """
    try:
        return datetime.strptime(str_time, '%Y-%m-%d %H:%M:%S.%f')
    except ValueError:
        # BUG FIX: the bare ``except:`` also swallowed TypeError and even
        # KeyboardInterrupt; only a format mismatch should trigger the
        # fallback to second resolution (no microseconds in the string).
        return datetime.strptime(str_time, '%Y-%m-%d %H:%M:%S')
import time
def timeit(f):
    """
    Utility used to time the duration of code execution.  This decorator can
    be composed with any other function: the wrapped call returns the
    original result with the elapsed seconds appended.

    Usage::\n
        def f(n):
            return n**n

        def g(n):
            return n, n**n

        answer0, dt = timeit(f)(3)
        answer1, answer2, dt = timeit(g)(3)
    """
    def timed(*args, **kw):
        ts = time.time()
        result = f(*args, **kw)
        te = time.time()
        # idiomatic type test: isinstance instead of type(result)==tuple;
        # tuple results get the duration appended, others are paired with it
        if isinstance(result, tuple):
            return result + ((te - ts),)
        return result, (te - ts)
    return timed
|
{
"content_hash": "d3035d158d04aaeed6686a4bde0012ff",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 142,
"avg_line_length": 31.33810888252149,
"alnum_prop": 0.6766023589649812,
"repo_name": "crcox/NEXT",
"id": "9b96ac961ec299d0bf979063f680de0c0a571350",
"size": "10937",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "next/utils/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "74514"
},
{
"name": "JavaScript",
"bytes": "16603"
},
{
"name": "Python",
"bytes": "817267"
},
{
"name": "Shell",
"bytes": "5783"
}
],
"symlink_target": ""
}
|
# Faster R-CNN with a ResNeSt-101 backbone, derived from the S-50 base config.
_base_ = './faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py'
model = dict(
    backbone=dict(
        # Override the base backbone: 128-channel stem, 101 layers,
        # weights initialised from the 'resnest101' pretrained checkpoint.
        stem_channels=128,
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='open-mmlab://resnest101')))
|
{
"content_hash": "f108e5339227b28476e70e7a68bae13d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 78,
"avg_line_length": 37.285714285714285,
"alnum_prop": 0.5977011494252874,
"repo_name": "open-mmlab/mmdetection",
"id": "40a2f1f2c9d62f173e88893e4ef809e70e2cbf5b",
"size": "261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2540"
},
{
"name": "Python",
"bytes": "4811377"
},
{
"name": "Shell",
"bytes": "47911"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from requests.exceptions import HTTPError
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.urls import reverse
from allauth.socialaccount.models import SocialToken
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase, patch
from .provider import YNABProvider
@override_settings(
    SOCIALACCOUNT_AUTO_SIGNUP=True,
    ACCOUNT_SIGNUP_FORM_CLASS=None, )
# ACCOUNT_EMAIL_VERIFICATION=account_settings
# .EmailVerificationMethod.MANDATORY)
class YNABTests(OAuth2TestsMixin, TestCase):
    """OAuth2 provider tests for YNAB (You Need A Budget)."""

    provider_id = YNABProvider.id

    def get_mocked_response(self):
        # Canned 200 payload mimicking YNAB's user endpoint, consumed by
        # the OAuth2TestsMixin login flow.
        return MockedResponse(200, """
        {"data": {
            "user":{
                "id": "abcd1234xyz5678"
                }
            }
        }
        """)

    def test_ynab_compelete_login_401(self):
        # NOTE(review): "compelete" is a typo in the method name; renaming
        # would change the public test id, so it is left untouched.
        from allauth.socialaccount.providers.ynab.views import \
            YNABOAuth2Adapter

        # MockedResponse.raise_for_status is a no-op; this subclass makes a
        # non-200 status actually raise, like a real requests.Response.
        class LessMockedResponse(MockedResponse):
            def raise_for_status(self):
                if self.status_code != 200:
                    raise HTTPError(None)

        request = RequestFactory().get(
            reverse(self.provider.id + '_login'),
            dict(process='login'))

        adapter = YNABOAuth2Adapter(request)
        app = adapter.get_provider().get_app(request)
        token = SocialToken(token='some_token')
        response_with_401 = LessMockedResponse(
            401, """
            {"error": {
              "errors": [{
                "domain": "global",
                "reason": "authError",
                "message": "Invalid Credentials",
                "locationType": "header",
                "location": "Authorization" } ],
              "code": 401,
              "message": "Invalid Credentials" }
            }""")

        # Patch the module-level ``requests`` so the adapter receives the
        # 401 response and must propagate the HTTPError.
        with patch(
                'allauth.socialaccount.providers.ynab.views'
                '.requests') as patched_requests:
            patched_requests.get.return_value = response_with_401
            with self.assertRaises(HTTPError):
                adapter.complete_login(request, app, token)
|
{
"content_hash": "77e9453b51b81e1f24c8da33400f87dc",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 65,
"avg_line_length": 33.298507462686565,
"alnum_prop": 0.6024204392649036,
"repo_name": "bittner/django-allauth",
"id": "d18f0ec4a24000931a0ed817e886101b55fca8a0",
"size": "2255",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/providers/ynab/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "104"
},
{
"name": "HTML",
"bytes": "42082"
},
{
"name": "JavaScript",
"bytes": "3248"
},
{
"name": "Makefile",
"bytes": "396"
},
{
"name": "Python",
"bytes": "706342"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import pytest
from case import Mock, patch
from kombu import Connection, Exchange, Queue
from kombu import compat
from t.mocks import Transport, Channel
class test_misc:
    """Unit tests for kombu.compat helpers (_iterconsume, Queue.from_dict)."""

    def test_iterconsume(self):
        """_iterconsume drains events and honours the ``limit`` argument."""

        class MyConnection(object):
            # counts drain_events calls; each call returns the new count
            drained = 0

            def drain_events(self, *args, **kwargs):
                self.drained += 1
                return self.drained

        class Consumer(object):
            active = False

            def consume(self, *args, **kwargs):
                self.active = True

        conn = MyConnection()
        consumer = Consumer()
        it = compat._iterconsume(conn, consumer)
        assert next(it) == 1
        assert consumer.active

        it2 = compat._iterconsume(conn, consumer, limit=10)
        # BUG FIX: the original line read
        #     assert list(it2), [2, 3, 4, 5, 6, 7, 8, 9, 10 == 11]
        # which asserts a two-element tuple (always truthy) and compared
        # nothing.  The drain counter continues from 1, so ten more drains
        # must yield 2..11.
        assert list(it2) == [2, 3, 4, 5, 6, 7, 8, 9, 10, 11]

    def test_Queue_from_dict(self):
        """Queue.from_dict maps carrot-style option dicts onto Queue/Exchange."""
        defs = {'binding_key': 'foo.#',
                'exchange': 'fooex',
                'exchange_type': 'topic',
                'durable': True,
                'auto_delete': False}

        q1 = Queue.from_dict('foo', **dict(defs))
        assert q1.name == 'foo'
        assert q1.routing_key == 'foo.#'
        assert q1.exchange.name == 'fooex'
        assert q1.exchange.type == 'topic'
        assert q1.durable
        assert q1.exchange.durable
        assert not q1.auto_delete
        assert not q1.exchange.auto_delete

        # exchange_*/queue_* prefixed options override only one side.
        q2 = Queue.from_dict('foo', **dict(defs,
                                           exchange_durable=False))
        assert q2.durable
        assert not q2.exchange.durable

        q3 = Queue.from_dict('foo', **dict(defs,
                                           exchange_auto_delete=True))
        assert not q3.auto_delete
        assert q3.exchange.auto_delete

        q4 = Queue.from_dict('foo', **dict(defs,
                                           queue_durable=False))
        assert not q4.durable
        assert q4.exchange.durable

        q5 = Queue.from_dict('foo', **dict(defs,
                                           queue_auto_delete=True))
        assert q5.auto_delete
        assert not q5.exchange.auto_delete

        # equal definitions must produce equal queues
        assert (Queue.from_dict('foo', **dict(defs)) ==
                Queue.from_dict('foo', **dict(defs)))
class test_Publisher:
    """Tests for the kombu.compat.Publisher shim (carrot-era API)."""

    def setup(self):
        self.connection = Connection(transport=Transport)

    def test_constructor(self):
        # A string exchange becomes a durable, non-auto-delete direct exchange.
        pub = compat.Publisher(self.connection,
                               exchange='test_Publisher_constructor',
                               routing_key='rkey')
        assert isinstance(pub.backend, Channel)
        assert pub.exchange.name == 'test_Publisher_constructor'
        assert pub.exchange.durable
        assert not pub.exchange.auto_delete
        assert pub.exchange.type == 'direct'

        # durable/auto_delete keyword arguments override the defaults.
        pub2 = compat.Publisher(self.connection,
                                exchange='test_Publisher_constructor2',
                                routing_key='rkey',
                                auto_delete=True,
                                durable=False)
        assert pub2.exchange.auto_delete
        assert not pub2.exchange.durable

        # An explicit Exchange instance is used as-is.
        explicit = Exchange('test_Publisher_constructor_explicit',
                            type='topic')
        pub3 = compat.Publisher(self.connection,
                                exchange=explicit)
        assert pub3.exchange == explicit

        # Constructing with an explicit channel must also work.
        compat.Publisher(self.connection,
                         exchange='test_Publisher_constructor3',
                         channel=self.connection.default_channel)

    def test_send(self):
        pub = compat.Publisher(self.connection,
                               exchange='test_Publisher_send',
                               routing_key='rkey')
        pub.send({'foo': 'bar'})
        # The mock Channel records which methods were called on it.
        assert 'basic_publish' in pub.backend
        pub.close()

    def test__enter__exit__(self):
        pub = compat.Publisher(self.connection,
                               exchange='test_Publisher_send',
                               routing_key='rkey')
        x = pub.__enter__()
        assert x is pub
        x.__exit__()
        # leaving the context closes the publisher
        assert pub._closed
class test_Consumer:
    """Tests for the kombu.compat.Consumer shim (carrot-era API)."""

    def setup(self):
        self.connection = Connection(transport=Transport)

    @patch('kombu.compat._iterconsume')
    def test_iterconsume_calls__iterconsume(self, it, n='test_iterconsume'):
        c = compat.Consumer(self.connection, queue=n, exchange=n)
        c.iterconsume(limit=10, no_ack=True)
        it.assert_called_with(c.connection, c, True, 10)

    def test_constructor(self, n='test_Consumer_constructor'):
        # Defaults: durable queue/exchange, no auto-delete, names identical.
        c = compat.Consumer(self.connection, queue=n, exchange=n,
                            routing_key='rkey')
        assert isinstance(c.backend, Channel)
        q = c.queues[0]
        assert q.durable
        assert q.exchange.durable
        assert not q.auto_delete
        assert not q.exchange.auto_delete
        assert q.name == n
        assert q.exchange.name == n

        # Keyword overrides flow through to both queue and exchange.
        c2 = compat.Consumer(self.connection, queue=n + '2',
                             exchange=n + '2',
                             routing_key='rkey', durable=False,
                             auto_delete=True, exclusive=True)
        q2 = c2.queues[0]
        assert not q2.durable
        assert not q2.exchange.durable
        assert q2.auto_delete
        assert q2.exchange.auto_delete

    def test__enter__exit__(self, n='test__enter__exit__'):
        c = compat.Consumer(self.connection, queue=n, exchange=n,
                            routing_key='rkey')
        x = c.__enter__()
        assert x is c
        x.__exit__()
        assert c._closed

    def test_revive(self, n='test_revive'):
        c = compat.Consumer(self.connection, queue=n, exchange=n)
        with self.connection.channel() as c2:
            # revive() rebinds the consumer onto the new channel
            c.revive(c2)
            assert c.backend is c2

    def test__iter__(self, n='test__iter__'):
        c = compat.Consumer(self.connection, queue=n, exchange=n)
        c.iterqueue = Mock()
        # iterating a Consumer delegates to an infinite iterqueue
        c.__iter__()
        c.iterqueue.assert_called_with(infinite=True)

    def test_iter(self, n='test_iterqueue'):
        # smoke test: construction + close must not raise
        c = compat.Consumer(self.connection, queue=n, exchange=n,
                            routing_key='rkey')
        c.close()

    def test_process_next(self, n='test_process_next'):
        c = compat.Consumer(self.connection, queue=n, exchange=n,
                            routing_key='rkey')
        # process_next was never ported from carrot
        with pytest.raises(NotImplementedError):
            c.process_next()
        c.close()

    def test_iterconsume(self, n='test_iterconsume'):
        # smoke test: construction + close must not raise
        c = compat.Consumer(self.connection, queue=n, exchange=n,
                            routing_key='rkey')
        c.close()

    def test_discard_all(self, n='test_discard_all'):
        c = compat.Consumer(self.connection, queue=n, exchange=n,
                            routing_key='rkey')
        c.discard_all()
        # the mock Channel records the queue_purge call
        assert 'queue_purge' in c.backend

    def test_fetch(self, n='test_fetch'):
        c = compat.Consumer(self.connection, queue=n, exchange=n,
                            routing_key='rkey')
        # empty queue: fetch returns None (with or without no_ack)
        assert c.fetch() is None
        assert c.fetch(no_ack=True) is None
        assert 'basic_get' in c.backend

        callback_called = [False]

        def receive(payload, message):
            callback_called[0] = True

        # a queued message is returned as-is without callbacks...
        c.backend.to_deliver.append('42')
        payload = c.fetch().payload
        assert payload == '42'
        # ...and registered callbacks fire when enable_callbacks=True
        c.backend.to_deliver.append('46')
        c.register_callback(receive)
        assert c.fetch(enable_callbacks=True).payload == '46'
        assert callback_called[0]

    def test_discard_all_filterfunc_not_supported(self, n='xjf21j21'):
        c = compat.Consumer(self.connection, queue=n, exchange=n,
                            routing_key='rkey')
        # the carrot filterfunc argument was never ported
        with pytest.raises(NotImplementedError):
            c.discard_all(filterfunc=lambda x: x)
        c.close()

    def test_wait(self, n='test_wait'):
        # wait(limit) collects everything iterconsume yields
        class C(compat.Consumer):
            def iterconsume(self, limit=None):
                for i in range(limit):
                    yield i

        c = C(self.connection,
              queue=n, exchange=n, routing_key='rkey')
        assert c.wait(10) == list(range(10))
        c.close()

    def test_iterqueue(self, n='test_iterqueue'):
        i = [0]

        # iterqueue(limit) repeatedly calls fetch() up to the limit
        class C(compat.Consumer):
            def fetch(self, limit=None):
                z = i[0]
                i[0] += 1
                return z

        c = C(self.connection,
              queue=n, exchange=n, routing_key='rkey')
        assert list(c.iterqueue(limit=10)) == list(range(10))
        c.close()
class test_ConsumerSet:
    """Tests for the kombu.compat.ConsumerSet shim."""

    def setup(self):
        self.connection = Connection(transport=Transport)

    def test_providing_channel(self):
        chan = Mock(name='channel')
        cs = compat.ConsumerSet(self.connection, channel=chan)
        assert cs._provided_channel
        assert cs.backend is chan

        cs.cancel = Mock(name='cancel')
        cs.close()
        # a caller-provided channel must NOT be closed by the set
        chan.close.assert_not_called()

    @patch('kombu.compat._iterconsume')
    def test_iterconsume(self, _iterconsume, n='test_iterconsume'):
        c = compat.Consumer(self.connection, queue=n, exchange=n)
        cs = compat.ConsumerSet(self.connection, consumers=[c])
        cs.iterconsume(limit=10, no_ack=True)
        _iterconsume.assert_called_with(c.connection, cs, True, 10)

    def test_revive(self, n='test_revive'):
        c = compat.Consumer(self.connection, queue=n, exchange=n)
        cs = compat.ConsumerSet(self.connection, consumers=[c])
        with self.connection.channel() as c2:
            cs.revive(c2)
            assert cs.backend is c2

    def test_constructor(self, prefix='0daf8h21'):
        # two ways to build a set: explicit consumers or a carrot-style dict
        dcon = {'%s.xyx' % prefix: {'exchange': '%s.xyx' % prefix,
                                    'routing_key': 'xyx'},
                '%s.xyz' % prefix: {'exchange': '%s.xyz' % prefix,
                                    'routing_key': 'xyz'}}
        consumers = [compat.Consumer(self.connection, queue=prefix + str(i),
                                     exchange=prefix + str(i))
                     for i in range(3)]
        c = compat.ConsumerSet(self.connection, consumers=consumers)
        c2 = compat.ConsumerSet(self.connection, from_dict=dcon)

        assert len(c.queues) == 3
        assert len(c2.queues) == 2

        # add_consumer binds the new queue to the set's channel
        c.add_consumer(compat.Consumer(self.connection,
                                       queue=prefix + 'xaxxxa',
                                       exchange=prefix + 'xaxxxa'))
        assert len(c.queues) == 4
        for cq in c.queues:
            assert cq.channel is c.channel

        # ...and so does add_consumer_from_dict
        c2.add_consumer_from_dict(
            '%s.xxx' % prefix,
            exchange='%s.xxx' % prefix,
            routing_key='xxx',
        )
        assert len(c2.queues) == 3
        for c2q in c2.queues:
            assert c2q.channel is c2.channel

        # discard_all purges every queue in the set
        c.discard_all()
        assert c.channel.called.count('queue_purge') == 4
        c.consume()

        c.close()
        c2.close()
        # closing cancels consumers and closes the owned channels
        assert 'basic_cancel' in c.channel
        assert 'close' in c.channel
        assert 'close' in c2.channel
|
{
"content_hash": "a2ec5975f7bdc3a4e25b44374e32c0bb",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 76,
"avg_line_length": 33.885885885885884,
"alnum_prop": 0.5414746543778802,
"repo_name": "kawamon/hue",
"id": "485625d0b8e0780dc515838a83933820d2256fff",
"size": "11284",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/kombu-4.3.0/t/unit/test_compat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.views.generic import DetailView, ListView
from .models import JST
class JSTDetailView(DetailView):
    """Detail page for a single JST (territorial unit) record."""
    model = JST


class JSTListView(ListView):
    """List view of JST records, narrowed by the manager's voivodeship()
    queryset method."""
    model = JST

    def get_queryset(self, *args, **kwargs):
        # restrict the default queryset via the custom voivodeship() filter
        qs = super(JSTListView, self).get_queryset(*args, **kwargs)
        return qs.voivodeship()
|
{
"content_hash": "0fd92945fc33ff08b9788f93e34cdfca",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 67,
"avg_line_length": 24.133333333333333,
"alnum_prop": 0.7016574585635359,
"repo_name": "watchdogpolska/bliski_publikator",
"id": "534988e8975790432ec68406a05255d45e126d8f",
"size": "362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bliski_publikator/teryt/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "38778"
},
{
"name": "HTML",
"bytes": "109977"
},
{
"name": "JavaScript",
"bytes": "14459"
},
{
"name": "Python",
"bytes": "184036"
},
{
"name": "TypeScript",
"bytes": "38566"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build and return the Static template object for this streetlamp prop
    (auto-generated swgpy object template; edit only between the
    modification markers)."""
    result = Static()

    result.template = "object/static/structure/general/shared_streetlamp_small_blue_style_01_on.iff"
    result.attribute_template_id = -1
    result.stfName("obj_n", "unknown_object")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
|
{
"content_hash": "3c7f84996796b1566796ee137e42b59a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 97,
"avg_line_length": 25.076923076923077,
"alnum_prop": 0.7024539877300614,
"repo_name": "obi-two/Rebelion",
"id": "d4c85e7514e046cd956c896b8d45e3315e42451f",
"size": "471",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/static/structure/general/shared_streetlamp_small_blue_style_01_on.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
import re
import sys
from types import FrameType
from unittest import mock
import pytest
from _pytest._code import Code
from _pytest._code import ExceptionInfo
from _pytest._code import Frame
from _pytest._code import Source
from _pytest._code.code import ExceptionChainRepr
from _pytest._code.code import ReprFuncArgs
def test_ne() -> None:
    # Code objects compiled from different source must not compare equal.
    code1 = Code(compile('foo = "bar"', "", "exec"))
    assert code1 == code1
    code2 = Code(compile('foo = "baz"', "", "exec"))
    assert code2 != code1


def test_code_gives_back_name_for_not_existing_file() -> None:
    # When co_filename does not exist on disk, Code.path falls back to the
    # raw name string and fullsource is unavailable.
    name = "abc-123"
    co_code = compile("pass\n", name, "exec")
    assert co_code.co_filename == name
    code = Code(co_code)
    assert str(code.path) == name
    assert code.fullsource is None


def test_code_with_class() -> None:
    class A:
        pass

    # Code() rejects arbitrary classes.
    pytest.raises(TypeError, Code, A)


def x() -> None:
    # Helper fixture function; the tests below inspect its source.
    raise NotImplementedError()


def test_code_fullsource() -> None:
    code = Code(x)
    full = code.fullsource
    # fullsource spans the whole file, so this test's own call appears in it.
    assert "test_code_fullsource()" in str(full)


def test_code_source() -> None:
    code = Code(x)
    src = code.source()
    expected = """def x() -> None:
    raise NotImplementedError()"""
    assert str(src) == expected
def test_frame_getsourcelineno_myself() -> None:
    def func() -> FrameType:
        return sys._getframe(0)

    f = Frame(func())
    source, lineno = f.code.fullsource, f.lineno
    assert source is not None
    # Frame.lineno indexes (0-based) into the file's full source lines.
    assert source[lineno].startswith("        return sys._getframe(0)")


def test_getstatement_empty_fullsource() -> None:
    def func() -> FrameType:
        return sys._getframe(0)

    f = Frame(func())
    # With fullsource patched away, statement degrades to an empty Source.
    with mock.patch.object(f.code.__class__, "fullsource", None):
        assert f.statement == Source("")


def test_code_from_func() -> None:
    co = Code(test_frame_getsourcelineno_myself)
    assert co.firstlineno
    assert co.path


def test_unicode_handling() -> None:
    value = "ąć".encode()

    def f() -> None:
        raise Exception(value)

    excinfo = pytest.raises(Exception, f)
    # Rendering the excinfo must not crash on non-ASCII byte payloads.
    str(excinfo)


def test_code_getargs() -> None:
    # getargs(var=True) includes *args and **kwargs names.
    def f1(x):
        raise NotImplementedError()

    c1 = Code(f1)
    assert c1.getargs(var=True) == ("x",)

    def f2(x, *y):
        raise NotImplementedError()

    c2 = Code(f2)
    assert c2.getargs(var=True) == ("x", "y")

    def f3(x, **z):
        raise NotImplementedError()

    c3 = Code(f3)
    assert c3.getargs(var=True) == ("x", "z")

    def f4(x, *y, **z):
        raise NotImplementedError()

    c4 = Code(f4)
    assert c4.getargs(var=True) == ("x", "y", "z")


def test_frame_getargs() -> None:
    # Frame.getargs pairs each argument name with its runtime value.
    def f1(x) -> FrameType:
        return sys._getframe(0)

    fr1 = Frame(f1("a"))
    assert fr1.getargs(var=True) == [("x", "a")]

    def f2(x, *y) -> FrameType:
        return sys._getframe(0)

    fr2 = Frame(f2("a", "b", "c"))
    assert fr2.getargs(var=True) == [("x", "a"), ("y", ("b", "c"))]

    def f3(x, **z) -> FrameType:
        return sys._getframe(0)

    fr3 = Frame(f3("a", b="c"))
    assert fr3.getargs(var=True) == [("x", "a"), ("z", {"b": "c"})]

    def f4(x, *y, **z) -> FrameType:
        return sys._getframe(0)

    fr4 = Frame(f4("a", "b", c="d"))
    assert fr4.getargs(var=True) == [("x", "a"), ("y", ("b",)), ("z", {"c": "d"})]
class TestExceptionInfo:
    def test_bad_getsource(self) -> None:
        # The failing assert sits inside an else branch; getrepr() must still
        # produce a representation despite the awkward source layout.
        try:
            if False:
                pass
            else:
                assert False
        except AssertionError:
            exci = ExceptionInfo.from_current()
        assert exci.getrepr()
    def test_from_current_with_missing(self) -> None:
        # from_current() requires an exception currently being handled.
        with pytest.raises(AssertionError, match="no current exception"):
            ExceptionInfo.from_current()
class TestTracebackEntry:
    def test_getsource(self) -> None:
        # getsource() returns the statement block around the failure; the
        # length and index assertions below pin its exact extent.
        try:
            if False:
                pass
            else:
                assert False
        except AssertionError:
            exci = ExceptionInfo.from_current()
        entry = exci.traceback[0]
        source = entry.getsource()
        assert source is not None
        assert len(source) == 6
        assert "assert False" in source[5]
    def test_tb_entry_str(self):
        try:
            assert False
        except AssertionError:
            exci = ExceptionInfo.from_current()
        # str() of an entry renders "File '<path>':<lineno> in <func>" plus
        # the failing source line.
        pattern = r" File '.*test_code.py':\d+ in test_tb_entry_str\n assert False"
        entry = str(exci.traceback[0])
        assert re.match(pattern, entry)
class TestReprFuncArgs:
    def test_not_raise_exception_with_mixed_encoding(self, tw_mock) -> None:
        # Mixing str and bytes argument values must not raise while rendering;
        # bytes are shown via their repr.
        args = [("unicode_string", "São Paulo"), ("utf8_string", b"S\xc3\xa3o Paulo")]
        r = ReprFuncArgs(args)
        r.toterminal(tw_mock)
        assert (
            tw_mock.lines[0]
            == r"unicode_string = São Paulo, utf8_string = b'S\xc3\xa3o Paulo'"
        )
def test_ExceptionChainRepr():
    """Test ExceptionChainRepr, especially with regard to being hashable."""
    try:
        raise ValueError()
    except ValueError:
        excinfo1 = ExceptionInfo.from_current()
        excinfo2 = ExceptionInfo.from_current()
    repr1 = excinfo1.getrepr()
    repr2 = excinfo2.getrepr()
    # Each getrepr() call yields a distinct, hashable object.
    assert repr1 != repr2
    assert isinstance(repr1, ExceptionChainRepr)
    assert hash(repr1) != hash(repr2)
    # Identity is not cached: a repeated call builds a fresh object.
    assert repr1 is not excinfo1.getrepr()
|
{
"content_hash": "ebb7356a79b92478e2f2094fdc86a8f7",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 86,
"avg_line_length": 25.393364928909953,
"alnum_prop": 0.5865994774169466,
"repo_name": "JoelMarcey/buck",
"id": "bae86be347fa3e06b7c0e7d8f1ea9177b3cc4be0",
"size": "5362",
"binary": false,
"copies": "10",
"ref": "refs/heads/dev",
"path": "third-party/py/pytest/testing/code/test_code.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "579"
},
{
"name": "Batchfile",
"bytes": "2093"
},
{
"name": "C",
"bytes": "255521"
},
{
"name": "C#",
"bytes": "237"
},
{
"name": "C++",
"bytes": "10992"
},
{
"name": "CSS",
"bytes": "54863"
},
{
"name": "D",
"bytes": "1017"
},
{
"name": "Go",
"bytes": "16819"
},
{
"name": "Groovy",
"bytes": "3362"
},
{
"name": "HTML",
"bytes": "6115"
},
{
"name": "Haskell",
"bytes": "895"
},
{
"name": "IDL",
"bytes": "385"
},
{
"name": "Java",
"bytes": "19430296"
},
{
"name": "JavaScript",
"bytes": "932672"
},
{
"name": "Kotlin",
"bytes": "2079"
},
{
"name": "Lex",
"bytes": "2731"
},
{
"name": "Makefile",
"bytes": "1816"
},
{
"name": "Matlab",
"bytes": "47"
},
{
"name": "OCaml",
"bytes": "4384"
},
{
"name": "Objective-C",
"bytes": "138150"
},
{
"name": "Objective-C++",
"bytes": "34"
},
{
"name": "PowerShell",
"bytes": "244"
},
{
"name": "Prolog",
"bytes": "858"
},
{
"name": "Python",
"bytes": "1786899"
},
{
"name": "Roff",
"bytes": "1109"
},
{
"name": "Rust",
"bytes": "3618"
},
{
"name": "Scala",
"bytes": "4906"
},
{
"name": "Shell",
"bytes": "49876"
},
{
"name": "Smalltalk",
"bytes": "3355"
},
{
"name": "Standard ML",
"bytes": "15"
},
{
"name": "Swift",
"bytes": "6897"
},
{
"name": "Thrift",
"bytes": "26256"
},
{
"name": "Yacc",
"bytes": "323"
}
],
"symlink_target": ""
}
|
"""
Copyright 2014-2020 Parsely, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from setuptools import setup, find_packages
# Get version without importing, which avoids dependency issues
def get_version():
    """Return ``__version__`` from streamparse/version.py without importing it.

    Importing the package inside setup.py can fail before its dependencies
    are installed, so the version string is extracted textually.

    Raises:
        RuntimeError: if no ``__version__`` assignment is found (previously
            this surfaced as a cryptic ``AttributeError`` on ``None``).
    """
    with open("streamparse/version.py") as version_file:
        match = re.search(
            r"""__version__\s+=\s+(['"])(?P<version>.+?)\1""", version_file.read()
        )
    if match is None:
        raise RuntimeError("Unable to find __version__ in streamparse/version.py")
    return match.group("version")
def readme():
    """Return the full contents of README.rst as a single string."""
    with open("README.rst") as readme_file:
        return readme_file.read()
# Runtime dependencies from requirements.txt: inline "#" comments are stripped
# and full-line comments / pip option lines (starting with "#" or "-") skipped.
install_requires = [
    l.split("#")[0].strip()
    for l in open("requirements.txt").readlines()
    if not l.startswith(("#", "-"))
]
# Extra packages needed only by the test suite.
tests_require = ["graphviz", "pytest"]
# Distribution metadata: version is read textually from streamparse/version.py
# and the long description from README.rst via the helpers above.
setup(
    name="streamparse",
    version=get_version(),
    author="Parsely, Inc.",
    author_email="hello@parsely.com",
    url="https://github.com/Parsely/streamparse",
    description=(
        "streamparse lets you run Python code against real-time "
        "streams of data. Integrates with Apache Storm."
    ),
    long_description=readme(),
    license="Apache License 2.0",
    packages=find_packages(),
    entry_points={
        # Three console commands, all dispatching into streamparse's CLI.
        "console_scripts": [
            "sparse = streamparse.cli.sparse:main",
            "streamparse = streamparse.cli.sparse:main",
            "streamparse_run = streamparse.run:main",
        ]
    },
    install_requires=install_requires,
    tests_require=tests_require,
    extras_require={
        "test": tests_require,
        "all": install_requires + tests_require,
        "docs": ["sphinx"] + tests_require,
    },
    zip_safe=False,
    include_package_data=True,
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
)
|
{
"content_hash": "1908c6a972bf4b02fdfcc23b35d67215",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 82,
"avg_line_length": 32.109756097560975,
"alnum_prop": 0.6410938093429548,
"repo_name": "Parsely/streamparse",
"id": "38ef7286758e138fe90a4dd9f2f1a0f170aef47f",
"size": "2655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "517"
},
{
"name": "Python",
"bytes": "211941"
},
{
"name": "Shell",
"bytes": "309"
}
],
"symlink_target": ""
}
|
"""
A Django app enabling cross-origin resource sharing in views.
Developed by Mohawk.
<http://www.mohawkhq.com/>
Contributors
------------
- Dave Hall <http://blog.etianen.com/>
"""
__version__ = (0, 9, 0)
|
{
"content_hash": "a212c4673bd6ce8958a9c72945a49111",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 61,
"avg_line_length": 13.3125,
"alnum_prop": 0.6291079812206573,
"repo_name": "mohawkhq/django-cross-origin",
"id": "90f280dfe2c4ff6f0d4eeb96da1171387183a0ff",
"size": "213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cross_origin/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10294"
}
],
"symlink_target": ""
}
|
import os
import mock
from datadog_checks.dev import get_here
from datadog_checks.postfix import PostfixCheck
MOCK_VERSION = '1.3.1'
def test__get_postqueue_stats(aggregator):
    """Recorded `postqueue -p` output yields per-queue size gauges."""
    check = PostfixCheck('postfix', {}, [])
    common_tags = ['instance:/etc/postfix', 'foo:bar']
    filepath = os.path.join(get_here(), 'fixtures', 'postqueue_p.txt')
    with open(filepath, 'r') as f:
        mocked_output = f.read()
    with mock.patch('datadog_checks.postfix.postfix.get_subprocess_output') as s:
        # First subprocess call fails; the second returns the fixture output.
        s.side_effect = [(False, None, None), (mocked_output, None, None)]
        check._get_postqueue_stats('/etc/postfix', ['foo:bar'])
    aggregator.assert_metric('postfix.queue.size', 1, tags=common_tags + ['queue:active'])
    aggregator.assert_metric('postfix.queue.size', 1, tags=common_tags + ['queue:hold'])
    aggregator.assert_metric('postfix.queue.size', 2, tags=common_tags + ['queue:deferred'])
def test__get_postqueue_stats_empty(aggregator):
    """An empty mail queue must report size 0 for every queue type."""
    check = PostfixCheck('postfix', {}, [])
    common_tags = ['instance:/etc/postfix']
    with mock.patch('datadog_checks.postfix.postfix.get_subprocess_output') as s:
        # First subprocess call fails; the second reports an empty queue.
        s.side_effect = [(False, None, None), ('Mail queue is empty', None, None)]
        check._get_postqueue_stats('/etc/postfix', [])
    # Fix: the second assertion previously duplicated 'queue:active' and never
    # covered 'queue:hold' (compare test__get_postqueue_stats above).
    aggregator.assert_metric('postfix.queue.size', 0, tags=common_tags + ['queue:active'])
    aggregator.assert_metric('postfix.queue.size', 0, tags=common_tags + ['queue:hold'])
    aggregator.assert_metric('postfix.queue.size', 0, tags=common_tags + ['queue:deferred'])
@mock.patch(
    'datadog_checks.postfix.postfix.get_subprocess_output',
    return_value=('mail_version = {}'.format(MOCK_VERSION), None, None),
)
def test_collect_metadata(aggregator, datadog_agent):
    """_collect_metadata parses "mail_version = x.y.z" into semver fields."""
    # TODO: Migrate this test as e2e test when it's possible to retrieve the metadata from the Agent
    check = PostfixCheck('postfix', {}, [{}])
    check.check_id = 'test:123'
    check._collect_metadata()
    major, minor, patch = MOCK_VERSION.split('.')
    version_metadata = {
        'version.scheme': 'semver',
        'version.major': major,
        'version.minor': minor,
        'version.patch': patch,
        'version.raw': MOCK_VERSION,
    }
    datadog_agent.assert_metadata('test:123', version_metadata)
|
{
"content_hash": "25016a63e1af7ecd7cc39a8a6a18c7a4",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 100,
"avg_line_length": 37.80327868852459,
"alnum_prop": 0.6569817866435386,
"repo_name": "DataDog/integrations-core",
"id": "4b7ce1110684e07741f235f233a13db304426bf6",
"size": "2421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "postfix/tests/test_unit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
}
|
from ofx.builder import *
import datetime
import uuid
class Request:
    """Builds OFX 1.x (SGML) request documents for banks and card issuers.

    Each public method returns a complete OFX document string assembled from
    the tag builders star-imported from ``ofx.builder``.
    """

    def __init__(self, cookie=4, app_name="Money", app_version="1400"):
        # Note that American Express, at least, requires the app name
        # to be titlecase, and not all uppercase, for the request to
        # succeed. Memories of Mozilla....
        self.app_name = app_name
        self.app_version = app_version
        self.cookie = cookie  # FIXME: find out the meaning of this magic value. Why not 3 or 5?
        self.request_id = str(uuid.uuid4()).upper()

    def _format_date(self, date=None, datetime=None):
        """Format ``date`` as YYYYMMDD, or a timestamp as YYYYMMDDHHMMSS.

        Fix: ``datetime`` previously defaulted to ``datetime.datetime.now()``
        evaluated once at import time, freezing "now" for the whole process.
        It now defaults to None and is computed per call.  (The parameter name
        shadows the module, so the module is re-imported under an alias.)
        """
        if datetime is None:
            import datetime as _dt
            datetime = _dt.datetime.now()
        if date is None:
            return datetime.strftime("%Y%m%d%H%M%S")
        else:
            return date.strftime("%Y%m%d")

    def _message(self, institution, username, password, body):
        """Composes a complete OFX message document."""
        return DOCUMENT(self._header(),
                        OFX(self._sign_on(institution, username, password),
                            body))

    def _header(self):
        """Formats an OFX message header."""
        return HEADER(
            OFXHEADER("100"),
            DATA("OFXSGML"),
            VERSION("102"),
            SECURITY("NONE"),
            ENCODING("USASCII"),
            CHARSET("1252"),
            COMPRESSION("NONE"),
            OLDFILEUID("NONE"),
            NEWFILEUID(self.request_id))

    def _sign_on(self, institution, username, password):
        """Formats an OFX sign-on block."""
        return SIGNONMSGSRQV1(
            SONRQ(
                DTCLIENT(self._format_date()),
                USERID(username),
                USERPASS(password),
                LANGUAGE("ENG"),
                FI(
                    ORG(institution.ofx_org),
                    FID(institution.ofx_fid)),
                APPID(self.app_name),
                APPVER(self.app_version)))

    def fi_profile(self, institution, username, password):
        """Returns a complete OFX institution-profile request document."""
        return self._message(institution, username, password,
                             PROFMSGSRQV1(
                                 PROFTRNRQ(
                                     TRNUID(self.request_id),
                                     CLTCOOKIE(self.cookie),
                                     PROFRQ(
                                         CLIENTROUTING("NONE"),
                                         DTPROFUP("19980101")))))

    def account_info(self, institution, username, password):
        """Returns a complete OFX account information request document."""
        return self._message(institution, username, password,
                             SIGNUPMSGSRQV1(
                                 ACCTINFOTRNRQ(
                                     TRNUID(self.request_id),
                                     CLTCOOKIE(self.cookie),
                                     ACCTINFORQ(
                                         DTACCTUP("19980101")))))

    def bank_stmt(self, account, username, password, daysago=90):
        """Returns a complete OFX bank statement request document."""
        dt_start = datetime.datetime.now() - datetime.timedelta(days=daysago)
        return self._message(account.institution, username, password,
                             BANKMSGSRQV1(
                                 STMTTRNRQ(
                                     TRNUID(self.request_id),
                                     CLTCOOKIE(self.cookie),
                                     STMTRQ(
                                         BANKACCTFROM(
                                             BANKID(account.aba_number),
                                             ACCTID(account.acct_number),
                                             ACCTTYPE(account.get_ofx_accttype())),
                                         INCTRAN(
                                             DTSTART(self._format_date(date=dt_start)),
                                             INCLUDE("Y"))))))

    def bank_closing(self, account, username, password):
        """Returns a complete OFX bank closing information request document."""
        return self._message(account.institution, username, password,
                             BANKMSGSRQV1(
                                 STMTENDTRNRQ(
                                     TRNUID(self.request_id),
                                     CLTCOOKIE(self.cookie),
                                     STMTENDRQ(
                                         BANKACCTFROM(
                                             BANKID(account.aba_number),
                                             ACCTID(account.acct_number),
                                             ACCTTYPE(account.get_ofx_accttype()))))))

    def creditcard_stmt(self, account, username, password, daysago=90):
        """Returns a complete OFX credit card statement request document."""
        dt_start = datetime.datetime.now() - datetime.timedelta(days=daysago)
        return self._message(account.institution, username, password,
                             CREDITCARDMSGSRQV1(
                                 CCSTMTTRNRQ(
                                     TRNUID(self.request_id),
                                     CLTCOOKIE(self.cookie),
                                     CCSTMTRQ(
                                         CCACCTFROM(
                                             ACCTID(account.acct_number)),
                                         INCTRAN(
                                             DTSTART(self._format_date(date=dt_start)),
                                             INCLUDE("Y"))))))

    def creditcard_closing(self, account, username, password):
        """Returns a complete OFX credit card closing information request document."""
        dt_start = datetime.datetime.now() - datetime.timedelta(days=61)
        dt_end = datetime.datetime.now() - datetime.timedelta(days=31)
        return self._message(account.institution, username, password,
                             CREDITCARDMSGSRQV1(
                                 CCSTMTENDTRNRQ(
                                     TRNUID(self.request_id),
                                     CLTCOOKIE(self.cookie),
                                     CCSTMTENDRQ(
                                         CCACCTFROM(
                                             ACCTID(account.acct_number)),
                                         # Fix: DTSTART previously passed dt_end,
                                         # leaving dt_start unused and the
                                         # statement window zero-length.
                                         DTSTART(self._format_date(date=dt_start)),
                                         DTEND(self._format_date(date=dt_end))))))
|
{
"content_hash": "1446a084a0ec350ec434ffc2d5e4756a",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 101,
"avg_line_length": 42.507462686567166,
"alnum_prop": 0.514747191011236,
"repo_name": "wesabe/fixofx",
"id": "d82fb3b0509bb88a76d3bed2e0eb75a7f1838a19",
"size": "6332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/ofx/request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "576551"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from django_comments.views.comments import post_comment, comment_done
from . import views
# Comment routes: post/posted pages come straight from django_comments; the
# 'cr' route delegates to the local post_redirect view (see views.py).
urlpatterns = [
    url(r'^post/$', post_comment, name='comments-post-comment'),
    url(r'^posted/$', comment_done, name='comments-comment-done'),
    url(r'^cr/(\d+)/(.+)/$', views.post_redirect, name='comments-url-redirect'),
]
|
{
"content_hash": "8d3fe296ff3990d3dba9a31b6f362909",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 80,
"avg_line_length": 29.833333333333332,
"alnum_prop": 0.6843575418994413,
"repo_name": "HMSBeagle1831/rapidscience",
"id": "8f43bb04131f5fe90e7ff647dfd1e18f4d623972",
"size": "358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rlp/discussions/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "115769"
},
{
"name": "HTML",
"bytes": "118911"
},
{
"name": "JavaScript",
"bytes": "13496"
},
{
"name": "Python",
"bytes": "270256"
}
],
"symlink_target": ""
}
|
"""sysdescrparser.brocade."""
from sysdescr import SysDescr
# pylint: disable=no-name-in-module
class Brocade(SysDescr):
    """Class Brocade.

    This class is only for vendor definition: it tags the sysDescr as
    BROCADE and leaves model, os and version as UNKNOWN.
    """
    def __init__(self, raw):
        """Store the raw sysDescr and preset vendor/UNKNOWN metadata."""
        super(Brocade, self).__init__(raw)
        self.vendor = 'BROCADE'
        self.model = self.UNKNOWN
        self.os = self.UNKNOWN
        self.version = self.UNKNOWN
    def parse(self):
        """Parsing for sysDescr value.

        No Brocade-specific fields are extracted; returns self unchanged.
        """
        return self
|
{
"content_hash": "86ffd457e09e0eb58130cbf5a92a12d1",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 45,
"avg_line_length": 20.8,
"alnum_prop": 0.5942307692307692,
"repo_name": "mtoshi/sysdescrparser",
"id": "dd536b8481877f1534ded7055c045a0f40926003",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sysdescrparser/brocade.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35268"
},
{
"name": "Shell",
"bytes": "147"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import jmespath
from unittest import TestCase
from .common import event_data, BaseTest, TestConfig as Config
from c7n.cwe import CloudWatchEvents
class CloudWatchRuleTarget(BaseTest):
    def test_target_cross_account_remove(self):
        """Cross-account rule targets are matched and removed by the policy."""
        # Uses recorded flight data; no live AWS calls are made.
        session_factory = self.replay_flight_data("test_cwe_rule_target_cross")
        client = session_factory().client("events")
        policy = self.load_policy(
            {
                "name": "cwe-cross-account",
                "resource": "event-rule-target",
                "filters": [{"type": "cross-account"}],
                "actions": ["delete"],
            },
            config=Config.empty(),
            session_factory=session_factory,
        )
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        # After the delete action, the parent rule must have no targets left.
        targets = client.list_targets_by_rule(Rule=resources[0]["c7n:parent-id"]).get(
            "Targets"
        )
        self.assertEqual(targets, [])
class CloudWatchEventsFacadeTest(TestCase):
    """Unit tests for CloudWatchEvents.get_ids / .match over fixture events."""

    # DISABLED / Record flight data
    def test_get_ids(self):
        # Basic cloudtrail extraction with a bare event-name string.
        self.assertEqual(
            CloudWatchEvents.get_ids(
                {"detail": event_data("event-cloud-trail-run-instances.json")},
                {"type": "cloudtrail", "events": ["RunInstances"]},
            ),
            ["i-784cdacd", "i-7b4cdace"],
        )
    def test_get_ids_sans_with_details_expr(self):
        # Event spec with an explicit ids expression rooted at "detail.".
        self.assertEqual(
            CloudWatchEvents.get_ids(
                {'detail': event_data('event-cloud-trail-run-instances.json')},
                {'type': 'cloudtrail', 'events': [
                    {'ids': 'detail.responseElements.instancesSet.items[].instanceId',
                     'source': 'ec2.amazonaws.com',
                     'event': 'RunInstances'}]}),
            ["i-784cdacd", "i-7b4cdace"],
        )
    def test_get_ids_sans_without_details_expr(self):
        # Same as above but the ids expression omits the "detail." prefix;
        # both spellings must resolve to the same instance ids.
        self.assertEqual(
            sorted(CloudWatchEvents.get_ids(
                {'detail': event_data('event-cloud-trail-run-instances.json')},
                {'type': 'cloudtrail', 'events': [
                    {'ids': 'responseElements.instancesSet.items[].instanceId',
                     'source': 'ec2.amazonaws.com',
                     'event': 'RunInstances'}
                ]})),
            ["i-784cdacd", "i-7b4cdace"],
        )
    def test_get_ids_multiple_events(self):
        # With several candidate event specs, non-matching ones are skipped
        # until a spec both matches and yields resource ids.
        d = event_data("event-cloud-trail-run-instances.json")
        d["eventName"] = "StartInstances"
        self.assertEqual(
            CloudWatchEvents.get_ids(
                {"detail": d},
                {
                    "type": "cloudtrail",
                    "events": [
                        # wrong event name
                        {
                            "source": "ec2.amazonaws.com",
                            "event": "CreateTags",
                            "ids": "requestParameters.resourcesSet.items[].resourceId",
                        },
                        # wrong event source
                        {
                            "source": "ecs.amazonaws.com",
                            "event": "StartInstances",
                            "ids": "responseElements.instancesSet.items",
                        },
                        # matches no resource ids
                        {
                            "source": "ec2.amazonaws.com",
                            "event": "StartInstances",
                            "ids": "responseElements.instancesSet2.items[].instanceId",
                        },
                        # correct
                        {
                            "source": "ec2.amazonaws.com",
                            "event": "StartInstances",
                            "ids": "responseElements.instancesSet.items[].instanceId",
                        },
                        # we don't fall off the end
                        {
                            "source": "ec2.amazonaws.com",
                            "event": "StartInstances",
                            "ids": "responseElements.instancesSet.items[]",
                        },
                    ],
                },
            ),
            ["i-784cdacd", u"i-7b4cdace"],
        )
    def test_ec2_state(self):
        # ec2-instance-state events carry the instance id directly.
        self.assertEqual(
            CloudWatchEvents.get_ids(
                event_data("event-instance-state.json"), {"type": "ec2-instance-state"}
            ),
            ["i-a2d74f12"],
        )
    def test_asg_state(self):
        # asg-instance-state events resolve to the autoscaling group name.
        self.assertEqual(
            CloudWatchEvents.get_ids(
                event_data("event-asg-instance-failed.json"),
                {
                    "type": "asg-instance-state",
                    "events": ["EC2 Instance Launch Unsuccessful"],
                },
            ),
            ["CustodianTest"],
        )
    def test_custom_event(self):
        # A fully custom cloudtrail spec (event + ids + source) also works.
        d = {"detail": event_data("event-cloud-trail-run-instances.json")}
        d["detail"]["eventName"] = "StartInstances"
        self.assertEqual(
            CloudWatchEvents.get_ids(
                d,
                {
                    "type": "cloudtrail",
                    "events": [
                        {
                            "event": "StartInstances",
                            "ids": "responseElements.instancesSet.items[].instanceId",
                            "source": "ec2.amazonaws.com",
                        }
                    ],
                },
            ),
            ["i-784cdacd", u"i-7b4cdace"],
        )
    def test_non_cloud_trail_event(self):
        # match() must reject events that are not cloudtrail-shaped.
        for event in ["event-instance-state.json", "event-scheduled.json"]:
            self.assertFalse(CloudWatchEvents.match(event_data(event)))
    def test_cloud_trail_resource(self):
        # match() returns the source plus a compiled jmespath ids expression.
        self.assertEqual(
            CloudWatchEvents.match(event_data("event-cloud-trail-s3.json")),
            {
                "source": "s3.amazonaws.com",
                "ids": jmespath.compile("detail.requestParameters.bucketName"),
            },
        )
|
{
"content_hash": "df7e505773c7d77bc0fc37281a8f23e1",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 87,
"avg_line_length": 36.75449101796407,
"alnum_prop": 0.4604105571847507,
"repo_name": "ewbankkit/cloud-custodian",
"id": "9cade51aec1f93a0c47b10b126f2f7331e59bc7a",
"size": "6728",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_cwe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7986"
},
{
"name": "Go",
"bytes": "145643"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9857"
},
{
"name": "PowerShell",
"bytes": "1749"
},
{
"name": "Python",
"bytes": "4913354"
},
{
"name": "Shell",
"bytes": "7277"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.panels import DebugPanel
from debug_toolbar.utils import get_name_from_obj
class RequestVarsDebugPanel(DebugPanel):
    """
    A panel to display request variables (POST/GET, session, cookies).
    """
    name = 'RequestVars'
    template = 'debug_toolbar/panels/request_vars.html'
    has_content = True
    def __init__(self, *args, **kwargs):
        DebugPanel.__init__(self, *args, **kwargs)
        # View information is filled in later by process_view().
        self.view_func = None
        self.view_args = None
        self.view_kwargs = None
    def nav_title(self):
        return _('Request Vars')
    def title(self):
        return _('Request Vars')
    def url(self):
        return ''
    def process_request(self, request):
        # Kept so process_response() can read GET/POST/COOKIES/session later.
        self.request = request
    def process_view(self, request, view_func, view_args, view_kwargs):
        self.view_func = view_func
        self.view_args = view_args
        self.view_kwargs = view_kwargs
    def process_response(self, request, response):
        # Snapshot request data as (key, value) pairs for the panel template.
        self.record_stats({
            'get': [(k, self.request.GET.getlist(k)) for k in self.request.GET],
            'post': [(k, self.request.POST.getlist(k)) for k in self.request.POST],
            'cookies': [(k, self.request.COOKIES.get(k)) for k in self.request.COOKIES],
        })
        # NOTE(review): hasattr() is always true since __init__ sets the
        # attribute; the inner None check is what actually distinguishes cases.
        if hasattr(self, 'view_func'):
            if self.view_func is not None:
                name = get_name_from_obj(self.view_func)
            else:
                name = '<no view>'
            self.record_stats({
                'view_func': name,
                'view_args': self.view_args,
                'view_kwargs': self.view_kwargs
            })
        if hasattr(self.request, 'session'):
            # NOTE(review): ``iterkeys()`` exists only on Python 2 dicts/sessions.
            self.record_stats({
                'session': [(k, self.request.session.get(k)) for k in self.request.session.iterkeys()]
            })
|
{
"content_hash": "55f6daa503f83e901405db232b1a2624",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 102,
"avg_line_length": 31.433333333333334,
"alnum_prop": 0.574761399787911,
"repo_name": "mozilla/popcorn_maker",
"id": "4f85052c7ec882f78f58738cc4f3ca703b4cd311",
"size": "1886",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/debug_toolbar/panels/request_vars.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "3779620"
},
{
"name": "Puppet",
"bytes": "11668"
},
{
"name": "Python",
"bytes": "5113791"
},
{
"name": "Ruby",
"bytes": "1970"
},
{
"name": "Shell",
"bytes": "2419"
}
],
"symlink_target": ""
}
|
async def get_users(request):
    """Return username and email for every user via the app's user model."""
    return await request.app.m.user.get_list(fields=['username', 'email'])
|
{
"content_hash": "9294ecd8d65f91806d5784058fc27f46",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 75,
"avg_line_length": 41,
"alnum_prop": 0.6910569105691057,
"repo_name": "dvhbru/dvhb-hybrid",
"id": "acef2ecc76f109afae226b3d1d8a1579a8c7e8b3",
"size": "123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/tutorial/tutorial/users/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "177082"
}
],
"symlink_target": ""
}
|
# Python 2 script: estimates insert-size statistics by sampling read pairs
# from a BAM file whose |isize| lies strictly between args.m and args.M.
import pysam
import numpy as np
import argparse
import sys
ap = argparse.ArgumentParser(description="Compute expected insert size")
ap.add_argument("bam", help="Input bam file.")
ap.add_argument("-n", help="Num samples", type=int, default=100000)
ap.add_argument("-M", help="Max isize", type=int, default=10000)
# NOTE(review): help text says "Max isize" but -m is used as a lower bound
# below -- presumably it should read "Min isize".
ap.add_argument("-m", help="Max isize", type=int, default=100)
ap.add_argument("--all", help="Print all to this file", default=None)
args = ap.parse_args()
bamFile = pysam.Samfile(args.bam, 'rb')
if (args.all is not None):
    allFile = open(args.all, 'w')
reads = {}
nUsed = 0
spans = []
nproc =0
for aln in bamFile.fetch():
    if (aln.qname not in reads):
        # Sample each previously-unseen, in-range alignment with prob. 1/50.
        if (np.random.randint(0,50) < 1 and abs(aln.isize) < args.M and abs(aln.isize) > args.m ):
            spans.append(abs(aln.isize))
            reads[aln.qname]=True
            if (args.all is not None):
                allFile.write(str(aln.isize) + "\n")
            if (len(spans) >= args.n):
                break
    nproc += 1
    if (nproc % 10000 == 0):
        sys.stderr.write(str(nproc) + "\t" + str(len(spans)) + "\n")
npspans = np.asarray(spans)
npspans.sort()
# Python 2 print statements; the median index relies on Py2 integer division.
print "Num: " + str(len(npspans))
print "Median: " + str(npspans[len(npspans)/2])
print "Mean: " + str(np.mean(npspans))
print "SD: " + str(np.std(npspans))
print "max: " + str(np.max(npspans))
print "max: " + str(np.max(spans))
|
{
"content_hash": "2156cd738a36bee180072bfd118d2aeb",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 98,
"avg_line_length": 27.058823529411764,
"alnum_prop": 0.6217391304347826,
"repo_name": "yunlongliukm/chm1_scripts",
"id": "da499256f071610faad656915b0af05007cf7886",
"size": "1403",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ComputeCloneSize.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "50662"
},
{
"name": "Java",
"bytes": "488"
},
{
"name": "Makefile",
"bytes": "15037"
},
{
"name": "Python",
"bytes": "375549"
},
{
"name": "R",
"bytes": "50744"
},
{
"name": "Shell",
"bytes": "22590"
}
],
"symlink_target": ""
}
|
import warnings
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
from oscar.core import prices
from oscar.core.loading import get_class, get_model
from rest_framework import serializers, exceptions
from django.utils.translation import ugettext_lazy as _
from oscarapi.basket.operations import prepare_basket
from oscarapi.utils import (
OscarHyperlinkedModelSerializer,
OscarModelSerializer,
GetShippingMixin,
)
OrderPlacementMixin = get_class('checkout.mixins', 'OrderPlacementMixin')
ShippingAddress = get_model('order', 'ShippingAddress')
CheckoutSessionData = get_class(
'checkout.utils', 'CheckoutSessionData')
CheckoutSessionMixin = get_class('checkout.session', 'CheckoutSessionMixin')
BillingAddress = get_model('order', 'BillingAddress')
Order = get_model('order', 'Order')
Basket = get_model('basket', 'Basket')
ShippingMethod = get_model('shipping', 'OrderAndItemCharges')
Country = get_model('address', 'Country')
class PriceSerializer(serializers.Serializer):
    """(De)serializes ``oscar.core.prices.Price`` values.

    NOTE(review): ``restore_object`` is the pre-3.0 Django REST Framework
    deserialization hook; this module appears to target DRF 2.x.
    """
    currency = serializers.CharField(
        max_length=12, default=settings.OSCAR_DEFAULT_CURRENCY, required=False)
    excl_tax = serializers.DecimalField(
        decimal_places=2, max_digits=12, required=True)
    incl_tax = serializers.DecimalField(
        decimal_places=2, max_digits=12, required=False)
    tax = serializers.DecimalField(
        decimal_places=2, max_digits=12, required=False)
    def restore_object(self, attrs, instance=None):
        # Update an existing Price in place, or build a fresh one.
        if instance is not None:
            instance.currency = attrs.get('currency')
            instance.excl_tax = attrs.get('excl_tax')
            instance.incl_tax = attrs.get('incl_tax')
            instance.tax = attrs.get('tax')
        else:
            instance = prices.Price(
                currency=attrs.get('currency'),
                excl_tax=attrs.get('excl_tax'),
                incl_tax=attrs.get('incl_tax'),
                tax=attrs.get('tax'),
            )
        return instance
class CountrySerializer(OscarHyperlinkedModelSerializer):
    """Hyperlinked serializer for the address Country model."""
    class Meta:
        model = Country
class ShippingAddressSerializer(OscarHyperlinkedModelSerializer):
    """Hyperlinked serializer for ShippingAddress (top-level resource)."""
    class Meta:
        model = ShippingAddress
class InlineShippingAddressSerializer(OscarModelSerializer):
    """Nested ShippingAddress form used inside other serializers."""
    country = serializers.HyperlinkedRelatedField(view_name='country-detail')
    class Meta:
        model = ShippingAddress
class BillingAddressSerializer(OscarHyperlinkedModelSerializer):
    """Hyperlinked serializer for BillingAddress (top-level resource)."""
    class Meta:
        model = BillingAddress
class InlineBillingAddressSerializer(OscarModelSerializer):
    """Nested BillingAddress form used inside other serializers."""
    country = serializers.HyperlinkedRelatedField(view_name='country-detail')
    class Meta:
        model = BillingAddress
class ShippingMethodSerializer(OscarHyperlinkedModelSerializer):
    """Serializer for OrderAndItemCharges-based shipping methods."""
    class Meta:
        model = ShippingMethod
        view_name = 'shippingmethod-detail'
class OrderSerializer(OscarModelSerializer):
    """Order serializer with nested addresses and a computed payment URL."""
    shipping_address = InlineShippingAddressSerializer(
        many=False, required=False)
    billing_address = InlineBillingAddressSerializer(
        many=False, required=False)
    payment_url = serializers.SerializerMethodField('get_payment_url')
    def get_payment_url(self, obj):
        # Resolve the project-provided 'api-payment' view; when missing, warn
        # and return the warning text so serialization never hard-fails.
        try:
            return reverse('api-payment', args=(obj.pk,))
        except NoReverseMatch:
            msg = "You need to implement a view named 'api-payment' " \
                "which redirects to the payment provider and sets up the " \
                "callbacks."
            warnings.warn(msg)
            return msg
    class Meta:
        model = Order
# TODO: At the moment, only regular shipping charges are possible.
# Most likely CheckoutSerializer should also accept WeightBased shipping
# charges.
class CheckoutSerializer(serializers.Serializer, OrderPlacementMixin,
                         GetShippingMixin):
    """Validates a checkout submission and places the order on save.

    Cross-checks the client-submitted total and shipping charge against
    server-side calculations before accepting the order.
    """
    basket = serializers.HyperlinkedRelatedField(
        view_name='basket-detail', queryset=Basket.objects)
    total = PriceSerializer(many=False, required=True)
    shipping_method_code = serializers.CharField(
        max_length=128, required=False)
    shipping_charge = PriceSerializer(many=False, required=False)
    shipping_address = ShippingAddressSerializer(many=False, required=False)
    billing_address = BillingAddressSerializer(many=False, required=False)
    def validate(self, attrs):
        self.request = self.context['request']
        basket = prepare_basket(attrs.get('basket'), self.request)
        if basket.is_empty:
            raise serializers.ValidationError(_('Basket is empty'))
        # Resolve the shipping address/method, preferring session state and
        # falling back to the submitted shipping_method_code.
        self._set_new_address(basket, attrs.get('shipping_address'))
        shipping_address = self.get_shipping_address(basket)
        shipping_method = self.get_shipping_method(
            basket, shipping_address)
        if shipping_method is None:
            shipping_method = self._shipping_method(
                self.request, basket,
                attrs.get('shipping_method_code'),
                shipping_address
            )
        # An explicitly submitted billing address wins over the derived one.
        billing_address = self.get_billing_address(shipping_address) \
            if attrs.get('billing_address', None) is None \
            else attrs.get('billing_address', None)
        if not shipping_method:
            # No method resolved: take the client's figures at face value.
            total = attrs.get('total')
            shipping_method = attrs.get('shipping_method', None)
            shipping_charge = attrs.get('shipping_charge', None)
        else:
            shipping_charge = shipping_method.calculate(basket)
            total = self.get_order_totals(
                basket, shipping_charge=shipping_charge)
            # Reject submissions whose figures disagree with ours.
            if shipping_charge.incl_tax != attrs.get('shipping_charge').incl_tax:
                raise serializers.ValidationError(_('Invalid shipping charge'))
            if total.incl_tax != attrs.get('total').incl_tax:
                raise serializers.ValidationError(_('Invalid order total'))
        attrs['shipping_address'] = shipping_address
        attrs['billing_address'] = billing_address
        attrs['total'] = total
        attrs['shipping_charge'] = shipping_charge
        attrs['shipping_method'] = shipping_method
        attrs['basket'] = basket
        return attrs
    def restore_object(self, attrs, instance=None):
        # Pre-DRF-3.0 hook: build the Order from validated attrs.
        if instance is not None:
            return instance
        basket = attrs.get('basket')
        order_number = self.generate_order_number(basket)
        try:
            return self.place_order(
                order_number=order_number,
                user=self.request.user,
                basket=basket,
                shipping_address=attrs.get('shipping_address'),
                shipping_method=attrs.get('shipping_method'),
                shipping_charge=attrs.get('shipping_charge'),
                billing_address=attrs.get('billing_address'),
                order_total=attrs.get('total'),
            )
        except ValueError as e:
            # NOTE(review): ``e.message`` is a Python 2 idiom.
            raise exceptions.NotAcceptable(e.message)
class TotalChargeSerializer(serializers.Serializer, OrderPlacementMixin):
    """Compute the grand total (including shipping) for a basket."""

    basket = serializers.HyperlinkedRelatedField(
        view_name='basket-detail', queryset=Basket.objects)
    shipping_charge = PriceSerializer(many=False, required=True)

    def validate(self, attrs):
        """Return the basket url together with its computed order total."""
        self.request = self.context['request']
        prepared_basket = prepare_basket(attrs.get('basket'), self.request)
        if prepared_basket.is_empty:
            raise serializers.ValidationError(_('Basket is empty'))
        submitted_charge = attrs.get('shipping_charge')
        order_total = self.get_order_totals(
            prepared_basket, shipping_charge=submitted_charge)
        return {
            'basket_url': self.init_data['basket'],
            'total': PriceSerializer(order_total).data,
        }
|
{
"content_hash": "d02b201ee3e43504f7d819c549829fe4",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 79,
"avg_line_length": 37.69950738916256,
"alnum_prop": 0.6632693061544492,
"repo_name": "KuwaitNET/django-oscar-api",
"id": "c93227b69b5e3c8eb9048f9c24e6332c46264c14",
"size": "7653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscarapi/serializers/checkout.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "244"
},
{
"name": "Python",
"bytes": "166640"
}
],
"symlink_target": ""
}
|
"""This module contains utilities for methods."""
import logging
from math import ceil
import numpy as np
import scipy.stats as ss
import elfi.model.augmenter as augmenter
from elfi.clients.native import Client
from elfi.model.elfi_model import ComputationContext
logger = logging.getLogger(__name__)
def arr2d_to_batch(x, names):
    """Convert a 2d array into a batch dictionary, one column per name.

    Parameters
    ----------
    x : np.ndarray
        2d array of values
    names : list[str]
        List of names

    Returns
    -------
    dict
        A batch dictionary
    """
    n_cols = len(names)
    # TODO: support vector parameter nodes
    try:
        columns = x.reshape((-1, n_cols))
    except BaseException:
        raise ValueError("A dimension mismatch in converting array to batch dictionary. "
                         "This may be caused by multidimensional "
                         "prior nodes that are not yet supported.")
    return {name: columns[:, col] for col, name in enumerate(names)}
def batch_to_arr2d(batches, names):
    """Stack one or more batches into a single 2d numpy array.

    Parameters
    ----------
    batches : dict or list
        A list of batches or a single batch
    names : list
        Name of outputs to include in the array. Specifies the order.

    Returns
    -------
    np.array
        2d, where columns are batch outputs (empty list for empty input)
    """
    if not batches:
        return []
    batch_list = batches if isinstance(batches, list) else [batches]
    stacked = [np.column_stack([entry[name] for name in names])
               for entry in batch_list]
    return np.vstack(stacked)
def ceil_to_batch_size(num, batch_size):
    """Round ``num`` up to the nearest multiple of ``batch_size``.

    (The old docstring claimed this returned the number of full batches;
    it actually returns a sample count rounded up to whole batches.)

    Parameters
    ----------
    num : int
    batch_size : int

    Returns
    -------
    int
        The smallest multiple of ``batch_size`` that is >= ``num``.
    """
    return int(batch_size * ceil(num / batch_size))
def normalize_weights(weights):
    """Normalize weights to sum to unity.

    Parameters
    ----------
    weights : array_like
        Non-negative weights, not all zero.

    Returns
    -------
    np.ndarray
        1d array of weights summing to one.

    Raises
    ------
    ValueError
        If any weight is negative, or all weights are zero.
    """
    w = np.atleast_1d(weights)
    if np.any(w < 0):
        # Message fixed: zero weights are allowed, negative ones are not.
        raise ValueError("Weights must be non-negative")
    # Consistency fix: sum the 1d-normalised array rather than the raw input
    # (same value, but works uniformly for scalars/lists/arrays).
    wsum = np.sum(w)
    if wsum == 0:
        raise ValueError("All weights are zero")
    return w / wsum
def weighted_var(x, weights=None):
    """Unbiased weighted sample variance for the components of x.

    The weights are assumed to be non random (reliability weights).

    Parameters
    ----------
    x : np.ndarray
        1d or 2d with observations in rows
    weights : np.ndarray or None
        1d array of weights. None defaults to standard variance.

    Returns
    -------
    s2 : np.array
        1d vector of component variances

    References
    ----------
    [1] https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
    """
    w = np.ones(len(x)) if weights is None else weights
    total = np.sum(w)
    total_sq = np.sum(w ** 2)
    mean = np.average(x, weights=w, axis=0)
    deviation_sum = w.dot((x - mean) ** 2)
    # Bessel-style correction for reliability weights.
    return deviation_sum / (total - (total_sq / total))
class GMDistribution:
    """Gaussian mixture distribution with a shared covariance matrix."""

    @classmethod
    def pdf(cls, x, means, cov=1, weights=None):
        """Evaluate the density at points x.

        Parameters
        ----------
        x : array_like
            Scalar, 1d or 2d array of points where to evaluate, observations in rows
        means : array_like
            Means of the Gaussian mixture components. It is assumed that means[0] contains
            the mean of the first gaussian component.
        weights : array_like
            1d array of weights of the gaussian mixture components
        cov : array_like, float
            A shared covariance matrix for the mixture components

        Returns
        -------
        np.ndarray or scalar
            Density value(s), matching the dimensionality of ``x``.
        """
        means, weights = cls._normalize_params(means, weights)

        # Remember the caller's dimensionality so the result can be cast back.
        ndim = np.asanyarray(x).ndim
        if means.ndim == 1:
            x = np.atleast_1d(x)
        if means.ndim == 2:
            x = np.atleast_2d(x)

        # Mixture density is the weighted sum of the component densities.
        d = np.zeros(len(x))
        for m, w in zip(means, weights):
            d += w * ss.multivariate_normal.pdf(x, mean=m, cov=cov)

        # Cast to correct ndim
        if ndim == 0 or (ndim == 1 and means.ndim == 2):
            return d.squeeze()
        else:
            return d

    @classmethod
    def logpdf(cls, x, means, cov=1, weights=None):
        """Evaluate the log density at points x.

        Parameters
        ----------
        x : array_like
            Scalar, 1d or 2d array of points where to evaluate, observations in rows
        means : array_like
            Means of the Gaussian mixture components. It is assumed that means[0] contains
            the mean of the first gaussian component.
        weights : array_like
            1d array of weights of the gaussian mixture components
        cov : array_like, float
            A shared covariance matrix for the mixture components
        """
        return np.log(cls.pdf(x, means=means, cov=cov, weights=weights))

    @classmethod
    def rvs(cls, means, cov=1, weights=None, size=1, prior_logpdf=None, random_state=None):
        """Draw random variates from the distribution.

        Parameters
        ----------
        means : array_like
            Means of the Gaussian mixture components
        cov : array_like, optional
            A shared covariance matrix for the mixture components
        weights : array_like, optional
            1d array of weights of the gaussian mixture components
        size : int or tuple or None, optional
            Number or shape of samples to draw (a single sample has the shape of `means`).
            If None, return one sample without an enclosing array.
        prior_logpdf : callable, optional
            Can be used to check validity of random variable.
        random_state : np.random.RandomState, optional
        """
        random_state = random_state or np.random
        means, weights = cls._normalize_params(means, weights)

        if size is None:
            # size=None means: draw one sample but do not wrap it in an array.
            size = 1
            no_wrap = True
        else:
            no_wrap = False

        output = np.empty((size,) + means.shape[1:])

        # Accept-reject loop: draw candidate samples (a mixture component
        # mean plus Gaussian perturbation) and keep the ones the prior
        # considers valid, until `size` samples have been accepted.
        n_accepted = 0
        n_left = size
        trials = 0
        while n_accepted < size:
            inds = random_state.choice(len(means), size=n_left, p=weights)
            rvs = means[inds]
            # means[0] * 0 is a zero vector of the right shape/dtype.
            perturb = ss.multivariate_normal.rvs(mean=means[0] * 0,
                                                 cov=cov,
                                                 random_state=random_state,
                                                 size=n_left)
            x = rvs + perturb

            # check validity of x
            if prior_logpdf is not None:
                x = x[np.isfinite(prior_logpdf(x))]

            n_accepted1 = len(x)
            output[n_accepted: n_accepted+n_accepted1] = x
            n_accepted += n_accepted1
            n_left -= n_accepted1

            trials += 1
            # Warn once if acceptance appears very low (== keeps it a
            # single warning rather than one per further iteration).
            if trials == 100:
                logger.warning("SMC: It appears to be difficult to find enough valid proposals "
                               "with prior pdf > 0. ELFI will keep trying, but you may wish "
                               "to kill the process and adjust the model priors.")

        logger.debug('Needed %i trials to find %i valid samples.', trials, size)
        if no_wrap:
            return output[0]
        else:
            return output

    @staticmethod
    def _normalize_params(means, weights):
        """Return (means as at-most-2d array, weights normalized to sum 1)."""
        means = np.atleast_1d(means)
        if means.ndim > 2:
            raise ValueError('means.ndim = {} but must be at most 2.'.format(means.ndim))
        if weights is None:
            # Default to a uniform mixture.
            weights = np.ones(len(means))
        weights = normalize_weights(weights)
        return means, weights
def numgrad(fn, x, h=None, replace_neg_inf=True):
    """Naive numeric gradient implementation for scalar valued functions.

    Uses a central difference over a 3-point stencil (x-h, x, x+h) per
    dimension.

    Parameters
    ----------
    fn : callable
        Vectorized function taking a 2d array of points (one per row) and
        returning one value per row.
    x : np.ndarray
        A single point in 1d vector
    h : float or list
        Stepsize or stepsizes for the dimensions
    replace_neg_inf : bool
        Replace neg inf fn values with gradient 0 (useful for logpdf gradients)

    Returns
    -------
    grad : np.ndarray
        1D gradient vector
    """
    h = 0.00001 if h is None else h
    h = np.asanyarray(h).reshape(-1)

    # BUG FIX: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``float`` is the documented replacement.
    x = np.asanyarray(x, dtype=float).reshape(-1)
    dim = len(x)
    X = np.zeros((dim * 3, dim))

    # Build the stencil: for offsets -h, 0, +h perturb each coordinate in
    # turn (one row per coordinate), giving 3*dim evaluation points.
    for i in range(3):
        Xi = np.tile(x, (dim, 1))
        np.fill_diagonal(Xi, Xi.diagonal() + (i - 1) * h)
        X[i * dim:(i + 1) * dim, :] = Xi

    f = fn(X)
    f = f.reshape((3, dim))

    if replace_neg_inf:
        if np.any(np.isneginf(f)):
            return np.zeros(dim)

    # Central difference along the stencil axis; row 1 is the midpoint.
    grad = np.gradient(f, *h, axis=0)
    return grad[1, :]
# TODO: check that there are no latent variables in parameter parents.
# pdfs and gradients wouldn't be correct in those cases as it would require
# integrating out those latent variables. This is equivalent to that all
# stochastic nodes are parameters.
# TODO: could use some optimization
class ModelPrior:
    """Construct a joint prior distribution over all the parameter nodes in `ElfiModel`."""

    def __init__(self, model):
        """Initialize a ModelPrior.

        Compiles three nets from a copy of the model: one for sampling the
        parameters and one each for the (log) pdf evaluation.

        Parameters
        ----------
        model : ElfiModel
        """
        # Work on a copy so the augmenter does not mutate the caller's model.
        model = model.copy()
        self.parameter_names = model.parameter_names
        self.dim = len(self.parameter_names)
        self.client = Client()

        # Prepare nets for the pdf methods
        self._pdf_node = augmenter.add_pdf_nodes(model, log=False)[0]
        self._logpdf_node = augmenter.add_pdf_nodes(model, log=True)[0]

        self._rvs_net = self.client.compile(model.source_net, outputs=self.parameter_names)
        self._pdf_net = self.client.compile(model.source_net, outputs=self._pdf_node)
        self._logpdf_net = self.client.compile(model.source_net, outputs=self._logpdf_node)

    def rvs(self, size=None, random_state=None):
        """Sample the joint prior.

        When ``size`` is None a single unwrapped sample is returned.
        """
        random_state = np.random if random_state is None else random_state

        context = ComputationContext(size or 1, seed='global')
        loaded_net = self.client.load_data(self._rvs_net, context, batch_index=0)

        # Change to the correct random_state instance
        # TODO: allow passing random_state to ComputationContext seed
        loaded_net.node['_random_state'] = {'output': random_state}

        batch = self.client.compute(loaded_net)
        rvs = np.column_stack([batch[p] for p in self.parameter_names])

        if self.dim == 1:
            rvs = rvs.reshape(size or 1)

        return rvs[0] if size is None else rvs

    def pdf(self, x):
        """Return the density of the joint prior at x."""
        return self._evaluate_pdf(x)

    def logpdf(self, x):
        """Return the log density of the joint prior at x."""
        return self._evaluate_pdf(x, log=True)

    def _evaluate_pdf(self, x, log=False):
        """Evaluate the compiled (log) pdf net at the points in x."""
        if log:
            net = self._logpdf_net
            node = self._logpdf_node
        else:
            net = self._pdf_net
            node = self._pdf_node

        # Remember the caller's dimensionality so the result can be cast back.
        x = np.asanyarray(x)
        ndim = x.ndim
        x = x.reshape((-1, self.dim))
        batch = self._to_batch(x)

        # TODO: we could add a seed value that would load a "random state" instance
        # throwing an error if it is used, for instance seed="not used".
        context = ComputationContext(len(x), seed=0)
        loaded_net = self.client.load_data(net, context, batch_index=0)

        # Override the parameter nodes with the requested evaluation points.
        for k, v in batch.items():
            loaded_net.node[k] = {'output': v}

        val = self.client.compute(loaded_net)[node]
        if ndim == 0 or (ndim == 1 and self.dim > 1):
            val = val[0]
        return val

    def gradient_pdf(self, x):
        """Return the gradient of density of the joint prior at x."""
        raise NotImplementedError

    def gradient_logpdf(self, x, stepsize=None):
        """Return the gradient of log density of the joint prior at x.

        Computed numerically with ``numgrad``; non-finite entries of the
        result are zeroed.

        Parameters
        ----------
        x : float or np.ndarray
        stepsize : float or list
            Stepsize or stepsizes for the dimensions
        """
        x = np.asanyarray(x)
        ndim = x.ndim
        x = x.reshape((-1, self.dim))

        grads = np.zeros_like(x)

        for i in range(len(grads)):
            xi = x[i]
            grads[i] = numgrad(self.logpdf, xi, h=stepsize)

        # Zero out infinities/NaNs (e.g. outside the prior's support).
        grads[np.isinf(grads)] = 0
        grads[np.isnan(grads)] = 0

        if ndim == 0 or (ndim == 1 and self.dim > 1):
            grads = grads[0]
        return grads

    def _to_batch(self, x):
        """Convert a 2d array into a batch dict keyed by parameter name."""
        return {p: x[:, i] for i, p in enumerate(self.parameter_names)}
|
{
"content_hash": "4ea10dccd5b986ede1b831d10effe777",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 96,
"avg_line_length": 30.366666666666667,
"alnum_prop": 0.5776227066018504,
"repo_name": "lintusj1/elfi",
"id": "9c3c4edc5d465dec2f230e24c9a7f9ec584bfc3b",
"size": "12754",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "elfi/methods/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "7042"
},
{
"name": "Makefile",
"bytes": "3467"
},
{
"name": "Python",
"bytes": "355515"
}
],
"symlink_target": ""
}
|
from ..core import fileoperations
from ..objects.exceptions import ValidationError
from ..resources.strings import strings
# Key names used inside Dockerrun.aws.json (v1 uses CamelCase keys;
# v2 uses their lowercase variants — see _get_auth/get_auth_key below).
AUTH_BUCKET_KEY = 'Bucket'
AUTH_KEY = 'Authentication'
AUTHKEY_KEY = 'Key'
CONTAINER_PORT_KEY = 'ContainerPort'
IMG_NAME_KEY = 'Name'
IMG_KEY = 'Image'
IMG_UPDATE_KEY = 'Update'
JSON_FALSE = 'false'  # JSON booleans appear as strings in this schema
LOGGING_KEY = 'Logging'
PORTS_KEY = 'Ports'
# Supported values of the AWSEBDockerrunVersion field (kept as strings).
VERSION_ONE = '1'
VERSION_KEY = 'AWSEBDockerrunVersion'
VERSION_TWO = '2'
def validate_dockerrun_v1(dockerrun, is_used_to_make_dockerfile):
    """
    Validates given Dockerrun.aws.json version, and that if no Dockerfile
    exists, Image.Name and Ports[0].ContainerPort exists.
    :param dockerrun: dict: dictionary representation of Dockerrun.aws.json
    :param is_used_to_make_dockerfile: bool: whether used to make Dockerfile
    :return: None
    """
    if dockerrun is None:
        return
    if _get_version(dockerrun) != VERSION_ONE:
        raise ValidationError(strings['local.invaliddockerrunversion'])
    if not is_used_to_make_dockerfile:
        return
    # The remaining fields are only needed to synthesize a Dockerfile.
    has_image_name = (IMG_KEY in dockerrun and
                      IMG_NAME_KEY in dockerrun[IMG_KEY])
    if not has_image_name:
        raise ValidationError(strings['local.missingdockerrunimage'])
    if PORTS_KEY not in dockerrun:
        raise ValidationError(strings['local.missingdockerrunports'])
    if CONTAINER_PORT_KEY not in dockerrun[PORTS_KEY][0]:
        raise ValidationError(strings['local.missingdockerruncontainerport'])
def validate_dockerrun_v2(dockerrun):
    """Validate that dockerrun exists and declares schema version 2.

    :param dockerrun: dict: dictionary representation of Dockerrun.aws.json
    :raises ValidationError: when missing or not version 2
    """
    if dockerrun is None:
        raise ValidationError(strings['local.missingdockerrun'])
    if _get_version(dockerrun) != VERSION_TWO:
        raise ValidationError(strings['local.invaliddockerrunversion'])
def require_docker_pull(dockerrun):
    """
    Whether 'docker pull' is necessary. Return True if and only if
    Dockerrun.aws.json Image.Update value is not false.
    :param dockerrun: dict: dictionary representation of Dockerrun.aws.json
    :return: bool
    """
    if dockerrun is None or IMG_KEY not in dockerrun:
        return True
    # Pull is skipped only when Update is explicitly the string 'false'.
    return dockerrun[IMG_KEY].get(IMG_UPDATE_KEY) != JSON_FALSE
def get_dockerrun(dockerrun_path):
    """
    Return dict representation of Dockerrun.aws.json in dockerrun_path
    Return None if Dockerrun doesn't exist at that path.
    :param dockerrun_path: str: full path to Dockerrun.aws.json
    :return: dict
    """
    try:
        return fileoperations.get_json_dict(dockerrun_path)
    except IOError:
        # Dockerrun.aws.json doesn't exist
        return None
    except ValueError:
        # File exists but is not valid JSON.
        raise ValidationError(strings['local.invalidjson'])
def require_auth_download(dockerrun):
    """
    Return whether Authentication.Key and Authentication.Bucket is provided
    in Dockerrun.aws.json, in which case we have to pull down the bucket.
    :param dockerrun: dict: dictionary representation of Dockerrun.aws.json
    :return: bool
    """
    if dockerrun is None:
        return False
    try:
        # Both accessors raise KeyError when the field is absent.
        get_auth_key(dockerrun)
        get_auth_bucket_name(dockerrun)
    except KeyError:
        return False
    return True
def get_auth_key(dockerrun):
    """
    Get Authentication.Key value of dockerrun.
    :param dockerrun: dict: dictionary representation of Dockerrun.aws.json
    :return: str
    """
    # v1 uses CamelCase keys, v2 lowercase ones.
    is_v1 = _get_version(dockerrun) == VERSION_ONE
    key_name = AUTHKEY_KEY if is_v1 else AUTHKEY_KEY.lower()
    return _get_auth(dockerrun)[key_name]
def get_auth_bucket_name(dockerrun):
    """
    Get Authentication.Bucket value of dockerrun.
    :param dockerrun: dict: dictionary representation of Dockerrun.aws.json
    :return: str
    """
    # v1 uses CamelCase keys, v2 lowercase ones.
    is_v1 = _get_version(dockerrun) == VERSION_ONE
    bucket_key = AUTH_BUCKET_KEY if is_v1 else AUTH_BUCKET_KEY.lower()
    return _get_auth(dockerrun)[bucket_key]
def get_logdir(dockerrun):
    """
    Get Logging value of dockerrun.
    :param dockerrun: dict: dictionary representation of Dockerrun.aws.json
    :return: str
    """
    if not dockerrun:
        return None
    return dockerrun.get(LOGGING_KEY)
def get_base_img(dockerrun):
    """Return Image.Name from dockerrun (raises KeyError when absent)."""
    return dockerrun[IMG_KEY][IMG_NAME_KEY]
def get_exposed_port(dockerrun):
    """Return Ports[0].ContainerPort from dockerrun (KeyError/IndexError if absent)."""
    return dockerrun[PORTS_KEY][0][CONTAINER_PORT_KEY]
def _get_auth(dockerrun):
    """Return the Authentication sub-dict (key casing depends on version)."""
    if _get_version(dockerrun) == VERSION_ONE:
        return dockerrun[AUTH_KEY]
    return dockerrun[AUTH_KEY.lower()]
def _get_version(dockerrun):
    """Return the declared AWSEBDockerrunVersion as a string, or None."""
    try:
        return str(dockerrun[VERSION_KEY])
    except KeyError:
        return None
|
{
"content_hash": "ee11696bbbab65e53dc8f771467d73e8",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 77,
"avg_line_length": 27.79268292682927,
"alnum_prop": 0.6904344010530935,
"repo_name": "ianblenke/awsebcli",
"id": "abc51f75753922588fc9b1a2804fa1f819c4d9a1",
"size": "5120",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "ebcli/containers/dockerrun.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groff",
"bytes": "208"
},
{
"name": "Makefile",
"bytes": "633"
},
{
"name": "Python",
"bytes": "3447856"
},
{
"name": "Shell",
"bytes": "280"
}
],
"symlink_target": ""
}
|
from JumpScale import j

# Register the 'system.platform.lxc' slot on the global JumpScale object
# tree, then populate it with an Lxc instance so callers can use
# j.system.platform.lxc directly.
j.base.loader.makeAvailable(j, 'system.platform.lxc')
from Lxc import Lxc
j.system.platform.lxc = Lxc()
|
{
"content_hash": "1cf6b0b294dc7ff76a31781636f0492c",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 53,
"avg_line_length": 25.8,
"alnum_prop": 0.7674418604651163,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "463ccfc0f165839f81f22d3d1cdfdff080d3185c",
"size": "129",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/JumpScale/lib/kvm/__init__.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
}
|
"""
This module can do slight modifications to tidy a wiki page's source code.
The changes are not supposed to change the look of the rendered wiki page.
If you wish to run this as a stand-alone script, use scripts/cosmetic_changes.py
For regular use, it is recommended to put this line into your user-config.py:
cosmetic_changes = True
You may enable cosmetic changes for additional languages by adding the
dictionary cosmetic_changes_enable to your user-config.py. It should contain
a tuple of languages for each site where you wish to enable in addition to
your own language if cosmetic_changes_mylang_only is True (see below).
Please set your dictionary by adding such lines to your user-config.py:
cosmetic_changes_enable['wikipedia'] = ('de', 'en', 'fr')
There is another config variable: You can set
cosmetic_changes_mylang_only = False
if you're running a bot on multiple sites and want to do cosmetic changes on
all of them, but be careful if you do.
You may disable cosmetic changes by adding all the unwanted languages to the
dictionary cosmetic_changes_disable in your user-config.py. It should contain
a tuple of languages for each site where you wish to disable cosmetic changes.
You may use it with cosmetic_changes_mylang_only is False, but you can also
disable your own language. This also overrides the settings in the dictionary
cosmetic_changes_enable. Please set this dictionary by adding such lines to
your user-config.py:
cosmetic_changes_disable['wikipedia'] = ('de', 'en', 'fr')
You may disable cosmetic changes for a given script by appending all the
unwanted scripts to the list cosmetic_changes_deny_script in your
user-config.py. By default it contains cosmetic_changes.py itself and touch.py.
This overrides all other enabling settings for cosmetic changes. Please modify
the given list by adding such lines to your user-config.py:
cosmetic_changes_deny_script.append('your_script_name_1')
or by adding a list to the given one:
cosmetic_changes_deny_script += ['your_script_name_1', 'your_script_name_2']
"""
#
# (C) xqt, 2009-2015
# (C) Pywikibot team, 2006-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import re
from warnings import warn
try:
import stdnum.isbn as stdnum_isbn
scripts_isbn = None
except ImportError:
stdnum_isbn = None
# Old dependency
try:
import scripts.isbn as scripts_isbn
except ImportError:
scripts_isbn = None
import pywikibot
from pywikibot import config, textlib, pagegenerators
from pywikibot.page import url2unicode
from pywikibot.tools import deprecate_arg, first_lower, first_upper
from pywikibot.tools import MediaWikiVersion
warning = """
ATTENTION: You can run this script as a stand-alone for testing purposes.
However, the changes that are made are only minor, and other users
might get angry if you fill the version histories and watchlists with such
irrelevant changes. Some wikis prohibit stand-alone running."""
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp,
'&warning;': warning,
}
# This is from interwiki.py;
# move it to family file and implement global instances
moved_links = {
'ca': (u'ús de la plantilla', u'/ús'),
'cs': (u'dokumentace', u'/doc'),
'de': (u'dokumentation', u'/Meta'),
'en': ([u'documentation',
u'template documentation',
u'template doc',
u'doc',
u'documentation, template'], u'/doc'),
'es': ([u'documentación', u'documentación de plantilla'], u'/doc'),
'fa': ([u'documentation', u'توضیحات', u'توضیحات الگو',
u'doc'], u'/توضیحات'),
'fr': (u'/documentation', u'/Documentation'),
'hu': (u'sablondokumentáció', u'/doc'),
'id': (u'template doc', u'/doc'),
'ja': (u'documentation', u'/doc'),
'ka': (u'თარგის ინფო', u'/ინფო'),
'ko': (u'documentation', u'/설명문서'),
'ms': (u'documentation', u'/doc'),
'pl': (u'dokumentacja', u'/opis'),
'pt': ([u'documentação', u'/doc'], u'/doc'),
'ro': (u'documentaţie', u'/doc'),
'ru': (u'doc', u'/doc'),
'sv': (u'dokumentation', u'/dok'),
'vi': (u'documentation', u'/doc'),
'zh': ([u'documentation', u'doc'], u'/doc'),
}
# Template which should be replaced or removed.
# Use a list with two entries. The first entry will be replaced by the second.
# Examples:
# For removing {{Foo}}, the list must be:
# (u'Foo', None),
#
# The following also works:
# (u'Foo', ''),
#
# For replacing {{Foo}} with {{Bar}} the list must be:
# (u'Foo', u'Bar'),
#
# This also removes all template parameters of {{Foo}}
# For replacing {{Foo}} with {{Bar}} but keep the template
# parameters in its original order, please use:
# (u'Foo', u'Bar\g<parameters>'),
deprecatedTemplates = {
'wikipedia': {
'de': [
(u'Belege', u'Belege fehlen\\g<parameters>'),
(u'Quelle', u'Belege fehlen\\g<parameters>'),
(u'Quellen', u'Belege fehlen\\g<parameters>'),
(u'Quellen fehlen', u'Belege fehlen\\g<parameters>'),
],
}
}
# Error-cancel levels used for CosmeticChangesToolkit's `ignore` argument.
# With CANCEL_ALL any exception propagates; CANCEL_PAGE skips the whole
# page (see change()); CANCEL_METHOD skips only the failing clean-up
# method (see safe_execute()).
CANCEL_ALL = False
CANCEL_PAGE = 1
CANCEL_METHOD = 2
# CANCEL_MATCH presumably skips a single failing match — used elsewhere;
# confirm against the full module before relying on it.
CANCEL_MATCH = 3
def _format_isbn_match(match, strict=True):
    """Helper function to validate and format a single matched ISBN.

    Prefers the third-party ``stdnum.isbn`` module when importable and
    falls back to the legacy ``scripts.isbn`` module otherwise.

    @param match: regex match whose 'code' group holds the ISBN candidate
    @param strict: re-raise validation errors instead of logging them and
        returning the ISBN unformatted
    @return: the (possibly re-formatted) ISBN string
    """
    isbn = match.group('code')
    if stdnum_isbn:
        try:
            stdnum_isbn.validate(isbn)
        except stdnum_isbn.ValidationError as e:
            if strict:
                raise
            # Non-strict: keep the original text unchanged.
            pywikibot.log('ISBN "%s" validation error: %s' % (isbn, e))
            return isbn
        return stdnum_isbn.format(isbn)
    else:
        try:
            scripts_isbn.is_valid(isbn)
        except scripts_isbn.InvalidIsbnException as e:
            if strict:
                raise
            pywikibot.log('ISBN "%s" validation error: %s' % (isbn, e))
            return isbn
        isbn = scripts_isbn.getIsbn(isbn)
        try:
            # format() normalizes the object in place; code holds the result.
            isbn.format()
        except scripts_isbn.InvalidIsbnException as e:
            if strict:
                raise
            pywikibot.log('ISBN "%s" validation error: %s' % (isbn, e))
        return isbn.code
def _reformat_ISBNs(text, strict=True):
    """Normalise all ISBNs appearing in *text*.

    @raises Exception: Invalid ISBN encountered when strict enabled
    """
    if not stdnum_isbn:
        if not scripts_isbn:
            raise NotImplementedError(
                'ISBN functionality not available. Install stdnum package.')
        warn('package stdnum.isbn not found; using scripts.isbn',
             ImportWarning)

    def _formatter(match):
        return _format_isbn_match(match, strict=strict)

    return textlib.reformat_ISBNs(text, _formatter)
class CosmeticChangesToolkit:
"""Cosmetic changes toolkit."""
    @deprecate_arg('debug', 'diff')
    def __init__(self, site, diff=False, redirect=False, namespace=None,
                 pageTitle=None, ignore=CANCEL_ALL):
        """Constructor.

        @param site: wiki site the text belongs to
        @param diff: when True, change() shows a diff of the edits
        @param redirect: stored on the instance; not used in this module's
            visible methods
        @param namespace: namespace number of the page, or None
        @param pageTitle: title of the page being cleaned up
        @param ignore: error-cancel level (CANCEL_ALL / CANCEL_PAGE /
            CANCEL_METHOD / CANCEL_MATCH)
        """
        self.site = site
        self.diff = diff
        self.redirect = redirect
        self.namespace = namespace
        # Namespace 10 is the Template namespace.
        self.template = (self.namespace == 10)
        # Odd-numbered namespaces are talk namespaces.
        self.talkpage = self.namespace >= 0 and self.namespace % 2 == 1
        self.title = pageTitle
        self.ignore = ignore

        # Clean-up passes, executed in this order by _change().
        self.common_methods = (
            self.commonsfiledesc,
            self.fixSelfInterwiki,
            self.standardizePageFooter,
            self.fixSyntaxSave,
            self.cleanUpLinks,
            self.cleanUpSectionHeaders,
            self.putSpacesInLists,
            self.translateAndCapitalizeNamespaces,
            # FIXME: self.translateMagicWords,
            self.replaceDeprecatedTemplates,
            # FIXME: self.resolveHtmlEntities,
            self.removeUselessSpaces,
            self.removeNonBreakingSpaceBeforePercent,
            self.fixHtml,
            self.fixReferences,
            self.fixStyle,
            self.fixTypo,
            self.fixArabicLetters,
            self.fix_ISBN,
        )
@classmethod
def from_page(cls, page, diff, ignore):
"""Create toolkit based on the page."""
return cls(page.site, diff=diff, namespace=page.namespace(),
pageTitle=page.title(), ignore=ignore)
    def safe_execute(self, method, text):
        """Execute the method and catch exceptions if enabled.

        With ignore == CANCEL_METHOD a failing method is logged and the
        input text is returned unchanged; any other ignore level lets the
        exception propagate.
        """
        result = None
        try:
            result = method(text)
        except Exception as e:
            if self.ignore == CANCEL_METHOD:
                pywikibot.warning(u'Unable to perform "{0}" on "{1}"!'.format(
                    method.__name__, self.title))
                pywikibot.exception(e)
            else:
                raise
        # Fall back to the unmodified text when the method failed (or
        # returned None).
        return text if result is None else result
def _change(self, text):
"""Execute all clean up methods."""
for method in self.common_methods:
text = self.safe_execute(method, text)
return text
    def change(self, text):
        """Execute all clean up methods and catch errors if activated.

        @return: the cleaned-up text, or False when an error occurred and
            the ignore level is CANCEL_PAGE.
        """
        try:
            new_text = self._change(text)
        except Exception as e:
            if self.ignore == CANCEL_PAGE:
                pywikibot.warning(u'Skipped "{0}", because an error occurred.'.format(self.title))
                pywikibot.exception(e)
                return False
            else:
                raise
        else:
            # Optionally show what changed before returning.
            if self.diff:
                pywikibot.showDiff(text, new_text)
            return new_text
def fixSelfInterwiki(self, text):
"""
Interwiki links to the site itself are displayed like local links.
Remove their language code prefix.
"""
if not self.talkpage and pywikibot.calledModuleName() != 'interwiki':
interwikiR = re.compile(r'\[\[%s\s?:([^\[\]\n]*)\]\]'
% self.site.code)
text = interwikiR.sub(r'[[\1]]', text)
return text
    def standardizePageFooter(self, text):
        """
        Standardize page footer.

        Makes sure that interwiki links, categories and star templates are
        put to the correct position and into the right order. This combines
        the old instances standardizeInterwiki and standardizeCategories.

        The page footer has the following section in that sequence:
        1. categories
        2. ## TODO: template beyond categories ##
        3. additional information depending on local site policy
        4. stars templates for featured and good articles
        5. interwiki links
        """
        # Per-language names of the "star" templates (featured/good-article
        # markers) that must be moved next to the interwiki links.
        starsList = [
            u'bueno',
            u'bom interwiki',
            u'cyswllt[ _]erthygl[ _]ddethol', u'dolen[ _]ed',
            u'destacado', u'destaca[tu]',
            u'enllaç[ _]ad',
            u'enllaz[ _]ad',
            u'leam[ _]vdc',
            u'legătură[ _]a[bcf]',
            u'liamm[ _]pub',
            u'lien[ _]adq',
            u'lien[ _]ba',
            u'liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt',
            u'liên[ _]kết[ _]chọn[ _]lọc',
            u'ligam[ _]adq',
            u'ligazón[ _]a[bd]',
            u'ligoelstara',
            u'ligoleginda',
            u'link[ _][afgu]a', u'link[ _]adq', u'link[ _]f[lm]', u'link[ _]km',
            u'link[ _]sm', u'linkfa',
            u'na[ _]lotura',
            u'nasc[ _]ar',
            u'tengill[ _][úg]g',
            u'ua',
            u'yüm yg',
            u'רא',
            u'وصلة مقالة جيدة',
            u'وصلة مقالة مختارة',
        ]

        categories = None
        interwikiLinks = None
        allstars = []

        # Pywikibot is no longer allowed to touch categories on the
        # German Wikipedia. See
        # https://de.wikipedia.org/wiki/Hilfe_Diskussion:Personendaten/Archiv/1#Position_der_Personendaten_am_.22Artikelende.22
        # ignoring nn-wiki because of the comment line above the iw section
        if not self.template and '{{Personendaten' not in text and \
           '{{SORTIERUNG' not in text and '{{DEFAULTSORT' not in text and \
           self.site.code not in ('et', 'it', 'bg', 'ru'):
            categories = textlib.getCategoryLinks(text, site=self.site)

        if not self.talkpage:  # and pywikibot.calledModuleName() <> 'interwiki':
            subpage = False
            if self.template:
                loc = None
                try:
                    tmpl, loc = moved_links[self.site.code]
                    del tmpl
                except KeyError:
                    pass
                if loc is not None and loc in self.title:
                    subpage = True
            interwikiLinks = textlib.getLanguageLinks(
                text, insite=self.site, template_subpage=subpage)

            # Removing the interwiki
            text = textlib.removeLanguageLinks(text, site=self.site)
            # Removing the stars' issue
            # (search in a copy with disabled parts removed, but substitute
            # on the real text)
            starstext = textlib.removeDisabledParts(text)
            for star in starsList:
                regex = re.compile(r'(\{\{(?:template:|)%s\|.*?\}\}[\s]*)'
                                   % star, re.I)
                found = regex.findall(starstext)
                if found != []:
                    text = regex.sub('', text)
                    allstars += found

        # Adding categories
        if categories:
            # TODO: Sorting categories in alphabetic order.
            # e.g. using categories.sort()
            # TODO: Taking main cats to top
            # for name in categories:
            #     if re.search(u"(.+?)\|(.{,1}?)",name.title()) or name.title()==name.title().split(":")[0]+title:
            #         categories.remove(name)
            #         categories.insert(0, name)
            text = textlib.replaceCategoryLinks(text, categories,
                                                site=self.site)
        # Adding stars templates
        if allstars:
            text = text.strip() + self.site.family.interwiki_text_separator
            allstars.sort()
            for element in allstars:
                text += '%s%s' % (element.strip(), config.line_separator)
                pywikibot.log(u'%s' % element.strip())
        # Adding the interwiki
        if interwikiLinks:
            text = textlib.replaceLanguageLinks(text, interwikiLinks,
                                                site=self.site,
                                                template=self.template,
                                                template_subpage=subpage)
        return text
    def translateAndCapitalizeNamespaces(self, text):
        """Use localized namespace names.

        Rewrites [[Alias:Page]] links so the namespace prefix uses the
        canonical localized name of the namespace.
        """
        # arz uses english stylish codes
        if self.site.sitename == 'wikipedia:arz':
            return text
        family = self.site.family
        # wiki links aren't parsed here.
        exceptions = ['nowiki', 'comment', 'math', 'pre']

        for namespace in self.site.namespaces.values():
            if namespace.id in (0, 2, 3):
                # skip main (article) namespace
                # skip user namespace, maybe gender is used
                continue
            # a clone is needed. Won't change the namespace dict
            namespaces = list(namespace)
            # First entry is the canonical localized name; the rest are
            # aliases to be replaced.
            thisNs = namespaces.pop(0)
            if namespace.id == 6 and family.name == 'wikipedia':
                if self.site.code in ('en', 'fr') and \
                   MediaWikiVersion(self.site.version()) >= MediaWikiVersion('1.14'):
                    # do not change "Image" on en-wiki and fr-wiki
                    assert u'Image' in namespaces
                    namespaces.remove(u'Image')
                if self.site.code == 'hu':
                    # do not change "Kép" on hu-wiki
                    assert u'Kép' in namespaces
                    namespaces.remove(u'Kép')
                elif self.site.code == 'pt':
                    # bug 55242 should be implemented
                    continue
            # lowerspaced and underscored namespaces
            # (build case-insensitive first letter and allow '_' for ' ')
            for i in range(len(namespaces)):
                item = namespaces[i].replace(' ', '[ _]')
                item = u'[%s%s]' % (item[0], item[0].lower()) + item[1:]
                namespaces[i] = item
            namespaces.append(first_lower(thisNs))
            if thisNs and namespaces:
                text = textlib.replaceExcept(
                    text,
                    r'\[\[\s*(%s) *:(?P<nameAndLabel>.*?)\]\]'
                    % '|'.join(namespaces),
                    r'[[%s:\g<nameAndLabel>]]' % thisNs,
                    exceptions)
        return text
    def translateMagicWords(self, text):
        """Use localized magic words.

        Replaces image-parameter magic word aliases (thumbnail, left, ...)
        inside [[File:...]] links with the site's primary alias.
        """
        # not wanted at ru
        # arz uses english stylish codes
        if self.site.code not in ['arz', 'ru']:
            exceptions = ['nowiki', 'comment', 'math', 'pre']
            for magicWord in ['img_thumbnail', 'img_left', 'img_center',
                              'img_right', 'img_none', 'img_framed',
                              'img_frameless', 'img_border', 'img_upright', ]:
                aliases = self.site.getmagicwords(magicWord)
                if not aliases:
                    continue
                # aliases[0] is the preferred (canonical) form.
                text = textlib.replaceExcept(
                    text,
                    r'\[\[(?P<left>.+?:.+?\..+?\|) *(' + '|'.join(aliases) +
                    r') *(?P<right>(\|.*?)?\]\])',
                    r'[[\g<left>' + aliases[0] + r'\g<right>', exceptions)
        return text
    def cleanUpLinks(self, text):
        """
        Tidy up wikilinks in namespace 0.

        Normalizes underscores and repeated spaces in link targets, strips
        (and remembers) surrounding whitespace, decodes URL-encoded titles,
        and simplifies ``[[title|label]]`` to ``[[title]]`` or
        ``[[title]]trail`` where the label makes the pipe redundant.
        Interwiki links and non-main-namespace links are left unchanged.
        """
        # helper function which works on one link and either returns it
        # unmodified, or returns a replacement.
        def handleOneLink(match):
            """Rewrite a single matched wikilink, or return it unchanged."""
            titleWithSection = match.group('titleWithSection')
            label = match.group('label')
            trailingChars = match.group('linktrail')
            newline = match.group('newline')
            if not self.site.isInterwikiLink(titleWithSection):
                # The link looks like this:
                # [[page_title|link_text]]trailing_chars
                # We only work on namespace 0 because pipes and linktrails work
                # differently for images and categories.
                page = pywikibot.Page(pywikibot.Link(titleWithSection,
                                                     self.site))
                try:
                    namespace = page.namespace()
                except pywikibot.InvalidTitle:
                    return match.group()
                if namespace == 0:
                    # Replace underlines by spaces, also multiple underlines
                    titleWithSection = re.sub('_+', ' ', titleWithSection)
                    # Remove double spaces
                    titleWithSection = re.sub(' +', ' ', titleWithSection)
                    # Remove unnecessary leading spaces from title,
                    # but remember if we did this because we eventually want
                    # to re-add it outside of the link later.
                    titleLength = len(titleWithSection)
                    titleWithSection = titleWithSection.lstrip()
                    hadLeadingSpaces = (len(titleWithSection) != titleLength)
                    hadTrailingSpaces = False
                    # Remove unnecessary trailing spaces from title,
                    # but remember if we did this because it may affect
                    # the linktrail and because we eventually want to
                    # re-add it outside of the link later.
                    if not trailingChars:
                        titleLength = len(titleWithSection)
                        titleWithSection = titleWithSection.rstrip()
                        hadTrailingSpaces = (len(titleWithSection) !=
                                             titleLength)
                    # Convert URL-encoded characters to unicode
                    titleWithSection = url2unicode(titleWithSection,
                                                   encodings=self.site)
                    if titleWithSection == '':
                        # just skip empty links.
                        return match.group()
                    # Remove unnecessary initial and final spaces from label.
                    # Please note that some editors prefer spaces around pipes.
                    # (See [[en:Wikipedia:Semi-bots]]). We remove them anyway.
                    if label is not None:
                        # Remove unnecessary leading spaces from label,
                        # but remember if we did this because we want
                        # to re-add it outside of the link later.
                        # NOTE(review): this overwrites the flag computed for
                        # the title above — confirm that is intended.
                        labelLength = len(label)
                        label = label.lstrip()
                        hadLeadingSpaces = (len(label) != labelLength)
                        # Remove unnecessary trailing spaces from label,
                        # but remember if we did this because it affects
                        # the linktrail.
                        if not trailingChars:
                            labelLength = len(label)
                            label = label.rstrip()
                            hadTrailingSpaces = (len(label) != labelLength)
                    else:
                        label = titleWithSection
                    if trailingChars:
                        label += trailingChars
                    if titleWithSection == label or \
                       first_lower(titleWithSection) == label:
                        newLink = "[[%s]]" % label
                    # Check if we can create a link with trailing characters
                    # instead of a pipelink
                    elif (len(titleWithSection) <= len(label) and
                          label[:len(titleWithSection)] == titleWithSection and
                          re.sub(trailR, '',
                                 label[len(titleWithSection):]) == ''):
                        newLink = "[[%s]]%s" % (label[:len(titleWithSection)],
                                                label[len(titleWithSection):])
                    else:
                        # Try to capitalize the first letter of the title.
                        # Not useful for languages that don't capitalize nouns.
                        # TODO: Add a configuration variable for each site,
                        # which determines if the link target is written in
                        # uppercase
                        if self.site.sitename == 'wikipedia:de':
                            titleWithSection = first_upper(titleWithSection)
                        newLink = "[[%s|%s]]" % (titleWithSection, label)
                    # re-add spaces that were pulled out of the link.
                    # Examples:
                    # text[[ title ]]text -> text [[title]] text
                    # text[[ title | name ]]text -> text [[title|name]] text
                    # text[[ title |name]]text -> text[[title|name]]text
                    # text[[title| name]]text -> text [[title|name]]text
                    if hadLeadingSpaces and not newline:
                        newLink = ' ' + newLink
                    if hadTrailingSpaces:
                        newLink = newLink + ' '
                    if newline:
                        newLink = newline + newLink
                    return newLink
            # don't change anything
            return match.group()
        trailR = re.compile(self.site.linktrail())
    # The regular expression which finds links. Results consist of four groups:
    # group <newline> depends whether the links starts with a new line.
    # group <titleWithSection> is the page title and section, that is,
    # everything before | or ]. It'll include the # to make life easier for us.
    # group <label> is the alternative link title between | and ].
    # group <linktrail> is the link trail after ]] which are part of the word.
    # note that the definition of 'letter' varies from language to language.
        linkR = re.compile(
            r'(?P<newline>[\n]*)\[\[(?P<titleWithSection>[^\]\|]+)(\|(?P<label>[^\]\|]*))?\]\](?P<linktrail>' +
            self.site.linktrail() + ')')
        text = textlib.replaceExcept(text, linkR, handleOneLink,
                                     ['comment', 'math', 'nowiki', 'pre',
                                      'startspace'])
        return text
    def resolveHtmlEntities(self, text):
        """
        Replace HTML entities with the characters they represent.

        Code points in ``ignore`` are kept as entities because the literal
        character would change wikitext semantics (link/template syntax) or
        hurt editability.
        """
        ignore = [
            38,     # Ampersand (&)
            39,     # Single quotation mark (') - Bugzilla 24093
            60,     # Less than (<)
            62,     # Greater than (>)
            91,     # Opening square bracket ([)
                    # - sometimes used intentionally inside links
            93,     # Closing square bracket (])
                    # - used intentionally inside links
            124,    # Vertical bar (|)
                    # - used intentionally in navigation bar templates on w:de
            160,    # Non-breaking space (&nbsp;)
                    # - not supported by Firefox textareas
            173,    # Soft-hyphen (&shy;) - enable editing
            8206,   # Left-to-right mark (&lrm;)
            8207,   # Right-to-left mark (&rlm;)
        ]
        if self.template:
            # 58 is ':' — kept as an entity inside template source,
            # presumably because a literal colon affects parameter parsing;
            # verify against the callers that set self.template.
            ignore += [58]
        text = pywikibot.html2unicode(text, ignore=ignore)
        return text
def removeUselessSpaces(self, text):
"""Cleanup multiple or trailing spaces."""
multipleSpacesR = re.compile(' +')
spaceAtLineEndR = re.compile(' $')
exceptions = ['comment', 'math', 'nowiki', 'pre', 'startspace', 'table']
if self.site.sitename != 'wikipedia:cs':
exceptions.append('template')
text = textlib.replaceExcept(text, multipleSpacesR, ' ', exceptions)
text = textlib.replaceExcept(text, spaceAtLineEndR, '', exceptions)
return text
    def removeNonBreakingSpaceBeforePercent(self, text):
        """
        Remove a non-breaking space between number and percent sign.

        Newer MediaWiki versions automatically place a non-breaking space in
        front of a percent sign, so it is no longer required to place it
        manually.

        FIXME: which version should this be run on?
        """
        # NOTE(review): the blank inside the pattern is expected to be a
        # non-breaking space (U+00A0) and the one in the replacement a plain
        # space — confirm neither was normalized during transcoding.
        text = textlib.replaceExcept(text, r'(\d) %', r'\1 %',
                                     ['timeline'])
        return text
def cleanUpSectionHeaders(self, text):
"""
Add a space between the equal signs and the section title.
Example: ==Section title== becomes == Section title ==
NOTE: This space is recommended in the syntax help on the English and
German Wikipedia. It might be that it is not wanted on other wikis.
If there are any complaints, please file a bug report.
"""
return textlib.replaceExcept(
text,
r'(?m)^(={1,7}) *(?P<title>[^=]+?) *\1 *\r?\n',
r'\1 \g<title> \1%s' % config.LS,
['comment', 'math', 'nowiki', 'pre'])
def putSpacesInLists(self, text):
"""
Add a space between the * or # and the text.
NOTE: This space is recommended in the syntax help on the English,
German, and French Wikipedia. It might be that it is not wanted on other
wikis. If there are any complaints, please file a bug report.
"""
if not self.template:
exceptions = ['comment', 'math', 'nowiki', 'pre', 'source', 'template',
'timeline', self.site.redirectRegex()]
text = textlib.replaceExcept(
text,
r'(?m)^(?P<bullet>[:;]*(\*+|#+)[:;\*#]*)(?P<char>[^\s\*#:;].+?)',
r'\g<bullet> \g<char>',
exceptions)
return text
def replaceDeprecatedTemplates(self, text):
exceptions = ['comment', 'math', 'nowiki', 'pre']
if self.site.family.name in deprecatedTemplates and \
self.site.code in deprecatedTemplates[self.site.family.name]:
for template in deprecatedTemplates[
self.site.family.name][self.site.code]:
old = template[0]
new = template[1]
if new is None:
new = ''
else:
new = '{{%s}}' % new
if self.site.namespaces[10].case == 'first-letter':
old = '[' + old[0].upper() + old[0].lower() + ']' + old[1:]
text = textlib.replaceExcept(
text,
r'\{\{([mM][sS][gG]:)?%s(?P<parameters>\|[^}]+|)}}' % old,
new, exceptions)
return text
# from fixes.py
    def fixSyntaxSave(self, text):
        """
        Repair common external-link syntax errors.

        Fixes doubled brackets around external links, a stray opening
        double bracket, and a '|' used instead of a space between URL and
        label (detected via surrounding whitespace or a known file
        extension at the end of the URL).
        """
        exceptions = ['nowiki', 'comment', 'math', 'pre', 'source',
                      'startspace']
        # link to the wiki working on
        # TODO: disable this for difflinks and titled links,
        # to prevent edits like this:
        # https://de.wikipedia.org/w/index.php?title=Wikipedia%3aVandalismusmeldung&diff=103109563&oldid=103109271
        # text = textlib.replaceExcept(text,
        #                              r'\[https?://%s\.%s\.org/wiki/(?P<link>\S+)\s+(?P<title>.+?)\s?\]'
        #                              % (self.site.code, self.site.family.name),
        #                              r'[[\g<link>|\g<title>]]', exceptions)
        # external link in double brackets
        text = textlib.replaceExcept(
            text,
            r'\[\[(?P<url>https?://[^\]]+?)\]\]',
            r'[\g<url>]', exceptions)
        # external link starting with double bracket
        text = textlib.replaceExcept(text,
                                     r'\[\[(?P<url>https?://.+?)\]',
                                     r'[\g<url>]', exceptions)
        # external link and description separated by a dash, with
        # whitespace in front of the dash, so that it is clear that
        # the dash is not a legitimate part of the URL.
        text = textlib.replaceExcept(
            text,
            r'\[(?P<url>https?://[^\|\] \r\n]+?) +\| *(?P<label>[^\|\]]+?)\]',
            r'[\g<url> \g<label>]', exceptions)
        # dash in external link, where the correct end of the URL can
        # be detected from the file extension. It is very unlikely that
        # this will cause mistakes.
        extensions = [r'\.{0}'.format(ext)
                      for ext in ['pdf', 'html?', 'php', 'aspx?', 'jsp']]
        text = textlib.replaceExcept(
            text,
            r'\[(?P<url>https?://[^\|\] ]+?(' + '|'.join(extensions) + r')) *'
            r'\| *(?P<label>[^\|\]]+?)\]',
            r'[\g<url> \g<label>]', exceptions)
        return text
def fixHtml(self, text):
# Everything case-insensitive (?i)
# Keep in mind that MediaWiki automatically converts <br> to <br />
exceptions = ['nowiki', 'comment', 'math', 'pre', 'source',
'startspace']
text = textlib.replaceExcept(text, r'(?i)<b>(.*?)</b>', r"'''\1'''",
exceptions)
text = textlib.replaceExcept(text, r'(?i)<strong>(.*?)</strong>',
r"'''\1'''", exceptions)
text = textlib.replaceExcept(text, r'(?i)<i>(.*?)</i>', r"''\1''",
exceptions)
text = textlib.replaceExcept(text, r'(?i)<em>(.*?)</em>', r"''\1''",
exceptions)
# horizontal line without attributes in a single line
text = textlib.replaceExcept(text, r'(?i)([\r\n])<hr[ /]*>([\r\n])',
r'\1----\2', exceptions)
# horizontal line with attributes; can't be done with wiki syntax
# so we only make it XHTML compliant
text = textlib.replaceExcept(text, r'(?i)<hr ([^>/]+?)>',
r'<hr \1 />',
exceptions)
# a header where only spaces are in the same line
for level in range(1, 7):
equals = '\\1%s \\2 %s\\3' % ("=" * level, "=" * level)
text = textlib.replaceExcept(
text,
r'(?i)([\r\n]) *<h%d> *([^<]+?) *</h%d> *([\r\n])'
% (level, level),
r'%s' % equals,
exceptions)
# TODO: maybe we can make the bot replace <p> tags with \r\n's.
return text
def fixReferences(self, text):
# See also https://en.wikipedia.org/wiki/User:AnomieBOT/source/tasks/OrphanReferenceFixer.pm
exceptions = ['nowiki', 'comment', 'math', 'pre', 'source',
'startspace']
# it should be name = " or name=" NOT name ="
text = re.sub(r'(?i)<ref +name(= *| *=)"', r'<ref name="', text)
# remove empty <ref/>-tag
text = textlib.replaceExcept(text,
r'(?i)(<ref\s*/>|<ref *>\s*</ref>)',
r'', exceptions)
text = textlib.replaceExcept(text,
r'(?i)<ref\s+([^>]+?)\s*>\s*</ref>',
r'<ref \1/>', exceptions)
return text
def fixStyle(self, text):
exceptions = ['nowiki', 'comment', 'math', 'pre', 'source',
'startspace']
# convert prettytable to wikitable class
if self.site.code in ('de', 'en'):
text = textlib.replaceExcept(text,
r'(class="[^"]*)prettytable([^"]*")',
r'\1wikitable\2', exceptions)
return text
    def fixTypo(self, text):
        """
        Fix common typographic mistakes.

        Rewrites '<number> ccm' to '<number> cm³' and a masculine ordinal
        indicator (º) used before C/F to the proper degree sign (°).
        """
        exceptions = ['nowiki', 'comment', 'math', 'pre', 'source',
                      'startspace', 'gallery', 'hyperlink', 'interwiki', 'link']
        # change <number> ccm -> <number> cm³
        # NOTE(review): the blanks in the patterns/replacements below are
        # suspected to be non-breaking spaces (U+00A0) — verify against the
        # original file before editing these literals.
        text = textlib.replaceExcept(text, r'(\d)\s* ccm',
                                     r'\1 ' + u'cm³', exceptions)
        text = textlib.replaceExcept(text,
                                     r'(\d)\s*ccm', r'\1 ' + u'cm³',
                                     exceptions)
        # Solve wrong Nº sign with °C or °F
        # additional exception requested on fr-wiki for this stuff
        pattern = re.compile(u'«.*?»', re.UNICODE)
        exceptions.append(pattern)
        text = textlib.replaceExcept(text, r'(\d)\s* ' + u'[º°]([CF])',
                                     r'\1 ' + u'°' + r'\2', exceptions)
        text = textlib.replaceExcept(text, r'(\d)\s*' + u'[º°]([CF])',
                                     r'\1 ' + u'°' + r'\2', exceptions)
        text = textlib.replaceExcept(text, u'º([CF])', u'°' + r'\1',
                                     exceptions)
        return text
def fixArabicLetters(self, text):
if self.site.code not in ['ckb', 'fa']:
return
exceptions = [
'gallery',
'hyperlink',
'interwiki',
# FIXME: but changes letters inside wikilinks
# 'link',
'math',
'pre',
'template',
'timeline',
'ref',
'source',
'startspace',
'inputbox',
]
# FIXME: use textlib.NON_LATIN_DIGITS
# valid digits
digits = {
'ckb': u'٠١٢٣٤٥٦٧٨٩',
'fa': u'۰۱۲۳۴۵۶۷۸۹',
}
faChrs = u'ءاآأإئؤبپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیةيك' + digits['fa']
new = digits.pop(self.site.code)
# This only works if there are only two items in digits dict
old = digits[digits.keys()[0]]
# do not change inside file links
namespaces = list(self.site.namespace(6, all=True))
pattern = re.compile(
u'\\[\\[(%s):.+?\\.\\w+? *(\\|((\\[\\[.*?\\]\\])|.)*)?\\]\\]'
% u'|'.join(namespaces),
re.UNICODE)
# not to let bot edits in latin content
exceptions.append(re.compile(u"[^%(fa)s] *?\"*? *?, *?[^%(fa)s]"
% {'fa': faChrs}))
exceptions.append(pattern)
text = textlib.replaceExcept(text, u',', u'،', exceptions)
if self.site.code == 'ckb':
text = textlib.replaceExcept(text,
u'\u0647([.\u060c_<\\]\\s])',
u'\u06d5\\1', exceptions)
text = textlib.replaceExcept(text, u'ه', u'ە', exceptions)
text = textlib.replaceExcept(text, u'ه', u'ھ', exceptions)
text = textlib.replaceExcept(text, u'ك', u'ک', exceptions)
text = textlib.replaceExcept(text, u'[ىي]', u'ی', exceptions)
return text
# FIXME: split this function into two.
# replace persian/arabic digits
# deactivated due to bug 55185
for i in range(0, 10):
text = textlib.replaceExcept(text, old[i], new[i], exceptions)
# do not change digits in class, style and table params
pattern = re.compile(r'\w+=(".+?"|\d+)', re.UNICODE)
exceptions.append(pattern)
# do not change digits inside html-tags
pattern = re.compile(u'<[/]*?[^</]+?[/]*?>', re.UNICODE)
exceptions.append(pattern)
exceptions.append('table') # exclude tables for now
# replace digits
for i in range(0, 10):
text = textlib.replaceExcept(text, str(i), new[i], exceptions)
return text
def commonsfiledesc(self, text):
"""
Clean up file descriptions on the Wikimedia Commons.
It is working according to [1] and works only on pages in the file
namespace on the Wikimedia Commons.
[1]: https://commons.wikimedia.org/wiki/Commons:Tools/pywiki_file_description_cleanup
"""
if self.site.sitename != 'commons:commons' or self.namespace == 6:
return
# section headers to {{int:}} versions
exceptions = ['comment', 'includeonly', 'math', 'noinclude', 'nowiki',
'pre', 'source', 'ref', 'timeline']
text = textlib.replaceExcept(text,
r"([\r\n]|^)\=\= *Summary *\=\=",
r"\1== {{int:filedesc}} ==",
exceptions, True)
text = textlib.replaceExcept(
text,
r"([\r\n])\=\= *\[\[Commons:Copyright tags\|Licensing\]\]: *\=\=",
r"\1== {{int:license-header}} ==", exceptions, True)
text = textlib.replaceExcept(
text,
r"([\r\n])\=\= *(Licensing|License information|{{int:license}}) *\=\=",
r"\1== {{int:license-header}} ==", exceptions, True)
# frequent field values to {{int:}} versions
text = textlib.replaceExcept(
text,
r'([\r\n]\|[Ss]ource *\= *)'
r'(?:[Oo]wn work by uploader|[Oo]wn work|[Ee]igene [Aa]rbeit) *([\r\n])',
r'\1{{own}}\2', exceptions, True)
text = textlib.replaceExcept(
text,
r'(\| *Permission *\=) *(?:[Ss]ee below|[Ss]iehe unten) *([\r\n])',
r'\1\2', exceptions, True)
# added to transwikied pages
text = textlib.replaceExcept(text, r'__NOTOC__', '', exceptions, True)
# tracker element for js upload form
text = textlib.replaceExcept(
text,
r'<!-- *{{ImageUpload\|(?:full|basic)}} *-->',
'', exceptions[1:], True)
text = textlib.replaceExcept(text, r'{{ImageUpload\|(?:basic|full)}}',
'', exceptions, True)
# duplicated section headers
text = textlib.replaceExcept(
text,
r'([\r\n]|^)\=\= *{{int:filedesc}} *\=\=(?:[\r\n ]*)\=\= *{{int:filedesc}} *\=\=',
r'\1== {{int:filedesc}} ==', exceptions, True)
text = textlib.replaceExcept(
text,
r'([\r\n]|^)\=\= *{{int:license-header}} *\=\=(?:[\r\n ]*)'
r'\=\= *{{int:license-header}} *\=\=',
r'\1== {{int:license-header}} ==', exceptions, True)
return text
def fix_ISBN(self, text):
"""Hyphenate ISBN numbers."""
return _reformat_ISBNs(
text, strict=False if self.ignore == CANCEL_MATCH else True)
|
{
"content_hash": "4fc56b64511cb75fdbdb18fde7a20ea5",
"timestamp": "",
"source": "github",
"line_count": 955,
"max_line_length": 127,
"avg_line_length": 42.39057591623037,
"alnum_prop": 0.5199713459970852,
"repo_name": "trishnaguha/pywikibot-core",
"id": "e6b5213a36b343f811c2c5d0c70dbe7dea596e11",
"size": "40742",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pywikibot/cosmetic_changes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "3821251"
}
],
"symlink_target": ""
}
|
import errno
import socket
from utils import require_user
from utils import get_simple_bpf_program
from utils import skip_if_not_supported
from pyroute2 import IPRoute
from pyroute2 import protocols
from pyroute2.common import uifname
from pyroute2.netlink import NetlinkError
from pyroute2.iproute import RTM_NEWQDISC
from pyroute2.iproute import RTM_NEWTFILTER
from pyroute2.iproute import RTM_NEWTCLASS
from pyroute2.iproute import TC_H_INGRESS
from nose.plugins.skip import SkipTest
def try_qd(qd, call, *argv, **kwarg):
    """Invoke *call*, skipping the test when the qdisc module is absent."""
    try:
        call(*argv, **kwarg)
    except NetlinkError as err:
        if err.code != errno.ENOENT:
            raise
        # ENOENT ('no such file or directory'): the kernel has no module
        # for the requested traffic control discipline
        raise SkipTest('missing traffic control <%s>' % qd)
class BasicTest(object):

    def setup(self):
        """Create a dummy interface for qdisc tests (requires root)."""
        require_user('root')
        self.ip = IPRoute()
        self.ifname = uifname()
        self.ip.link_create(ifname=self.ifname, kind='dummy')
        self.interface = self.ip.link_lookup(ifname=self.ifname)[0]

    def teardown(self):
        """Delete the dummy interface and close the netlink socket."""
        self.ip.link('delete', index=self.interface)
        self.ip.close()

    def get_qdiscs(self):
        """Return every qdisc attached to the test interface."""
        return [qdisc for qdisc in self.ip.get_qdiscs()
                if qdisc['index'] == self.interface]

    def get_qdisc(self):
        """Return the first qdisc on the test interface, or None."""
        qdiscs = self.get_qdiscs()
        return qdiscs[0] if qdiscs else None
class TestIngress(BasicTest):
    def test_simple(self):
        """Create an ingress qdisc and check its kind, handle and parent."""
        self.ip.tc(RTM_NEWQDISC, 'ingress', self.interface, 0xffff0000)
        qds = self.get_qdisc()
        # assert the list is not empty
        assert qds
        # assert there is the ingress queue
        assert qds.get_attr('TCA_KIND') == 'ingress'
        # assert it has proper handle and parent
        assert qds['handle'] == 0xffff0000
        assert qds['parent'] == TC_H_INGRESS
    def test_filter(self):
        """Attach a u32 filter with a police action and verify its TBF."""
        self.test_simple()
        self.ip.tc(RTM_NEWTFILTER, 'u32', self.interface, 0,
                   protocol=socket.AF_INET,
                   parent=0xffff0000,
                   action='drop',
                   target=0x1,
                   rate='10kbit',
                   burst=10240,
                   limit=0,
                   prio=50,
                   keys=['0x0/0x0+12'])
        fls = self.ip.get_filters(index=self.interface, parent=0xffff0000)
        # assert there are filters
        assert fls
        # assert there is one police rule:
        prs = [x for x in fls
               if x.get_attr('TCA_OPTIONS') is not None and
               (x.get_attr('TCA_OPTIONS').get_attr('TCA_U32_POLICE')
                is not None or
                x.get_attr('TCA_OPTIONS').get_attr('TCA_U32_ACT')
                is not None)][0]
        # assert the police rule has specified parameters
        options = prs.get_attr('TCA_OPTIONS')
        police_u32 = options.get_attr('TCA_U32_POLICE')
        # on modern kernels there is no TCA_U32_POLICE under
        # TCA_OPTIONS, but there is TCA_U32_ACT
        if police_u32 is None:
            police_u32 = options.get_attr('TCA_U32_ACT').\
                get_attr('TCA_ACT_PRIO_0').\
                get_attr('TCA_ACT_OPTIONS')
        police_tbf = police_u32.get_attr('TCA_POLICE_TBF')
        # 10 kbit/s == 1250 bytes/s
        assert police_tbf['rate'] == 1250
        assert police_tbf['mtu'] == 2040
    @skip_if_not_supported
    def test_bpf_filter(self):
        """Attach a BPF classifier with a gact 'ok' action and verify it."""
        self.test_simple()
        fd = get_simple_bpf_program()
        if fd == -1:
            # to get bpf filter working, one should have:
            # kernel >= 4.1
            # CONFIG_EXPERT=y
            # CONFIG_BPF_SYSCALL=y
            # CONFIG_NET_CLS_BPF=m/y
            #
            # see `grep -rn BPF_PROG_TYPE_SCHED_CLS kernel_sources`
            raise SkipTest('bpf syscall error')
        self.ip.tc(RTM_NEWTFILTER, 'bpf', self.interface, 0,
                   fd=fd, name='my_func', parent=0xffff0000,
                   action='ok', classid=1)
        fls = self.ip.get_filters(index=self.interface, parent=0xffff0000)
        assert fls
        acts = [x for x in fls
                if x.get_attr('TCA_OPTIONS') is not None and
                (x.get_attr('TCA_OPTIONS').get_attr('TCA_BPF_ACT')
                 is not None)][0]
        options = acts.get_attr('TCA_OPTIONS')
        parms = options.get_attr('TCA_BPF_ACT').\
            get_attr('TCA_ACT_PRIO_1').\
            get_attr('TCA_ACT_OPTIONS').\
            get_attr('TCA_GACT_PARMS')
        # the 'ok' action maps to 0 — presumably TC_ACT_OK; verify
        assert parms['action'] == 0
    @skip_if_not_supported
    def test_bpf_filter_policer(self):
        """Attach a BPF classifier with a policer; the TBF must round-trip."""
        self.test_simple()
        fd = get_simple_bpf_program()
        if fd == -1:
            # see comment above about kernel requirements
            raise SkipTest('bpf syscall error')
        self.ip.tc(RTM_NEWTFILTER, 'bpf', self.interface, 0,
                   fd=fd, name='my_func', parent=0xffff0000,
                   action='ok', classid=1, rate='10kbit',
                   burst=10240, mtu=2040)
        fls = self.ip.get_filters(index=self.interface, parent=0xffff0000)
        # assert the supplied policer is returned to us intact
        plcs = [x for x in fls
                if x.get_attr('TCA_OPTIONS') is not None and
                (x.get_attr('TCA_OPTIONS').get_attr('TCA_BPF_POLICE')
                 is not None)][0]
        options = plcs.get_attr('TCA_OPTIONS')
        police = options.get_attr('TCA_BPF_POLICE').\
            get_attr('TCA_POLICE_TBF')
        # 10 kbit/s == 1250 bytes/s
        assert police['rate'] == 1250
        assert police['mtu'] == 2040
class TestPfifo(BasicTest):

    def test_pfifo(self):
        """pfifo_fast qdisc can be created and exposes a priomap tuple."""
        try_qd('pfifo_fast', self.ip.tc,
               RTM_NEWQDISC, 'pfifo_fast', self.interface, 0)
        qdisc = self.get_qdisc()
        assert qdisc
        assert qdisc.get_attr('TCA_KIND') == 'pfifo_fast'
        priomap = qdisc.get_attr('TCA_OPTIONS')['priomap']
        assert isinstance(priomap, tuple)
class TestSfq(BasicTest):

    def test_sfq(self):
        """sfq qdisc is created with the requested perturb period."""
        try_qd('sfq', self.ip.tc,
               RTM_NEWQDISC, 'sfq', self.interface, 0, perturb=10)
        qdisc = self.get_qdisc()
        assert qdisc
        assert qdisc.get_attr('TCA_KIND') == 'sfq'
        assert qdisc.get_attr('TCA_OPTIONS')['perturb_period'] == 10
class TestTbf(BasicTest):

    def test_tbf(self):
        """tbf qdisc is created and reports the requested rate."""
        try_qd('tbf', self.ip.tc,
               RTM_NEWQDISC, 'tbf', self.interface, 0,
               rate='220kbit',
               latency='50ms',
               burst=1540)
        qdisc = self.get_qdisc()
        assert qdisc
        assert qdisc.get_attr('TCA_KIND') == 'tbf'
        parms = qdisc.get_attr('TCA_OPTIONS').get_attr('TCA_TBF_PARMS')
        assert parms
        # 220 kbit/s == 27500 bytes/s
        assert parms['rate'] == 27500
class TestHtb(BasicTest):
    def test_htb(self):
        """Build a full HTB tree: root qdisc, classes, sfq leaves, filters."""
        # 8<-----------------------------------------------------
        # root queue, '1:0' handle notation
        try_qd('htb', self.ip.tc,
               RTM_NEWQDISC, 'htb', self.interface, '1:',
               default='20:0')
        qds = self.get_qdiscs()
        assert len(qds) == 1
        assert qds[0].get_attr('TCA_KIND') == 'htb'
        # 8<-----------------------------------------------------
        # classes, both string and int handle notation
        try_qd('htb', self.ip.tc,
               RTM_NEWTCLASS, 'htb', self.interface, '1:1',
               parent='1:0',
               rate='256kbit',
               burst=1024 * 6)
        try_qd('htb', self.ip.tc,
               RTM_NEWTCLASS, 'htb', self.interface, 0x10010,
               parent=0x10001,
               rate='192kbit',
               burst=1024 * 6,
               prio=1)
        try_qd('htb', self.ip.tc,
               RTM_NEWTCLASS, 'htb', self.interface, '1:20',
               parent='1:1',
               rate='128kbit',
               burst=1024 * 6,
               prio=2)
        cls = self.ip.get_classes(index=self.interface)
        assert len(cls) == 3
        # 8<-----------------------------------------------------
        # leaves, both string and int handle notation
        try_qd('sfq', self.ip.tc,
               RTM_NEWQDISC, 'sfq', self.interface, '10:',
               parent='1:10',
               perturb=10)
        try_qd('sfq', self.ip.tc,
               RTM_NEWQDISC, 'sfq', self.interface, 0x200000,
               parent=0x10020,
               perturb=10)
        qds = self.get_qdiscs()
        types = set([x.get_attr('TCA_KIND') for x in qds])
        assert types == set(('htb', 'sfq'))
        # 8<-----------------------------------------------------
        # filters, both string and int handle notation
        #
        # Please note, that u32 filter requires ethernet protocol
        # numbers, as defined in protocols module. Do not provide
        # here socket.AF_INET and so on.
        #
        try_qd('u32', self.ip.tc,
               RTM_NEWTFILTER, 'u32', self.interface, '0:0',
               parent='1:0',
               prio=10,
               protocol=protocols.ETH_P_IP,
               target='1:10',
               keys=['0x0006/0x00ff+8', '0x0000/0xffc0+2'])
        try_qd('u32', self.ip.tc,
               RTM_NEWTFILTER, 'u32', self.interface, 0,
               parent=0x10000,
               prio=10,
               protocol=protocols.ETH_P_IP,
               target=0x10020,
               keys=['0x5/0xf+0', '0x10/0xff+33'])
        # 2 filters + 2 autogenerated
        fls = self.ip.get_filters(index=self.interface)
        assert len(fls) == 4
    def test_replace(self):
        """replace-class updates an existing HTB class in place."""
        self.test_htb()
        # change class
        self.ip.tc('replace-class', 'htb', self.interface,
                   handle=0x10010, parent=0x10001,
                   rate='102kbit',
                   burst=1024 * 6,
                   prio=3)
        clss = self.ip.get_classes(index=self.interface)
        for cls in clss:
            if cls['handle'] == 0x10010:
                break
        else:
            raise Exception('target class not found')
        opts = cls.get_attr('TCA_OPTIONS')
        params = opts.get_attr('TCA_HTB_PARMS')
        assert params['prio'] == 3
        # 102 kbit/s == 12750 B/s; quantum presumably rate/r2q with the
        # default r2q of 10, hence quantum * 8 == 10200 — verify
        assert params['quantum'] * 8 == 10200
|
{
"content_hash": "13ae56b5f67cd511ea932f99648f3a9a",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 74,
"avg_line_length": 35.651408450704224,
"alnum_prop": 0.5248395061728395,
"repo_name": "nazarewk/pyroute2",
"id": "fb38a2dd062b17935390b957a81bd415c6e1e5d2",
"size": "10125",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/general/test_tc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "413"
},
{
"name": "C",
"bytes": "4259"
},
{
"name": "Makefile",
"bytes": "4163"
},
{
"name": "Python",
"bytes": "607649"
},
{
"name": "Shell",
"bytes": "1914"
}
],
"symlink_target": ""
}
|
"""
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants bicluster with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
# Generate a biclustered matrix, shuffle it, recover the structure with
# Spectral Co-Clustering, and plot every stage.
data, rows, columns = make_biclusters(
    shape=(300, 300), n_clusters=5, noise=5,
    shuffle=False, random_state=0)

plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")

# Shuffle rows/columns to hide the planted biclusters.
# NOTE(review): sg._shuffle is a private sklearn helper — may break on upgrade.
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")

model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
# Consensus score of 1.0 means the planted biclusters were fully recovered.
score = consensus_score(model.biclusters_,
                        (rows[:, row_idx], columns[:, col_idx]))

# Bug fix: Python 2 print statement replaced with the print() function,
# consistent with print(__doc__) at the top of this example.
print("consensus score: {:.3f}".format(score))

# Reorder rows and columns by cluster label so biclusters are contiguous.
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]

plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")

plt.show()
|
{
"content_hash": "e70d3be15356ec99b138b98f09cf9737",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 70,
"avg_line_length": 32.129629629629626,
"alnum_prop": 0.7170028818443804,
"repo_name": "fspaolo/scikit-learn",
"id": "00c4cc474dd3df7d756b463543a24d07f5a9a5d5",
"size": "1735",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "examples/bicluster/plot_spectral_coclustering.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import sys, os, re, shutil
import argparse
from importlib import import_module
from cog.config import Profiles
# Absolute directory of this module; used to locate the cli/ subpackage.
path = os.path.dirname(os.path.abspath(__file__))
def find_subcommands():
    """Yield the names of the CLI subcommand modules in the cli/ package."""
    for entry in os.listdir(path + os.sep + 'cli'):
        stem, ext = os.path.splitext(entry)
        # only real command modules: skip argparser helpers and __init__
        if not ext.endswith('.py'):
            continue
        if stem.endswith('_argparser') or stem == '__init__':
            continue
        yield stem
def run(command):
    """Import the subcommand module and exit with its main() status."""
    module = import_module('cog.cli.%s' % command)
    sys.exit(module.main())
def usage(commands, profiles):
    """Print top-level help: available profiles, commands and their usage.

    NOTE(review): this module uses Python 2 ``print`` statements throughout;
    porting to Python 3 needs ``from __future__ import print_function``.
    """
    print 'cog, a flexible LDAP directory manager'
    print
    print 'Usage: cog [-p|--profile <profile>] [-h|--help] command [options]'
    print '    for more details run ‘cog command --help’ and'
    print '    ‘cog command subcommand --help’'
    print '    available profiles: %s and %s.' % (', '.join(profiles[:-1]), profiles[-1])
    print '    available commands: %s and %s.' % (', '.join(commands[:-1]), commands[-1])
    print
    print 'Command summary:'
    parser = dict()
    for command in commands:
        # Load each command's argparse definition and rebrand its program
        # name so the usage line reads 'cog <command> ...'.
        parser[command] = __import__('cog.cli.%s_argparser' % command, globals(), locals(), ['tool_parser']).tool_parser
        parser[command].prog = 'cog %s' % command
        parser[command].add_help = False
        # format_usage() starts with 'usage: ' and ends with a newline;
        # the slice drops both.
        print parser[command].format_usage()[6:-1]
    print
def make_user_config():
    """Create ~/.cog on first run and seed it with the example settings."""
    user_dir = os.environ['HOME'] + os.sep + '.cog'
    conf_dir = '/etc/cog'
    if not os.path.exists(user_dir):
        # Bug fix: 0750 is a Python 2-only octal literal (SyntaxError on
        # Python 3); 0o750 is valid on Python 2.6+ and 3 alike.
        os.makedirs(user_dir, mode=0o750)
        shutil.copyfile(conf_dir + os.sep + 'examples/settings.local',
                        user_dir + os.sep + 'settings')
def main():
    """Parse the profile option, then dispatch to the chosen subcommand."""
    profiles = Profiles()
    make_user_config()
    subcommands = [command for command in find_subcommands()]
    partial_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False
    )
    partial_parser.add_argument("-p", "--profile", choices=profiles.list(), dest="profile")
    args, remaining = partial_parser.parse_known_args()
    profiles.use(args.profile)
    # Bug fix: `command` was referenced below without being bound when no
    # positional arguments remained after option parsing (e.g. `cog -p x`),
    # raising NameError; default it to None and show usage in that case.
    command = None
    if remaining:
        command = remaining.pop(0)
        # rewrite argv so the subcommand's own parser sees a clean prog name
        sys.argv = ['cog %s' % command] + remaining
    if len(sys.argv) < 2 or command is None:
        usage(subcommands, profiles.list())
    elif command in subcommands:
        run(command)
if __name__ == '__main__':
    # script entry point
    main()
|
{
"content_hash": "aebe57fba929e371d9508b4035557fe5",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 120,
"avg_line_length": 35.55882352941177,
"alnum_prop": 0.6116625310173698,
"repo_name": "Demonware/cog",
"id": "b3977e5a1ef914a60a227ef9adb6c9c02c26f8de",
"size": "2626",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "cog/cmd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "60784"
},
{
"name": "Shell",
"bytes": "562"
}
],
"symlink_target": ""
}
|
"""
Generic support for objects with full-featured Parameters and
messaging.
"""
import copy
import re
import sys
import inspect
import random
import numbers
import operator
from collections import namedtuple, OrderedDict
from operator import itemgetter,attrgetter
from types import FunctionType
from functools import partial, wraps, reduce
import logging
from contextlib import contextmanager
from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL
try:
    # In case the optional ipython module is unavailable
    from .ipython import ParamPager
    param_pager = ParamPager(metaclass=True)  # Generates param description
except Exception:
    # Bug fix: narrowed from a bare ``except:``, which would also swallow
    # KeyboardInterrupt/SystemExit.  Any ordinary failure in the optional
    # IPython support must not prevent param from importing.
    param_pager = None
# Custom logging level between DEBUG (10) and INFO (20) for verbose output.
VERBOSE = INFO - 1
logging.addLevelName(VERBOSE, "VERBOSE")
# Get the appropriate logging.Logger instance. If `logger` is None, a
# logger named `"param"` will be instantiated. If `name` is set, a descendant
# logger with the name ``"param.<name>"`` is returned (or
# ``logger.name + ".<name>"``)
logger = None
def get_logger(name=None):
    """
    Return the module's Logger, or a descendant of it.

    When the module-level ``logger`` is unset, a logger named 'param' is
    used (configured with a simple stream handler on first use); with
    ``name``, the child logger '<root>.<name>' is returned instead.
    """
    if logger is not None:
        root_logger = logger
    else:
        root_logger = logging.getLogger('param')
        if not root_logger.handlers:
            # first use: give the root param logger a sane default setup
            root_logger.setLevel(logging.INFO)
            formatter = logging.Formatter(
                fmt='%(levelname)s:%(name)s: %(message)s')
            handler = logging.StreamHandler()
            handler.setFormatter(formatter)
            root_logger.addHandler(handler)
    if name is None:
        return root_logger
    return logging.getLogger('%s.%s' % (root_logger.name, name))
# Indicates whether warnings should be raised as errors, stopping
# processing.
warnings_as_exceptions = False
docstring_signature = True # Add signature to class docstrings
docstring_describe_params = True # Add parameter description to class
# docstrings (requires ipython module)
object_count = 0
warning_count = 0
@contextmanager
def logging_level(level):
    """
    Temporarily modify param's logging level.

    ``level`` is a case-insensitive level name such as 'DEBUG' or
    'VERBOSE'; the previous effective level is restored on exit, even if
    the body raises.
    """
    level = level.upper()
    levels = [DEBUG, INFO, WARNING, ERROR, CRITICAL, VERBOSE]
    level_names = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', 'VERBOSE']
    if level not in level_names:
        # Bug fix: report the valid level *names*; the old message
        # interpolated the numeric values, which is useless to the caller.
        raise Exception("Level %r not in %r" % (level, level_names))
    param_logger = get_logger()
    # renamed from `logging_level`, which shadowed this function's own name
    previous_level = param_logger.getEffectiveLevel()
    param_logger.setLevel(levels[level_names.index(level)])
    try:
        yield None
    finally:
        param_logger.setLevel(previous_level)
@contextmanager
def batch_watch(parameterized, run=True):
    """
    Context manager to batch watcher events on a parameterized object.

    The context manager will queue any events triggered by setting a
    parameter on the supplied parameterized object and dispatch them
    all at once when the context manager exits. If run=False the
    queued events are not dispatched and should be processed manually.
    """
    # Remember the flag on entry so nested batch_watch blocks restore it
    # correctly and only the outermost block dispatches.
    BATCH_WATCH = parameterized.param._BATCH_WATCH
    parameterized.param._BATCH_WATCH = True
    try:
        yield
    finally:
        parameterized.param._BATCH_WATCH = BATCH_WATCH
        # Dispatch only when requested and only at the outermost level
        # (i.e. batching was off when we entered).
        if run and not BATCH_WATCH:
            parameterized.param._batch_call_watchers()
def classlist(class_):
    """
    Return the class hierarchy above (and including) the given class,
    ordered from least- to most-specific.
    Same as inspect.getmro(class_)[::-1]
    """
    return tuple(reversed(inspect.getmro(class_)))
def descendents(class_):
    """
    Return a list of the class hierarchy below (and including) the given class.
    The list is ordered from least- to most-specific. Can be useful for
    printing the contents of an entire class hierarchy.
    """
    assert isinstance(class_, type)
    # Breadth-first traversal of the subclass tree, de-duplicating
    # classes reachable through multiple bases.
    pending = [class_]
    found = []
    while pending:
        current = pending.pop(0)
        found.append(current)
        for subclass in current.__subclasses__():
            if subclass not in pending and subclass not in found:
                pending.append(subclass)
    return found
def get_all_slots(class_):
    """
    Return a list of slot names for slots defined in class_ and its
    superclasses.
    """
    # A subclass's __slots__ attribute does not contain slots defined
    # in its superclass (the superclass' __slots__ end up as
    # attributes of the subclass), so walk the full hierarchy, skipping
    # the class itself (classlist() is least- to most-specific).
    all_slots = []
    for c in classlist(class_)[1:]:
        if hasattr(c, '__slots__'):
            # extend() avoids the redundant list-comprehension copy and
            # augmented assignment of the original implementation.
            all_slots.extend(c.__slots__)
    return all_slots
def get_occupied_slots(instance):
    """
    Return a list of slots for which values have been set.

    (While a slot might be defined, if a value for that slot hasn't
    been set, then it's an AttributeError to request the slot's
    value.)
    """
    occupied = []
    for slot in get_all_slots(type(instance)):
        if hasattr(instance, slot):
            occupied.append(slot)
    return occupied
def all_equal(arg1, arg2):
    """
    Return a single boolean for arg1==arg2, even for numpy arrays
    using element-wise comparison.

    Uses all(arg1==arg2) for sequences, and arg1==arg2 otherwise.

    If both objects have an '_infinitely_iterable' attribute, they are
    not zipped together and are compared directly instead.
    """
    if hasattr(arg1, '_infinitely_iterable') and hasattr(arg2, '_infinitely_iterable'):
        return arg1 == arg2
    try:
        return all(first == second for first, second in zip(arg1, arg2))
    except TypeError:
        # Non-iterable arguments: fall back to direct comparison.
        return arg1 == arg2
# For Python 2 compatibility.
#
# The syntax to use a metaclass changed incompatibly between 2 and
# 3. The add_metaclass() class decorator below creates a class using a
# specified metaclass in a way that works on both 2 and 3. For 3, can
# remove this decorator and specify metaclasses in a simpler way
# (https://docs.python.org/3/reference/datamodel.html#customizing-class-creation)
#
# Code from six (https://bitbucket.org/gutworth/six; version 1.4.1).
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        # Rebuild the class under the requested metaclass, dropping
        # attributes that type() creates itself.
        body = dict(cls.__dict__)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        for slot in body.get('__slots__', ()):
            body.pop(slot)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
class bothmethod(object): # pylint: disable-msg=R0903
    """
    'optional @classmethod'

    A decorator allowing a method to receive either the class object
    (when invoked on the class) or the instance object (when invoked
    on an instance) as its first argument.

    Code (but not documentation) copied from:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/523033.
    """
    # pylint: disable-msg=R0903
    def __init__(self, func):
        self.func = func
    # Non-data descriptor: binding happens at attribute-access time.
    def __get__(self, obj, type_=None):
        target = type_ if obj is None else obj
        return wraps(self.func)(partial(self.func, target))
def _getattrr(obj, attr, *args):
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return reduce(_getattr, [obj] + attr.split('.'))
# (thought I was going to have a few decorators following this pattern)
def accept_arguments(f):
    """Decorator for decorators that accept arguments.

    Turns f(actual_f, *args, **kwargs) into a decorator factory:
    f(*args, **kwargs) returns a decorator applied to actual_f.
    """
    @wraps(f)
    def _f(*args, **kwargs):
        def apply_to(actual_f):
            return f(actual_f, *args, **kwargs)
        return apply_to
    return _f
def no_instance_params(cls):
    """
    Class decorator that disables per-instance parameters on the class.
    """
    setattr(cls, '_disable_instance__params', True)
    return cls
def instance_descriptor(f):
    """
    Decorator for a Parameter ``__set__`` method: if the target object
    holds its own per-instance copy of this Parameter, delegate the set
    to that instance parameter instead of the class-level one.
    """
    # wraps() preserves the decorated setter's metadata (__name__,
    # __doc__), which the original implementation dropped.
    @wraps(f)
    def _f(self, obj, val):
        instance_param = getattr(obj, '_instance__params', {}).get(self.name)
        if instance_param is not None and self is not instance_param:
            instance_param.__set__(obj, val)
            return
        return f(self, obj, val)
    return _f
@accept_arguments
def depends(func, *dependencies, **kw):
    """
    Annotates a Parameterized method to express its dependencies.

    The specified dependencies can be either be Parameters of this
    class, or Parameters of subobjects (Parameterized objects that
    are values of this object's parameters). Dependencies can either
    be on Parameter values, or on other metadata about the Parameter.
    """
    # python3 would allow watch to be a keyword-only argument
    # (i.e. "func,*dependencies,watch=False"); with **kw we validate by hand.
    watch = kw.pop("watch", False)
    assert len(kw) == 0, "@depends accepts only 'watch' kw"
    # TODO: rename dinfo
    dependency_info = getattr(func, '_dinfo', {})
    dependency_info.update({'dependencies': dependencies,
                            'watch': watch})
    @wraps(func)
    def _depends(*args, **kw):
        return func(*args, **kw)
    # storing here risks it being tricky to find if other libraries
    # mess around with methods
    _depends._dinfo = dependency_info
    return _depends
@accept_arguments
def output(func, *output, **kw):
    """
    output allows annotating a method on a Parameterized class to
    declare that it returns an output of a specific type. The outputs
    of a Parameterized class can be queried using the
    Parameterized.param.outputs method. By default the output will
    inherit the method name but a custom name can be declared by
    expressing the Parameter type using a keyword argument. Declaring
    multiple return types using keywords is only supported in Python >= 3.6.

    The simplest declaration simply declares the method returns an
    object without any type guarantees, e.g.:

      @output()

    If a specific parameter type is specified this is a declaration
    that the method will return a value of that type, e.g.:

      @output(param.Number())

    To override the default name of the output the type may be declared
    as a keyword argument, e.g.:

      @output(custom_name=param.Number())

    Multiple outputs may be declared using keywords mapping from
    output name to the type for Python >= 3.6 or using tuples of the
    same format, which is supported for earlier versions, i.e. these
    two declarations are equivalent:

      @output(number=param.Number(), string=param.String())

      @output(('number', param.Number()), ('string', param.String()))

    output also accepts Python object types which will be upgraded to
    a ClassSelector, e.g.:

      @output(int)
    """
    if output:
        # Positional declarations: ('name', type) tuples, bare name
        # strings, or plain types/Parameter instances.
        outputs = []
        for idx, entry in enumerate(output):
            position = idx if len(output) > 1 else None
            if isinstance(entry, tuple) and len(entry) == 2 and isinstance(entry[0], str):
                outputs.append(entry + (position,))
            elif isinstance(entry, str):
                outputs.append((entry, Parameter(), position))
            else:
                outputs.append((None, entry, position))
    elif kw:
        py_major = sys.version_info.major
        py_minor = sys.version_info.minor
        if (py_major < 3 or (py_major == 3 and py_minor < 6)) and len(kw) > 1:
            raise ValueError('Multiple output declaration using keywords '
                             'only supported in Python >= 3.6.')
        # (requires keywords to be kept ordered, which was not true in previous versions)
        outputs = [(name, otype, idx if len(kw) > 1 else None)
                   for idx, (name, otype) in enumerate(kw.items())]
    else:
        # No declaration at all: a single untyped, unnamed output.
        outputs = [(None, Parameter(), None)]
    names, processed = [], []
    for name, otype, position in outputs:
        # Upgrade plain types (or tuples of types) to ClassSelector and
        # Parameter classes to Parameter instances.
        if isinstance(otype, type):
            if issubclass(otype, Parameter):
                otype = otype()
            else:
                from . import ClassSelector
                otype = ClassSelector(class_=otype)
        elif isinstance(otype, tuple) and all(isinstance(t, type) for t in otype):
            from . import ClassSelector
            otype = ClassSelector(class_=otype)
        if not isinstance(otype, Parameter):
            raise ValueError('output type must be declared with a Parameter class, '
                             'instance or a Python object type.')
        processed.append((name, otype, position))
        names.append(name)
    if len(set(names)) != len(names):
        raise ValueError('When declaring multiple outputs each value '
                         'must be unique.')
    _dinfo = getattr(func, '_dinfo', {})
    _dinfo.update({'outputs': processed})
    @wraps(func)
    def _output(*args, **kw):
        return func(*args, **kw)
    _output._dinfo = _dinfo
    return _output
def _params_depended_on(minfo):
    """Resolve a method's declared dependencies into a flat list of PInfos.

    Dependencies that are themselves methods (MInfo) are expanded
    recursively; with no explicit declaration, all of the class's
    parameters are used.
    """
    resolved = []
    dinfo = getattr(minfo.method, "_dinfo", {})
    for spec in dinfo.get('dependencies', list(minfo.cls.param)):
        for dependency in (minfo.inst or minfo.cls).param._spec_to_obj(spec):
            if isinstance(dependency, PInfo):
                resolved.append(dependency)
            else:
                resolved.extend(_params_depended_on(dependency))
    return resolved
def _m_caller(self,n):
return lambda event: getattr(self,n)()
# Dependency on a Parameter: owning instance/class, attribute name, the
# Parameter object itself and which aspect ('what') is depended on.
PInfo = namedtuple("PInfo","inst cls name pobj what")
# Dependency on a Parameterized method (expanded recursively by
# _params_depended_on).
MInfo = namedtuple("MInfo","inst cls name method")
# Change notification passed to watcher callbacks.
Event = namedtuple("Event","what name obj cls old new type")
# Subscription record describing a registered watcher.
Watcher = namedtuple("Watcher","inst cls fn mode onlychanged parameter_names what")
class ParameterMetaclass(type):
    """
    Metaclass allowing control over creation of Parameter classes.
    """
    def __new__(mcs,classname,bases,classdict):
        """Create a Parameter class, redirecting its __doc__.

        The class docstring is stashed under '__classdoc' so that
        help() on a Parameter *instance* shows the value of its 'doc'
        slot, while help() on the *class* still shows the original
        class docstring (see __getattribute__ below).
        """
        # store the class's docstring in __classdoc
        if '__doc__' in classdict:
            classdict['__classdoc']=classdict['__doc__']
        # when asking for help on Parameter *object*, return the doc
        # slot
        classdict['__doc__']=property(attrgetter('doc'))
        # To get the benefit of slots, subclasses must themselves define
        # __slots__, whether or not they define attributes not present in
        # the base Parameter class. That's because a subclass will have
        # a __dict__ unless it also defines __slots__.
        if '__slots__' not in classdict:
            classdict['__slots__']=[]
        return type.__new__(mcs,classname,bases,classdict)
    def __getattribute__(mcs,name):
        """Return the stashed class docstring for '__doc__' lookups."""
        if name=='__doc__':
            # when asking for help on Parameter *class*, return the
            # stored class docstring
            return type.__getattribute__(mcs,'__classdoc')
        else:
            return type.__getattribute__(mcs,name)
# CEBALERT: we break some aspects of slot handling for Parameter and
# Parameterized. The __new__ methods in the metaclasses for those two
# classes omit to handle the case where __dict__ is passed in
# __slots__ (and they possibly omit other things too). Additionally,
# various bits of code in the Parameterized class assumes that all
# Parameterized instances have a __dict__, but I'm not sure that's
# guaranteed to be true (although it's true at the moment).
# CB: we could maybe reduce the complexity by doing something to allow
# a parameter to discover things about itself when created (would also
# allow things like checking a Parameter is owned by a
# Parameterized). I have some vague ideas about what to do.
@add_metaclass(ParameterMetaclass)
class Parameter(object):
    """
    An attribute descriptor for declaring parameters.

    Parameters are a special kind of class attribute. Setting a
    Parameterized class attribute to be a Parameter instance causes
    that attribute of the class (and the class's instances) to be
    treated as a Parameter. This allows special behavior, including
    dynamically generated parameter values, documentation strings,
    constant and read-only parameters, and type or range checking at
    assignment time.

    For example, suppose someone wants to define two new kinds of
    objects Foo and Bar, such that Bar has a parameter delta, Foo is a
    subclass of Bar, and Foo has parameters alpha, sigma, and gamma
    (and delta inherited from Bar). She would begin her class
    definitions with something like this:

    class Bar(Parameterized):
        delta = Parameter(default=0.6, doc='The difference between steps.')
        ...
    class Foo(Bar):
        alpha = Parameter(default=0.1, doc='The starting value.')
        sigma = Parameter(default=0.5, doc='The standard deviation.',
                          constant=True)
        gamma = Parameter(default=1.0, doc='The ending value.')
        ...

    Class Foo would then have four parameters, with delta defaulting
    to 0.6.

    Parameters have several advantages over plain attributes:

    1. Parameters can be set automatically when an instance is
       constructed: The default constructor for Foo (and Bar) will
       accept arbitrary keyword arguments, each of which can be used
       to specify the value of a Parameter of Foo (or any of Foo's
       superclasses).  E.g., if a script does this:

           myfoo = Foo(alpha=0.5)

       myfoo.alpha will return 0.5, without the Foo constructor
       needing special code to set alpha.

       If Foo implements its own constructor, keyword arguments will
       still be accepted if the constructor accepts a dictionary of
       keyword arguments (as in ``def __init__(self,**params):``), and
       then each class calls its superclass (as in
       ``super(Foo,self).__init__(**params)``) so that the
       Parameterized constructor will process the keywords.

    2. A Parameterized class need specify only the attributes of a
       Parameter whose values differ from those declared in
       superclasses; the other values will be inherited.  E.g. if Foo
       declares

        delta = Parameter(default=0.2)

       the default value of 0.2 will override the 0.6 inherited from
       Bar, but the doc will be inherited from Bar.

    3. The Parameter descriptor class can be subclassed to provide
       more complex behavior, allowing special types of parameters
       that, for example, require their values to be numbers in
       certain ranges, generate their values dynamically from a random
       distribution, or read their values from a file or other
       external source.

    4. The attributes associated with Parameters provide enough
       information for automatically generating property sheets in
       graphical user interfaces, allowing Parameterized instances to
       be edited by users.

    Note that Parameters can only be used when set as class attributes
    of Parameterized classes. Parameters used as standalone objects,
    or as class attributes of non-Parameterized classes, will not have
    the behavior described here.
    """
    # Because they implement __get__ and __set__, Parameters are known
    # as 'descriptors' in Python; see "Implementing Descriptors" and
    # "Invoking Descriptors" in the 'Customizing attribute access'
    # section of the Python reference manual:
    # http://docs.python.org/ref/attribute-access.html
    #
    # Overview of Parameters for programmers
    # ======================================
    #
    # Consider the following code:
    #
    #
    # class A(Parameterized):
    #     p = Parameter(default=1)
    #
    # a1 = A()
    # a2 = A()
    #
    #
    # * a1 and a2 share one Parameter object (A.__dict__['p']).
    #
    # * The default (class) value of p is stored in this Parameter
    #   object (A.__dict__['p'].default).
    #
    # * If the value of p is set on a1 (e.g. a1.p=2), a1's value of p
    #   is stored in a1 itself (a1.__dict__['_p_param_value'])
    #
    # * When a1.p is requested, a1.__dict__['_p_param_value'] is
    #   returned. When a2.p is requested, '_p_param_value' is not
    #   found in a2.__dict__, so A.__dict__['p'].default (i.e. A.p) is
    #   returned instead.
    #
    #
    # Be careful when referring to the 'name' of a Parameter:
    #
    # * A Parameterized class has a name for the attribute which is
    #   being represented by the Parameter ('p' in the example above);
    #   in the code, this is called the 'attrib_name'.
    #
    # * When a Parameterized instance has its own local value for a
    #   parameter, it is stored as '_X_param_value' (where X is the
    #   attrib_name for the Parameter); in the code, this is called
    #   the internal_name.
    # So that the extra features of Parameters do not require a lot of
    # overhead, Parameters are implemented using __slots__ (see
    # http://www.python.org/doc/2.4/ref/slots.html).  Instead of having
    # a full Python dictionary associated with each Parameter instance,
    # Parameter instances have an enumerated list (named __slots__) of
    # attributes, and reserve just enough space to store these
    # attributes.  Using __slots__ requires special support for
    # operations to copy and restore Parameters (e.g. for Python
    # persistent storage pickling); see __getstate__ and __setstate__.
    __slots__ = ['name','_internal_name','default','doc',
                 'precedence','instantiate','constant','readonly',
                 'pickle_default_value','allow_None', 'per_instance',
                 'watchers', 'owner', '_label']
    # Note: When initially created, a Parameter does not know which
    # Parameterized class owns it, nor does it know its names
    # (attribute name, internal name).  Once the owning Parameterized
    # class is created, owner, name, and _internal_name are
    # set.
    def __init__(self,default=None,doc=None,label=None,precedence=None,  # pylint: disable-msg=R0913
                 instantiate=False,constant=False,readonly=False,
                 pickle_default_value=True, allow_None=False,
                 per_instance=True):
        """
        Initialize a new Parameter object: store the supplied attributes.

        default: the owning class's value for the attribute
        represented by this Parameter.

        precedence is a value, usually in the range 0.0 to 1.0, that
        allows the order of Parameters in a class to be defined (for
        e.g. in GUI menus). A negative precedence indicates a
        parameter that should be hidden in e.g. GUI menus.

        default, doc, and precedence default to None. This is to allow
        inheritance of Parameter slots (attributes) from the owning-class'
        class hierarchy (see ParameterizedMetaclass).

        per_instance defaults to True and controls whether a new
        Parameter instance can be created for every Parameterized
        instance. If False, all instances of a Parameterized class
        will share the same parameter object, including all validation
        attributes.

        In rare cases where the default value should not be pickled,
        set pickle_default_value=False (e.g. for file search paths).
        """
        self.name = None
        self._internal_name = None
        self.owner = None
        self._label = label
        self.precedence = precedence
        self.default = default
        self.doc = doc
        self.constant = constant or readonly # readonly => constant
        self.readonly = readonly
        self._set_instantiate(instantiate)
        self.pickle_default_value = pickle_default_value
        self.allow_None = (default is None or allow_None)
        self.watchers = {}
        self.per_instance = per_instance
    @property
    def label(self):
        """Optional display label; when none was supplied, falls back to
        label_formatter applied to the parameter name."""
        if self.name and self._label is None:
            return label_formatter(self.name)
        else:
            return self._label
    @label.setter
    def label(self, val):
        self._label = val
    def _set_instantiate(self,instantiate):
        """Constant parameters must be instantiated."""
        # CB: instantiate doesn't actually matter for read-only
        # parameters, since they can't be set even on a class.  But
        # this avoids needless instantiation.
        if self.readonly:
            self.instantiate = False
        else:
            self.instantiate = instantiate or self.constant # pylint: disable-msg=W0201
    # TODO: quick trick to allow subscription to the setting of
    # parameter metadata. ParameterParameter?
    # Note that unlike with parameter value setting, there's no access
    # to the Parameterized instance, so no per-instance subscription.
    def __setattr__(self,attribute,value):
        """Set a slot value, notifying any watchers registered for that
        metadata attribute ('default' is excluded here: value changes
        are dispatched from __set__ instead)."""
        implemented = (attribute!="default" and hasattr(self,'watchers') and attribute in self.watchers)
        try:
            old = getattr(self,attribute) if implemented else NotImplemented
        except AttributeError as e:
            if attribute in self.__slots__:
                # If Parameter slot is defined but an AttributeError was raised
                # we are in __setstate__ and watchers should not be triggered
                old = NotImplemented
            else:
                raise e
        super(Parameter, self).__setattr__(attribute, value)
        if old is not NotImplemented:
            event = Event(what=attribute,name=self.name,obj=None,cls=self.owner,old=old,new=value, type=None)
            for watcher in self.watchers[attribute]:
                self.owner.param._call_watcher(watcher, event)
            if not self.owner.param._BATCH_WATCH:
                self.owner.param._batch_call_watchers()
    def __get__(self,obj,objtype): # pylint: disable-msg=W0613
        """
        Return the value for this Parameter.

        If called for a Parameterized class, produce that
        class's value (i.e. this Parameter object's 'default'
        attribute).

        If called for a Parameterized instance, produce that
        instance's value, if one has been set - otherwise produce the
        class's value (default).
        """
        # NB: obj can be None (when __get__ called for a
        # Parameterized class); objtype is never None
        if obj is None:
            result = self.default
        else:
            result = obj.__dict__.get(self._internal_name,self.default)
        return result
    @instance_descriptor
    def __set__(self,obj,val):
        """
        Set the value for this Parameter.

        If called for a Parameterized class, set that class's
        value (i.e. set this Parameter object's 'default' attribute).

        If called for a Parameterized instance, set the value of
        this Parameter on that instance (i.e. in the instance's
        __dict__, under the parameter's internal_name).

        If the Parameter's constant attribute is True, only allows
        the value to be set for a Parameterized class or on
        uninitialized Parameterized instances.

        If the Parameter's readonly attribute is True, only allows the
        value to be specified in the Parameter declaration inside the
        Parameterized source code. A read-only parameter also
        cannot be set on a Parameterized class.

        Note that until we support some form of read-only
        object, it is still possible to change the attributes of the
        object stored in a constant or read-only Parameter (e.g. the
        left bound of a BoundingBox).
        """
        # ALERT: Deprecated Number set_hook called here to avoid duplicating
        # setter, should be removed in 2.0
        if hasattr(self, 'set_hook'):
            val = self.set_hook(obj,val)
        self._validate(val)
        _old = NotImplemented
        # NB: obj can be None (when __set__ called for a
        # Parameterized class)
        if self.constant or self.readonly:
            if self.readonly:
                raise TypeError("Read-only parameter '%s' cannot be modified"%self.name)
            elif obj is None: #not obj
                _old = self.default
                self.default = val
            elif not obj.initialized:
                _old = obj.__dict__.get(self._internal_name,self.default)
                obj.__dict__[self._internal_name] = val
            else:
                raise TypeError("Constant parameter '%s' cannot be modified"%self.name)
        else:
            if obj is None:
                _old = self.default
                self.default = val
            else:
                _old = obj.__dict__.get(self._internal_name,self.default)
                obj.__dict__[self._internal_name] = val
        self._post_setter(obj, val)
        # Collect the watchers to notify: instance-level watchers take
        # precedence over the Parameter's own 'value' watchers.
        if obj is None:
            watchers = self.watchers.get("value",[])
        else:
            watchers = getattr(obj,"_param_watchers",{}).get(self.name,{}).get('value',self.watchers.get("value",[]))
        event = Event(what='value',name=self.name,obj=obj,cls=self.owner,old=_old,new=val, type=None)
        # For class-level sets, dispatch via the owning class (the Event
        # above still records obj=None); bail out if there is no owner.
        obj = self.owner if obj is None else obj
        if obj is None:
            return
        for watcher in watchers:
            obj.param._call_watcher(watcher, event)
        if not obj.param._BATCH_WATCH:
            obj.param._batch_call_watchers()
    def _validate(self, val):
        """Implements validation for the parameter"""
    def _post_setter(self, obj, val):
        """Called after the parameter value has been validated and set"""
    def __delete__(self,obj):
        raise TypeError("Cannot delete '%s': Parameters deletion not allowed." % self.name)
    def _set_names(self, attrib_name):
        """Bind this Parameter's attribute name and internal name,
        refusing to rename a Parameter already claimed by a class
        (Parameter objects may not be shared between classes)."""
        if None not in (self.owner, self.name) and attrib_name != self.name:
            raise AttributeError('The %s parameter %r has already been '
                                 'assigned a name by the %s class, '
                                 'could not assign new name %r. Parameters '
                                 'may not be shared by multiple classes; '
                                 'ensure that you create a new parameter '
                                 'instance for each new class.'
                                 % (type(self).__name__, self.name,
                                    self.owner.name, attrib_name))
        self.name = attrib_name
        self._internal_name = "_%s_param_value"%attrib_name
    def __getstate__(self):
        """
        All Parameters have slots, not a dict, so we have to support
        pickle and deepcopy ourselves.
        """
        state = {}
        for slot in get_occupied_slots(self):
            state[slot] = getattr(self,slot)
        return state
    def __setstate__(self,state):
        # set values of __slots__ (instead of in non-existent __dict__)
        # Handle renamed slots introduced for instance params
        if '_attrib_name' in state:
            state['name'] = state.pop('_attrib_name')
        if '_owner' in state:
            state['owner'] = state.pop('_owner')
        # Fill in slots added after older pickles were created.
        if 'watchers' not in state:
            state['watchers'] = {}
        if 'per_instance' not in state:
            state['per_instance'] = False
        if '_label' not in state:
            state['_label'] = None
        for (k,v) in state.items():
            setattr(self,k,v)
# Define one particular type of Parameter that is used in this file
class String(Parameter):
    """
    A String Parameter, with a default value and optional regular
    expression (regex) matching.

    Example of using a regex to implement IPv4 address matching::

      class IPAddress(String):
        '''IPv4 address as a string (dotted decimal notation)'''
       def __init__(self, default="0.0.0.0", allow_None=False, **kwargs):
           ip_regex = '^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'
           super(IPAddress, self).__init__(default=default, regex=ip_regex, **kwargs)
    """
    __slots__ = ['regex']
    # On Python 2 both str and unicode are accepted via basestring.
    basestring = basestring if sys.version_info[0]==2 else str # noqa: it is defined
    def __init__(self, default="", regex=None, allow_None=False, **kwargs):
        super(String, self).__init__(default=default, allow_None=allow_None, **kwargs)
        self.regex = regex
        # A default of None implicitly permits None values.
        self.allow_None = (default is None or allow_None)
        self._validate(default)
    def _validate(self, val):
        """Check that val is a string (or None when allowed) matching the regex."""
        if val is None:
            if self.allow_None:
                return
            raise ValueError("String '%s' only takes a string value."%self.name)
        if not isinstance(val, self.basestring):
            raise ValueError("String '%s' only takes a string value."%self.name)
        if self.regex is not None and re.match(self.regex, val) is None:
            raise ValueError("String '%s': '%s' does not match regex '%s'."%(self.name,val,self.regex))
class shared_parameters(object):
    """
    Context manager to share parameter instances when creating
    multiple Parameterized objects of the same type. Parameter default
    values are instantiated once and cached to be reused when another
    Parameterized object of the same type is instantiated.

    Can be useful to easily modify large collections of Parameterized
    objects at once and can provide a significant speedup.
    """
    _share = False       # True while inside the context manager
    _shared_cache = {}   # maps (type-name, parameter-name) -> shared default
    def __enter__(self):
        cls = shared_parameters
        cls._share = True
    def __exit__(self, exc_type, exc_val, exc_tb):
        cls = shared_parameters
        cls._share = False
        cls._shared_cache = {}
def as_uninitialized(fn):
    """
    Decorator: call fn with the parameterized_instance's
    initialization flag set to False, then revert the flag.

    (Used to decorate Parameterized methods that must alter
    a constant Parameter.)
    """
    @wraps(fn)
    def override_initialization(self_, *args, **kw):
        # self_ is the Parameters namespace; its .self is the instance.
        instance = self_.self
        saved_flag = instance.initialized
        instance.initialized = False
        fn(instance, *args, **kw)
        instance.initialized = saved_flag
    return override_initialization
class Comparator(object):
    """
    Comparator defines methods for determining whether two objects
    should be considered equal. It works by registering custom
    comparison functions, which may either be registered by type or
    with a predicate function. If no matching comparison can be found
    for the two objects the comparison will return False.

    If registered by type the Comparator will check whether both
    objects are of that type and apply the comparison. If the equality
    function is instead registered with a function it will call the
    function with each object individually to check if the comparison
    applies. This is useful for defining comparisons for objects
    without explicitly importing them.

    To use the Comparator simply call the is_equal function.
    """
    # Maps a type (or predicate function) to its equality function.
    equalities = {
        numbers.Number: operator.eq,
        String.basestring: operator.eq,
        bytes: operator.eq,
        type(None): operator.eq
    }
    @classmethod
    def is_equal(cls, obj1, obj2):
        """Return True if a registered comparison deems obj1 == obj2."""
        for eq_type, eq in cls.equalities.items():
            if ((isinstance(eq_type, FunctionType)
                 and eq_type(obj1) and eq_type(obj2))
                or (isinstance(obj1, eq_type) and isinstance(obj2, eq_type))):
                return eq(obj1, obj2)
        # Fall back to structural comparison for standard containers.
        if isinstance(obj2, (list, set, tuple)):
            return cls.compare_iterator(obj1, obj2)
        elif isinstance(obj2, dict):
            return cls.compare_mapping(obj1, obj2)
        return False
    @classmethod
    def compare_iterator(cls, obj1, obj2):
        """Element-wise equality for same-type, same-length iterables."""
        if type(obj1) != type(obj2) or len(obj1) != len(obj2):
            return False
        return all(cls.is_equal(item1, item2)
                   for item1, item2 in zip(obj1, obj2))
    @classmethod
    def compare_mapping(cls, obj1, obj2):
        """Key/value equality for same-type, same-length mappings."""
        if type(obj1) != type(obj2) or len(obj1) != len(obj2):
            return False
        for key in obj1:
            if key not in obj2 or not cls.is_equal(obj1[key], obj2[key]):
                return False
        return True
class Parameters(object):
"""Object that holds the namespace and implementation of Parameterized
methods as well as any state that is not in __slots__ or the
Parameters themselves.
Exists at both the metaclass level (instantiated by the metaclass)
and at the instance level. Can contain state specific to either the
class or the instance as necessary.
"""
_disable_stubs = None # Flag used to disable stubs in the API1 tests
# None for no action, True to raise and False to warn.
    def __init__(self_, cls, self=None):
        """
        cls is the Parameterized class which is always set.
        self is the instance if set.
        """
        self_.cls = cls
        self_.self = self
        self_._BATCH_WATCH = False # If true, Event and watcher objects are queued.
        self_._TRIGGER = False
        self_._events = [] # Queue of batched events
        self_._watchers = [] # Queue of batched watchers
@property
def self_or_cls(self_):
return self_.cls if self_.self is None else self_.self
    def __getitem__(self_, key):
        """
        Returns the class or instance parameter
        """
        inst = self_.self
        parameters = self_.objects(False) if inst is None else inst.param.objects(False)
        p = parameters[key]
        # Lazily create a per-instance copy of the class Parameter the
        # first time it is requested on an instance (unless disabled).
        if (inst is not None and p.per_instance and
            not getattr(inst, '_disable_instance__params', False)):
            if key not in inst._instance__params:
                try:
                    # Do not copy watchers on class parameter
                    watchers = p.watchers
                    p.watchers = {}
                    p = copy.copy(p)
                except:
                    raise
                finally:
                    # Restore watchers on whichever object p now refers
                    # to (the copy, or the class parameter if copy failed).
                    p.watchers = watchers
                p.owner = inst
                inst._instance__params[key] = p
            else:
                p = inst._instance__params[key]
        return p
def __dir__(self_):
"""
Adds parameters to dir
"""
return super(Parameters, self_).__dir__() + list(self_)
def __iter__(self_):
"""
Iterates over the parameters on this object.
"""
for p in self_.objects(instance=False):
yield p
def __contains__(self_, param):
return param in list(self_)
    def __getattr__(self_, attr):
        """
        Extends attribute access to parameter objects.
        """
        cls = self_.__dict__.get('cls')
        if cls is None: # Class not initialized
            raise AttributeError
        try:
            # Fast path: use the cached, name-mangled parameter list
            # stored on the class by the metaclass.
            params = list(getattr(cls, '_%s__params' % cls.__name__))
        except AttributeError:
            # Fallback: scan the class hierarchy for Parameter attributes.
            params = [n for class_ in classlist(cls) for n, v in class_.__dict__.items()
                      if isinstance(v, Parameter)]
        if attr in params:
            return self_.__getitem__(attr)
        elif self_.self is None:
            raise AttributeError("type object '%s.param' has no attribute %r" %
                                 (self_.cls.__name__, attr))
        else:
            raise AttributeError("'%s.param' object has no attribute %r" %
                                 (self_.cls.__name__, attr))
    @as_uninitialized
    def _set_name(self_, name):
        # NOTE: as_uninitialized passes the Parameterized *instance* as
        # the first argument, so self_ here is the instance itself.
        self = self_.param.self
        self.name=name
    @as_uninitialized
    def _generate_name(self_):
        # Assign a default name of the form '<ClassName>NNNNN', using
        # the module-level instance counter for uniqueness.
        self = self_.param.self
        self.param._set_name('%s%05d' % (self.__class__.__name__ ,object_count))
    @as_uninitialized
    def _setup_params(self_,**params):
        """
        Initialize default and keyword parameter values.

        First, ensures that all Parameters with 'instantiate=True'
        (typically used for mutable Parameters) are copied directly
        into each object, to ensure that there is an independent copy
        (to avoid surprising aliasing errors). Then sets each of the
        keyword arguments, warning when any of them are not defined as
        parameters.

        Constant Parameters can be set during calls to this method
        (the @as_uninitialized decorator clears the initialized flag).
        """
        self = self_.param.self
        ## Deepcopy all 'instantiate=True' parameters
        # (build a set of names first to avoid redundantly instantiating
        # a later-overridden parent class's parameter)
        params_to_instantiate = {}
        for class_ in classlist(type(self)):
            if not issubclass(class_, Parameterized):
                continue
            for (k,v) in class_.__dict__.items():
                # (avoid replacing name with the default of None)
                if isinstance(v,Parameter) and v.instantiate and k!="name":
                    params_to_instantiate[k]=v
        for p in params_to_instantiate.values():
            self.param._instantiate_param(p)
        ## keyword arg setting
        for name,val in params.items():
            desc = self.__class__.get_param_descriptor(name)[0] # pylint: disable-msg=E1101
            if not desc:
                self.param.warning("Setting non-parameter attribute %s=%s using a mechanism intended only for parameters",name,val)
            # i.e. if not desc it's setting an attribute in __dict__, not a Parameter
            setattr(self,name,val)
    @classmethod
    def deprecate(cls, fn):
        """
        Decorator to issue warnings for API moving onto the param
        namespace and to add a docstring directing people to the
        appropriate method.
        """
        def inner(*args, **kwargs):
            # _disable_stubs is tri-state: True -> raise, None -> silently
            # allow the old API, False -> log a deprecation warning.
            if cls._disable_stubs:
                raise AssertionError('Stubs supporting old API disabled')
            elif cls._disable_stubs is None:
                pass
            elif cls._disable_stubs is False:
                get_logger(name=args[0].__class__.__name__).log(
                    WARNING, 'Use method %r via param namespace ' % fn.__name__)
            return fn(*args, **kwargs)
        inner.__doc__= "Inspect .param.%s method for the full docstring" % fn.__name__
        return inner
@classmethod
def _changed(cls, event):
"""
Predicate that determines whether a Event object has actually
changed such that old != new.
"""
return not Comparator.is_equal(event.old, event.new)
    # CEBALERT: this is a bit ugly
    def _instantiate_param(self_,param_obj,dict_=None,key=None):
        # deepcopy param_obj.default into self.__dict__ (or dict_ if supplied)
        # under the parameter's _internal_name (or key if supplied)
        self = self_.self
        dict_ = dict_ or self.__dict__
        key = key or param_obj._internal_name
        # When parameter sharing is enabled, all instances of the same class
        # share one deep copy per (class, parameter-name) pair.
        param_key = (str(type(self)), param_obj.name)
        if shared_parameters._share:
            if param_key in shared_parameters._shared_cache:
                new_object = shared_parameters._shared_cache[param_key]
            else:
                new_object = copy.deepcopy(param_obj.default)
                shared_parameters._shared_cache[param_key] = new_object
        else:
            new_object = copy.deepcopy(param_obj.default)
        dict_[key]=new_object
        if isinstance(new_object,Parameterized):
            global object_count
            object_count+=1
            # CB: writes over name given to the original object;
            # should it instead keep the same name?
            new_object.param._generate_name()
# Classmethods
def print_param_defaults(self_):
"""Print the default values of all cls's Parameters."""
cls = self_.cls
for key,val in cls.__dict__.items():
if isinstance(val,Parameter):
print(cls.__name__+'.'+key+ '='+ repr(val.default))
def set_default(self_,param_name,value):
"""
Set the default value of param_name.
Equivalent to setting param_name on the class.
"""
cls = self_.cls
setattr(cls,param_name,value)
    def _add_parameter(self_, param_name,param_obj):
        """
        Add a new Parameter object into this object's class.
        Supposed to result in a Parameter equivalent to one declared
        in the class's source code.
        """
        # CEBALERT: can't we just do
        # setattr(cls,param_name,param_obj)? The metaclass's
        # __setattr__ is actually written to handle that. (Would also
        # need to do something about the params() cache. That cache
        # is a pain, but it definitely improved the startup time; it
        # would be worthwhile making sure no method except for one
        # "add_param()" method has to deal with it (plus any future
        # remove_param() method.)
        cls = self_.cls
        # type.__setattr__ bypasses the metaclass's __setattr__, which would
        # otherwise treat this as setting a parameter value.
        type.__setattr__(cls,param_name,param_obj)
        ParameterizedMetaclass._initialize_parameter(cls,param_name,param_obj)
        # delete cached params()
        try:
            # The cache attribute name is mangled by hand (see objects()).
            delattr(cls,'_%s__params'%cls.__name__)
        except AttributeError:
            pass
    def params(self_, parameter_name=None):
        """
        Return the Parameters of this class as the
        dictionary {name: parameter_object}
        Includes Parameters from this class and its
        superclasses.
        """
        # Warn when old-style .params is used on an object that has
        # instance-level parameter objects, which .params cannot represent.
        if self_.self is not None and self_.self._instance__params:
            self_.warning('The Parameterized instance has instance '
                          'parameters created using new-style param '
                          'APIs, which are incompatible with .params. '
                          'Use the new more explicit APIs on the '
                          '.param accessor to query parameter instances.'
                          'To query all parameter instances use '
                          '.param.objects with the option to return '
                          'either class or instance parameter objects. '
                          'Alternatively use .param[name] indexing to '
                          'access a specific parameter object by name.')
        pdict = self_.objects(instance='existing')
        if parameter_name is None:
            return pdict
        else:
            # Raises KeyError for an unknown parameter name.
            return pdict[parameter_name]
    # Bothmethods
    def set_param(self_, *args,**kwargs):
        """
        For each param=value keyword argument, sets the corresponding
        parameter of this object or class to the given value.
        For backwards compatibility, also accepts
        set_param("param",value) for a single parameter value using
        positional arguments, but the keyword interface is preferred
        because it is more compact and can set multiple values.
        """
        # Save and set the batch-watch flag so that watchers fire once at
        # the end instead of once per assignment; every error path below
        # must restore it before raising.
        BATCH_WATCH = self_.self_or_cls.param._BATCH_WATCH
        self_.self_or_cls.param._BATCH_WATCH = True
        self_or_cls = self_.self_or_cls
        if args:
            if len(args) == 2 and not args[0] in kwargs and not kwargs:
                kwargs[args[0]] = args[1]
            else:
                self_.self_or_cls.param._BATCH_WATCH = False
                raise ValueError("Invalid positional arguments for %s.set_param" %
                                 (self_or_cls.name))
        for (k, v) in kwargs.items():
            if k not in self_or_cls.param:
                self_.self_or_cls.param._BATCH_WATCH = False
                raise ValueError("'%s' is not a parameter of %s" % (k, self_or_cls.name))
            try:
                setattr(self_or_cls, k, v)
            except:
                self_.self_or_cls.param._BATCH_WATCH = False
                raise
        # Restore the previous flag; only fire watchers if we were the
        # outermost (non-nested) batch.
        self_.self_or_cls.param._BATCH_WATCH = BATCH_WATCH
        if not BATCH_WATCH:
            self_._batch_call_watchers()
    def objects(self_, instance=True):
        """
        Returns the Parameters of this instance or class
        If instance=True and called on a Parameterized instance it
        will create instance parameters for all Parameters defined on
        the class. To force class parameters to be returned use
        instance=False. Since classes avoid creating instance
        parameters unless necessary you may also request only existing
        instance parameters to be returned by setting
        instance='existing'.
        """
        cls = self_.cls
        # CB: we cache the parameters because this method is called often,
        # and parameters are rarely added (and cannot be deleted)
        try:
            pdict = getattr(cls, '_%s__params' % cls.__name__)
        except AttributeError:
            paramdict = {}
            # Walk the MRO base-first so subclasses override superclasses.
            for class_ in classlist(cls):
                for name, val in class_.__dict__.items():
                    if isinstance(val, Parameter):
                        paramdict[name] = val
            # We only want the cache to be visible to the cls on which
            # params() is called, so we mangle the name ourselves at
            # runtime (if we were to mangle it now, it would be
            # _Parameterized.__params for all classes).
            setattr(cls, '_%s__params' % cls.__name__, paramdict)
            pdict = paramdict
        if instance and self_.self is not None:
            if instance == 'existing':
                # Overlay any already-created instance parameter objects.
                if self_.self._instance__params:
                    return dict(pdict, **self_.self._instance__params)
                return pdict
            else:
                # Indexing via .param[k] creates instance parameters lazily.
                return {k: self_.self.param[k] for k in pdict}
        return pdict
    def trigger(self_, *param_names):
        """
        Trigger watchers for the given set of parameter names. Watchers
        will be triggered whether or not the parameter values have
        actually changed.
        """
        # Stash any queued events/watchers so that the synthetic set_param
        # below does not mix with a batch already in progress.
        events = self_.self_or_cls.param._events
        watchers = self_.self_or_cls.param._watchers
        self_.self_or_cls.param._events = []
        self_.self_or_cls.param._watchers = []
        param_values = dict(self_.get_param_values())
        params = {name: param_values[name] for name in param_names}
        # _TRIGGER makes _call_watcher fire even when values are unchanged.
        self_.self_or_cls.param._TRIGGER = True
        self_.set_param(**params)
        self_.self_or_cls.param._TRIGGER = False
        # Restore the previously queued events/watchers.
        self_.self_or_cls.param._events = events
        self_.self_or_cls.param._watchers = watchers
def _update_event_type(self_, watcher, event, triggered):
"""
Returns an updated Event object with the type field set appropriately.
"""
if triggered:
event_type = 'triggered'
else:
event_type = 'changed' if watcher.onlychanged else 'set'
return Event(what=event.what, name=event.name, obj=event.obj, cls=event.cls,
old=event.old, new=event.new, type=event_type)
    def _call_watcher(self_, watcher, event):
        """
        Invoke the given the watcher appropriately given a Event object.
        """
        # During a trigger() call, fire regardless of onlychanged.
        if self_.self_or_cls.param._TRIGGER:
            pass
        elif watcher.onlychanged and (not self_._changed(event)):
            return
        if self_.self_or_cls.param._BATCH_WATCH:
            # Inside a batch: queue the event and watcher for
            # _batch_call_watchers instead of firing now.
            self_._events.append(event)
            if watcher not in self_._watchers:
                self_._watchers.append(watcher)
        elif watcher.mode == 'args':
            # 'args' mode: callback receives the Event object itself.
            with batch_watch(self_.self_or_cls, run=False):
                watcher.fn(self_._update_event_type(watcher, event, self_.self_or_cls.param._TRIGGER))
        else:
            # 'kwargs' mode: callback receives name=new-value keywords.
            with batch_watch(self_.self_or_cls, run=False):
                event = self_._update_event_type(watcher, event, self_.self_or_cls.param._TRIGGER)
                watcher.fn(**{event.name: event.new})
    def _batch_call_watchers(self_):
        """
        Batch call a set of watchers based on the parameter value
        settings in kwargs using the queued Event and watcher objects.
        """
        # Loop because watcher callbacks may queue further events.
        while self_.self_or_cls.param._events:
            # Keep only the latest event per (name, what) pair.
            event_dict = OrderedDict([((event.name, event.what), event)
                                      for event in self_.self_or_cls.param._events])
            watchers = self_.self_or_cls.param._watchers[:]
            self_.self_or_cls.param._events = []
            self_.self_or_cls.param._watchers = []
            for watcher in watchers:
                events = [self_._update_event_type(watcher, event_dict[(name, watcher.what)],
                                                   self_.self_or_cls.param._TRIGGER)
                          for name in watcher.parameter_names
                          if (name, watcher.what) in event_dict]
                with batch_watch(self_.self_or_cls, run=False):
                    if watcher.mode == 'args':
                        watcher.fn(*events)
                    else:
                        watcher.fn(**{c.name:c.new for c in events})
    def set_dynamic_time_fn(self_,time_fn,sublistattr=None):
        """
        Set time_fn for all Dynamic Parameters of this class or
        instance object that are currently being dynamically
        generated.
        Additionally, sets _Dynamic_time_fn=time_fn on this class or
        instance object, so that any future changes to Dynamic
        Parameters can inherit time_fn (e.g. if a Number is changed
        from a float to a number generator, the number generator will
        inherit time_fn).
        If specified, sublistattr is the name of an attribute of this
        class or instance that contains an iterable collection of
        subobjects on which set_dynamic_time_fn should be called. If
        the attribute sublistattr is present on any of the subobjects,
        set_dynamic_time_fn() will be called for those, too.
        """
        self_or_cls = self_.self_or_cls
        self_or_cls._Dynamic_time_fn = time_fn
        # _value_is_dynamic takes (obj, objtype); build the right argument
        # pair depending on whether we hold a class or an instance.
        if isinstance(self_or_cls,type):
            a = (None,self_or_cls)
        else:
            a = (self_or_cls,)
        for n,p in self_or_cls.param.objects('existing').items():
            if hasattr(p, '_value_is_dynamic'):
                if p._value_is_dynamic(*a):
                    g = self_or_cls.param.get_value_generator(n)
                    g._Dynamic_time_fn = time_fn
        if sublistattr:
            try:
                sublist = getattr(self_or_cls,sublistattr)
            except AttributeError:
                sublist = []
            # Recurse into each subobject in the named collection.
            for obj in sublist:
                obj.param.set_dynamic_time_fn(time_fn,sublistattr)
    def get_param_values(self_,onlychanged=False):
        """
        Return a list of name,value pairs for all Parameters of this
        object.
        When called on an instance with onlychanged set to True, will
        only return values that are not equal to the default value
        (onlychanged has no effect when called on a class).
        """
        self_or_cls = self_.self_or_cls
        # CEB: we'd actually like to know whether a value has been
        # explicitly set on the instance, but I'm not sure that's easy
        # (would need to distinguish instantiation of default from
        # user setting of value).
        vals = []
        for name,val in self_or_cls.param.objects('existing').items():
            value = self_or_cls.param.get_value_generator(name)
            # (this is pointless for cls)
            if not onlychanged or not all_equal(value,val.default):
                vals.append((name,value))
        # Sort alphabetically by parameter name for a stable ordering.
        vals.sort(key=itemgetter(0))
        return vals
def force_new_dynamic_value(self_, name): # pylint: disable-msg=E0213
"""
Force a new value to be generated for the dynamic attribute
name, and return it.
If name is not dynamic, its current value is returned
(i.e. equivalent to getattr(name).
"""
cls_or_slf = self_.self_or_cls
param_obj = cls_or_slf.param.objects('existing').get(name)
if not param_obj:
return getattr(cls_or_slf, name)
cls, slf = None, None
if isinstance(cls_or_slf,type):
cls = cls_or_slf
else:
slf = cls_or_slf
if not hasattr(param_obj,'_force'):
return param_obj.__get__(slf, cls)
else:
return param_obj._force(slf, cls)
    def get_value_generator(self_,name): # pylint: disable-msg=E0213
        """
        Return the value or value-generating object of the named
        attribute.
        For most parameters, this is simply the parameter's value
        (i.e. the same as getattr()), but Dynamic parameters have
        their value-generating object returned.
        """
        cls_or_slf = self_.self_or_cls
        param_obj = cls_or_slf.param.objects('existing').get(name)
        if not param_obj:
            value = getattr(cls_or_slf,name)
        # CompositeParameter detected by being a Parameter and having 'attribs'
        elif hasattr(param_obj,'attribs'):
            # Recurse over each sub-attribute of the composite.
            value = [cls_or_slf.param.get_value_generator(a) for a in param_obj.attribs]
        # not a Dynamic Parameter
        elif not hasattr(param_obj,'_value_is_dynamic'):
            value = getattr(cls_or_slf,name)
        # Dynamic Parameter...
        else:
            internal_name = "_%s_param_value"%name
            if hasattr(cls_or_slf,internal_name):
                # dealing with object and it's been set on this object
                value = getattr(cls_or_slf,internal_name)
            else:
                # dealing with class or isn't set on the object
                value = param_obj.default
        return value
    def inspect_value(self_,name): # pylint: disable-msg=E0213
        """
        Return the current value of the named attribute without modifying it.
        Same as getattr() except for Dynamic parameters, which have their
        last generated value returned.
        """
        cls_or_slf = self_.self_or_cls
        param_obj = cls_or_slf.param.objects('existing').get(name)
        if not param_obj:
            # Not a Parameter: ordinary attribute lookup.
            value = getattr(cls_or_slf,name)
        elif hasattr(param_obj,'attribs'):
            # Composite parameter: inspect each sub-attribute.
            value = [cls_or_slf.param.inspect_value(a) for a in param_obj.attribs]
        elif not hasattr(param_obj,'_inspect'):
            value = getattr(cls_or_slf,name)
        else:
            # Dynamic parameter: _inspect takes (instance, class).
            if isinstance(cls_or_slf,type):
                value = param_obj._inspect(None,cls_or_slf)
            else:
                value = param_obj._inspect(cls_or_slf,None)
        return value
    def params_depended_on(self_,name):
        """Return the dependency records (PInfo/MInfo) for the named method."""
        return _params_depended_on(MInfo(cls=self_.cls,inst=self_.self,name=name,method=getattr(self_.self_or_cls,name)))
    def outputs(self_):
        """
        Returns a mapping between any declared outputs and a tuple
        of the declared Parameter type, the output method, and the
        index into the output if multiple outputs are returned.
        """
        outputs = {}
        for cls in classlist(self_.cls):
            for name in dir(cls):
                method = getattr(self_.self_or_cls, name)
                # Methods decorated with @output carry a _dinfo dict.
                dinfo = getattr(method, '_dinfo', {})
                if 'outputs' not in dinfo:
                    continue
                # NOTE(review): 'name' is rebound when an override is given,
                # so any later output of the same method without an override
                # reuses the previous override's name -- confirm intended.
                for override, otype, idx in dinfo['outputs']:
                    if override is not None:
                        name = override
                    outputs[name] = (otype, method, idx)
        return outputs
    def _spec_to_obj(self_,spec):
        """
        Resolve a dependency spec string ("dotted.path.attr:what") into a
        list of PInfo (parameter) or MInfo (method) dependency records.
        """
        # TODO: when we decide on spec, this method should be
        # rewritten
        assert spec.count(":")<=1
        spec = spec.strip()
        # Split into the attribute path and the optional ':what' slot name.
        m = re.match("(?P<path>[^:]*):?(?P<what>.*)", spec)
        what = m.group('what')
        path = "."+m.group('path')
        m = re.match(r"(?P<obj>.*)(\.)(?P<attr>.*)",path)
        obj = m.group('obj')
        attr = m.group("attr")
        # Resolve the owning object; empty obj means this object itself.
        src = self_.self_or_cls if obj=='' else _getattrr(self_.self_or_cls,obj[1::])
        cls,inst = (src, None) if isinstance(src, type) else (type(src), src)
        if attr == 'param':
            # 'x.param' depends on every parameter of x (plus x itself).
            dependencies = self_._spec_to_obj(obj[1:])
            for p in src.param:
                dependencies += src.param._spec_to_obj(p)
            return dependencies
        elif attr in src.param:
            # Default slot is 'value' when no ':what' suffix was given.
            what = what if what != '' else 'value'
            info = PInfo(inst=inst, cls=cls, name=attr,
                         pobj=src.param[attr], what=what)
        else:
            info = MInfo(inst=inst, cls=cls, name=attr,
                         method=getattr(src,attr))
        return [info]
    def _watch(self_, action, watcher, what='value', operation='add'): #'add' | 'remove'
        """
        Apply the named list operation ('append'/'remove') for the watcher
        on the appropriate registry: the instance's _param_watchers when
        watching an instance's parameter values, otherwise the Parameter
        object's own watchers dict.
        """
        parameter_names = watcher.parameter_names
        for parameter_name in parameter_names:
            assert parameter_name in self_.cls.param
            if self_.self is not None and what == "value":
                # Instance-level registry: {param_name: {what: [watchers]}}.
                watchers = self_.self._param_watchers
                if parameter_name not in watchers:
                    watchers[parameter_name] = {}
                if what not in watchers[parameter_name]:
                    watchers[parameter_name][what] = []
                getattr(watchers[parameter_name][what], action)(watcher)
            else:
                # Class-level registry kept on the Parameter object itself.
                watchers = self_[parameter_name].watchers
                if what not in watchers:
                    watchers[what] = []
                getattr(watchers[what], action)(watcher)
def watch(self_,fn,parameter_names, what='value', onlychanged=True):
parameter_names = tuple(parameter_names) if isinstance(parameter_names, list) else (parameter_names,)
watcher = Watcher(inst=self_.self, cls=self_.cls, fn=fn, mode='args',
onlychanged=onlychanged, parameter_names=parameter_names,
what=what)
self_._watch('append', watcher, what)
return watcher
def unwatch(self_,watcher):
"""
Unwatch watchers set either with watch or watch_values.
"""
try:
self_._watch('remove',watcher)
except:
self_.warning('No such watcher {watcher} to remove.'.format(watcher=watcher))
    def watch_values(self_,fn,parameter_names,what='value', onlychanged=True):
        """
        Register fn to be invoked with name=new_value keyword arguments
        whenever any of the named parameters change; returns the Watcher.
        """
        parameter_names = tuple(parameter_names) if isinstance(parameter_names, list) else (parameter_names,)
        # NOTE(review): the Watcher is created with what='value' but is
        # registered under the caller-supplied 'what' -- confirm that a
        # non-'value' argument behaves as intended here.
        watcher = Watcher(inst=self_.self, cls=self_.cls, fn=fn,
                          mode='kwargs', onlychanged=onlychanged,
                          parameter_names=parameter_names, what='value')
        self_._watch('append', watcher, what)
        return watcher
    # Instance methods
    def defaults(self_):
        """
        Return {parameter_name:parameter.default} for all non-constant
        Parameters.
        Note that a Parameter for which instantiate==True has its default
        instantiated.
        """
        self = self_.self
        d = {}
        for param_name,param in self.param.objects('existing').items():
            if param.constant:
                # Constant parameters are excluded from the result.
                pass
            elif param.instantiate:
                # Deep-copies the default into d rather than sharing it.
                self.param._instantiate_param(param,dict_=d,key=param_name)
            else:
                d[param_name]=param.default
        return d
# CEBALERT: designed to avoid any processing unless the print
# level is high enough, but not all callers of message(),
# verbose(), debug(), etc are taking advantage of this. Need to
# document, and also check other ioam projects.
def __db_print(self_,level,msg,*args,**kw):
"""
Calls the logger returned by the get_logger() function,
prepending the result of calling dbprint_prefix() (if any).
See python's logging module for details.
"""
self_or_cls = self_.self_or_cls
if get_logger(name=self_or_cls.name).isEnabledFor(level):
if dbprint_prefix and callable(dbprint_prefix):
msg = dbprint_prefix() + ": " + msg # pylint: disable-msg=E1102
get_logger(name=self_or_cls.name).log(level, msg, *args, **kw)
def print_param_values(self_):
"""Print the values of all this object's Parameters."""
self = self_.self
for name,val in self.param.get_param_values():
print('%s.%s = %s' % (self.name,name,val))
    def warning(self_, msg,*args,**kw):
        """
        Print msg merged with args as a warning, unless module variable
        warnings_as_exceptions is True, then raise an Exception
        containing the arguments.
        See Python's logging module for details of message formatting.
        """
        if not warnings_as_exceptions:
            # Track the total number of warnings emitted module-wide.
            global warning_count
            warning_count+=1
            self_.__db_print(WARNING,msg,*args,**kw)
        else:
            # NOTE(review): kw is dropped on this path -- only positional
            # args participate in the formatted exception message.
            raise Exception("Warning: " + msg % args)
    def message(self_,msg,*args,**kw):
        """
        Print msg merged with args as a message.
        See Python's logging module for details of message formatting.
        """
        # Delegates to the shared logging helper at INFO level.
        self_.__db_print(INFO,msg,*args,**kw)
    def verbose(self_,msg,*args,**kw):
        """
        Print msg merged with args as a verbose message.
        See Python's logging module for details of message formatting.
        """
        # Delegates to the shared logging helper at the custom VERBOSE level.
        self_.__db_print(VERBOSE,msg,*args,**kw)
    def debug(self_,msg,*args,**kw):
        """
        Print msg merged with args as a debugging statement.
        See Python's logging module for details of message formatting.
        """
        # Delegates to the shared logging helper at DEBUG level.
        self_.__db_print(DEBUG,msg,*args,**kw)
# CEBALERT: I think I've noted elsewhere the fact that we
# sometimes have a method on Parameter that requires passing the
# owning Parameterized instance or class, and other times we have
# the method on Parameterized itself. In case I haven't written
# that down elsewhere, here it is again. We should clean that up
# (at least we should be consistent).
# CEBALERT: it is time to clean up this bothmethod machinery and the
# repeated boilerplate in the methods that use it.
class ParameterizedMetaclass(type):
    """
    The metaclass of Parameterized (and all its descendants).
    The metaclass overrides type.__setattr__ to allow us to set
    Parameter values on classes without overwriting the attribute
    descriptor. That is, for a Parameterized class of type X with a
    Parameter y, the user can type X.y=3, which sets the default value
    of Parameter y to be 3, rather than overwriting y with the
    constant value 3 (and thereby losing all other info about that
    Parameter, such as the doc string, bounds, etc.).
    The __init__ method is used when defining a Parameterized class,
    usually when the module where that class is located is imported
    for the first time. That is, the __init__ in this metaclass
    initializes the *class* object, while the __init__ method defined
    in each Parameterized class is called for each new instance of
    that class.
    Additionally, a class can declare itself abstract by having an
    attribute __abstract set to True. The 'abstract' attribute can be
    used to find out if a class is abstract or not.
    """
    def __init__(mcs,name,bases,dict_):
        """
        Initialize the class object (not an instance of the class, but
        the class itself).
        Initializes all the Parameters by looking up appropriate
        default values (see __param_inheritance()) and setting
        attrib_names (see _set_names()).
        """
        type.__init__(mcs,name,bases,dict_)
        # Give Parameterized classes a useful 'name' attribute.
        # (Could instead consider changing the instance Parameter
        # 'name' to '__name__'?)
        mcs.name = name
        # Each class gets its own Parameters accessor namespace.
        mcs.param = Parameters(mcs)
        # All objects (with their names) of type Parameter that are
        # defined in this class
        parameters = [(n,o) for (n,o) in dict_.items()
                      if isinstance(o,Parameter)]
        for param_name,param in parameters:
            mcs._initialize_parameter(param_name,param)
        # retrieve depends info from methods and store more conveniently
        dependers = [(n,m._dinfo) for (n,m) in dict_.items()
                     if hasattr(m,'_dinfo')]
        _watch = []
        # TODO: probably copy dependencies here too and have
        # everything else access from here rather than from method
        # object
        for n,dinfo in dependers:
            if dinfo.get('watch', False):
                _watch.append(n)
        mcs.param._depends = {'watch':_watch}
        if docstring_signature:
            mcs.__class_docstring_signature()
    def __class_docstring_signature(mcs, max_repr_len=15):
        """
        Autogenerate a keyword signature in the class docstring for
        all available parameters. This is particularly useful in the
        IPython Notebook as IPython will parse this signature to allow
        tab-completion of keywords.
        max_repr_len: Maximum length (in characters) of value reprs.
        """
        processed_kws, keyword_groups = set(), []
        # Walk the MRO from base to derived, collecting each parameter
        # name only once (at its most basic declaration).
        for cls in reversed(mcs.mro()):
            keyword_group = []
            for (k,v) in sorted(cls.__dict__.items()):
                if isinstance(v, Parameter) and k not in processed_kws:
                    param_type = v.__class__.__name__
                    keyword_group.append("%s=%s" % (k, param_type))
                    processed_kws.add(k)
            keyword_groups.append(keyword_group)
        # Reverse again so the most derived class's keywords come first.
        keywords = [el for grp in reversed(keyword_groups) for el in grp]
        class_docstr = "\n"+mcs.__doc__ if mcs.__doc__ else ''
        signature = "params(%s)" % (", ".join(keywords))
        description = param_pager(mcs) if (docstring_describe_params and param_pager) else ''
        mcs.__doc__ = signature + class_docstr + '\n' + description
    def _initialize_parameter(mcs,param_name,param):
        # parameter has no way to find out the name a
        # Parameterized class has for it
        param._set_names(param_name)
        mcs.__param_inheritance(param_name,param)
    # Python 2.6 added abstract base classes; see
    # https://github.com/ioam/param/issues/84
    def __is_abstract(mcs):
        """
        Return True if the class has an attribute __abstract set to True.
        Subclasses will return False unless they themselves have
        __abstract set to true. This mechanism allows a class to
        declare itself to be abstract (e.g. to avoid it being offered
        as an option in a GUI), without the "abstract" property being
        inherited by its subclasses (at least one of which is
        presumably not abstract).
        """
        # Can't just do ".__abstract", because that is mangled to
        # _ParameterizedMetaclass__abstract before running, but
        # the actual class object will have an attribute
        # _ClassName__abstract. So, we have to mangle it ourselves at
        # runtime.
        try:
            return getattr(mcs,'_%s__abstract'%mcs.__name__)
        except AttributeError:
            return False
    abstract = property(__is_abstract)
    def __setattr__(mcs,attribute_name,value):
        """
        Implements 'self.attribute_name=value' in a way that also supports Parameters.
        If there is already a descriptor named attribute_name, and
        that descriptor is a Parameter, and the new value is *not* a
        Parameter, then call that Parameter's __set__ method with the
        specified value.
        In all other cases set the attribute normally (i.e. overwrite
        the descriptor). If the new value is a Parameter, once it has
        been set we make sure that the value is inherited from
        Parameterized superclasses as described in __param_inheritance().
        """
        # Find out if there's a Parameter called attribute_name as a
        # class attribute of this class - if not, parameter is None.
        parameter,owning_class = mcs.get_param_descriptor(attribute_name)
        if parameter and not isinstance(value,Parameter):
            if owning_class != mcs:
                # Inherited Parameter: copy it down onto this class first
                # so the assignment doesn't mutate the superclass default.
                parameter = copy.copy(parameter)
                parameter.owner = mcs
                type.__setattr__(mcs,attribute_name,parameter)
            mcs.__dict__[attribute_name].__set__(None,value)
        else:
            type.__setattr__(mcs,attribute_name,value)
            if isinstance(value,Parameter):
                mcs.__param_inheritance(attribute_name,value)
            elif isinstance(value,Parameters):
                pass
            else:
                # the purpose of the warning below is to catch
                # mistakes ("thinking you are setting a parameter, but
                # you're not"). There are legitimate times when
                # something needs be set on the class, and we don't
                # want to see a warning then. Such attributes should
                # presumably be prefixed by at least one underscore.
                # (For instance, python's own pickling mechanism
                # caches __slotnames__ on the class:
                # http://mail.python.org/pipermail/python-checkins/2003-February/033517.html.)
                # CEBALERT: this warning bypasses the usual
                # mechanisms, which has consequences for warning
                # counts, warnings as exceptions, etc.
                if not attribute_name.startswith('_'):
                    get_logger().log(WARNING,
                                     "Setting non-Parameter class attribute %s.%s = %s ",
                                     mcs.__name__,attribute_name,repr(value))
    def __param_inheritance(mcs,param_name,param):
        """
        Look for Parameter values in superclasses of this
        Parameterized class.
        Ordinarily, when a Python object is instantiated, attributes
        not given values in the constructor will inherit the value
        given in the object's class, or in its superclasses. For
        Parameters owned by Parameterized classes, we have implemented
        an additional level of default lookup, should this ordinary
        lookup return only None.
        In such a case, i.e. when no non-None value was found for a
        Parameter by the usual inheritance mechanisms, we explicitly
        look for Parameters with the same name in superclasses of this
        Parameterized class, and use the first such value that we
        find.
        The goal is to be able to set the default value (or other
        slots) of a Parameter within a Parameterized class, just as we
        can set values for non-Parameter objects in Parameterized
        classes, and have the values inherited through the
        Parameterized hierarchy as usual.
        Note that instantiate is handled differently: if there is a
        parameter with the same name in one of the superclasses with
        instantiate set to True, this parameter will inherit
        instantiate=True.
        """
        # get all relevant slots (i.e. slots defined in all
        # superclasses of this parameter)
        slots = {}
        for p_class in classlist(type(param))[1::]:
            slots.update(dict.fromkeys(p_class.__slots__))
        # note for some eventual future: python 3.6+ descriptors grew
        # __set_name__, which could replace this and _set_names
        setattr(param,'owner',mcs)
        del slots['owner']
        # backwards compatibility (see Composite parameter)
        if 'objtype' in slots:
            setattr(param,'objtype',mcs)
            del slots['objtype']
        # instantiate is handled specially
        for superclass in classlist(mcs)[::-1]:
            super_param = superclass.__dict__.get(param_name)
            if isinstance(super_param, Parameter) and super_param.instantiate is True:
                param.instantiate=True
        del slots['instantiate']
        for slot in slots.keys():
            superclasses = iter(classlist(mcs)[::-1])
            # Search up the hierarchy until param.slot (which has to
            # be obtained using getattr(param,slot)) is not None, or
            # we run out of classes to search.
            while getattr(param,slot) is None:
                try:
                    param_super_class = next(superclasses)
                except StopIteration:
                    break
                new_param = param_super_class.__dict__.get(param_name)
                if new_param is not None and hasattr(new_param,slot):
                    # (slot might not be there because could be a more
                    # general type of Parameter)
                    new_value = getattr(new_param,slot)
                    setattr(param,slot,new_value)
    def get_param_descriptor(mcs,param_name):
        """
        Goes up the class hierarchy (starting from the current class)
        looking for a Parameter class attribute param_name. As soon as
        one is found as a class attribute, that Parameter is returned
        along with the class in which it is declared.
        """
        classes = classlist(mcs)
        # classlist() is base-first, so iterate reversed to start the
        # search at the most derived class.
        for c in classes[::-1]:
            attribute = c.__dict__.get(param_name)
            if isinstance(attribute,Parameter):
                return attribute,c
        return None,None
# JABALERT: Only partially achieved so far -- objects of the same
# type and parameter values are treated as different, so anything
# for which instantiate == True is reported as being non-default.
# Whether script_repr should avoid reporting the values of parameters
# that are just inheriting their values from the class defaults.
#: When True, script_repr omits parameters still at their class default.
script_repr_suppress_defaults=True
# CEBALERT: How about some defaults?
# Also, do we need an option to return repr without path, if desired?
# E.g. to get 'pre_plot_hooks()' instead of
# 'topo.command.analysis.pre_plot_hooks()' in the gui?
def script_repr(val,imports,prefix,settings):
    """
    Variant of repr() designed for generating a runnable script.
    Instances of types that require special handling can use the
    script_repr_reg dictionary. Using the type as a key, add a
    function that returns a suitable representation of instances of
    that type, and adds the required import statement.
    The repr of a parameter can be suppressed by returning None from
    the appropriate hook in script_repr_reg.
    """
    # Thin wrapper over pprint with fully-qualified names and
    # newline-separated parameters.
    return pprint(val, imports, prefix, settings,
                  unknown_value=None, qualify=True, separator="\n")
# CB: when removing script_repr, merge its docstring here and improve.
# And the ALERT by script_repr about defaults can go.
# CEBALERT: remove settings, add default argument for imports
def pprint(val,imports, prefix="\n ", settings=None,
           unknown_value='<?>', qualify=False, separator=''):
    """
    (Experimental) Pretty printed representation of a parameterized
    object that may be evaluated with eval.
    Similar to repr except introspection of the constructor (__init__)
    ensures a valid and succinct representation is generated.
    Only parameters are represented (whether specified as standard,
    positional, or keyword arguments). Parameters specified as
    positional arguments are always shown, followed by modified
    parameters specified as keyword arguments, sorted by precedence.
    unknown_value determines what to do where a representation cannot be
    generated for something required to recreate the object. Such things
    include non-parameter positional and keyword arguments, and certain
    values of parameters (e.g. some random state objects).
    Supplying an unknown_value of None causes unrepresentable things
    to be silently ignored. If unknown_value is a string, that
    string will appear in place of any unrepresentable things. If
    unknown_value is False, an Exception will be raised if an
    unrepresentable value is encountered.
    If supplied, imports should be a list, and it will be populated
    with the set of imports required for the object and all of its
    parameter values.
    If qualify is True, the class's path will be included (e.g. "a.b.C()"),
    otherwise only the class will appear ("C()").
    Parameters will be separated by a comma only by default, but the
    separator parameter allows an additional separator to be supplied
    (e.g. a newline could be supplied to have each Parameter appear on a
    separate line).
    NOTE: pprint will replace script_repr in a future version of
    param, but is not yet a complete replacement for script_repr.
    """
    # CB: doc prefix & settings or realize they don't need to be
    # passed around, etc.
    # JLS: The settings argument is not used anywhere. To be removed
    # in a separate PR.
    # None sentinel replaces the previous mutable default ([]), which was
    # a single list object shared across every call.
    if settings is None:
        settings = []
    # Dispatch in priority order: classes, registered handlers, objects
    # providing their own script_repr/pprint, then plain repr().
    if isinstance(val,type):
        rep = type_script_repr(val,imports,prefix,settings)
    elif type(val) in script_repr_reg:
        rep = script_repr_reg[type(val)](val,imports,prefix,settings)
    # CEBALERT: remove with script_repr
    elif hasattr(val,'script_repr'):
        rep=val.script_repr(imports, prefix+" ")
    elif hasattr(val,'pprint'):
        rep=val.pprint(imports=imports, prefix=prefix+" ",
                       qualify=qualify, unknown_value=unknown_value,
                       separator=separator)
    else:
        rep=repr(val)
    return rep
#: see script_repr()
#: Registry mapping a type to the handler used to script-repr its instances.
script_repr_reg = {}
# currently only handles list and tuple
def container_script_repr(container,imports,prefix,settings):
    """Return a script repr for a list or tuple by pprinting each element."""
    parts = [pprint(item, imports, prefix, settings) for item in container]
    ## (hack to get container brackets)
    if isinstance(container, list):
        open_b, close_b = '[', ']'
    elif isinstance(container, tuple):
        open_b, close_b = '(', ')'
    else:
        raise NotImplementedError
    # no imports to add for built-in types
    return open_b + ','.join(parts) + close_b
def empty_script_repr(*args): # pyflakes:ignore (unused arguments):
    """script_repr callback that suppresses output: answers None no
    matter what arguments it receives."""
# Suppress script_repr for objects not yet having a useful string
# representation.  random is part of the standard library, so its
# registration must not depend on numpy being importable (previously it
# sat inside the numpy try-block and was silently skipped when numpy
# was absent).
import random
script_repr_reg[random.Random] = empty_script_repr
try:
    import numpy
    script_repr_reg[numpy.random.RandomState] = empty_script_repr
except ImportError:
    pass # Support added only if numpy is available
# why I have to type prefix and settings?
def function_script_repr(fn,imports,prefix,settings):
    """Return a runnable representation of function fn as
    '<module>.<name>', recording the required module import in
    imports."""
    mod_name = fn.__module__
    imports.append('import %s' % mod_name)
    return '%s.%s' % (mod_name, fn.__name__)
def type_script_repr(type_,imports,prefix,settings):
    """
    Return a runnable representation of the class type_ as
    '<module>.<name>', recording the module import in imports.
    Builtin types need no import statement.
    """
    module = type_.__module__
    # Bug fix: recognize the builtin module under both its Python 2 name
    # ('__builtin__') and its Python 3 name ('builtins'); previously only
    # the Python 2 spelling was checked, so on Python 3 a spurious
    # 'import builtins' was recorded for every builtin type.
    if module not in ('__builtin__', 'builtins'):
        imports.append('import %s'%module)
    return module+'.'+type_.__name__
# Register the default handlers for the container and function types
# handled above.
script_repr_reg[list]=container_script_repr
script_repr_reg[tuple]=container_script_repr
script_repr_reg[FunctionType]=function_script_repr

#: If not None, the value of this Parameter will be called (using '()')
#: before every call to __db_print, and is expected to evaluate to a
#: string that is suitable for prefixing messages and warnings (such
#: as some indicator of the global state).
dbprint_prefix=None
@add_metaclass(ParameterizedMetaclass)
class Parameterized(object):
    """
    Base class for named objects that support Parameters and message
    formatting.

    Automatic object naming: Every Parameterized instance has a name
    parameter. If the user doesn't designate a name=<str> argument
    when constructing the object, the object will be given a name
    consisting of its class name followed by a unique 5-digit number.

    Automatic parameter setting: The Parameterized __init__ method
    will automatically read the list of keyword parameters. If any
    keyword matches the name of a Parameter (see Parameter class)
    defined in the object's class or any of its superclasses, that
    parameter in the instance will get the value given as a keyword
    argument. For example:

      class Foo(Parameterized):
         xx = Parameter(default=1)

      foo = Foo(xx=20)

    in this case foo.xx gets the value 20.

    When initializing a Parameterized instance ('foo' in the example
    above), the values of parameters can be supplied as keyword
    arguments to the constructor (using parametername=parametervalue);
    these values will override the class default values for this one
    instance.

    If no 'name' parameter is supplied, self.name defaults to the
    object's class name with a unique number appended to it.

    Message formatting: Each Parameterized instance has several
    methods for optionally printing output. This functionality is
    based on the standard Python 'logging' module; using the methods
    provided here, wraps calls to the 'logging' module's root logger
    and prepends each message with information about the instance
    from which the call was made. For more information on how to set
    the global logging level and change the default message prefix,
    see documentation for the 'logging' module.
    """

    name = String(default=None,constant=True,doc="""
        String identifier for this object.""")

    def __init__(self,**params):
        """
        Initialize this instance: set up its per-instance param
        namespace, apply any supplied keyword parameters, and wire up
        watched dependencies declared on the class.
        """
        global object_count

        # Flag that can be tested to see if e.g. constant Parameters
        # can still be set
        self.initialized=False
        # Override class level param namespace with instance namespace
        self.param = Parameters(self.__class__, self=self)
        self._instance__params = {}
        self._param_watchers = {}
        self.param._generate_name()
        self.param._setup_params(**params)
        object_count += 1

        # add watched dependencies
        for cls in classlist(self.__class__):
            if not issubclass(cls, Parameterized):
                continue
            for n in cls.param._depends['watch']:
                # TODO: should improve this - will happen for every
                # instantiation of Parameterized with watched deps. Will
                # probably store expanded deps on class - see metaclass
                # 'dependers'.
                for p in self.param.params_depended_on(n):
                    # TODO: can't remember why not just pass m (rather than _m_caller) here
                    (p.inst or p.cls).param.watch(_m_caller(self,n),p.name,p.what)

        self.initialized=True

    # 'Special' methods

    def __getstate__(self):
        """
        Save the object's state: return a dictionary that is a shallow
        copy of the object's __dict__ and that also includes the
        object's __slots__ (if it has any).
        """
        # remind me, why is it a copy? why not just state.update(self.__dict__)?
        state = self.__dict__.copy()
        for slot in get_occupied_slots(self):
            state[slot] = getattr(self,slot)

        # Note that Parameterized object pickling assumes that
        # attributes to be saved are only in __dict__ or __slots__
        # (the standard Python places to store attributes, so that's a
        # reasonable assumption). (Additionally, class attributes that
        # are Parameters are also handled, even when they haven't been
        # instantiated - see PickleableClassAttributes.)

        return state

    def __setstate__(self, state):
        """
        Restore objects from the state dictionary to this object.

        During this process the object is considered uninitialized.
        """
        self.initialized=False

        # Supply defaults for attributes missing from states pickled by
        # older versions that predate these two dictionaries.
        if '_instance__params' not in state:
            state['_instance__params'] = {}
        if '_param_watchers' not in state:
            state['_param_watchers'] = {}

        for name,value in state.items():
            setattr(self,name,value)
        self.initialized=True

    def __repr__(self):
        """
        Provide a nearly valid Python representation that could be used to recreate
        the item with its parameters, if executed in the appropriate environment.

        Returns 'classname(parameter1=x,parameter2=y,...)', listing
        all the parameters of this object.
        """
        try:
            settings = ['%s=%s' % (name, repr(val))
                        for name,val in self.param.get_param_values()]
        except RuntimeError: # Handle recursion in parameter depth
            settings = []
        return self.__class__.__name__ + "(" + ", ".join(settings) + ")"

    def __str__(self):
        """Return a short representation of the name and class of this object."""
        return "<%s %s>" % (self.__class__.__name__,self.name)

    def script_repr(self,imports=[],prefix=" "):
        """
        Variant of __repr__ designed for generating a runnable script.

        NOTE(review): the mutable default for `imports` is shared across
        calls and is mutated inside pprint() (imports[:] assignment and
        append), so entries accumulate between invocations -- confirm
        whether this is intended.
        """
        return self.pprint(imports,prefix, unknown_value=None, qualify=True,
                           separator="\n")

    # CEBALERT: not yet properly documented
    def pprint(self, imports=None, prefix=" ", unknown_value='<?>',
               qualify=False, separator=""):
        """
        (Experimental) Pretty printed representation that may be
        evaluated with eval. See pprint() function for more details.
        """
        if imports is None:
            imports = []
        # CEBALERT: imports should just be a set rather than a list;
        # change in next release?
        imports[:] = list(set(imports))
        # Generate import statement
        mod = self.__module__
        bits = mod.split('.')
        imports.append("import %s"%mod)
        imports.append("import %s"%bits[0])

        changed_params = dict(self.param.get_param_values(onlychanged=script_repr_suppress_defaults))
        values = dict(self.param.get_param_values())
        # NOTE(review): inspect.getargspec is deprecated and was removed
        # in Python 3.11; getfullargspec is the modern replacement.
        spec = inspect.getargspec(self.__init__)
        args = spec.args[1:] if spec.args[0] == 'self' else spec.args

        if spec.defaults is not None:
            posargs = spec.args[:-len(spec.defaults)]
            kwargs = dict(zip(spec.args[-len(spec.defaults):], spec.defaults))
        else:
            # NOTE(review): kwargs is a list here but a dict in the other
            # branch; 'k in kwargs' below works for both.
            posargs, kwargs = args, []

        parameters = self.param.objects('existing')
        ordering = sorted(
            sorted(changed_params), # alphanumeric tie-breaker
            key=lambda k: (- float('inf') # No precedence is lowest possible precendence
                           if parameters[k].precedence is None else
                           parameters[k].precedence))

        arglist, keywords, processed = [], [], []
        for k in args + ordering:
            if k in processed: continue

            # Suppresses automatically generated names.
            if k == 'name' and (values[k] is not None
                                and re.match('^'+self.__class__.__name__+'[0-9]+$', values[k])):
                continue

            value = pprint(values[k], imports, prefix=prefix,settings=[],
                           unknown_value=unknown_value,
                           qualify=qualify) if k in values else None

            if value is None:
                if unknown_value is False:
                    raise Exception("%s: unknown value of %r" % (self.name,k))
                elif unknown_value is None:
                    # i.e. suppress repr
                    continue
                else:
                    value = unknown_value

            # Explicit kwarg (unchanged, known value)
            if (k in kwargs) and (k in values) and kwargs[k] == values[k]: continue

            if k in posargs:
                # value will be unknown_value unless k is a parameter
                arglist.append(value)
            elif k in kwargs or (spec.keywords is not None):
                # Explicit modified keywords or parameters in
                # precendence order (if **kwargs present)
                keywords.append('%s=%s' % (k, value))

            processed.append(k)

        qualifier = mod + '.' if qualify else ''
        arguments = arglist + keywords + (['**%s' % spec.varargs] if spec.varargs else [])
        return qualifier + '%s(%s)' % (self.__class__.__name__, (','+separator+prefix).join(arguments))

    # CEBALERT: note there's no state_push method on the class, so
    # dynamic parameters set on a class can't have state saved. This
    # is because, to do this, state_push() would need to be a
    # @bothmethod, but that complicates inheritance in cases where we
    # already have a state_push() method. I need to decide what to do
    # about that. (isinstance(g,Parameterized) below is used to exclude classes.)

    def state_push(self):
        """
        Save this instance's state.

        For Parameterized instances, this includes the state of
        dynamically generated values.

        Subclasses that maintain short-term state should additionally
        save and restore that state using state_push() and
        state_pop().

        Generally, this method is used by operations that need to test
        something without permanently altering the objects' state.
        """
        for pname, p in self.param.objects('existing').items():
            g = self.param.get_value_generator(pname)
            if hasattr(g,'_Dynamic_last'):
                g._saved_Dynamic_last.append(g._Dynamic_last)
                g._saved_Dynamic_time.append(g._Dynamic_time)
                # CB: not storing the time_fn: assuming that doesn't
                # change.
            elif hasattr(g,'state_push') and isinstance(g,Parameterized):
                g.state_push()

    def state_pop(self):
        """
        Restore the most recently saved state.

        See state_push() for more details.
        """
        for pname, p in self.param.objects('existing').items():
            g = self.param.get_value_generator(pname)
            if hasattr(g,'_Dynamic_last'):
                g._Dynamic_last = g._saved_Dynamic_last.pop()
                g._Dynamic_time = g._saved_Dynamic_time.pop()
            elif hasattr(g,'state_pop') and isinstance(g,Parameterized):
                g.state_pop()

    # API to be accessed via param namespace

    # The methods below are deprecated aliases that simply forward to
    # the equivalent methods on the .param namespace object.

    @classmethod
    @Parameters.deprecate
    def _add_parameter(cls, param_name,param_obj):
        return cls.param._add_parameter(param_name,param_obj)

    @bothmethod
    @Parameters.deprecate
    def params(cls,parameter_name=None):
        return cls.param.params(parameter_name=parameter_name)

    @classmethod
    @Parameters.deprecate
    def set_default(cls,param_name,value):
        return cls.param.set_default(param_name,value)

    @classmethod
    @Parameters.deprecate
    def print_param_defaults(cls):
        return cls.param.print_param_defaults()

    @bothmethod
    @Parameters.deprecate
    def set_param(self_or_cls,*args,**kwargs):
        return self_or_cls.param.set_param(*args,**kwargs)

    @bothmethod
    @Parameters.deprecate
    def set_dynamic_time_fn(self_or_cls,time_fn,sublistattr=None):
        return self_or_cls.param.set_dynamic_time_fn(time_fn,sublistattr=sublistattr)

    @bothmethod
    @Parameters.deprecate
    def get_param_values(self_or_cls,onlychanged=False):
        return self_or_cls.param.get_param_values(onlychanged=onlychanged)

    @bothmethod
    @Parameters.deprecate
    def force_new_dynamic_value(cls_or_slf,name): # pylint: disable-msg=E0213
        return cls_or_slf.param.force_new_dynamic_value(name)

    @bothmethod
    @Parameters.deprecate
    def get_value_generator(cls_or_slf,name): # pylint: disable-msg=E0213
        return cls_or_slf.param.get_value_generator(name)

    @bothmethod
    @Parameters.deprecate
    def inspect_value(cls_or_slf,name): # pylint: disable-msg=E0213
        return cls_or_slf.param.inspect_value(name)

    @Parameters.deprecate
    def _set_name(self,name):
        return self.param._set_name(name)

    @Parameters.deprecate
    def __db_print(self,level,msg,*args,**kw):
        # Name mangling applies here: this resolves to
        # self.param._Parameterized__db_print.
        return self.param.__db_print(level,msg,*args,**kw)

    @Parameters.deprecate
    def warning(self,msg,*args,**kw):
        return self.param.warning(msg,*args,**kw)

    @Parameters.deprecate
    def message(self,msg,*args,**kw):
        return self.param.message(msg,*args,**kw)

    @Parameters.deprecate
    def verbose(self,msg,*args,**kw):
        return self.param.verbose(msg,*args,**kw)

    @Parameters.deprecate
    def debug(self,msg,*args,**kw):
        return self.param.debug(msg,*args,**kw)

    @Parameters.deprecate
    def print_param_values(self):
        return self.param.print_param_values()

    @Parameters.deprecate
    def defaults(self):
        return self.param.defaults()
def print_all_param_defaults():
    """Print the default values for all imported Parameters."""
    ruler = "_______________________________________________________________________________"
    print(ruler)
    print("")
    print(" Parameter Default Values")
    print("")
    # Walk every known Parameterized subclass in name order.
    for klass in sorted(descendents(Parameterized), key=lambda c: c.__name__):
        klass.print_param_defaults()
    print(ruler)
# Note that with Python 2.6, a fn's **args no longer has to be a
# dictionary. This might allow us to use a decorator to simplify using
# ParamOverrides (if that does indeed make them simpler to use).
# http://docs.python.org/whatsnew/2.6.html
class ParamOverrides(dict):
    """
    A dictionary that returns the attribute of a specified object if
    that attribute is not present in itself.

    Used to override the parameters of an object.
    """

    # NOTE: Attribute names of this object block parameters of the
    # same name, so all attributes of this object should have names
    # starting with an underscore (_).

    def __init__(self,overridden,dict_,allow_extra_keywords=False):
        """
        If allow_extra_keywords is False, then all keys in the
        supplied dict_ must match parameter names on the overridden
        object (otherwise a warning will be printed).

        If allow_extra_keywords is True, then any items in the
        supplied dict_ that are not also parameters of the overridden
        object will be available via the extra_keywords() method.
        """
        # we'd like __init__ to be fast because it's going to be
        # called a lot. What's the fastest way to move the existing
        # params dictionary into this one? Would
        #  def __init__(self,overridden,**kw):
        #      ...
        #      dict.__init__(self,**kw)
        # be faster/easier to use?
        self._overridden = overridden
        dict.__init__(self,dict_)

        if allow_extra_keywords:
            self._extra_keywords=self._extract_extra_keywords(dict_)
        else:
            self._check_params(dict_)

    def extra_keywords(self):
        """
        Return a dictionary containing items from the originally
        supplied dict_ whose names are not parameters of the
        overridden object.

        NOTE(review): _extra_keywords is only assigned when the instance
        was constructed with allow_extra_keywords=True -- confirm that
        callers respect this.
        """
        return self._extra_keywords

    def param_keywords(self):
        """
        Return a dictionary containing items from the originally
        supplied dict_ whose names are parameters of the
        overridden object (i.e. not extra keywords/parameters).
        """
        return dict((key, self[key]) for key in self if key not in self.extra_keywords())

    def __missing__(self,name):
        # Return 'name' from the overridden object
        return getattr(self._overridden,name)

    def __repr__(self):
        # As dict.__repr__, but indicate the overridden object
        return dict.__repr__(self)+" overriding params from %s"%repr(self._overridden)

    def __getattr__(self,name):
        # Provide 'dot' access to entries in the dictionary.
        # (This __getattr__ method is called only if 'name' isn't an
        # attribute of self.)
        return self.__getitem__(name)

    def __setattr__(self,name,val):
        # Attributes whose name starts with _ are set on self (as
        # normal), but all other attributes are inserted into the
        # dictionary.
        if not name.startswith('_'):
            self.__setitem__(name,val)
        else:
            dict.__setattr__(self,name,val)

    def get(self, key, default=None):
        # NOTE(review): for keys absent from both the dict and the
        # overridden object, __missing__ delegates to getattr, which
        # raises AttributeError rather than the KeyError caught here --
        # confirm whether the default is ever actually returned.
        try:
            return self[key]
        except KeyError:
            return default

    def __contains__(self, key):
        # NOTE(review): this consults self.__dict__ (the instance's
        # attributes) rather than the dictionary's own keys, which looks
        # suspicious -- confirm intended.
        return key in self.__dict__ or key in self._overridden.param

    def _check_params(self,params):
        """
        Print a warning if params contains something that is not a
        Parameter of the overridden object.
        """
        overridden_object_params = list(self._overridden.param)
        for item in params:
            if item not in overridden_object_params:
                self.param.warning("'%s' will be ignored (not a Parameter).",item)

    def _extract_extra_keywords(self,params):
        """
        Return any items in params that are not also
        parameters of the overridden object.
        """
        extra_keywords = {}
        overridden_object_params = list(self._overridden.param)
        for name, val in params.items():
            if name not in overridden_object_params:
                extra_keywords[name]=val
                # CEBALERT: should we remove name from params
                # (i.e. del params[name]) so that it's only available
                # via extra_keywords()?
        return extra_keywords
# Helper function required by ParameterizedFunction.__reduce__
def _new_parameterized(cls):
    # Module-level constructor used during unpickling/copying so that
    # pickle can locate it by qualified name; see the comment in
    # ParameterizedFunction.__reduce__ for why Parameterized.__new__
    # cannot be referenced directly.
    return Parameterized.__new__(cls)
class ParameterizedFunction(Parameterized):
    """
    Acts like a Python function, but with arguments that are Parameters.

    Implemented as a subclass of Parameterized that, when instantiated,
    automatically invokes __call__ and returns the result, instead of
    returning an instance of the class.

    To obtain an instance of this class, call instance().
    """
    __abstract = True

    # CEBALERT: shouldn't this have come from a parent class
    # somewhere?
    def __str__(self):
        """Return a function-like representation, e.g. 'MyFunc()'."""
        return self.__class__.__name__+"()"

    @bothmethod
    def instance(self_or_cls,**params):
        """
        Return an instance of this class, copying parameters from any
        existing instance provided.
        """
        if isinstance (self_or_cls,ParameterizedMetaclass):
            cls = self_or_cls
        else:
            # Called on an instance: merge its current parameter values
            # with any explicitly supplied overrides.
            p = params
            params = dict(self_or_cls.get_param_values())
            params.update(p)
            params.pop('name')
            cls = self_or_cls.__class__
        inst=Parameterized.__new__(cls)
        Parameterized.__init__(inst,**params)
        if 'name' in params:
            inst.__name__ = params['name']
        else:
            inst.__name__ = self_or_cls.name
        return inst

    def __new__(class_,*args,**params):
        # Create and __call__() an instance of this class.
        inst = class_.instance()
        inst.param._set_name(class_.__name__)
        return inst.__call__(*args,**params)

    def __call__(self,*args,**kw):
        """Subclasses implement the function's actual behavior here."""
        raise NotImplementedError("Subclasses must implement __call__.")

    def __reduce__(self):
        # Control reconstruction (during unpickling and copying):
        # ensure that ParameterizedFunction.__new__ is skipped
        state = ParameterizedFunction.__getstate__(self)
        # CB: here it's necessary to use a function defined at the
        # module level rather than Parameterized.__new__ directly
        # because otherwise pickle will find .__new__'s module to be
        # __main__. Pretty obscure aspect of pickle.py, or a bug?
        return (_new_parameterized,(self.__class__,),state)

    def script_repr(self,imports=[],prefix=" "):
        """
        Same as Parameterized.script_repr, except that X.classname(Y
        is replaced with X.classname.instance(Y
        """
        return self.pprint(imports,prefix,unknown_value='',qualify=True,
                           separator="\n")

    def pprint(self, imports=None, prefix="\n ",unknown_value='<?>',
               qualify=False, separator=""):
        """
        Same as Parameterized.pprint, except that X.classname(Y
        is replaced with X.classname.instance(Y
        """
        r = Parameterized.pprint(self,imports,prefix,
                                 unknown_value=unknown_value,
                                 qualify=qualify,separator=separator)
        classname=self.__class__.__name__
        return r.replace(".%s("%classname,".%s.instance("%classname)
class default_label_formatter(ParameterizedFunction):
    "Default formatter to turn parameter names into appropriate widget labels."

    capitalize = Parameter(default=True, doc="""
        Whether or not the label should be capitalized.""")

    replace_underscores = Parameter(default=True, doc="""
        Whether or not underscores should be replaced with spaces.""")

    overrides = Parameter(default={}, doc="""
        Allows custom labels to be specified for specific parameter
        names using a dictionary where key is the parameter name and the
        value is the desired label.""")

    def __call__(self, pname):
        # An explicit override wins outright; otherwise apply the
        # configured cosmetic transformations in order.
        if pname in self.overrides:
            return self.overrides[pname]
        if self.replace_underscores:
            pname = pname.replace('_',' ')
        if self.capitalize:
            # Capitalize only the first character, leaving the rest as-is.
            pname = pname[:1].upper() + pname[1:]
        return pname
# Formatter used to turn Parameter names into widget labels; replace to
# customize labelling globally.
label_formatter = default_label_formatter
# CBENHANCEMENT: should be able to remove overridable_property when we
# switch to Python 2.6:
# "Properties now have three attributes, getter, setter and deleter,
# that are decorators providing useful shortcuts for adding a getter,
# setter or deleter function to an existing property."
# http://docs.python.org/whatsnew/2.6.html
# Renamed & documented version of OProperty from
# infinitesque.net/articles/2005/enhancing%20Python's%20property.xhtml
class overridable_property(object):
    """
    The same as Python's "property" attribute, but allows the accessor
    methods to be overridden in subclasses.

    Accessor lookup is deferred until the property is actually used
    rather than being resolved when the class is first created, so a
    subclass that redefines an accessor method is honoured.
    Based on the emulation of PyProperty_Type() in Objects/descrobject.c
    """

    def __init__(self, fget=None, fset=None, fdel=None, doc=None):
        self.fget = fget
        self.fset = fset
        self.fdel = fdel
        self.__doc__ = doc

    @staticmethod
    def _dispatch(accessor, obj, *args):
        # Anonymous (or nameless) accessors are called directly; named
        # ones are looked up on the instance so subclass overrides of
        # the accessor method take effect.
        if accessor.__name__ == '<lambda>' or not accessor.__name__:
            return accessor(obj, *args)
        return getattr(obj, accessor.__name__)(*args)

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        return self._dispatch(self.fget, obj)

    def __set__(self, obj, value):
        if self.fset is None:
            raise AttributeError("can't set attribute")
        self._dispatch(self.fset, obj, value)

    def __delete__(self, obj):
        if self.fdel is None:
            raise AttributeError("can't delete attribute")
        self._dispatch(self.fdel, obj)
|
{
"content_hash": "94b96423e19a4c36c252d6d1dae66b92",
"timestamp": "",
"source": "github",
"line_count": 2841,
"max_line_length": 131,
"avg_line_length": 37.54347060894052,
"alnum_prop": 0.6119012572542917,
"repo_name": "ioam/param",
"id": "d9abd0462787cf20feff1efaa1367d96dcc05f13",
"size": "106661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "param/parameterized.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "387425"
}
],
"symlink_target": ""
}
|
import webbrowser
import time

# Total number of break reminders to show, and the 1-based loop counter.
n_Break = 3
n_count = 1

# Announce when the timer started so the user can see the baseline.
print ("This program started on:"+ time.ctime())
#print ("Let's go to watch the movie!")

# Every two hours, open the break-time video in the default browser,
# repeating n_Break times in total.
while (n_count<= n_Break):
    time.sleep(2*60*60)  # two hours, in seconds
    webbrowser.open("https://www.youtube.com/watch?v=10r9ozshGVE", new=0, autoraise=True)
    n_count+=1
|
{
"content_hash": "0120ba13c64e498010cb113ddf540c15",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 89,
"avg_line_length": 25.083333333333332,
"alnum_prop": 0.6777408637873754,
"repo_name": "jungjung917/ProgramingFundation",
"id": "8ddec195d85d67d112cb6ad3998edc2396434d50",
"size": "301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "break_time/break_time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "652"
}
],
"symlink_target": ""
}
|
from google.cloud import dialogflow_v2
async def sample_list_versions():
    """List all versions of a Dialogflow agent and print each one.

    Demonstrates VersionsAsyncClient pagination; running it for real
    requires valid credentials and an existing parent resource name.
    """
    # Create a client
    client = dialogflow_v2.VersionsAsyncClient()

    # Initialize request argument(s)
    request = dialogflow_v2.ListVersionsRequest(
        parent="parent_value",
    )

    # Make the request. The async client returns a coroutine that must be
    # awaited to obtain the async pager; without the await, the 'async for'
    # below would fail because a coroutine is not an async iterable.
    page_result = await client.list_versions(request=request)

    # Handle the response
    async for response in page_result:
        print(response)

# [END dialogflow_generated_dialogflow_v2_Versions_ListVersions_async]
|
{
"content_hash": "4a699b2653da210c6833456b5e817e27",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 70,
"avg_line_length": 25.6,
"alnum_prop": 0.708984375,
"repo_name": "googleapis/python-dialogflow",
"id": "05bf63aaf06123446afe9b6ec9bc570bae078b70",
"size": "1522",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/dialogflow_generated_dialogflow_v2_versions_list_versions_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "11184005"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
}
|
import logging
from .sim_state import SimState
from .calling_conventions import DEFAULT_CC, SimRegArg, SimStackArg, PointerWrapper
from .callable import Callable
from .errors import AngrAssemblyError
l = logging.getLogger("angr.factory")

# Names for which a deprecation warning has already been emitted, so each
# deprecated alias warns at most once per process.
_deprecation_cache = set()

def deprecate(name, replacement):
    """
    Decorator factory marking factory.<name> as deprecated in favor of
    factory.<replacement>.  The wrapped function still runs normally; a
    warning is logged the first time each deprecated name is used.
    """
    def wrapper(func):
        import functools  # local import: keep this block self-contained

        # functools.wraps preserves the wrapped function's __name__ and
        # docstring, which the previous version discarded.
        @functools.wraps(func)
        def inner(*args, **kwargs):
            if name not in _deprecation_cache:
                l.warning("factory.%s is deprecated! Please use factory.%s instead.", name, replacement)
                _deprecation_cache.add(name)
            return func(*args, **kwargs)
        return inner
    return wrapper
class AngrObjectFactory(object):
"""
This factory provides access to important analysis elements.
"""
    def __init__(self, project):
        """Build a factory tied to the given angr Project."""
        self.project = project
        # Default calling convention for the project's architecture.
        self._default_cc = DEFAULT_CC[project.arch.name]
    @property
    def default_engine(self):
        # Convenience accessor delegating to the project's engine hub.
        return self.project.engines.default_engine
    @property
    def procedure_engine(self):
        # Convenience accessor delegating to the project's engine hub.
        return self.project.engines.procedure_engine
    def snippet(self, addr, jumpkind=None, **block_opts):
        """
        Return a code node for the given address: a HookNode when the
        address is hooked (unless jumpkind is 'Ijk_NoHook'), otherwise
        the code node of the block lifted at that address.
        """
        if self.project.is_hooked(addr) and jumpkind != 'Ijk_NoHook':
            hook = self.project._sim_procedures[addr]
            # The hook's optional 'length' kwarg gives the size it covers;
            # default to a zero-size node.
            size = hook.kwargs.get('length', 0)
            # NOTE(review): HookNode is not among this module's visible
            # imports -- confirm it is imported elsewhere in the file.
            return HookNode(addr, size, self.project.hooked_by(addr))
        else:
            return self.block(addr, **block_opts).codenode # pylint: disable=no-member
    def successors(self, *args, **kwargs):
        """
        Perform execution using any applicable engine. Enumerate the current engines and use the
        first one that works. Return a SimSuccessors object classifying the results of the run.

        :param state:       The state to analyze
        :param addr:        optional, an address to execute at instead of the state's ip
        :param jumpkind:    optional, the jumpkind of the previous exit
        :param inline:      This is an inline execution. Do not bother copying the state.

        Additional keyword arguments will be passed directly into each engine's process method.
        """
        # All dispatch logic lives in the project's engine hub.
        return self.project.engines.successors(*args, **kwargs)
    def blank_state(self, **kwargs):
        """
        Returns a mostly-uninitialized state object. All parameters are optional.

        :param addr:            The address the state should start at instead of the entry point.
        :param initial_prefix:  If this is provided, all symbolic registers will hold symbolic values with names
                                prefixed by this string.
        :param fs:              A dictionary of file names with associated preset SimFile objects.
        :param concrete_fs:     bool describing whether the host filesystem should be consulted when opening files.
        :param chroot:          A path to use as a fake root directory, behaves similarly to a real chroot. Used only
                                when concrete_fs is set to True.
        :param kwargs:          Any additional keyword args will be passed to the SimState constructor.
        :return:                The blank state.
        :rtype:                 SimState
        """
        # Construction is delegated to the OS-specific SimOS object.
        return self.project.simos.state_blank(**kwargs)
    def entry_state(self, **kwargs):
        """
        Returns a state object representing the program at its entry point. All parameters are optional.

        :param addr:            The address the state should start at instead of the entry point.
        :param initial_prefix:  If this is provided, all symbolic registers will hold symbolic values with names
                                prefixed by this string.
        :param fs:              a dictionary of file names with associated preset SimFile objects.
        :param concrete_fs:     boolean describing whether the host filesystem should be consulted when opening files.
        :param chroot:          a path to use as a fake root directory, behaves similar to a real chroot. Used only
                                when concrete_fs is set to True.
        :param argc:            a custom value to use for the program's argc. May be either an int or a bitvector. If
                                not provided, defaults to the length of args.
        :param args:            a list of values to use as the program's argv. May be mixed strings and bitvectors.
        :param env:             a dictionary to use as the environment for the program. Both keys and values may be
                                mixed strings and bitvectors.
        :return:                The entry state.
        :rtype:                 SimState
        """
        # Construction is delegated to the OS-specific SimOS object.
        return self.project.simos.state_entry(**kwargs)
    def full_init_state(self, **kwargs):
        """
        Very much like :meth:`entry_state()`, except that instead of starting execution at the program entry point,
        execution begins at a special SimProcedure that plays the role of the dynamic loader, calling each of the
        initializer functions that should be called before execution reaches the entry point.

        :param addr:            The address the state should start at instead of the entry point.
        :param initial_prefix:  If this is provided, all symbolic registers will hold symbolic values with names
                                prefixed by this string.
        :param fs:              a dictionary of file names with associated preset SimFile objects.
        :param concrete_fs:     boolean describing whether the host filesystem should be consulted when opening files.
        :param chroot:          a path to use as a fake root directory, behaves similar to a real chroot. Used only
                                when concrete_fs is set to True.
        :param argc:            a custom value to use for the program's argc. May be either an int or a bitvector. If
                                not provided, defaults to the length of args.
        :param args:            a list of values to use as arguments to the program. May be mixed strings and bitvectors.
        :param env:             a dictionary to use as the environment for the program. Both keys and values may be
                                mixed strings and bitvectors.
        :return:                The fully initialized state.
        :rtype:                 SimState
        """
        # Construction is delegated to the OS-specific SimOS object.
        return self.project.simos.state_full_init(**kwargs)
    def call_state(self, addr, *args, **kwargs):
        """
        Returns a state object initialized to the start of a given function, as if it were called with given parameters.

        :param addr:            The address the state should start at instead of the entry point.
        :param args:            Any additional positional arguments will be used as arguments to the function call.

        The following parameters are optional.

        :param base_state:      Use this SimState as the base for the new state instead of a blank state.
        :param cc:              Optionally provide a SimCC object to use a specific calling convention.
        :param ret_addr:        Use this address as the function's return target.
        :param stack_base:      An optional pointer to use as the top of the stack, circa the function entry point
        :param alloc_base:      An optional pointer to use as the place to put excess argument data
        :param grow_like_stack: When allocating data at alloc_base, whether to allocate at decreasing addresses
        :param toc:             The address of the table of contents for ppc64
        :param initial_prefix:  If this is provided, all symbolic registers will hold symbolic values with names
                                prefixed by this string.
        :param fs:              A dictionary of file names with associated preset SimFile objects.
        :param concrete_fs:     bool describing whether the host filesystem should be consulted when opening files.
        :param chroot:          A path to use as a fake root directory, behaves similarly to a real chroot. Used only
                                when concrete_fs is set to True.
        :param kwargs:          Any additional keyword args will be passed to the SimState constructor.
        :return:                The state at the beginning of the function.
        :rtype:                 SimState

        The idea here is that you can provide almost any kind of python type in `args` and it'll be translated to a
        binary format to be placed into simulated memory. Lists (representing arrays) must be entirely elements of the
        same type and size, while tuples (representing structs) can be elements of any type and size.
        If you'd like there to be a pointer to a given value, wrap the value in a `SimCC.PointerWrapper`. Any value
        that can't fit in a register will be automatically put in a PointerWrapper.

        If stack_base is not provided, the current stack pointer will be used, and it will be updated.
        If alloc_base is not provided, the current stack pointer will be used, and it will be updated.
        You might not like the results if you provide stack_base but not alloc_base.

        grow_like_stack controls the behavior of allocating data at alloc_base. When data from args needs to be wrapped
        in a pointer, the pointer needs to point somewhere, so that data is dumped into memory at alloc_base. If you
        set alloc_base to point to somewhere other than the stack, set grow_like_stack to False so that sequential
        allocations happen at increasing addresses.
        """
        # Construction is delegated to the OS-specific SimOS object.
        return self.project.simos.state_call(addr, *args, **kwargs)
    def simgr(self, thing=None, **kwargs):
        """Shorthand alias for :meth:`simulation_manager`."""
        return self.simulation_manager(thing=thing, **kwargs)
def simulation_manager(self, thing=None, **kwargs):
"""
Constructs a new simulation manager.
:param thing: Optional - What to put in the new SimulationManager's active stash (either a SimState or a list of SimStates).
:param kwargs: Any additional keyword arguments will be passed to the SimulationManager constructor
:returns: The new SimulationManager
:rtype: angr.sim_manager.SimulationManager
Many different types can be passed to this method:
* If nothing is passed in, the SimulationManager is seeded with a state initialized for the program
entry point, i.e. :meth:`entry_state()`.
* If a :class:`SimState` is passed in, the SimulationManager is seeded with that state.
* If a list is passed in, the list must contain only SimStates and the whole list will be used to seed the SimulationManager.
"""
if thing is None:
thing = [ self.entry_state() ]
elif isinstance(thing, (list, tuple)):
if any(not isinstance(val, SimState) for val in thing):
raise AngrError("Bad type to initialize SimulationManager")
elif isinstance(thing, SimState):
thing = [ thing ]
else:
raise AngrError("BadType to initialze SimulationManager: %s" % repr(thing))
return SimulationManager(self.project, active_states=thing, **kwargs)
def callable(self, addr, concrete_only=False, perform_merge=True, base_state=None, toc=None, cc=None):
"""
A Callable is a representation of a function in the binary that can be interacted with like a native python
function.
:param addr: The address of the function to use
:param concrete_only: Throw an exception if the execution splits into multiple states
:param perform_merge: Merge all result states into one at the end (only relevant if concrete_only=False)
:param base_state: The state from which to do these runs
:param toc: The address of the table of contents for ppc64
:param cc: The SimCC to use for a calling convention
:returns: A Callable object that can be used as a interface for executing guest code like a
python function.
:rtype: angr.surveyors.caller.Callable
"""
return Callable(self.project,
addr=addr,
concrete_only=concrete_only,
perform_merge=perform_merge,
base_state=base_state,
toc=toc,
cc=cc)
def cc(self, args=None, ret_val=None, sp_delta=None, func_ty=None):
"""
Return a SimCC (calling convention) parametrized for this project and, optionally, a given function.
:param args: A list of argument storage locations, as SimFunctionArguments.
:param ret_val: The return value storage location, as a SimFunctionArgument.
:param sp_delta: Does this even matter??
:param func_ty: The protoype for the given function, as a SimType.
Relevant subclasses of SimFunctionArgument are SimRegArg and SimStackArg, and shortcuts to them can be found on
this `cc` object.
For stack arguments, offsets are relative to the stack pointer on function entry.
"""
return self._default_cc(arch=self.project.arch,
args=args,
ret_val=ret_val,
sp_delta=sp_delta,
func_ty=func_ty)
def cc_from_arg_kinds(self, fp_args, ret_fp=None, sizes=None, sp_delta=None, func_ty=None):
"""
Get a SimCC (calling convention) that will extract floating-point/integral args correctly.
:param arch: The Archinfo arch for this CC
:param fp_args: A list, with one entry for each argument the function can take. True if the argument is fp,
false if it is integral.
:param ret_fp: True if the return value for the function is fp.
:param sizes: Optional: A list, with one entry for each argument the function can take. Each entry is the
size of the corresponding argument in bytes.
:param sp_delta: The amount the stack pointer changes over the course of this function - CURRENTLY UNUSED
:parmm func_ty: A SimType for the function itself
"""
return self._default_cc.from_arg_kinds(arch=self.project.arch,
fp_args=fp_args,
ret_fp=ret_fp,
sizes=sizes,
sp_delta=sp_delta,
func_ty=func_ty)
def block(self, addr, size=None, max_size=None, byte_string=None, vex=None, thumb=False, backup_state=None,
opt_level=None, num_inst=None, traceflags=0,
insn_bytes=None, insn_text=None, # backward compatibility
strict_block_end=None,
):
if insn_bytes is not None and insn_text is not None:
raise AngrError("You cannot provide both 'insn_bytes' and 'insn_text'!")
if insn_bytes is not None:
byte_string = insn_bytes
if insn_text is not None:
byte_string = self.project.arch.asm(insn_text, addr=addr, as_bytes=True, thumb=thumb)
if byte_string is None:
# assembly failed
raise AngrAssemblyError("Assembling failed. Please make sure keystone is installed, and the assembly"
" string is correct.")
if max_size is not None:
l.warning('Keyword argument "max_size" has been deprecated for block(). Please use "size" instead.')
size = max_size
return Block(addr, project=self.project, size=size, byte_string=byte_string, vex=vex, thumb=thumb,
backup_state=backup_state, opt_level=opt_level, num_inst=num_inst, traceflags=traceflags,
strict_block_end=strict_block_end
)
def fresh_block(self, addr, size, backup_state=None):
return Block(addr, project=self.project, size=size, backup_state=backup_state)
cc.SimRegArg = SimRegArg
cc.SimStackArg = SimStackArg
_default_cc = None
callable.PointerWrapper = PointerWrapper
call_state.PointerWrapper = PointerWrapper
    #
    # Private methods
    #

    @deprecate('sim_run()', 'successors()')
    def sim_run(self, *args, **kwargs):
        # Deprecated alias: forwards everything to successors().
        return self.successors(*args, **kwargs)
    @deprecate('sim_block()', 'successors(default_engine=True)')
    def sim_block(self, *args, **kwargs):
        # Deprecated alias: force the default engine, then delegate to successors().
        kwargs['default_engine'] = True
        return self.successors(*args, **kwargs)
    #
    # Compatibility layer
    #

    @deprecate('path_group()', 'simulation_manager()')
    def path_group(self, thing=None, **kwargs):
        # Deprecated alias for simulation_manager() (old "path group" API).
        return self.simgr(thing, **kwargs)
@deprecate('path()', 'entry_state()')
def path(self, state=None, **kwargs):
if state is not None:
return state
return self.entry_state(**kwargs)
from .errors import AngrError
from .sim_manager import SimulationManager
from .codenode import HookNode
from .block import Block
|
{
"content_hash": "56e4165714e98030b95b3324860e5a29",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 142,
"avg_line_length": 52.09036144578313,
"alnum_prop": 0.6250722794032613,
"repo_name": "tyb0807/angr",
"id": "f07ed2709437f2fdc645a3d662775c9795243c98",
"size": "17294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/factory.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6375"
},
{
"name": "C++",
"bytes": "39875"
},
{
"name": "Makefile",
"bytes": "610"
},
{
"name": "Python",
"bytes": "3884780"
}
],
"symlink_target": ""
}
|
"""
sphinx.builders.qthelp
~~~~~~~~~~~~~~~~~~~~~~
Build input files for the Qt collection generator.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import codecs
import posixpath
from os import path
from docutils import nodes
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util import force_decode
from sphinx.util.pycompat import htmlescape
# Parses index-entry titles of the form "name (class in module)" or
# "name (module attribute)" into `title`, optional `id`, and optional
# `descr` groups; consumed by QtHelpBuilder.keyword_item().
_idpattern = re.compile(
    r'(?P<title>.+) (\((class in )?(?P<id>[\w\.]+)( (?P<descr>\w+))?\))$')
# Qt Help Collection Project (.qhcp).
# Is the input file for the help collection generator.
# It contains references to compressed help files which should be
# included in the collection.
# It may contain various other information for customizing Qt Assistant.
collection_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QHelpCollectionProject version="1.0">
<assistant>
<title>%(title)s</title>
<homePage>%(homepage)s</homePage>
<startPage>%(startpage)s</startPage>
</assistant>
<docFiles>
<generate>
<file>
<input>%(outname)s.qhp</input>
<output>%(outname)s.qch</output>
</file>
</generate>
<register>
<file>%(outname)s.qch</file>
</register>
</docFiles>
</QHelpCollectionProject>
'''
# Qt Help Project (.qhp)
# This is the input file for the help generator.
# It contains the table of contents, indices and references to the
# actual documentation files (*.html).
# In addition it defines a unique namespace for the documentation.
project_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QtHelpProject version="1.0">
<namespace>%(namespace)s</namespace>
<virtualFolder>doc</virtualFolder>
<customFilter name="%(project)s %(version)s">
<filterAttribute>%(outname)s</filterAttribute>
<filterAttribute>%(version)s</filterAttribute>
</customFilter>
<filterSection>
<filterAttribute>%(outname)s</filterAttribute>
<filterAttribute>%(version)s</filterAttribute>
<toc>
<section title="%(title)s" ref="%(masterdoc)s.html">
%(sections)s
</section>
</toc>
<keywords>
%(keywords)s
</keywords>
<files>
%(files)s
</files>
</filterSection>
</QtHelpProject>
'''
section_template = '<section title="%(title)s" ref="%(ref)s"/>'
file_template = ' '*12 + '<file>%(filename)s</file>'
class QtHelpBuilder(StandaloneHTMLBuilder):
    """
    Builder that also outputs Qt help project, contents and index files.

    NOTE: this is Python-2-era code — it relies on `unicode` and
    `node.has_key()`, neither of which exists on Python 3.
    """
    name = 'qthelp'

    # don't copy the reST source
    copysource = False
    supported_image_types = ['image/svg+xml', 'image/png', 'image/gif',
                             'image/jpeg']

    # don't add links
    add_permalinks = False
    # don't add sidebar etc.
    embedded = True

    def init(self):
        # Standard HTML init, but force plain .html output suffixes.
        StandaloneHTMLBuilder.init(self)
        # the output files for HTML help must be .html only
        self.out_suffix = '.html'
        #self.config.html_style = 'traditional.css'

    def handle_finish(self):
        # Called by Sphinx after the HTML build; emit the Qt help files.
        self.build_qhp(self.outdir, self.config.qthelp_basename)

    def build_qhp(self, outdir, outname):
        """Write <outname>.qhp and <outname>.qhcp into *outdir*."""
        self.info('writing project file...')
        # sections
        tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc, self,
                                                  prune_toctrees=False)
        # has_key() is Python-2-only docutils/dict API
        istoctree = lambda node: (
            isinstance(node, addnodes.compact_paragraph)
            and node.has_key('toctree'))
        sections = []
        for node in tocdoc.traverse(istoctree):
            sections.extend(self.write_toc(node))
        # content/collapse are unused here; only the index name and title matter
        for indexname, indexcls, content, collapse in self.domain_indices:
            item = section_template % {'title': indexcls.localname,
                                       'ref': '%s.html' % indexname}
            sections.append(' ' * 4 * 4 + item)
        # sections may be unicode strings or byte strings, we have to make sure
        # they are all unicode strings before joining them
        new_sections = []
        for section in sections:
            if not isinstance(section, unicode):
                new_sections.append(force_decode(section, None))
            else:
                new_sections.append(section)
        sections = u'\n'.join(new_sections)

        # keywords
        keywords = []
        index = self.env.create_index(self, group_entries=False)
        for (key, group) in index:
            for title, (refs, subitems) in group:
                keywords.extend(self.build_keywords(title, refs, subitems))
        keywords = u'\n'.join(keywords)

        # files: everything *.html, plus .js files under _static/_images
        if not outdir.endswith(os.sep):
            outdir += os.sep
        olen = len(outdir)
        projectfiles = []
        staticdir = path.join(outdir, '_static')
        imagesdir = path.join(outdir, '_images')
        for root, dirs, files in os.walk(outdir):
            resourcedir = root.startswith(staticdir) or \
                root.startswith(imagesdir)
            for fn in files:
                if (resourcedir and not fn.endswith('.js')) or \
                        fn.endswith('.html'):
                    filename = path.join(root, fn)[olen:]
                    projectfiles.append(file_template %
                                        {'filename': htmlescape(filename)})
        projectfiles = '\n'.join(projectfiles)

        # it seems that the "namespace" may not contain non-alphanumeric
        # characters, and more than one successive dot, or leading/trailing
        # dots, are also forbidden
        nspace = 'org.sphinx.%s.%s' % (outname, self.config.version)
        nspace = re.sub('[^a-zA-Z0-9.]', '', nspace)
        nspace = re.sub(r'\.+', '.', nspace).strip('.')
        nspace = nspace.lower()

        # write the project file
        f = codecs.open(path.join(outdir, outname+'.qhp'), 'w', 'utf-8')
        try:
            f.write(project_template % {
                'outname': htmlescape(outname),
                'title': htmlescape(self.config.html_title),
                'version': htmlescape(self.config.version),
                'project': htmlescape(self.config.project),
                'namespace': htmlescape(nspace),
                'masterdoc': htmlescape(self.config.master_doc),
                'sections': sections,
                'keywords': keywords,
                'files': projectfiles})
        finally:
            f.close()

        homepage = 'qthelp://' + posixpath.join(
            nspace, 'doc', self.get_target_uri(self.config.master_doc))
        startpage = 'qthelp://' + posixpath.join(nspace, 'doc', 'index.html')

        # write the collection project file
        self.info('writing collection project file...')
        f = codecs.open(path.join(outdir, outname+'.qhcp'), 'w', 'utf-8')
        try:
            f.write(collection_template % {
                'outname': htmlescape(outname),
                'title': htmlescape(self.config.html_short_title),
                'homepage': htmlescape(homepage),
                'startpage': htmlescape(startpage)})
        finally:
            f.close()

    def isdocnode(self, node):
        """True if *node* is a list item of the shape (paragraph>reference, bullet_list)."""
        if not isinstance(node, nodes.list_item):
            return False
        if len(node.children) != 2:
            return False
        if not isinstance(node.children[0], addnodes.compact_paragraph):
            return False
        if not isinstance(node.children[0][0], nodes.reference):
            return False
        if not isinstance(node.children[1], nodes.bullet_list):
            return False
        return True

    def write_toc(self, node, indentlevel=4):
        """Recursively render *node* into a list of <section> lines."""
        # XXX this should return a Unicode string, not a bytestring
        parts = []
        if self.isdocnode(node):
            # Document node: open a <section>, recurse into children, close it.
            refnode = node.children[0][0]
            link = refnode['refuri']
            title = htmlescape(refnode.astext()).replace('"', '"')
            item = '<section title="%(title)s" ref="%(ref)s">' % \
                {'title': title, 'ref': link}
            parts.append(' '*4*indentlevel + item)
            for subnode in node.children[1]:
                parts.extend(self.write_toc(subnode, indentlevel+1))
            parts.append(' '*4*indentlevel + '</section>')
        elif isinstance(node, nodes.list_item):
            for subnode in node:
                parts.extend(self.write_toc(subnode, indentlevel))
        elif isinstance(node, nodes.reference):
            # Leaf reference: one self-closing <section/>, ASCII-encoded.
            link = node['refuri']
            title = htmlescape(node.astext()).replace('"','"')
            item = section_template % {'title': title, 'ref': link}
            item = u' ' * 4 * indentlevel + item
            parts.append(item.encode('ascii', 'xmlcharrefreplace'))
        elif isinstance(node, nodes.bullet_list):
            for subnode in node:
                parts.extend(self.write_toc(subnode, indentlevel))
        elif isinstance(node, addnodes.compact_paragraph):
            for subnode in node:
                parts.extend(self.write_toc(subnode, indentlevel))
        return parts

    def keyword_item(self, name, ref):
        """Render one <keyword/> line; derive an id from names like 'x (class in m)'."""
        matchobj = _idpattern.match(name)
        if matchobj:
            groupdict = matchobj.groupdict()
            shortname = groupdict['title']
            id = groupdict.get('id')
            #descr = groupdict.get('descr')
            if shortname.endswith('()'):
                shortname = shortname[:-2]
                id = '%s.%s' % (id, shortname)
        else:
            id = None
        if id:
            item = ' '*12 + '<keyword name="%s" id="%s" ref="%s"/>' % (
                name, id, ref[1])
        else:
            item = ' '*12 + '<keyword name="%s" ref="%s"/>' % (name, ref[1])
        # NOTE(review): the encode() result is discarded — with
        # 'xmlcharrefreplace' this line is a no-op; confirm it can be removed.
        item.encode('ascii', 'xmlcharrefreplace')
        return item

    def build_keywords(self, title, refs, subitems):
        """Render an index entry (and its sub-entries) into <keyword/> lines."""
        keywords = []
        title = htmlescape(title)
        # if len(refs) == 0: # XXX
        #     write_param('See Also', title)
        if len(refs) == 1:
            keywords.append(self.keyword_item(title, refs[0]))
        elif len(refs) > 1:
            for i, ref in enumerate(refs): # XXX
                # item = (' '*12 +
                #         '<keyword name="%s [%d]" ref="%s"/>' % (
                #             title, i, ref))
                # item.encode('ascii', 'xmlcharrefreplace')
                # keywords.append(item)
                keywords.append(self.keyword_item(title, ref))
        if subitems:
            for subitem in subitems:
                keywords.extend(self.build_keywords(subitem[0], subitem[1], []))
        return keywords
|
{
"content_hash": "8dfe4adbe7bc8e9048be66a6f519a79e",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 80,
"avg_line_length": 36.59322033898305,
"alnum_prop": 0.558406669754516,
"repo_name": "SurfasJones/icecream-info",
"id": "ce07315dbd29db917d2cc2a18fc35940ea5857c5",
"size": "10819",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "icecream/lib/python2.7/site-packages/sphinx/builders/qthelp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "288937"
},
{
"name": "JavaScript",
"bytes": "589933"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "18137514"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "Shell",
"bytes": "10274"
},
{
"name": "TeX",
"bytes": "56626"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
}
|
import time
import datetime
from app import models
import config
from sqlalchemy.sql import func
def get_columns_model_record():
    """Describe every CdrRecord column as {'key', 'info_name', 'type'} dicts."""
    table_columns = models.CdrRecord.__table__.columns._all_columns
    return [
        {
            'key': col.key,
            # None when the column has no 'name' entry in its info dict.
            'info_name': None if 'name' not in col.info else col.info['name'],
            'type': str(col.type),
        }
        for col in table_columns
    ]
class UCdrRecordFilter(object):
    """Builds and executes a filtered query over models.CdrRecord.

    ``build()`` takes an iterable of filter dicts shaped
    ``{'f': <column name>, 'c': <condition string>, 'v': <value>}`` and
    translates each into a SQLAlchemy filter, dispatching on the column's
    SQL type name (as reported by get_columns_model_record()).
    """

    def __init__(self):
        # Per-build state; (re)populated by __reInit().
        self.__filter_coll = None  # NOTE(review): assigned here but never read — confirm it can go
        self.__columns = None      # column metadata from get_columns_model_record()
        self.__query = None        # the SQLAlchemy query being built
        self.__result = None       # result rows of the most recent build()

    def __reInit(self):
        # Reset cached state and start a fresh CdrRecord query.
        self.__result = None
        self.__columns = get_columns_model_record()
        self.__query = models.db.session.query(models.CdrRecord)

    def build(self, filters_collection):
        """Apply every filter, then run the query (newest first, limited).

        Filters whose condition/type combination is not handled below are
        silently ignored.
        """
        self.__reInit()
        for f in filters_collection:
            cname = f['f']  # column name
            cond = f['c']   # condition operator
            val = f['v']    # raw (string) value
            t = self.__get_column_type(cname)
            if cond == '<':
                # ---- begin COND : <
                if t in ('BIGINT', 'INTEGER',):
                    self.__query = self.__query.filter(getattr(models.CdrRecord, cname) < int(val))
                elif t in ('DATETIME',):
                    # parse the textual value using the configured filter format
                    dt_val = datetime.datetime.fromtimestamp(time.mktime(
                        time.strptime(val, config.FORMAT_DATETIME_FILTER)
                    ))
                    self.__query = self.__query.filter(getattr(models.CdrRecord, cname) < dt_val)
                pass
                # ---- end COND : <
            elif cond == '>':
                # ---- begin COND : >
                if t in ('BIGINT', 'INTEGER',):
                    self.__query = self.__query.filter(getattr(models.CdrRecord, cname) > int(val))
                elif t in ('DATETIME',):
                    dt_val = datetime.datetime.fromtimestamp(time.mktime(
                        time.strptime(val, config.FORMAT_DATETIME_FILTER)
                    ))
                    self.__query = self.__query.filter(getattr(models.CdrRecord, cname) > dt_val)
                pass
                # ---- end COND : <
            elif cond == '=':
                # ---- begin COND : =
                if t in ('BIGINT', 'INTEGER',):
                    self.__query = self.__query.filter(getattr(models.CdrRecord, cname) == int(val))
                elif t in ('VARCHAR(255)',):
                    self.__query = self.__query.filter(getattr(models.CdrRecord, cname) == val)
                pass
                # ---- end COND : =
            elif cond == '!=':
                # ---- begin COND : !=
                if t in ('BIGINT', 'INTEGER',):
                    self.__query = self.__query.filter(getattr(models.CdrRecord, cname) != int(val))
                pass
                # ---- end COND : !=
            elif cond == '<=':
                # ---- begin COND : <=
                if t in ('BIGINT', 'INTEGER',):
                    self.__query = self.__query.filter(getattr(models.CdrRecord, cname) <= int(val))
                pass
                # ---- end COND : <=
            elif cond == '>=':
                # ---- begin COND : >=
                if t in ('BIGINT', 'INTEGER',):
                    self.__query = self.__query.filter(getattr(models.CdrRecord, cname) >= int(val))
                pass
                # ---- end COND : >=
            elif cond == '%':
                # ---- begin COND : %
                # NOTE(review): the wildcard handling looks inverted — another
                # '%' is added only when the value ALREADY starts/ends with
                # one; confirm the intended LIKE-pattern behavior.
                if len(val) >= 2:
                    if val[:1] == '%':
                        val = '%' + val
                    elif val[-1:] == '%':
                        val += '%'
                    self.__query = self.__query.filter(getattr(models.CdrRecord, cname).like(val))
                pass
                # ---- end COND : %
            elif cond == 'not(%)':
                # ---- begin COND : not(%)
                # NOTE(review): same suspicious '%'-doubling as the '%' branch.
                if len(val) >= 2:
                    if val[:1] == '%':
                        val = '%' + val
                    elif val[-1:] == '%':
                        val += '%'
                    self.__query = self.__query.filter(getattr(models.CdrRecord, cname).notlike(val))
                pass
                # ---- end COND : not(%)
            elif cond == '= len(x)':
                # ---- begin COND : = len(x)
                self.__query = self.__query.filter(func.length(getattr(models.CdrRecord, cname)) == int(val))
                pass
                # ---- end COND : = len(x)
            elif cond == '!= len(x)':
                # ---- begin COND : = != len(x)
                self.__query = self.__query.filter(func.length(getattr(models.CdrRecord, cname)) != int(val))
                pass
                # ---- end COND : = != len(x)
            elif cond == '> len(x)':
                # ---- begin COND : = > len(x)
                self.__query = self.__query.filter(func.length(getattr(models.CdrRecord, cname)) > int(val))
                pass
                # ---- end COND : = > len(x)
            elif cond == '< len(x)':
                # ---- begin COND : = < len(x)
                self.__query = self.__query.filter(func.length(getattr(models.CdrRecord, cname)) < int(val))
                pass
                # ---- end COND : = < len(x)
        # Newest records first, capped at the configured display limit.
        self.__result = self.__query \
            .order_by(models.CdrRecord.unix_time.desc()) \
            .limit(config.VIEW_LIMIT_VISIBLE_RECORDS) \
            .all()
        return self.__result

    def __get_column_type(self, col_name):
        # Look up the SQL type string for *col_name*; implicitly returns None
        # when the column is unknown (callers then skip the filter).
        for c in self.__columns:
            if c['key'] == col_name:
                return c['type']
|
{
"content_hash": "bf1dc2bebdfc1d04bc6f9513afd5926d",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 109,
"avg_line_length": 34.19760479041916,
"alnum_prop": 0.42707056557520573,
"repo_name": "m2xim/collogisco",
"id": "aaaa5de92f1e383caef83c088b7c295379999b54",
"size": "5711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/utils_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3600"
},
{
"name": "HTML",
"bytes": "6706"
},
{
"name": "JavaScript",
"bytes": "22900"
},
{
"name": "Python",
"bytes": "17176"
}
],
"symlink_target": ""
}
|
from nose.tools import *
import unittest
import shoppingtrends.receipt as receipt
from shoppingtrends.receipt import Receipt, Item
from shoppingtrends.localization import Province, Store
class test_Item(unittest.TestCase):
    """Unit tests for Item.total_cost()."""

    def setUp(self):
        # Fixture items: (name, unit price, quantity[, taxable]).
        self.bananas = Item('Bananas', 0.79, 1.8)
        self.pears = Item('Pears', 1.49, 4)
        self.napkins = Item('Napkins', 2.0, 1, True)

    def tearDown(self):
        # Drop fixture references so each test starts clean.
        for fixture in ('bananas', 'pears', 'napkins'):
            setattr(self, fixture, None)

    def test_Item_total_cost(self):
        # Non-taxable item: the tax rate must still be passed, but is ignored.
        self.assertAlmostEqual(self.bananas.total_cost(13.0), 1.422)
        # Taxable item: 13% tax is applied.
        self.assertAlmostEqual(self.napkins.total_cost(13.0), 2.26)
class test_Receipt(unittest.TestCase):
    """Unit tests for Receipt item management and total computation."""

    def setUp(self):
        self.quebec = Province("Quebec", "QC", 13)
        self.loblaws = Store("Loblaws", self.quebec)
        self.loblaws_receipt = Receipt(self.loblaws)
        self.bananas = Item('Bananas', 0.79, 1.8)
        self.napkins = Item('Napkins', 2.0, 1, True)

    def tearDown(self):
        # Removed the stray `self.pears = None`: setUp never creates a `pears`
        # fixture in this class — it was copy-paste residue from test_Item.
        self.quebec = None
        self.loblaws = None
        self.loblaws_receipt = None
        self.bananas = None
        self.napkins = None

    def test_Receipt_add_remove_item(self):
        # Receipt starts empty, gains an item on add, empties again on remove.
        assert not self.loblaws_receipt.items
        self.loblaws_receipt.add_item(self.bananas)
        assert self.loblaws_receipt.items
        self.loblaws_receipt.remove_item_by_name(self.bananas.name)
        assert not self.loblaws_receipt.items

    def test_Receipt_total(self):
        # Empty receipt totals zero; totals track adds and removals.
        self.assertAlmostEqual(self.loblaws_receipt.total(), 0.0)
        self.loblaws_receipt.add_item(self.bananas)
        self.assertAlmostEqual(self.loblaws_receipt.total(), 1.422)
        self.loblaws_receipt.add_item(self.napkins)
        self.assertAlmostEqual(self.loblaws_receipt.total(), 3.682)
        self.loblaws_receipt.remove_item_by_name(self.bananas.name)
        self.assertAlmostEqual(self.loblaws_receipt.total(), 2.26)
|
{
"content_hash": "3e571a748ff48d619015ec0ca73300a2",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 67,
"avg_line_length": 33.33846153846154,
"alnum_prop": 0.6340562990309183,
"repo_name": "jhooey/shopping-cart-trends",
"id": "70cefe902f01dc6345a58cfb431018dce02ebb6c",
"size": "2167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/receipt_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "48854"
}
],
"symlink_target": ""
}
|
import os
import shutil
import tempfile
import unittest
from trac.attachment import Attachment
from trac.mimeview.api import Context
from trac.resource import Resource
from trac.search.web_ui import SearchModule
from trac.test import MockPerm
from trac.web.href import Href
from trac.wiki.tests import formatter
# Wiki-formatter test cases for the search: link resolver, in the
# trac.wiki.tests.formatter format: "=== title", wiki input,
# "---" separator, expected HTML output.
SEARCH_TEST_CASES = """
============================== search: link resolver
search:foo
search:"foo bar"
[search:bar Bar]
[search:bar]
[search:]
------------------------------
<p>
<a class="search" href="/search?q=foo">search:foo</a>
<a class="search" href="/search?q=foo+bar">search:"foo bar"</a>
<a class="search" href="/search?q=bar">Bar</a>
<a class="search" href="/search?q=bar">bar</a>
<a class="search" href="/search?q=">search</a>
</p>
------------------------------
============================== search: link resolver with query arguments
search:?q=foo&wiki=on
search:"?q=foo bar&wiki=on"
[search:?q=bar&ticket=on Bar in Tickets]
------------------------------
<p>
<a class="search" href="/search?q=foo&wiki=on">search:?q=foo&wiki=on</a>
<a class="search" href="/search?q=foo+bar&wiki=on">search:"?q=foo bar&wiki=on"</a>
<a class="search" href="/search?q=bar&ticket=on">Bar in Tickets</a>
</p>
------------------------------
"""
# Wiki-formatter test cases for the attachment: / raw-attachment: link
# resolvers; these rely on the attachments created by attachment_setup().
ATTACHMENT_TEST_CASES = """
============================== attachment: link resolver (deprecated)
attachment:wiki:WikiStart:file.txt (deprecated)
attachment:ticket:123:file.txt (deprecated)
[attachment:wiki:WikiStart:file.txt file.txt] (deprecated)
[attachment:ticket:123:file.txt] (deprecated)
------------------------------
<p>
<a class="attachment" href="/attachment/wiki/WikiStart/file.txt" title="Attachment 'file.txt' in WikiStart">attachment:wiki:WikiStart:file.txt</a><span class="noprint"> <a class="trac-rawlink" href="/raw-attachment/wiki/WikiStart/file.txt" title="Download"><img src="/chrome/common/download.png" alt="Download"/></a></span> (deprecated)
<a class="attachment" href="/attachment/ticket/123/file.txt" title="Attachment 'file.txt' in Ticket #123">attachment:ticket:123:file.txt</a><span class="noprint"> <a class="trac-rawlink" href="/raw-attachment/ticket/123/file.txt" title="Download"><img src="/chrome/common/download.png" alt="Download"/></a></span> (deprecated)
<a class="attachment" href="/attachment/wiki/WikiStart/file.txt" title="Attachment 'file.txt' in WikiStart">file.txt</a><span class="noprint"> <a class="trac-rawlink" href="/raw-attachment/wiki/WikiStart/file.txt" title="Download"><img src="/chrome/common/download.png" alt="Download"/></a></span> (deprecated)
<a class="attachment" href="/attachment/ticket/123/file.txt" title="Attachment 'file.txt' in Ticket #123">ticket:123:file.txt</a><span class="noprint"> <a class="trac-rawlink" href="/raw-attachment/ticket/123/file.txt" title="Download"><img src="/chrome/common/download.png" alt="Download"/></a></span> (deprecated)
</p>
------------------------------
============================== attachment: "foreign" links
attachment:file.txt:wiki:WikiStart
attachment:file.txt:ticket:123
[attachment:file.txt:wiki:WikiStart file.txt]
[attachment:file.txt:ticket:123]
attachment:foo.txt:wiki:SomePage/SubPage
------------------------------
<p>
<a class="attachment" href="/attachment/wiki/WikiStart/file.txt" title="Attachment 'file.txt' in WikiStart">attachment:file.txt:wiki:WikiStart</a><span class="noprint"> <a class="trac-rawlink" href="/raw-attachment/wiki/WikiStart/file.txt" title="Download"><img src="/chrome/common/download.png" alt="Download"/></a></span>
<a class="attachment" href="/attachment/ticket/123/file.txt" title="Attachment 'file.txt' in Ticket #123">attachment:file.txt:ticket:123</a><span class="noprint"> <a class="trac-rawlink" href="/raw-attachment/ticket/123/file.txt" title="Download"><img src="/chrome/common/download.png" alt="Download"/></a></span>
<a class="attachment" href="/attachment/wiki/WikiStart/file.txt" title="Attachment 'file.txt' in WikiStart">file.txt</a><span class="noprint"> <a class="trac-rawlink" href="/raw-attachment/wiki/WikiStart/file.txt" title="Download"><img src="/chrome/common/download.png" alt="Download"/></a></span>
<a class="attachment" href="/attachment/ticket/123/file.txt" title="Attachment 'file.txt' in Ticket #123">file.txt:ticket:123</a><span class="noprint"> <a class="trac-rawlink" href="/raw-attachment/ticket/123/file.txt" title="Download"><img src="/chrome/common/download.png" alt="Download"/></a></span>
<a class="attachment" href="/attachment/wiki/SomePage/SubPage/foo.txt" title="Attachment 'foo.txt' in SomePage/SubPage">attachment:foo.txt:wiki:SomePage/SubPage</a><span class="noprint"> <a class="trac-rawlink" href="/raw-attachment/wiki/SomePage/SubPage/foo.txt" title="Download"><img src="/chrome/common/download.png" alt="Download"/></a></span>
</p>
------------------------------
============================== attachment: "local" links
attachment:file.txt
[attachment:file.txt that file]
------------------------------
<p>
<a class="attachment" href="/attachment/wiki/WikiStart/file.txt" title="Attachment 'file.txt' in WikiStart">attachment:file.txt</a><span class="noprint"> <a class="trac-rawlink" href="/raw-attachment/wiki/WikiStart/file.txt" title="Download"><img src="/chrome/common/download.png" alt="Download"/></a></span>
<a class="attachment" href="/attachment/wiki/WikiStart/file.txt" title="Attachment 'file.txt' in WikiStart">that file</a><span class="noprint"> <a class="trac-rawlink" href="/raw-attachment/wiki/WikiStart/file.txt" title="Download"><img src="/chrome/common/download.png" alt="Download"/></a></span>
</p>
------------------------------
============================== attachment: "missing" links
attachment:foo.txt
[attachment:foo.txt other file]
------------------------------
<p>
<a class="missing attachment">attachment:foo.txt</a>
<a class="missing attachment">other file</a>
</p>
------------------------------
============================== attachment: "raw" links
raw-attachment:file.txt
[raw-attachment:file.txt that file]
------------------------------
<p>
<a class="attachment" href="/raw-attachment/wiki/WikiStart/file.txt" title="Attachment 'file.txt' in WikiStart">raw-attachment:file.txt</a>
<a class="attachment" href="/raw-attachment/wiki/WikiStart/file.txt" title="Attachment 'file.txt' in WikiStart">that file</a>
</p>
------------------------------
============================== attachment: raw format as explicit argument
attachment:file.txt?format=raw
[attachment:file.txt?format=raw that file]
------------------------------
<p>
<a class="attachment" href="/attachment/wiki/WikiStart/file.txt?format=raw" title="Attachment 'file.txt' in WikiStart">attachment:file.txt?format=raw</a><span class="noprint"> <a class="trac-rawlink" href="/raw-attachment/wiki/WikiStart/file.txt?format=raw" title="Download"><img src="/chrome/common/download.png" alt="Download"/></a></span>
<a class="attachment" href="/attachment/wiki/WikiStart/file.txt?format=raw" title="Attachment 'file.txt' in WikiStart">that file</a><span class="noprint"> <a class="trac-rawlink" href="/raw-attachment/wiki/WikiStart/file.txt?format=raw" title="Download"><img src="/chrome/common/download.png" alt="Download"/></a></span>
</p>
------------------------------
""" # "
def attachment_setup(tc):
    """Create a temp environment directory and seed it with test attachments."""
    # NOTE(review): these imports appear to be for import side effects only —
    # neither name is used below; confirm before removing.
    import trac.ticket.api
    import trac.wiki.api
    tc.env.path = os.path.join(tempfile.gettempdir(), 'trac-tempenv')
    os.mkdir(tc.env.path)
    # One empty attachment per parent resource referenced by the test cases.
    attachment = Attachment(tc.env, 'wiki', 'WikiStart')
    attachment.insert('file.txt', tempfile.TemporaryFile(), 0)
    attachment = Attachment(tc.env, 'ticket', 123)
    attachment.insert('file.txt', tempfile.TemporaryFile(), 0)
    attachment = Attachment(tc.env, 'wiki', 'SomePage/SubPage')
    attachment.insert('foo.txt', tempfile.TemporaryFile(), 0)
def attachment_teardown(tc):
    """Remove the temp environment directory and reset the test database."""
    shutil.rmtree(tc.env.path)
    tc.env.reset_db()
# mailto: test case run WITHOUT the EMAIL_VIEW permission (see
# email_default_context below): addresses must be obfuscated.
EMAIL_TEST_CASE_DEFAULT = u"""
============================== mailto: obfuscated by default, like plain email
user@example.org vs. mailto:user@example.org
and [mailto:user@example.org Joe User]
------------------------------
<p>
user@\u2026 vs. mailto:user@\u2026
and Joe User
</p>
------------------------------
"""
def email_default_context():
    """Build a rendering context whose permissions deny EMAIL_VIEW."""
    class NoEmailViewPerm(MockPerm):
        # Grant every action except EMAIL_VIEW, so the formatter obfuscates
        # email addresses as it would for an unprivileged user.
        def has_permission(self, action, realm_or_resource=None, id=False,
                           version=False):
            return action != 'EMAIL_VIEW'
        # `action in perm` must follow the same rule as has_permission().
        __contains__ = has_permission
    context = Context(Resource('wiki', 'WikiStart'), href=Href('/'),
                      perm=NoEmailViewPerm())
    context.req = None # 0.12 FIXME .req shouldn't be required by formatter
    return context
# Same wiki input as above, but run with `never_obfuscate_mailto` enabled:
# mailto: links stay intact even though plain addresses are obfuscated.
EMAIL_TEST_CASE_NEVER_OBFUSCATE = u"""
============================== mailto: not obfuscated, unlike plain email
user@example.org vs. mailto:user@example.org
and [mailto:user@example.org Joe User]
------------------------------
<p>
user@\u2026 vs. <a class="mail-link" href="mailto:user@example.org"><span class="icon">\xa0</span>mailto:user@example.org</a>
and <a class="mail-link" href="mailto:user@example.org"><span class="icon">\xa0</span>Joe User</a>
</p>
------------------------------
"""
def email_never_obfuscate_setup(tc):
    """Enable trac's `never_obfuscate_mailto` option for the test environment."""
    tc.env.config.set('trac', 'never_obfuscate_mailto', True)
def suite():
    """Aggregate the wiki-syntax formatter test suites for this module."""
    # Local renamed from ``suite`` to avoid shadowing this function's name.
    tests = unittest.TestSuite()
    tests.addTest(formatter.suite(SEARCH_TEST_CASES, file=__file__))
    tests.addTest(formatter.suite(ATTACHMENT_TEST_CASES, file=__file__,
                                  context=('wiki', 'WikiStart'),
                                  setup=attachment_setup,
                                  teardown=attachment_teardown))
    tests.addTest(formatter.suite(EMAIL_TEST_CASE_DEFAULT, file=__file__,
                                  context=email_default_context()))
    tests.addTest(formatter.suite(EMAIL_TEST_CASE_NEVER_OBFUSCATE,
                                  file=__file__,
                                  context=email_default_context(),
                                  setup=email_never_obfuscate_setup))
    return tests
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
{
"content_hash": "23acb2582f29b3b617b53082290940ee",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 347,
"avg_line_length": 54.68108108108108,
"alnum_prop": 0.6416567813364966,
"repo_name": "dokipen/trac",
"id": "e176a7554c3169a74c4949bbf965e8290286a71f",
"size": "10116",
"binary": false,
"copies": "2",
"ref": "refs/heads/announcer",
"path": "trac/tests/wikisyntax.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C#",
"bytes": "11612"
},
{
"name": "JavaScript",
"bytes": "45742"
},
{
"name": "Python",
"bytes": "2183584"
}
],
"symlink_target": ""
}
|
from pymongo import MongoClient
# Connection settings for the local MongoDB instance.
MONGO_DB_HOST = 'localhost'
MONGO_DB_PORT = '27017'
DB_NAME = 'test'
# SECURITY(review): credentials are hardcoded in source; they should be
# loaded from configuration or the environment instead.
USER_NAME = "tester"
PWD = "099484"
# Module-level client and default database shared by all callers.
client = MongoClient('%s:%s' % (MONGO_DB_HOST, MONGO_DB_PORT))
mongodb = client[DB_NAME]
def getDB(db=DB_NAME):
    """Authenticate and return a handle to the MongoDB database *db*.

    Fixes a defect: the original accepted a ``db`` argument but ignored
    it and always returned the default database.
    """
    # SECURITY(review): USER_NAME/PWD are hardcoded at module level;
    # move them to configuration or environment variables.
    mongodb.authenticate(name=USER_NAME, password=PWD)
    return client[db]
|
{
"content_hash": "4f5dc51fe49190cb9ccbca871d490a76",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 62,
"avg_line_length": 21.866666666666667,
"alnum_prop": 0.6951219512195121,
"repo_name": "stevensshi/smart-realestate",
"id": "f95f6fc6fb53a7a8fcde3f4787bc51816d738bd0",
"size": "328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/mongodb_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18929"
},
{
"name": "Shell",
"bytes": "392"
}
],
"symlink_target": ""
}
|
from django import forms
from .models import Report
class ReportForm(forms.ModelForm):
    """ModelForm for Report that hides the ``geometry`` field from user
    input (presumably populated elsewhere — confirm with callers)."""
    class Meta:
        model = Report
        exclude = ('geometry',)
        # Inline widget tweaks sizing the inputs for the report page.
        widgets = {
            'location': forms.TextInput(attrs={'style': 'width:88%'}),
            'description': forms.Textarea(attrs={'rows': '7'}),
            'contact_info': forms.Textarea(attrs={'rows': '7'})
        }
|
{
"content_hash": "74d77ac76f50453d550a85c5adf6b678",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 70,
"avg_line_length": 27.857142857142858,
"alnum_prop": 0.5666666666666667,
"repo_name": "codeforamerica/straymapper",
"id": "2d334a4a15cee14bed051045aaccfc3376f7ec4d",
"size": "390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reports/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C#",
"bytes": "23508"
},
{
"name": "JavaScript",
"bytes": "601987"
},
{
"name": "Python",
"bytes": "28581"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.models import User
from junction.base.models import AuditModel
from django.db import models
class Profile(AuditModel):
    '''
    It stores the City/Phone Details of the User.
    '''
    # One profile per Django auth user.
    user = models.OneToOneField(User)
    city = models.CharField(max_length=100, blank=True, null=True)
    contact_no = models.CharField(max_length=15, blank=True, null=True)
    def __unicode__(self):
        # Python 2 unicode representation: show the owning user's name.
        return self.user.username
|
{
"content_hash": "440a2bb92e129a3a6b5cce404deb3a68",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 71,
"avg_line_length": 30.066666666666666,
"alnum_prop": 0.7095343680709535,
"repo_name": "ChillarAnand/junction",
"id": "db5e4198901c61646a67474cc00ec5507b24f6b0",
"size": "451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "junction/profiles/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "190835"
},
{
"name": "HTML",
"bytes": "159139"
},
{
"name": "JavaScript",
"bytes": "48999"
},
{
"name": "Python",
"bytes": "314195"
},
{
"name": "Shell",
"bytes": "599"
}
],
"symlink_target": ""
}
|
import unittest
from unittest import mock
import pytest
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.operators.spanner import (
SpannerDeleteDatabaseInstanceOperator,
SpannerDeleteInstanceOperator,
SpannerDeployDatabaseInstanceOperator,
SpannerDeployInstanceOperator,
SpannerQueryDatabaseInstanceOperator,
SpannerUpdateDatabaseInstanceOperator,
)
PROJECT_ID = 'project-id'
INSTANCE_ID = 'instance-id'
DB_ID = 'db1'
CONFIG_NAME = 'projects/project-id/instanceConfigs/eur3'
NODE_COUNT = '1'
DISPLAY_NAME = 'Test Instance'
INSERT_QUERY = "INSERT my_table1 (id, name) VALUES (1, 'One')"
INSERT_QUERY_2 = "INSERT my_table2 (id, name) VALUES (1, 'One')"
CREATE_QUERY = "CREATE TABLE my_table1 (id INT64, name STRING(100))"
CREATE_QUERY_2 = "CREATE TABLE my_table2 (id INT64, name STRING(100))"
DDL_STATEMENTS = [CREATE_QUERY, CREATE_QUERY_2]
class TestCloudSpanner(unittest.TestCase):
    """Unit tests for the Google Cloud Spanner Airflow operators.

    SpannerHook is patched in every test, so no real GCP calls are made;
    each test asserts which hook methods the operator under test invokes
    and with which arguments.  Missing-project-id variants verify that
    ``project_id=None`` is forwarded to the hook (the hook resolves the
    default project).
    """
    # --- instance deploy: creates when absent, updates when present ---
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_create(self, mock_hook):
        mock_hook.return_value.get_instance.return_value = None
        op = SpannerDeployInstanceOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME,
            task_id="id",
        )
        result = op.execute(None) # pylint: disable=assignment-from-no-return
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.create_instance.assert_called_once_with(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME,
        )
        mock_hook.return_value.update_instance.assert_not_called()
        assert result is None
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_create_missing_project_id(self, mock_hook):
        mock_hook.return_value.get_instance.return_value = None
        op = SpannerDeployInstanceOperator(
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME,
            task_id="id",
        )
        result = op.execute(None) # pylint: disable=assignment-from-no-return
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.create_instance.assert_called_once_with(
            project_id=None,
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME,
        )
        mock_hook.return_value.update_instance.assert_not_called()
        assert result is None
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_update(self, mock_hook):
        # Existing instance -> deploy operator updates instead of creating.
        mock_hook.return_value.get_instance.return_value = {"name": INSTANCE_ID}
        op = SpannerDeployInstanceOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME,
            task_id="id",
        )
        result = op.execute(None) # pylint: disable=assignment-from-no-return
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.update_instance.assert_called_once_with(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME,
        )
        mock_hook.return_value.create_instance.assert_not_called()
        assert result is None
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_update_missing_project_id(self, mock_hook):
        mock_hook.return_value.get_instance.return_value = {"name": INSTANCE_ID}
        op = SpannerDeployInstanceOperator(
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME,
            task_id="id",
        )
        result = op.execute(None) # pylint: disable=assignment-from-no-return
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.update_instance.assert_called_once_with(
            project_id=None,
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME,
        )
        mock_hook.return_value.create_instance.assert_not_called()
        assert result is None
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_create_aborts_and_succeeds_if_instance_exists(self, mock_hook):
        mock_hook.return_value.get_instance.return_value = {"name": INSTANCE_ID}
        op = SpannerDeployInstanceOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME,
            task_id="id",
        )
        result = op.execute(None) # pylint: disable=assignment-from-no-return
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.create_instance.assert_not_called()
        assert result is None
    # Parameterized: each row supplies (project_id, instance_id, name of
    # the empty parameter expected in the error message).
    @parameterized.expand(
        [
            ("", INSTANCE_ID, "project_id"),
            (PROJECT_ID, "", "instance_id"),
        ]
    )
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_create_ex_if_param_missing(self, project_id, instance_id, exp_msg, mock_hook):
        with pytest.raises(AirflowException) as ctx:
            SpannerDeployInstanceOperator(
                project_id=project_id,
                instance_id=instance_id,
                configuration_name=CONFIG_NAME,
                node_count=int(NODE_COUNT),
                display_name=DISPLAY_NAME,
                task_id="id",
            )
        err = ctx.value
        assert f"The required parameter '{exp_msg}' is empty" in str(err)
        mock_hook.assert_not_called()
    # --- instance delete ---
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_delete(self, mock_hook):
        mock_hook.return_value.get_instance.return_value = {"name": INSTANCE_ID}
        op = SpannerDeleteInstanceOperator(project_id=PROJECT_ID, instance_id=INSTANCE_ID, task_id="id")
        result = op.execute(None)
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.delete_instance.assert_called_once_with(
            project_id=PROJECT_ID, instance_id=INSTANCE_ID
        )
        assert result
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_delete_missing_project_id(self, mock_hook):
        mock_hook.return_value.get_instance.return_value = {"name": INSTANCE_ID}
        op = SpannerDeleteInstanceOperator(instance_id=INSTANCE_ID, task_id="id")
        result = op.execute(None)
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.delete_instance.assert_called_once_with(
            project_id=None, instance_id=INSTANCE_ID
        )
        assert result
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_delete_aborts_and_succeeds_if_instance_does_not_exist(self, mock_hook):
        mock_hook.return_value.get_instance.return_value = None
        op = SpannerDeleteInstanceOperator(project_id=PROJECT_ID, instance_id=INSTANCE_ID, task_id="id")
        result = op.execute(None)
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.delete_instance.assert_not_called()
        assert result
    @parameterized.expand(
        [
            ("", INSTANCE_ID, "project_id"),
            (PROJECT_ID, "", "instance_id"),
        ]
    )
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_delete_ex_if_param_missing(self, project_id, instance_id, exp_msg, mock_hook):
        with pytest.raises(AirflowException) as ctx:
            SpannerDeleteInstanceOperator(project_id=project_id, instance_id=instance_id, task_id="id")
        err = ctx.value
        assert f"The required parameter '{exp_msg}' is empty" in str(err)
        mock_hook.assert_not_called()
    # --- query (DML) ---
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_query(self, mock_hook):
        mock_hook.return_value.execute_sql.return_value = None
        op = SpannerQueryDatabaseInstanceOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            query=INSERT_QUERY,
            task_id="id",
        )
        result = op.execute(None) # pylint: disable=assignment-from-no-return
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        # A single query string is wrapped into a one-element list.
        mock_hook.return_value.execute_dml.assert_called_once_with(
            project_id=PROJECT_ID, instance_id=INSTANCE_ID, database_id=DB_ID, queries=[INSERT_QUERY]
        )
        assert result is None
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_query_missing_project_id(self, mock_hook):
        mock_hook.return_value.execute_sql.return_value = None
        op = SpannerQueryDatabaseInstanceOperator(
            instance_id=INSTANCE_ID, database_id=DB_ID, query=INSERT_QUERY, task_id="id"
        )
        result = op.execute(None) # pylint: disable=assignment-from-no-return
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.execute_dml.assert_called_once_with(
            project_id=None, instance_id=INSTANCE_ID, database_id=DB_ID, queries=[INSERT_QUERY]
        )
        assert result is None
    @parameterized.expand(
        [
            ("", INSTANCE_ID, DB_ID, INSERT_QUERY, "project_id"),
            (PROJECT_ID, "", DB_ID, INSERT_QUERY, "instance_id"),
            (PROJECT_ID, INSTANCE_ID, "", INSERT_QUERY, "database_id"),
            (PROJECT_ID, INSTANCE_ID, DB_ID, "", "query"),
        ]
    )
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_query_ex_if_param_missing(
        self, project_id, instance_id, database_id, query, exp_msg, mock_hook
    ):
        with pytest.raises(AirflowException) as ctx:
            SpannerQueryDatabaseInstanceOperator(
                project_id=project_id,
                instance_id=instance_id,
                database_id=database_id,
                query=query,
                task_id="id",
            )
        err = ctx.value
        assert f"The required parameter '{exp_msg}' is empty" in str(err)
        mock_hook.assert_not_called()
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_query_dml(self, mock_hook):
        mock_hook.return_value.execute_dml.return_value = None
        op = SpannerQueryDatabaseInstanceOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            query=INSERT_QUERY,
            task_id="id",
        )
        op.execute(None)
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.execute_dml.assert_called_once_with(
            project_id=PROJECT_ID, instance_id=INSTANCE_ID, database_id=DB_ID, queries=[INSERT_QUERY]
        )
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_instance_query_dml_list(self, mock_hook):
        # A list of queries is forwarded as-is.
        mock_hook.return_value.execute_dml.return_value = None
        op = SpannerQueryDatabaseInstanceOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            query=[INSERT_QUERY, INSERT_QUERY_2],
            task_id="id",
        )
        op.execute(None)
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.execute_dml.assert_called_once_with(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            queries=[INSERT_QUERY, INSERT_QUERY_2],
        )
    # --- database deploy (create) ---
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_database_create(self, mock_hook):
        mock_hook.return_value.get_database.return_value = None
        op = SpannerDeployDatabaseInstanceOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            ddl_statements=DDL_STATEMENTS,
            task_id="id",
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.create_database.assert_called_once_with(
            project_id=PROJECT_ID, instance_id=INSTANCE_ID, database_id=DB_ID, ddl_statements=DDL_STATEMENTS
        )
        mock_hook.return_value.update_database.assert_not_called()
        assert result
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_database_create_missing_project_id(self, mock_hook):
        mock_hook.return_value.get_database.return_value = None
        op = SpannerDeployDatabaseInstanceOperator(
            instance_id=INSTANCE_ID, database_id=DB_ID, ddl_statements=DDL_STATEMENTS, task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.create_database.assert_called_once_with(
            project_id=None, instance_id=INSTANCE_ID, database_id=DB_ID, ddl_statements=DDL_STATEMENTS
        )
        mock_hook.return_value.update_database.assert_not_called()
        assert result
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_database_create_with_pre_existing_db(self, mock_hook):
        # Existing database -> deploy is a no-op but still succeeds.
        mock_hook.return_value.get_database.return_value = {"name": DB_ID}
        op = SpannerDeployDatabaseInstanceOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            ddl_statements=DDL_STATEMENTS,
            task_id="id",
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.create_database.assert_not_called()
        mock_hook.return_value.update_database.assert_not_called()
        assert result
    @parameterized.expand(
        [
            ("", INSTANCE_ID, DB_ID, DDL_STATEMENTS, 'project_id'),
            (PROJECT_ID, "", DB_ID, DDL_STATEMENTS, 'instance_id'),
            (PROJECT_ID, INSTANCE_ID, "", DDL_STATEMENTS, 'database_id'),
        ]
    )
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_database_create_ex_if_param_missing(
        self, project_id, instance_id, database_id, ddl_statements, exp_msg, mock_hook
    ):
        with pytest.raises(AirflowException) as ctx:
            SpannerDeployDatabaseInstanceOperator(
                project_id=project_id,
                instance_id=instance_id,
                database_id=database_id,
                ddl_statements=ddl_statements,
                task_id="id",
            )
        err = ctx.value
        assert f"The required parameter '{exp_msg}' is empty" in str(err)
        mock_hook.assert_not_called()
    # --- database update (DDL) ---
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_database_update(self, mock_hook):
        mock_hook.return_value.get_database.return_value = {"name": DB_ID}
        op = SpannerUpdateDatabaseInstanceOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            ddl_statements=DDL_STATEMENTS,
            task_id="id",
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.update_database.assert_called_once_with(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            ddl_statements=DDL_STATEMENTS,
            operation_id=None,
        )
        assert result
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_database_update_missing_project_id(self, mock_hook):
        mock_hook.return_value.get_database.return_value = {"name": DB_ID}
        op = SpannerUpdateDatabaseInstanceOperator(
            instance_id=INSTANCE_ID, database_id=DB_ID, ddl_statements=DDL_STATEMENTS, task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.update_database.assert_called_once_with(
            project_id=None,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            ddl_statements=DDL_STATEMENTS,
            operation_id=None,
        )
        assert result
    @parameterized.expand(
        [
            ("", INSTANCE_ID, DB_ID, DDL_STATEMENTS, 'project_id'),
            (PROJECT_ID, "", DB_ID, DDL_STATEMENTS, 'instance_id'),
            (PROJECT_ID, INSTANCE_ID, "", DDL_STATEMENTS, 'database_id'),
        ]
    )
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_database_update_ex_if_param_missing(
        self, project_id, instance_id, database_id, ddl_statements, exp_msg, mock_hook
    ):
        with pytest.raises(AirflowException) as ctx:
            SpannerUpdateDatabaseInstanceOperator(
                project_id=project_id,
                instance_id=instance_id,
                database_id=database_id,
                ddl_statements=ddl_statements,
                task_id="id",
            )
        err = ctx.value
        assert f"The required parameter '{exp_msg}' is empty" in str(err)
        mock_hook.assert_not_called()
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_database_update_ex_if_database_not_exist(self, mock_hook):
        # Updating a missing database must raise, not silently pass.
        mock_hook.return_value.get_database.return_value = None
        with pytest.raises(AirflowException) as ctx:
            op = SpannerUpdateDatabaseInstanceOperator(
                project_id=PROJECT_ID,
                instance_id=INSTANCE_ID,
                database_id=DB_ID,
                ddl_statements=DDL_STATEMENTS,
                task_id="id",
            )
            op.execute(None)
        err = ctx.value
        assert (
            "The Cloud Spanner database 'db1' in project 'project-id' and "
            "instance 'instance-id' is missing" in str(err)
        )
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
    # --- database delete ---
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_database_delete(self, mock_hook):
        mock_hook.return_value.get_database.return_value = {"name": DB_ID}
        op = SpannerDeleteDatabaseInstanceOperator(
            project_id=PROJECT_ID, instance_id=INSTANCE_ID, database_id=DB_ID, task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.delete_database.assert_called_once_with(
            project_id=PROJECT_ID, instance_id=INSTANCE_ID, database_id=DB_ID
        )
        assert result
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_database_delete_missing_project_id(self, mock_hook):
        mock_hook.return_value.get_database.return_value = {"name": DB_ID}
        op = SpannerDeleteDatabaseInstanceOperator(instance_id=INSTANCE_ID, database_id=DB_ID, task_id="id")
        result = op.execute(None)
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.delete_database.assert_called_once_with(
            project_id=None, instance_id=INSTANCE_ID, database_id=DB_ID
        )
        assert result
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_database_delete_exits_and_succeeds_if_database_does_not_exist(self, mock_hook):
        mock_hook.return_value.get_database.return_value = None
        op = SpannerDeleteDatabaseInstanceOperator(
            project_id=PROJECT_ID, instance_id=INSTANCE_ID, database_id=DB_ID, task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(
            gcp_conn_id="google_cloud_default",
            impersonation_chain=None,
        )
        mock_hook.return_value.delete_database.assert_not_called()
        assert result
    @parameterized.expand(
        [
            ("", INSTANCE_ID, DB_ID, DDL_STATEMENTS, 'project_id'),
            (PROJECT_ID, "", DB_ID, DDL_STATEMENTS, 'instance_id'),
            (PROJECT_ID, INSTANCE_ID, "", DDL_STATEMENTS, 'database_id'),
        ]
    )
    @mock.patch("airflow.providers.google.cloud.operators.spanner.SpannerHook")
    def test_database_delete_ex_if_param_missing(
        self, project_id, instance_id, database_id, ddl_statements, exp_msg, mock_hook
    ):
        with pytest.raises(AirflowException) as ctx:
            SpannerDeleteDatabaseInstanceOperator(
                project_id=project_id,
                instance_id=instance_id,
                database_id=database_id,
                ddl_statements=ddl_statements,
                task_id="id",
            )
        err = ctx.value
        assert f"The required parameter '{exp_msg}' is empty" in str(err)
        mock_hook.assert_not_called()
|
{
"content_hash": "54be0280c2d5ac29430e075a6c7c37be",
"timestamp": "",
"source": "github",
"line_count": 562,
"max_line_length": 108,
"avg_line_length": 41.855871886120994,
"alnum_prop": 0.6218169451175445,
"repo_name": "sekikn/incubator-airflow",
"id": "6347fd24be4b90bea3ed1a5278a6ceaba8985535",
"size": "24310",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/providers/google/cloud/operators/test_spanner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
import mock
from nova import objects
from nova.scheduler.filters import core_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestAggregateCoreFilter(test.NoDBTestCase):
    """Tests for AggregateCoreFilter, which reads ``cpu_allocation_ratio``
    from host-aggregate metadata (mocked via aggregate_values_from_key)
    and falls back to the host's own ratio when no usable value exists.
    """
    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_core_filter_value_error(self, agg_mock):
        """A non-numeric aggregate value falls back to the host's ratio."""
        self.filt_cls = core_filter.AggregateCoreFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx, flavor=objects.Flavor(vcpus=1))
        host = fakes.FakeHostState('host1', 'node1',
                                   {'vcpus_total': 4, 'vcpus_used': 7,
                                    'cpu_allocation_ratio': 2})
        agg_mock.return_value = set(['XXX'])
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        agg_mock.assert_called_once_with(host, 'cpu_allocation_ratio')
        # Limit comes from vcpus_total * host ratio, not the bad 'XXX'.
        self.assertEqual(4 * 2, host.limits['vcpu'])
    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_core_filter_default_value(self, agg_mock):
        """With no aggregate value the host ratio applies; with one, it wins."""
        self.filt_cls = core_filter.AggregateCoreFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx, flavor=objects.Flavor(vcpus=1))
        host = fakes.FakeHostState('host1', 'node1',
                                   {'vcpus_total': 4, 'vcpus_used': 8,
                                    'cpu_allocation_ratio': 2})
        agg_mock.return_value = set([])
        # False: fallback to default flag w/o aggregates
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
        agg_mock.assert_called_once_with(host, 'cpu_allocation_ratio')
        # True: use ratio from aggregates
        agg_mock.return_value = set(['3'])
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
        self.assertEqual(4 * 3, host.limits['vcpu'])
    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_core_filter_conflict_values(self, agg_mock):
        """When aggregates disagree, the smallest ratio is used."""
        self.filt_cls = core_filter.AggregateCoreFilter()
        spec_obj = objects.RequestSpec(
            context=mock.sentinel.ctx, flavor=objects.Flavor(vcpus=1))
        host = fakes.FakeHostState('host1', 'node1',
                                   {'vcpus_total': 4, 'vcpus_used': 8,
                                    'cpu_allocation_ratio': 1})
        agg_mock.return_value = set(['2', '3'])
        # use the minimum ratio from aggregates
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
        self.assertEqual(4 * 2, host.limits['vcpu'])
|
{
"content_hash": "a3bc41d89345bb7af42e9d4ce730658f",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 73,
"avg_line_length": 47.63461538461539,
"alnum_prop": 0.6459426725878078,
"repo_name": "rahulunair/nova",
"id": "05ac1cf984a8fc16c4662e368dc6af5c1b4583d2",
"size": "3050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/scheduler/filters/test_core_filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "22804450"
},
{
"name": "Shell",
"bytes": "41649"
},
{
"name": "Smarty",
"bytes": "472764"
}
],
"symlink_target": ""
}
|
import unittest
from SimPEG import *
import simpegDCIP as DC
class DCProblemTests(unittest.TestCase):
    """Derivative/adjoint checks for the SimPEG DC resistivity problem.

    NOTE: this module is Python 2 (``print`` statement in test_adjoint).
    """
    def setUp(self):
        # Build a canned Wenner-array example and synthetic data, then a
        # full inversion object graph; individual tests poke at pieces.
        mesh, survey, problem = DC.Examples.WennerArray.example()
        mSynth = np.ones(mesh.nC)
        survey.makeSyntheticData(mSynth)
        # Now set up the problem to do some minimization
        dmis = DataMisfit.l2_DataMisfit(survey)
        reg = Regularization.Tikhonov(mesh)
        opt = Optimization.InexactGaussNewton(maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6)
        invProb = InvProblem.BaseInvProblem(dmis, reg, opt, beta=1e4)
        inv = Inversion.BaseInversion(invProb)
        self.inv = inv
        self.reg = reg
        self.p = problem
        self.mesh = mesh
        self.m0 = mSynth
        self.survey = survey
        self.dmis = dmis
    def test_misfit(self):
        # Finite-difference check of dpred against the Jacobian-vector product.
        derChk = lambda m: [self.survey.dpred(m), lambda mx: self.p.Jvec(self.m0, mx)]
        passed = Tests.checkDerivative(derChk, self.m0, plotIt=False)
        self.assertTrue(passed)
    def test_adjoint(self):
        # Adjoint Test: w.(J v) must equal v.(J^T w).
        # NOTE(review): ``u`` is assigned but never used below.
        u = np.random.rand(self.mesh.nC*self.survey.nSrc)
        v = np.random.rand(self.mesh.nC)
        w = np.random.rand(self.survey.dobs.shape[0])
        wtJv = w.dot(self.p.Jvec(self.m0, v))
        vtJtw = v.dot(self.p.Jtvec(self.m0, w))
        passed = np.abs(wtJv - vtJtw) < 1e-10
        print 'Adjoint Test', np.abs(wtJv - vtJtw), passed
        self.assertTrue(passed)
    def test_dataObj(self):
        # Derivative check for the data-misfit functional.
        derChk = lambda m: [self.dmis.eval(m), self.dmis.evalDeriv(m)]
        passed = Tests.checkDerivative(derChk, self.m0, plotIt=False)
        self.assertTrue(passed)
    def test_massMatrices(self):
        # Derivative check of the conductivity mass matrix w.r.t. the model.
        Gu = np.random.rand(self.mesh.nF)
        def derChk(m):
            self.p.curModel = m
            return [self.p.Msig * Gu, self.p.dMdsig(Gu)]
        passed = Tests.checkDerivative(derChk, self.m0, plotIt=False)
        self.assertTrue(passed)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "2bc316d3804c0b11570cdeebdb5c784c",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 117,
"avg_line_length": 32.15873015873016,
"alnum_prop": 0.6174728529121422,
"repo_name": "simpeg/simpegdc",
"id": "97e92383a31b19f4e3d57eb685141ea8d6c17d6a",
"size": "2026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simpegDCIP/Tests/test_forward_DCproblem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1194633"
},
{
"name": "Python",
"bytes": "48343"
}
],
"symlink_target": ""
}
|
import argparse
import json
import logging
import subprocess
import yaml
import re
import collections
import os
import time
import requests
import urllib
from distutils.spawn import find_executable
import teuthology
from . import misc
from . import provision
from .config import config
from .lockstatus import get_status
log = logging.getLogger(__name__)
# Don't need to see connection pool INFO messages
logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
logging.WARNING)
def is_vpm(name):
    """Return True if *name* refers to a vpm (virtual machine) host."""
    # PEP 8 (E731): a named lambda is better written as a def.
    return 'vpm' in name
def get_distro_from_downburst():
    """
    Return a table of valid distros.
    If downburst is in path use it. If either downburst is unavailable,
    or if downburst is unable to produce a json list, then use a default
    table.
    """
    default_table = {u'rhel_minimal': [u'6.4', u'6.5'],
                     u'fedora': [u'17', u'18', u'19', u'20'],
                     u'centos': [u'6.3', u'6.4', u'6.5', u'7.0'],
                     u'opensuse': [u'12.2'],
                     u'rhel': [u'6.3', u'6.4', u'6.5', u'7.0', u'7beta'],
                     u'centos_minimal': [u'6.4', u'6.5'],
                     u'ubuntu': [u'8.04(hardy)', u'9.10(karmic)',
                                 u'10.04(lucid)', u'10.10(maverick)',
                                 u'11.04(natty)', u'11.10(oneiric)',
                                 u'12.04(precise)', u'12.10(quantal)',
                                 u'13.04(raring)', u'13.10(saucy)',
                                 u'14.04(trusty)', u'utopic(utopic)'],
                     u'sles': [u'11-sp2'],
                     u'debian': [u'6.0', u'7.0']}
    executable_cmd = find_executable('downburst')
    if executable_cmd:
        try:
            output = subprocess.check_output([executable_cmd, 'list-json'])
            return json.loads(output)
        except (subprocess.CalledProcessError, OSError):
            log.exception("Error calling downburst!")
    else:
        log.warn("Downburst not found!")
    # Either downburst is missing or it failed; fall back to the table.
    log.info('Using default values for supported os_type/os_version')
    return default_table
def vps_version_or_type_valid(machine_type, os_type, os_version):
    """
    Check os-type and os-version parameters when locking a vps.
    Os-type will always be set (defaults to ubuntu).
    In the case where downburst does not handle list-json (an older version
    of downburst, for instance), a message is printed and this checking
    is skipped (so that this code should behave as it did before this
    check was added).
    """
    if machine_type != 'vps':
        # Only vps machines carry downburst-provisioned distros.
        return True
    if os_type is None or os_version is None:
        # we'll use the defaults provided by provision.create_if_vm
        # later on during provisioning
        return True
    distro_table = get_distro_from_downburst()
    if os_type not in distro_table:
        log.error("os-type '%s' is invalid", os_type)
        return False
    if not validate_distro_version(os_version, distro_table[os_type]):
        log.error("os-version '%s' is invalid", os_version)
        return False
    return True
def validate_distro_version(version, supported_versions):
    """
    Return True if *version* is valid, False otherwise.

    For Ubuntu, supported version values are of the form
    '12.04(precise)', where either the number or the codename is
    acceptable.

    Fixes a defect: the original fell off the end and implicitly
    returned None (instead of False) when no entry matched.
    """
    if version in supported_versions:
        return True
    for entry in supported_versions:
        # Split e.g. '12.04(precise)' into number and codename.
        parts = entry.split('(')
        if len(parts) == 2 and version in (parts[0], parts[1].rstrip(')')):
            return True
    return False
def get_statuses(machines):
    """
    Return lock-server status dicts for the given machines, or for every
    known node when no machines are named.
    """
    if not machines:
        return list_locks()
    statuses = []
    for name in machines:
        canonical = misc.canonicalize_hostname(name)
        status = get_status(canonical)
        if status:
            statuses.append(status)
        else:
            log.error("Lockserver doesn't know about machine: %s" %
                      canonical)
    return statuses
def main(ctx):
    """
    Entry point for the teuthology-lock command line tool.

    Dispatches on the requested operation (--list/--brief/--list-targets,
    --summary, --lock, --unlock, --lock-many, --update) and returns a
    process exit code (0 on success, 1 on any failure).

    :param ctx: parsed command-line arguments (argparse namespace)
    """
    if ctx.verbose:
        teuthology.log.setLevel(logging.DEBUG)
    misc.read_config(ctx)
    ret = 0
    user = ctx.owner
    machines = [misc.canonicalize_hostname(m, user=False)
                for m in ctx.machines]
    machines_to_update = []
    # A --targets YAML file may add additional machines to operate on.
    if ctx.targets:
        try:
            with file(ctx.targets) as f:
                g = yaml.safe_load_all(f)
                for new in g:
                    if 'targets' in new:
                        for t in new['targets'].iterkeys():
                            machines.append(t)
        except IOError as e:
            raise argparse.ArgumentTypeError(str(e))
    # Sanity-check mutually (in)compatible option combinations up front.
    if ctx.f:
        assert ctx.lock or ctx.unlock, \
            '-f is only supported by --lock and --unlock'
    if machines:
        assert ctx.lock or ctx.unlock or ctx.list or ctx.list_targets \
            or ctx.update, \
            'machines cannot be specified with that operation'
    else:
        assert ctx.num_to_lock or ctx.list or ctx.list_targets or \
            ctx.summary or ctx.brief, \
            'machines must be specified for that operation'
    if ctx.all:
        assert ctx.list or ctx.list_targets or ctx.brief, \
            '--all can only be used with --list, --list-targets, and --brief'
        assert ctx.owner is None, \
            '--all and --owner are mutually exclusive'
        assert not machines, \
            '--all and listing specific machines are incompatible'
    if ctx.num_to_lock:
        assert ctx.machine_type, \
            'must specify machine type to lock'
    if ctx.brief or ctx.list or ctx.list_targets:
        assert ctx.desc is None, '--desc does nothing with --list/--brief'
        # we may need to update host keys for vms. Don't do it for
        # every vm; however, update any vms included in the list given
        # to the CLI (machines), or any owned by the specified owner or
        # invoking user if no machines are specified.
        vmachines = []
        statuses = get_statuses(machines)
        owner = ctx.owner or misc.get_user()
        for machine in statuses:
            if machine['is_vm'] and machine['locked'] and \
                    (machines or machine['locked_by'] == owner):
                vmachines.append(machine['name'])
        if vmachines:
            log.info("updating host keys for %s", ' '.join(sorted(vmachines)))
            do_update_keys(vmachines)
            # get statuses again to refresh any updated keys
            statuses = get_statuses(machines)
        if statuses:
            # Apply each requested filter in turn.
            if ctx.machine_type:
                statuses = [_status for _status in statuses
                            if _status['machine_type'] == ctx.machine_type]
            if not machines and ctx.owner is None and not ctx.all:
                ctx.owner = misc.get_user()
            if ctx.owner is not None:
                statuses = [_status for _status in statuses
                            if _status['locked_by'] == ctx.owner]
            if ctx.status is not None:
                statuses = [_status for _status in statuses
                            if _status['up'] == (ctx.status == 'up')]
            if ctx.locked is not None:
                statuses = [_status for _status in statuses
                            if _status['locked'] == (ctx.locked == 'true')]
            if ctx.desc is not None:
                statuses = [_status for _status in statuses
                            if _status['description'] == ctx.desc]
            if ctx.desc_pattern is not None:
                statuses = [_status for _status in statuses
                            if _status['description'] is not None and
                            _status['description'].find(ctx.desc_pattern) >= 0]
            # When listing, only show the vm_host's name, not every detail
            for s in statuses:
                if not s.get('is_vm', False):
                    continue
                vm_host_name = s.get('vm_host', dict())['name']
                if vm_host_name:
                    s['vm_host'] = vm_host_name
            if ctx.list:
                print json.dumps(statuses, indent=4)
            elif ctx.brief:
                for s in sorted(statuses, key=lambda s: s.get('name')):
                    locked = "un" if s['locked'] == 0 else "  "
                    mo = re.match('\w+@(\w+?)\..*', s['name'])
                    host = mo.group(1) if mo else s['name']
                    print '{host} {locked}locked {owner} "{desc}"'.format(
                        locked=locked, host=host,
                        owner=s['locked_by'], desc=s['description'])
            else:
                # --list-targets: emit a YAML 'targets' fragment.
                frag = {'targets': {}}
                for f in statuses:
                    frag['targets'][f['name']] = f['ssh_pub_key']
                print yaml.safe_dump(frag, default_flow_style=False)
        else:
            log.error('error retrieving lock statuses')
            ret = 1
    elif ctx.summary:
        do_summary(ctx)
        return 0
    elif ctx.lock:
        if not vps_version_or_type_valid(ctx.machine_type, ctx.os_type,
                                         ctx.os_version):
            log.error('Invalid os-type or version detected -- lock failed')
            return 1
        for machine in machines:
            if not lock_one(machine, user, ctx.desc):
                ret = 1
                # Without -f, stop at the first failure.
                if not ctx.f:
                    return ret
            else:
                machines_to_update.append(machine)
                provision.create_if_vm(ctx, machine)
    elif ctx.unlock:
        if ctx.owner is None and user is None:
            user = misc.get_user()
        # If none of them are vpm, do them all in one shot
        if not filter(is_vpm, machines):
            res = unlock_many(machines, user)
            return 0 if res else 1
        for machine in machines:
            if not unlock_one(ctx, machine, user):
                ret = 1
                if not ctx.f:
                    return ret
            else:
                machines_to_update.append(machine)
    elif ctx.num_to_lock:
        result = lock_many(ctx, ctx.num_to_lock, ctx.machine_type, user,
                           ctx.desc, ctx.os_type, ctx.os_version)
        if not result:
            ret = 1
        else:
            machines_to_update = result.keys()
            if ctx.machine_type == 'vps':
                shortnames = ' '.join(
                    [misc.decanonicalize_hostname(name) for name in
                     result.keys()]
                )
                if len(result) < ctx.num_to_lock:
                    # Partial lock counts as failure: release what we got.
                    log.error("Locking failed.")
                    for machn in result:
                        unlock_one(ctx, machn)
                    ret = 1
                else:
                    log.info("Successfully Locked:\n%s\n" % shortnames)
                    log.info(
                        "Unable to display keys at this time (virtual " +
                        "machines are booting).")
                    log.info(
                        "Please run teuthology-lock --list-targets %s once " +
                        "these machines come up.",
                        shortnames)
            else:
                print yaml.safe_dump(
                    dict(targets=result),
                    default_flow_style=False)
    elif ctx.update:
        assert ctx.desc is not None or ctx.status is not None, \
            'you must specify description or status to update'
        assert ctx.owner is None, 'only description and status may be updated'
        machines_to_update = machines
    # Push any description/status changes to the machines touched above.
    if ctx.desc is not None or ctx.status is not None:
        for machine in machines_to_update:
            update_lock(machine, ctx.desc, ctx.status)
    return ret
def lock_many(ctx, num, machine_type, user=None, description=None,
              os_type=None, os_version=None, arch=None):
    """
    Ask the lock server to lock `num` machines of `machine_type` at once.

    :param ctx: command-line/config context (used for VM provisioning)
    :param num: number of machines to lock
    :param machine_type: possibly comma-separated machine type string
    :param user: lock owner; defaults to the invoking user
    :param description: description to record on the locks
    :param os_type: requested OS type (bare metal query / vps creation)
    :param os_version: requested OS version
    :param arch: requested architecture (bare metal only)
    :returns: dict of {canonical_name: ssh_pub_key} on success; None when
              validation fails; [] when no request succeeded
    """
    if user is None:
        user = misc.get_user()
    # NOTE(review): this validates ctx.machine_type rather than the
    # machine_type parameter -- confirm callers always pass the same value.
    if not vps_version_or_type_valid(ctx.machine_type, os_type, os_version):
        log.error('Invalid os-type or version detected -- lock failed')
        return
    # In the for loop below we can safely query for all bare-metal machine_type
    # values at once. So, if we're being asked for 'plana,mira,burnupi', do it
    # all in one shot. If we are passed 'plana,mira,burnupi,vps', do one query
    # for 'plana,mira,burnupi' and one for 'vps'
    machine_types_list = misc.get_multi_machine_types(machine_type)
    if machine_types_list == ['vps']:
        machine_types = machine_types_list
    elif 'vps' in machine_types_list:
        machine_types_non_vps = list(machine_types_list)
        machine_types_non_vps.remove('vps')
        machine_types_non_vps = '|'.join(machine_types_non_vps)
        machine_types = [machine_types_non_vps, 'vps']
    else:
        machine_types_str = '|'.join(machine_types_list)
        machine_types = [machine_types_str, ]
    for machine_type in machine_types:
        uri = os.path.join(config.lock_server, 'nodes', 'lock_many', '')
        data = dict(
            locked_by=user,
            count=num,
            machine_type=machine_type,
            description=description,
        )
        # Only query for os_type/os_version if non-vps, since in that case we
        # just create them.
        if machine_type != 'vps':
            if os_type:
                data['os_type'] = os_type
            if os_version:
                data['os_version'] = os_version
        if arch:
            data['arch'] = arch
        log.debug("lock_many request: %s", repr(data))
        response = requests.post(
            uri,
            data=json.dumps(data),
            headers={'content-type': 'application/json'},
        )
        if response.ok:
            machines = {misc.canonicalize_hostname(machine['name']):
                        machine['ssh_pub_key'] for machine in response.json()}
            log.debug('locked {machines}'.format(
                machines=', '.join(machines.keys())))
            if machine_type == 'vps':
                # VMs must actually be created; drop (and unlock) any that
                # downburst fails to bring up.
                ok_machs = {}
                for machine in machines:
                    if provision.create_if_vm(ctx, machine):
                        ok_machs[machine] = machines[machine]
                    else:
                        log.error('Unable to create virtual machine: %s',
                                  machine)
                        unlock_one(ctx, machine)
                return ok_machs
            # First successful lock request wins; remaining types unqueried.
            return machines
        elif response.status_code == 503:
            log.error('Insufficient nodes available to lock %d %s nodes.',
                      num, machine_type)
            log.error(response.text)
        else:
            log.error('Could not lock %d %s nodes, reason: unknown.',
                      num, machine_type)
    return []
def lock_one(name, user=None, description=None):
    """
    Lock a single machine on the lock server.

    :returns: the requests Response object (truthy when the lock succeeded)
    """
    name = misc.canonicalize_hostname(name, user=None)
    user = misc.get_user() if user is None else user
    payload = dict(
        name=name,
        locked=True,
        locked_by=user,
        description=description,
    )
    uri = os.path.join(config.lock_server, 'nodes', name, 'lock', '')
    response = requests.put(uri, json.dumps(payload))
    if response.ok:
        log.debug('locked %s as %s', name, user)
        return response
    # Failure: try to extract a server-supplied reason, else the HTTP code.
    try:
        reason = response.json().get('message')
    except ValueError:
        reason = str(response.status_code)
    log.error('failed to lock {node}. reason: {reason}'.format(
        node=name, reason=reason))
    return response
def unlock_many(names, user):
    """
    Unlock several machines with a single lock-server request.

    :returns: True when the server accepted the request, False otherwise
    """
    names = [misc.canonicalize_hostname(name, user=None) for name in names]
    uri = os.path.join(config.lock_server, 'nodes', 'unlock_many', '')
    response = requests.post(
        uri,
        data=json.dumps(dict(locked_by=user, names=names)),
        headers={'content-type': 'application/json'},
    )
    if response.ok:
        log.debug("Unlocked: %s", ', '.join(names))
    else:
        log.error("Failed to unlock: %s", ', '.join(names))
    return response.ok
def unlock_one(ctx, name, user=None):
    """
    Destroy the machine's VM (if it is one) and release its lock.

    :returns: True when the lock server accepted the unlock, False otherwise
    """
    if user is None:
        user = misc.get_user()
    name = misc.canonicalize_hostname(name, user=None)
    # Destruction failure is logged but does not stop the unlock attempt.
    if not provision.destroy_if_vm(ctx, name, user):
        log.error('downburst destroy failed for %s', name)
    payload = dict(name=name, locked=False, locked_by=user, description=None)
    uri = os.path.join(config.lock_server, 'nodes', name, 'lock', '')
    response = requests.put(uri, json.dumps(payload))
    if response.ok:
        log.info('unlocked %s', name)
        return response.ok
    try:
        reason = response.json().get('message')
    except ValueError:
        reason = str(response.status_code)
    log.error('failed to unlock {node}. reason: {reason}'.format(
        node=name, reason=reason))
    return response.ok
def list_locks(keyed_by_name=False, **kwargs):
    """
    Fetch the node list from the lock server.

    :param keyed_by_name: if True, return a dict keyed by node name instead
                          of a list of node dicts
    :param kwargs: extra query parameters forwarded to the lock server;
                   'machine_type' may be a comma-separated list
    :returns: list (or name-keyed dict) of node dicts; empty dict on failure
    """
    uri = os.path.join(config.lock_server, 'nodes', '')
    if kwargs:
        if 'machine_type' in kwargs:
            # The lock server expects '|'-separated alternatives.
            kwargs['machine_type'] = kwargs['machine_type'].replace(',','|')
        uri += '?' + urllib.urlencode(kwargs)
    try:
        response = requests.get(uri)
    except requests.ConnectionError:
        success = False
        log.exception("Could not contact lock server: %s", config.lock_server)
    else:
        success = response.ok
    if success:
        if not keyed_by_name:
            return response.json()
        else:
            return {node['name']: node
                    for node in response.json()}
    return dict()
def find_stale_locks(owner=None):
    """
    Return a list of node dicts corresponding to nodes that were locked to run
    a job, but the job is no longer running. The purpose of this is to enable
    us to nuke nodes that were left locked due to e.g. infrastructure failures
    and return them to the pool.

    :param owner: If non-None, return nodes locked by owner. Default is None.
    """
    def might_be_stale(node_dict):
        """
        Answer the question: "might this be a stale lock?"
        The answer is yes if:
            It is locked
            It has a non-null description containing multiple '/' characters
        ... because we really want "nodes that were locked for a particular job
        and are still locked" and the above is currently the best way to guess.
        """
        if (node_dict['locked'] is True and
            node_dict['description'] is not None and
                node_dict['description'].count('/') > 1):
            return True
        return False
    # Which nodes are locked for jobs?
    nodes = list_locks()
    if owner is not None:
        nodes = [node for node in nodes if node['locked_by'] == owner]
    nodes = filter(might_be_stale, nodes)
    # What jobs are currently running?
    url = os.path.join(config.results_server, 'runs', 'status', 'running', '')
    resp = requests.get(url)
    running_runs = resp.json()
    running_jobs = []
    for run in running_runs:
        url = os.path.join(run['href'][0], 'jobs',
                           '?status=running&fields=name,job_id,targets,href')
        resp = requests.get(url)
        jobs = resp.json()
        running_jobs.extend(jobs)
    def node_matches_job(node, job):
        """
        Is or was this node used by this job?
        """
        job_str = '/'.join((job['name'], job['job_id']))
        if node['description'].endswith(job_str):
            return True
        elif job['targets'] and node['name'] in job['targets'].keys():
            return True
        return False
    result = list()
    # Here we build the list of nodes that are locked, for a job (as opposed
    # to being locked manually for random monkeying), where the job is not
    # running
    for node in nodes:
        matched = False
        for job in running_jobs:
            if node_matches_job(node, job):
                matched = True
                break
        if matched:
            continue
        result.append(node)
    return result
def update_lock(name, description=None, status=None, ssh_pub_key=None):
    """
    Update a node's description, up/down status and/or SSH public key on the
    lock server.

    :param name: hostname (canonicalized internally)
    :param description: new description, if any
    :param status: 'up' or 'down'; stored as a boolean 'up' field
    :param ssh_pub_key: new SSH public key, if any
    :returns: True when nothing needed updating or the PUT succeeded
    """
    name = misc.canonicalize_hostname(name, user=None)
    # Only do VM specific things (key lookup) if we are not
    # Just updating the status (like marking down).
    if not status:
        status_info = get_status(name)
        if status_info['is_vm']:
            # Poll until the VM answers ssh-keyscan.
            # NOTE(review): this loops forever if the host never responds,
            # and the scanned key is never merged into `updated` below --
            # presumably the loop only waits for sshd to come up; confirm.
            ssh_key = None
            while not ssh_key:
                time.sleep(10)
                ssh_key = ssh_keyscan([name])
    updated = {}
    if description is not None:
        updated['description'] = description
    if status is not None:
        updated['up'] = (status == 'up')
    if ssh_pub_key is not None:
        updated['ssh_pub_key'] = ssh_pub_key
    if updated:
        uri = os.path.join(config.lock_server, 'nodes', name, '')
        response = requests.put(
            uri,
            json.dumps(updated))
        return response.ok
    # Nothing to change counts as success.
    return True
def update_inventory(node_dict):
    """
    Like update_lock(), but takes a dict and doesn't try to do anything smart
    by itself

    :param node_dict: node fields to store; must contain 'name'
    :raises ValueError: when no name is given
    :returns: True on success, None when no lock server is configured
    """
    name = node_dict.get('name')
    if not name:
        raise ValueError("must specify name")
    if not config.lock_server:
        return
    headers = {'content-type': 'application/json'}
    body = json.dumps(node_dict)
    uri = os.path.join(config.lock_server, 'nodes', name, '')
    log.info("Updating %s on lock server", name)
    response = requests.put(uri, body, headers=headers)
    if response.status_code == 404:
        # Unknown node: fall back to creating it.
        log.info("Creating new node %s on lock server", name)
        uri = os.path.join(config.lock_server, 'nodes', '')
        response = requests.post(uri, body, headers=headers)
    if not response.ok:
        log.error("Node update/creation failed for %s: %s",
                  name, response.text)
    return response.ok
def ssh_keyscan(hostnames):
    """
    Fetch the SSH public key of one or more hosts

    :param hostnames: list of hostnames; a bare string is rejected
    :raises TypeError: when hostnames is a single string
    :returns: dict mapping hostname -> rsa public key
    """
    if isinstance(hostnames, basestring):
        raise TypeError("'hostnames' must be a list")
    hostnames = [misc.canonicalize_hostname(name, user=None) for name in
                 hostnames]
    args = ['ssh-keyscan', '-t', 'rsa'] + hostnames
    p = subprocess.Popen(
        args=args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # NOTE(review): wait() before draining the pipes can deadlock if
    # ssh-keyscan emits more than a pipe buffer of output; communicate()
    # would be safer. In practice the output here is small.
    p.wait()
    keys_dict = dict()
    # ssh-keyscan writes per-host comment lines to stderr prefixed with '#';
    # anything else on stderr is a real error.
    for line in p.stderr.readlines():
        if not line.startswith('#'):
            log.error(line)
    for line in p.stdout.readlines():
        host, key = line.strip().split(' ', 1)
        keys_dict[host] = key
    return keys_dict
def updatekeys(args):
    """
    Update the SSH host keys recorded on the lock server.

    The machines to scan come from, in priority order: --all (every node
    the lock server knows about), explicit <machine> arguments, or the
    'targets' sections of a --targets YAML file.

    :param args: docopt-style dict of command-line arguments
    :returns: exit code from do_update_keys()
    """
    loglevel = logging.DEBUG if args['--verbose'] else logging.INFO
    logging.basicConfig(
        level=loglevel,
    )
    all_ = args['--all']
    # Always start from a defined list; the original code left `machines`
    # unbound when a --targets file contained no documents.
    machines = []
    if not all_:
        if args['<machine>']:
            machines = [misc.canonicalize_hostname(m, user=None)
                        for m in args['<machine>']]
        elif args['--targets']:
            targets = args['--targets']
            with file(targets) as f:
                docs = yaml.safe_load_all(f)
                for doc in docs:
                    # Accumulate targets across every YAML document; the
                    # original rebound `machines` per document, so only the
                    # last document's targets were kept.
                    machines.extend(doc.get('targets', dict()).iterkeys())
    return do_update_keys(machines, all_)
def do_update_keys(machines, all_=False):
    """
    Scan SSH keys for the given machines (or every known node when `all_`
    is set) and push any changed keys to the lock server.
    """
    reference = list_locks(keyed_by_name=True)
    targets = reference.keys() if all_ else machines
    return push_new_keys(ssh_keyscan(targets), reference)
def push_new_keys(keys_dict, reference):
    """
    Compare freshly-scanned keys against the lock server's records and
    update any entries whose key has changed.

    :returns: 0 when every update succeeded, 1 otherwise
    """
    ret = 0
    for hostname, pubkey in keys_dict.iteritems():
        log.info('Checking %s', hostname)
        if reference[hostname]['ssh_pub_key'] == pubkey:
            continue
        log.info('New key found. Updating...')
        if not update_lock(hostname, ssh_pub_key=pubkey):
            log.error('failed to update %s!', hostname)
            ret = 1
    return ret
def do_summary(ctx):
    """
    Print a table summarizing lock counts and up counts per owner and
    machine type.
    """
    # Accumulator: [count, up-count, machine_type] per key.
    lockd = collections.defaultdict(lambda: [0, 0, 'unknown'])
    if ctx.machine_type:
        locks = list_locks(machine_type=ctx.machine_type)
    else:
        locks = list_locks()
    for l in locks:
        # NOTE: operator precedence makes `who` a tuple of
        # (owner-or-'(free)', machine_type) -- the trailing
        # `, l['machine_type']` binds outside the conditional expression,
        # so counts are grouped by owner AND type.
        who = l['locked_by'] if l['locked'] == 1 \
            else '(free)', l['machine_type']
        lockd[who][0] += 1
        lockd[who][1] += 1 if l['up'] else 0
        lockd[who][2] = l['machine_type']
    # Sort by machine type, then by count.
    locks = sorted([p for p in lockd.iteritems()
                    ], key=lambda sort: (sort[1][2], sort[1][0]))
    total_count, total_up = 0, 0
    print "TYPE COUNT UP OWNER"
    for (owner, (count, upcount, machinetype)) in locks:
        # if machinetype == spectype:
        # owner is the (owner, machine_type) tuple built above.
        print "{machinetype:8s} {count:3d} {up:3d} {owner}".format(
            count=count, up=upcount, owner=owner[0],
            machinetype=machinetype)
        total_count += count
        total_up += upcount
    print " --- ---"
    print "{cnt:12d} {up:3d}".format(cnt=total_count, up=total_up)
|
{
"content_hash": "0d9ee34c2c41f87cc0366e16df0e4d71",
"timestamp": "",
"source": "github",
"line_count": 698,
"max_line_length": 79,
"avg_line_length": 36.5214899713467,
"alnum_prop": 0.5517024949003609,
"repo_name": "tchaikov/teuthology",
"id": "02a83e4434586dda1abcfb40c23513f93de758b2",
"size": "25492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teuthology/lock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6817"
},
{
"name": "Python",
"bytes": "640228"
},
{
"name": "Shell",
"bytes": "9589"
}
],
"symlink_target": ""
}
|
import unittest
import happy
class TestSquare(unittest.TestCase):
    """Checks the precomputed table of squares exposed by the happy module."""

    def setUp(self):
        self.squares = happy.squares

    def test_accuracy(self):
        ''' calculates the squares correctly '''
        for n in xrange(10):
            key, expected = str(n), n * n
            self.assertEqual(self.squares[key], expected)
class TestBin_search(unittest.TestCase):
    """Checks for happy.bin_search over a small sorted list."""

    def setUp(self):
        self.lst = range(1, 10)
        self.length = len(self.lst) - 1

    def test_find(self):
        ''' finding elements in the list '''
        # Walk the list back to front; every member must be found.
        for n in self.lst[::-1]:
            self.assertTrue(happy.bin_search(n, self.lst, 0, self.length))

    def test_not_find(self):
        ''' not finding elements not in the list '''
        for missing in range(11, 21):
            self.assertFalse(
                happy.bin_search(missing, self.lst, 0, self.length))

    def test_edge(self):
        ''' index edge cases for binary search '''
        for hi in (1, 0):
            self.assertTrue(happy.bin_search(1, self.lst, 0, hi))
class TestIs_happy(unittest.TestCase):
    """Spot checks for happy.is_happy."""
    def test(self):
        ''' testing some easy cases for is_happy '''
        self.assertTrue(happy.is_happy(1))
        self.assertFalse(happy.is_happy(2))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "9ae9603a1873b01003f5a6502df1a41b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 75,
"avg_line_length": 31.175,
"alnum_prop": 0.6054530874097834,
"repo_name": "shiandy/happy",
"id": "8e8a5531dff308cd871a2a49aea1fbe064bbf034",
"size": "1247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_happy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4272"
}
],
"symlink_target": ""
}
|
'''
Author: Hans Erik Heggem
Email: hans.erik.heggem@gmail.com
Project: Master's Thesis - Autonomous Inspection Of Wind Blades
Repository: Master's Thesis - CV (Computer Vision)
'''
################### UNIT TEST ########################
import unittest
from Settings.TestData import TestData
from TestUnits.Test_main import Test_main
'''
@brief Test unit for RecordFrames
'''
class Test_RecordFrames(unittest.TestCase, Test_main, TestData):
    """Unit test for the RecordFrames hardware module."""

    def setUp(self):
        '''
        @brief Give all setups to the unit test.
        '''
        self.SetAllKey()
        self.InitTestData()
        #### IMPORTS #####
        # Imports deferred until after SetAllKey()/InitTestData() have run
        # -- presumably so test settings are configured before the hardware
        # modules load; confirm.
        from Settings import Settings
        from src.DroneVision.DroneVision_src.hardware import RecordFrames
        self.Settings = Settings
        self.RecordFrames = RecordFrames
        ##################

    def tearDown(self):
        '''
        @brief Give all tear down steps.
        Is runned even if the test failed.
        '''
        pass

    def test_RecordFrames(self):
        '''
        @brief Main start test function.
        Append functions to test for this unit.
        '''
        ###### START TEST #####
        # Only frame sets flagged with use_set are exercised; the first
        # left frame of each set is used as input.
        for folder, left_frames, right_frames, actual_distances, baselines, use_set in self.GetFrameSets():
            if use_set:
                self.TestRecordFrames(folder + left_frames[0][0])
        ###########################

    def TestRecordFrames(self, input_image_fn):
        '''
        @brief Test unit for RecordFrames
        @param input_image_fn
        '''
        import time
        from src.bin.tools import CheckDir
        CheckDir(self.vid_rec_folder)
        from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage
        from src.DroneVision.DroneVision_src.hardware.VideoLink import VideoLink
        video_recorder = self.RecordFrames.RecordFrames(self.vid_rec_fps, self.vid_rec_folder, self.video_rec_output_fname)
        # Re-write the same image until the recorder reports the requested
        # number of frames.
        while video_recorder.GetNumberOfRecordedFrames() < self.max_rec_frames:
            frame = GetImage(input_image_fn)
            #video_recorder.WriteFrameThread(frame)
            #time.sleep(0.5)
            video_recorder.WriteFrame(frame)
            print 'N recorded frames: ', video_recorder.GetNumberOfRecordedFrames()
        video_recorder.CloseRecording()
|
{
"content_hash": "ca2df46fa791b314bae915d46f9db5ed",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 118,
"avg_line_length": 29.042857142857144,
"alnum_prop": 0.6945400885391048,
"repo_name": "hansehe/Wind-Blade-Inspection",
"id": "5b270fade10e47b65bcadaf7e626d12b5b7608b4",
"size": "2033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TestUnits/Test_src/Test_DroneVision/Test_DroneVision_src/Test_hardware/Test_RecordFrames.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2183232"
}
],
"symlink_target": ""
}
|
from SetExpression import *
class SymbolicSet(SetExpression):
    """Set expression identified purely by its generated MiniZinc code.

    Thin subclass of SetExpression: every operation delegates to the
    codeGenerator / codeSetup / codePrepare object passed in.
    """

    def __init__(self):
        SetExpression.__init__(self)

    def getSymbolName(self, codeGenerator):
        # The symbol name of a symbolic set is simply its generated code.
        return self.generateCode(codeGenerator)

    def getDependencies(self, codeGenerator):
        # A symbolic set introduces no dependencies.
        return []

    def setupEnvironment(self, codeSetup):
        """
        Setup environment
        """
        codeSetup.setupEnvironment(self)

    def prepare(self, codePrepare):
        """
        Prepare environment
        """
        codePrepare.prepare(self)

    def generateCode(self, codeGenerator):
        """
        Generate the MiniZinc code for this set expression
        """
        return codeGenerator.generateCode(self)
|
{
"content_hash": "8e50e953ddb4ea6caeea053b78ca6bdf",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 50,
"avg_line_length": 24.379310344827587,
"alnum_prop": 0.6195190947666195,
"repo_name": "rafaellc28/Latex2MiniZinc",
"id": "e49239126173e59a8b5ace93ef50a4201a6c12f5",
"size": "707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "latex2minizinc/SymbolicSet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2918583"
}
],
"symlink_target": ""
}
|
"""Module for the admin pages."""
from google.appengine.api import taskqueue
from google.appengine.ext import db
from google.appengine.ext import ndb
from django import forms as djangoforms
from django import http
from django.utils.translation import ugettext
from melange.models import profile as profile_model
from melange.models import user as user_model
from melange.request import access
from melange.request import exception
from soc.logic import cleaning
from soc.views.dashboard import Dashboard
from soc.views.helper import lists
from soc.views.helper import url_patterns
from soc.views.template import Template
from soc.modules.gsoc.logic import project as project_logic
from soc.modules.gsoc.logic.proposal import getProposalsToBeAcceptedForOrg
from soc.modules.gsoc.models.grading_project_survey import GradingProjectSurvey
from soc.modules.gsoc.models.grading_survey_group import GSoCGradingSurveyGroup
from soc.modules.gsoc.models.project import GSoCProject
from soc.modules.gsoc.models.project_survey import ProjectSurvey
from soc.modules.gsoc.models.proposal import GSoCProposal
from soc.modules.gsoc.models.proposal_duplicates import GSoCProposalDuplicate
from soc.modules.gsoc.views import base
from soc.modules.gsoc.views import forms as gsoc_forms
from soc.modules.gsoc.views.helper import url_names
from soc.modules.gsoc.views.helper.url_patterns import url
from soc.modules.gsoc.views import projects_list
from summerofcode.request import links
from summerofcode.views.helper import urls
class LookupForm(gsoc_forms.GSoCModelForm):
    """Django form for the lookup profile page."""

    class Meta:
        model = None

    def __init__(self, request_data=None, **kwargs):
        super(LookupForm, self).__init__(**kwargs)
        self.request_data = request_data

    user_id = djangoforms.CharField(label='Username')

    def clean_user_id(self):
        """Validates the entered username.

        On success the resolved user entity and the user's profile for the
        current program are stored in cleaned_data['user'] and
        cleaned_data['profile'] respectively.
        """
        user_id_cleaner = cleaning.clean_link_id('user_id')
        try:
            user_id = user_id_cleaner(self)
        except djangoforms.ValidationError as e:
            if e.code != 'invalid':
                raise
            # Re-raise with a message matching this field's semantics.
            msg = ugettext(u'Enter a valid username.')
            raise djangoforms.ValidationError(msg, code='invalid')
        user = user_model.User.get_by_id(user_id)
        if not user:
            # The field holds a username, not an email address; the original
            # message wrongly referred to an email address.
            raise djangoforms.ValidationError(
                'There is no user with that username')
        self.cleaned_data['user'] = user
        query = profile_model.Profile.query(
            profile_model.Profile.program == ndb.Key.from_old_key(
                self.request_data.program.key()),
            ancestor=user.key)
        self.cleaned_data['profile'] = query.get()
class DashboardPage(base.GSoCRequestHandler):
    """Dashboard for admins."""

    access_checker = access.PROGRAM_ADMINISTRATOR_ACCESS_CHECKER

    def djangoURLPatterns(self):
        """URL pattern for the admin dashboard page."""
        pattern = r'admin/%s$' % url_patterns.PROGRAM
        return [url(pattern, self, name='gsoc_admin_dashboard')]

    def templatePath(self):
        """Path of the template used to render this page."""
        return 'modules/gsoc/admin/base.html'

    def context(self, data, check, mutator):
        """Context for dashboard page."""
        # Instantiate every child dashboard in display order.
        dashboard_classes = (
            MainDashboard,
            ProgramSettingsDashboard,
            ManageOrganizationsDashboard,
            EvaluationsDashboard,
            MentorEvaluationsDashboard,
            StudentEvaluationsDashboard,
            EvaluationGroupDashboard,
            StudentsDashboard,
            ParticipantsDashboard,
            ShipmentTrackingDashboard,
            ShipmentInfoDashboard,
        )
        return {
            'dashboards': [cls(data) for cls in dashboard_classes],
            'page_name': 'Admin dashboard',
        }

    def post(self, data, check, mutator):
        """Handles a post request.

        Do nothing, since toggle button posting to this handler
        without expecting any response.
        """
        return http.HttpResponse()
class MainDashboard(Dashboard):
    """Dashboard for admin's main-dashboard."""

    def __init__(self, data):
        """Initializes the dashboard.

        Args:
          data: The RequestData object
        """
        super(MainDashboard, self).__init__(data)

    def context(self):
        """Returns the context of main dashboard."""
        # TODO(nathaniel): Eliminate this state-setting call.
        self.data.redirect.program()
        # Child dashboards whose subpage links are embedded in the entries
        # below.
        manage_orgs = ManageOrganizationsDashboard(self.data)
        program_settings = ProgramSettingsDashboard(self.data)
        evaluations = EvaluationsDashboard(self.data)
        participants = ParticipantsDashboard(self.data)
        students = StudentsDashboard(self.data)
        shipment_tracking = ShipmentTrackingDashboard(self.data)
        # Each entry either links directly ('link') or expands into the
        # subpage links of a child dashboard ('subpage_links').
        subpages = [
            {
                'name': 'lookup_profile',
                'description': ugettext(
                    'Lookup profile of mentor or student from various program.'),
                'title': 'Lookup profile',
                'link': self.data.redirect.urlOf('lookup_gsoc_profile')
            },
            {
                'name': 'allocate_slots',
                'description': ugettext(
                    'Allocate slots (number of acceptable projects) per '
                    'organization'),
                'title': 'Allocate slots',
                'link': self.data.redirect.urlOf('gsoc_slots')
            },
            {
                'name': 'slots_transfer',
                'description': ugettext(
                    'Transfer slots for organizations'),
                'title': 'Slots transfer',
                'link': self.data.redirect.urlOf('gsoc_admin_slots_transfer')
            },
            {
                'name': 'duplicates',
                'description': ugettext(
                    'Calculate how many duplicate proposals, students that have '
                    'accepted proposals more than one'),
                'title': 'Duplicates',
                'link': self.data.redirect.urlOf('gsoc_view_duplicates')
            },
            {
                'name': 'accept_proposals',
                'description': ugettext(
                    'Start proposals into projects conversion'),
                'title': 'Bulk accept proposals and send acceptance/rejection '
                         'emails',
                'link': self.data.redirect.urlOf('gsoc_accept_proposals')
            },
            {
                'name': 'manage_proposals',
                'description': ugettext(
                    'Lists all the proposals submitted to the program and lets '
                    'accept individual proposals.'),
                'title': 'Proposals submitted',
                'link': self.data.redirect.urlOf('gsoc_admin_accept_proposals')
            },
            {
                'name': 'withdraw_projects',
                'description': ugettext(
                    'Withdraw accepted projects or accept withdrawn projects'),
                'title': 'Accept/withdraw projects',
                'link': self.data.redirect.urlOf('gsoc_withdraw_projects')
            },
            {
                'name': 'participants',
                'description': ugettext(
                    'List of all participants in this program.'),
                'title': 'Participants',
                'link': '',
                'subpage_links': participants.getSubpagesLink(),
            },
            {
                'name': 'students',
                'description': ugettext(
                    'Manage all the Student\'s projects.'),
                'title': 'Students',
                'link': '',
                'subpage_links': students.getSubpagesLink(),
            },
            {
                'name': 'manage_organizations',
                'description': ugettext(
                    'Manage organizations from active program. You can allocate '
                    'slots for organizations, list mentors and administrators '
                    'from various organizations'),
                'title': 'Manage organizations',
                'link': '',
                'subpage_links': manage_orgs.getSubpagesLink(),
            },
            {
                'name': 'evaluations',
                'description': ugettext(
                    'Send reminder, evaluation group, create, edit, '
                    'view evaluations for mentors and students'),
                'title': 'Evaluations',
                'link': '',
                'subpage_links': evaluations.getSubpagesLink(),
            },
            {
                'name': 'program_settings',
                'description': ugettext(
                    'Edit program settings and timeline'),
                'title': 'Program settings',
                'link': '',
                'subpage_links': program_settings.getSubpagesLink(),
            },
            {
                'name': 'shipment_tracking',
                'description': ugettext(
                    'Shipment tracking for students'),
                'title': 'Tracking Information',
                'link': '',
                'subpage_links': shipment_tracking.getSubpagesLink(),
            },
        ]
        return {
            'title': 'Admin Dashboard',
            'name': 'main',
            'subpages': self._divideSubPages(subpages),
            'enabled': True
        }
class ProgramSettingsDashboard(Dashboard):
    """Dashboard for admin's program-settings-dashboard."""

    def __init__(self, data):
        """Initializes the dashboard.

        Args:
          data: The RequestData object
        """
        # TODO(nathaniel): Eliminate this state-setting call.
        data.redirect.program()
        # Static list of program-settings subpages passed to the base class.
        subpages = [
            {
                'name': 'edit_program',
                'description': ugettext(
                    'Edit your program settings such as information, slots, '
                    'documents, etc.'),
                'title': 'Edit program settings',
                'link': data.redirect.urlOf(url_names.GSOC_PROGRAM_EDIT)
            },
            {
                'name': 'edit_timeline',
                'description': ugettext(
                    'Edit your program timeline such as program start/end date, '
                    'student signup start/end date, etc.'),
                'title': 'Edit timeline',
                'link': data.redirect.urlOf('edit_gsoc_timeline')
            },
            {
                'name': 'edit_program_messages',
                'description': ugettext(
                    'Edit program messages which will be sent in emails '
                    'to the specified participants.'),
                'title': 'Edit messages',
                'link': data.redirect.urlOf(url_names.GSOC_EDIT_PROGRAM_MESSAGES)
            },
            {
                'name': 'documents',
                'description': ugettext(
                    'List of documents from various program.'),
                'title': 'List of documents',
                'link': data.redirect.urlOf('list_gsoc_documents')
            },
            {
                'name': 'create_program',
                'description': ugettext(
                    'Create a new program.'),
                'title': 'Create a program',
                'link': links.SOC_LINKER.sponsor(
                    data.sponsor, url_names.GSOC_PROGRAM_CREATE),
            },
        ]
        super(ProgramSettingsDashboard, self).__init__(data, subpages)

    def context(self):
        """Returns the context of program settings dashboard.
        """
        subpages = self._divideSubPages(self.subpages)
        return {
            'title': 'Program Settings',
            'name': 'program_settings',
            'backlinks': [
                {
                    'to': 'main',
                    'title': 'Admin dashboard'
                },
            ],
            'subpages': subpages
        }
class ManageOrganizationsDashboard(Dashboard):
    """Dashboard for admin's manage-organizations-dashboard."""

    def __init__(self, data):
        """Initializes the dashboard.

        Args:
          data: The RequestData object
        """
        # TODO(nathaniel): Eliminate this state-setting call.
        data.redirect.program()
        # Static list of organization-management subpages.
        subpages = [
            {
                'name': 'edit_org_app',
                'description': ugettext(
                    'Create or edit organization application'),
                'title': 'Edit organization application',
                'link': data.redirect.urlOf('gsoc_edit_org_app')
            },
            {
                'name': 'preview_org_app',
                'description': ugettext(
                    'Preview of the organization application.'),
                'title': 'Preview organization application',
                'link': data.redirect.urlOf('gsoc_preview_org_app')
            },
            {
                'name': 'org_app_records',
                'description': ugettext(
                    'List of organization application that have been '
                    'submitted to the program'),
                'title': 'Submitted organization applications',
                'link': links.SOC_LINKER.program(
                    data.program, urls.UrlNames.ORG_APPLICATION_LIST)
            },
        ]
        super(ManageOrganizationsDashboard, self).__init__(data, subpages)

    def context(self):
        """Returns the context of manage organizations dashboard."""
        subpages = self._divideSubPages(self.subpages)
        return {
            'title': 'Manage Organizations',
            'name': 'manage_organizations',
            'backlinks': [
                {
                    'to': 'main',
                    'title': 'Admin dashboard'
                },
            ],
            'subpages': subpages
        }
class EvaluationsDashboard(Dashboard):
  """Dashboard for admin's evaluations-dashboard."""

  def __init__(self, data):
    """Initializes the dashboard.

    Args:
      data: The RequestData object
    """
    # Child dashboards; only their subpage links are embedded in this
    # dashboard's tiles (via getSubpagesLink below).
    mentor_evaluations = MentorEvaluationsDashboard(data)
    student_evaluations = StudentEvaluationsDashboard(data)
    evaluation_group = EvaluationGroupDashboard(data)

    # TODO(nathaniel): Eliminate this state-setting call.
    data.redirect.program()

    subpages = [
        {
            'name': 'reminder_emails',
            'description': ugettext(
                'Send reminder emails for evaluations.'),
            'title': 'Send reminder',
            'link': data.redirect.urlOf('gsoc_survey_reminder_admin')
        },
        {
            'name': 'mentor_evaluations',
            'description': ugettext(
                'Create, edit and view evaluations for mentors'),
            'title': 'Mentor Evaluations',
            # Empty link: this tile only expands into its subpage links.
            'link': '',
            'subpage_links': mentor_evaluations.getSubpagesLink(),
        },
        {
            'name': 'student_evaluations',
            'description': ugettext(
                'Create, edit and view evaluations for students'),
            'title': 'Student Evaluations',
            'link': '',
            'subpage_links': student_evaluations.getSubpagesLink(),
        },
        {
            'name': 'evaluation_group',
            'description': ugettext('Manage the results of the evaluation'),
            'title': 'Evaluation Group',
            'link': '',
            'subpage_links': evaluation_group.getSubpagesLink(),
        },
    ]

    super(EvaluationsDashboard, self).__init__(data, subpages)

  def context(self):
    """Returns the context of evaluations dashboard."""
    subpages = self._divideSubPages(self.subpages)

    return {
        'title': 'Evaluations',
        'name': 'evaluations',
        'backlinks': [
            {
                'to': 'main',
                'title': 'Admin dashboard'
            },
        ],
        'subpages': subpages
    }
class MentorEvaluationsDashboard(Dashboard):
  """Dashboard for mentor's evaluations.

  Builds two groups of subpages: one for the midterm GradingProjectSurvey
  and one for the final one, each keyed off a datastore key derived from
  the program name.
  """

  def __init__(self, data):
    """Initializes the dashboard.

    Args:
      data: The RequestData object
    """
    # Key of the midterm evaluation survey for this program.
    survey_key = db.Key.from_path(
        GradingProjectSurvey.kind(), '%s/%s' % (
            data.program.key().name(), 'midterm'))

    subpages = [
        {
            'name': 'edit_mentor_evaluation',
            'description': ugettext('Create or edit midterm evaluation for '
                'mentors in active program'),
            'title': 'Create or Edit Midterm',
            'link': links.SOC_LINKER.survey(
                survey_key, 'gsoc_edit_mentor_evaluation')
        },
        {
            'name': 'preview_mentor_evaluation',
            'description': ugettext('Preview midterm evaluation to be '
                'administered mentors.'),
            'title': 'Preview Midterm Evaluation',
            'link': links.SOC_LINKER.survey(
                survey_key, 'gsoc_preview_mentor_evaluation')
        },
        {
            'name': 'view_mentor_evaluation',
            'description': ugettext('View midterm evaluation for mentors'),
            'title': 'View Midterm Records',
            'link': links.SOC_LINKER.survey(
                survey_key, 'gsoc_list_mentor_eval_records')
        },
    ]

    # Rebind survey_key to the final evaluation before building its links.
    survey_key = db.Key.from_path(
        GradingProjectSurvey.kind(), '%s/%s' % (
            data.program.key().name(), 'final'))

    # NOTE(review): these entries reuse the same 'name' identifiers as the
    # midterm entries above; kept as-is since templates may rely on them.
    subpages += [
        {
            'name': 'edit_mentor_evaluation',
            # Fixed copy-paste error: this entry is for the final
            # evaluation, but its description previously said 'midterm'.
            'description': ugettext('Create or edit final evaluation for '
                'mentors in active program'),
            'title': 'Create or Edit Final Evaluation',
            'link': links.SOC_LINKER.survey(
                survey_key, 'gsoc_edit_mentor_evaluation')
        },
        {
            'name': 'preview_mentor_evaluation',
            'description': ugettext('Preview final evaluation to be '
                'administered mentors.'),
            'title': 'Preview Final Evaluation',
            'link': links.SOC_LINKER.survey(
                survey_key, 'gsoc_preview_mentor_evaluation')
        },
        {
            'name': 'view_mentor_evaluation',
            'description': ugettext('View final evaluation for mentors'),
            'title': 'View Final Evaluation Records',
            'link': links.SOC_LINKER.survey(
                survey_key, 'gsoc_list_mentor_eval_records')
        },
    ]

    super(MentorEvaluationsDashboard, self).__init__(data, subpages)

  def context(self):
    """Returns the context of mentor evaluations dashboard.
    """
    subpages = self._divideSubPages(self.subpages)

    return {
        'title': 'Mentor Evaluations',
        'name': 'mentor_evaluations',
        'backlinks': [
            {
                'to': 'main',
                'title': 'Admin dashboard'
            },
            {
                'to': 'evaluations',
                'title': 'Evaluations'
            },
        ],
        'subpages': subpages
    }
class StudentEvaluationsDashboard(Dashboard):
  """Dashboard for student's evaluations."""

  def __init__(self, data):
    """Initializes the dashboard.

    Args:
      data: The RequestData object
    """
    # Key of the midterm ProjectSurvey for this program.
    survey_key = db.Key.from_path(
        ProjectSurvey.kind(), '%s/%s' % (data.program.key().name(), 'midterm'))

    subpages = [
        {
            'name': 'edit_student_evaluation',
            'description': ugettext('Create or edit midterm evaluation for '
                'students in active program'),
            'title': 'Create or Edit Midterm Evaluation',
            'link': links.SOC_LINKER.survey(
                survey_key, 'gsoc_edit_student_evaluation')
        },
        {
            'name': 'preview_student_evaluation',
            'description': ugettext('Preview midterm evaluation to be '
                'administered to the students.'),
            'title': 'Preview Midterm Evaluation',
            'link': links.SOC_LINKER.survey(
                survey_key, 'gsoc_preview_student_evaluation')
        },
        {
            'name': 'view_student_evaluation',
            'description': ugettext('View midterm evaluation for students'),
            'title': 'View Midterm Evaluation Records',
            'link': links.SOC_LINKER.survey(
                survey_key, 'gsoc_list_student_eval_records')
        },
    ]

    # Rebound to the final evaluation's key for the second set of links.
    # NOTE(review): the final entries below reuse the midterm entries'
    # 'name' identifiers.
    survey_key = db.Key.from_path(
        ProjectSurvey.kind(), '%s/%s' % (data.program.key().name(), 'final'))

    subpages += [
        {
            'name': 'edit_student_evaluation',
            'description': ugettext('Create or edit final evaluation for '
                'students in active program'),
            'title': 'Create or Edit Final Evaluation',
            'link': links.SOC_LINKER.survey(
                survey_key, 'gsoc_edit_student_evaluation')
        },
        {
            'name': 'preview_student_evaluation',
            'description': ugettext('Preview final evaluation to be '
                'administered to the students.'),
            'title': 'Preview Final Evaluation',
            'link': links.SOC_LINKER.survey(
                survey_key, 'gsoc_preview_student_evaluation')
        },
        {
            'name': 'view_student_evaluation',
            'description': ugettext('View final evaluation for students'),
            'title': 'View Final Evaluation Records',
            'link': links.SOC_LINKER.survey(
                survey_key, 'gsoc_list_student_eval_records')
        },
    ]

    super(StudentEvaluationsDashboard, self).__init__(data, subpages)

  def context(self):
    """Returns the context of student evaluations dashboard.
    """
    subpages = self._divideSubPages(self.subpages)

    return {
        'title': 'Student Evaluations',
        'name': 'student_evaluations',
        'backlinks': [
            {
                'to': 'main',
                'title': 'Admin dashboard'
            },
            {
                'to': 'evaluations',
                'title': 'Evaluations'
            },
        ],
        'subpages': subpages
    }
class EvaluationGroupDashboard(Dashboard):
  """Dashboard for evaluation group."""

  def __init__(self, data):
    """Initializes the dashboard.

    Args:
      data: The RequestData object
    """
    # TODO(nathaniel): Eliminate this state-setting call.
    data.redirect.program()

    subpages = [
        {
            'name': 'edit_evaluation_group',
            'description': ugettext('Create evaluation group'),
            'title': 'Create',
            'link': data.redirect.urlOf('gsoc_grading_group')
        },
    ]

    # Add one "view" tile per existing grading survey group of the program.
    q = GSoCGradingSurveyGroup.all()
    q.filter('program', data.program)

    for group in q:
      # Presumably updates the shared redirect state that urlOf below
      # consumes, so it must run before each urlOf call.
      data.redirect.id(group.key().id())
      subpages.append(
          {
              'name': 'view_evaluation_group_%s' % group.key().id(),
              'description': ugettext('View this group'),
              'title': 'View %s' % group.name,
              'link': data.redirect.urlOf('gsoc_grading_record_overview')
          }
      )

    super(EvaluationGroupDashboard, self).__init__(data, subpages)

  def context(self):
    """Returns the context of evaluation group dashboard."""
    subpages = self._divideSubPages(self.subpages)

    return {
        'title': 'Evaluation Group',
        'name': 'evaluation_group',
        'backlinks': [
            {
                'to': 'main',
                'title': 'Admin dashboard'
            },
            {
                'to': 'evaluations',
                'title': 'Evaluations'
            },
        ],
        'subpages': subpages
    }
class ParticipantsDashboard(Dashboard):
  """Dashboard for admin's all participants dashboard
  """

  def __init__(self, data):
    """Initializes the dashboard.

    Args:
      data: The RequestData object
    """
    # TODO(nathaniel): Eliminate this state-setting call.
    data.redirect.program()

    subpages = [
        {
            'name': 'list_mentors',
            'description': ugettext(
                'List of all the organization admins and mentors'),
            'title': 'List mentors and admins',
            'link': data.redirect.urlOf('gsoc_list_mentors')
        },
        {
            'name': 'list_students',
            'description': ugettext(
                'List of all participating students'),
            'title': 'List students',
            'link': data.redirect.urlOf('gsoc_students_list_admin')
        },
    ]

    super(ParticipantsDashboard, self).__init__(data, subpages)

  def context(self):
    """Returns the context of participants dashboard.
    """
    subpages = self._divideSubPages(self.subpages)

    return {
        'title': 'Participants',
        'name': 'participants',
        'backlinks': [
            {
                'to': 'main',
                'title': 'Admin dashboard'
            },
        ],
        'subpages': subpages
    }
class StudentsDashboard(Dashboard):
  """Dashboard for student related items."""

  def __init__(self, data):
    """Initializes the dashboard.

    Args:
      data: The RequestData object
    """
    # TODO(nathaniel): Eliminate this state-setting call.
    data.redirect.program()

    subpages = [
        {
            'name': 'list_projects',
            # Fixed grammar of the user-facing description (previously:
            # "projects who have accepted to the program").
            'description': ugettext(
                'List of all the projects that have been accepted into '
                'the program.'),
            'title': 'View All Projects',
            'link': data.redirect.urlOf('gsoc_projects_list_admin')
        },
        {
            'name': 'manage_projects',
            # Fixed grammar (previously: "that have accepted to").
            'description': ugettext(
                'Manage the projects that have been accepted into '
                'the program.'),
            'title': 'Manage Projects',
            'link': data.redirect.urlOf(
                url_names.GSOC_ADMIN_MANAGE_PROJECTS_LIST)
        },
    ]

    super(StudentsDashboard, self).__init__(data, subpages)

  def context(self):
    """Returns the context of manage students dashboard."""
    subpages = self._divideSubPages(self.subpages)

    return {
        'title': 'Students',
        'name': 'students',
        'backlinks': [
            {
                'to': 'main',
                'title': 'Admin dashboard'
            },
        ],
        'subpages': subpages
    }
class ShipmentTrackingDashboard(Dashboard):
  """Dashboard for shipment tracking.
  """

  def __init__(self, data):
    """Initializes the dashboard.

    Args:
      data: The RequestData object
    """
    # Child dashboard whose subpage links are embedded in the first tile.
    shipment_info = ShipmentInfoDashboard(data)

    subpages = [
        {
            'name': 'shipment_infos',
            'description': ugettext('Manage Shipment Information'),
            'title': 'Shipment Information',
            # Empty link: this tile only expands into its subpage links.
            'link': '',
            'subpage_links': shipment_info.getSubpagesLink(),
        },
        {
            'name': 'sync_data',
            'description': ugettext('Sync Data'),
            'title': 'Sync Data',
            'link': links.SOC_LINKER.program(
                data.program, url_names.GSOC_SHIPMENT_LIST),
        },
    ]

    super(ShipmentTrackingDashboard, self).__init__(data, subpages)

  def context(self):
    """Returns the context of shipment tracking dashboard.
    """
    subpages = self._divideSubPages(self.subpages)

    return {
        'title': 'Shipment Tracking Information',
        'name': 'shipment_tracking',
        'backlinks': [
            {
                'to': 'main',
                'title': 'Admin dashboard'
            },
        ],
        'subpages': subpages
    }
class ShipmentInfoDashboard(Dashboard):
  """Dashboard for shipment infos.
  """

  def __init__(self, data):
    """Initializes the dashboard.

    Args:
      data: The RequestData object
    """
    subpages = [
        {
            'name': 'create_shipment_info',
            'description': ugettext('Create shipment information'),
            'title': 'Create',
            'link': links.SOC_LINKER.program(data.program, url_names.GSOC_CREATE_SHIPMENT_INFO),
        },
        {
            'name': 'edit_shipment_infos',
            'description': ugettext('Edit shipment informations'),
            'title': 'Edit',
            'link': links.SOC_LINKER.program(data.program, url_names.GSOC_SHIPMENT_INFO_RECORDS),
        },
    ]

    super(ShipmentInfoDashboard, self).__init__(data, subpages)

  def context(self):
    """Returns the context of shipment infos dashboard.
    """
    subpages = self._divideSubPages(self.subpages)

    return {
        'title': 'Shipment Information',
        'name': 'shipment_infos',
        'backlinks': [
            {
                'to': 'main',
                'title': 'Admin dashboard'
            },
            {
                'to': 'shipment_tracking',
                'title': 'Shipment Tracking Information'
            }
        ],
        'subpages': subpages
    }
class LookupLinkIdPage(base.GSoCRequestHandler):
  """View for the participant profile."""

  access_checker = access.PROGRAM_ADMINISTRATOR_ACCESS_CHECKER

  def djangoURLPatterns(self):
    return [
        url(r'admin/lookup/%s$' % url_patterns.PROGRAM, self,
            name='lookup_gsoc_profile'),
    ]

  def templatePath(self):
    return 'modules/gsoc/admin/lookup.html'

  def post(self, data, check, mutator):
    # TODO(nathaniel): problematic self-call.
    return self.get(data, check, mutator)

  def context(self, data, check, mutator):
    """Validates the lookup form; redirects to the matched profile.

    On a valid POST that resolves a profile, raises exception.Redirect to
    that profile's admin page; otherwise re-renders the form.
    """
    form = LookupForm(request_data=data, data=data.POST or None)
    error = bool(form.errors)

    forms = [form]
    profile = None

    if not form.errors and data.request.method == 'POST':
      profile = form.cleaned_data.get('profile')

    if profile:
      raise exception.Redirect(
          links.SOC_LINKER.profile(profile, urls.UrlNames.PROFILE_ADMIN))
    else:
      return {
          'forms': forms,
          'error': error,
          # 'posted' mirrors 'error', so it is only set on a failed post —
          # presumably intentional template behavior; verify.
          'posted': error,
          'page_name': 'Lookup profile',
      }
class ProposalsList(Template):
  """Template for listing all the proposals sent to org.
  """

  def __init__(self, data):
    """Initializes this proposals list.

    Args:
      data: The RequestData object for the current request.
    """
    self.data = data

    def getStudentEmail(entity, *args):
      """Helper function to get a value for Student Email column."""
      profile = ndb.Key.from_old_key(entity.parent_key()).get()
      return profile.contact.email

    def getStudent(entity, *args):
      """Helper function to get a value for Student column."""
      profile = ndb.Key.from_old_key(entity.parent_key()).get()
      return profile.public_name

    list_config = lists.ListConfiguration()
    list_config.addSimpleColumn('title', 'Title')
    list_config.addPlainTextColumn(
        'email', 'Student Email', getStudentEmail, hidden=True)
    list_config.addSimpleColumn('score', 'Score')
    list_config.addSimpleColumn('nr_scores', '#scores', hidden=True)

    def getAverage(ent):
      """Average score rounded to two decimals; 0.0 when unscored."""
      if not ent.nr_scores:
        return float(0)

      average = float(ent.score) / float(ent.nr_scores)
      return float("%.2f" % average)

    list_config.addNumericalColumn(
        'average', 'Average', lambda ent, *a: getAverage(ent))

    def getStatusOnDashboard(proposal, accepted, duplicates):
      """Method for determining which status to show on the dashboard."""
      # TODO(nathaniel): HTML in Python.
      if proposal.status == 'pending':
        if proposal.accept_as_project and (
            not GSoCProposal.mentor.get_value_for_datastore(proposal)):
          return """<strong><font color="red">No mentor assigned</font></strong>"""
        elif proposal.key() in duplicates:
          return """<strong><font color="red">Duplicate</font></strong>"""
        elif proposal.key() in accepted:
          # Fixed malformed HTML: the closing tag was previously written
          # as a second opening <strong>.
          return """<strong><font color="green">Pending acceptance</font></strong>"""
      # not showing duplicates or proposal doesn't have an interesting state
      return proposal.status

    def getOrganizationKey(entity, *args):
      """Helper function to get value of organization key column."""
      org_key = GSoCProposal.org.get_value_for_datastore(entity)
      return ndb.Key.from_old_key(org_key).id()

    options = [
        # TODO(nathaniel): This looks like structured data that should be
        # properly modeled in first-class structured Python objects.
        ('(pending|accepted|rejected|duplicate|mentor)', 'Valid'),
        ('(duplicate|mentor)', 'Needs attention'),
        ('(duplicate)', 'Duplicate'),
        ('(accepted)', 'Accepted'),
        ('(rejected)', 'Rejected'),
        ('(mentor)', 'No mentor assigned'),
        ('', 'All'),
        ('(invalid|withdrawn|ignored)', 'Invalid'),
    ]
    list_config.addHtmlColumn('status', 'Status',
        getStatusOnDashboard, options=options)

    list_config.addSimpleColumn('last_modified_on', 'Last modified',
        column_type=lists.DATE)
    list_config.addSimpleColumn('created_on', 'Created on',
        column_type=lists.DATE, hidden=True)
    list_config.addPlainTextColumn('student', 'Student', getStudent)
    list_config.addSimpleColumn('accept_as_project', 'Should accept')

    # hidden keys
    list_config.addPlainTextColumn(
        'full_proposal_key', 'Full proposal key',
        (lambda ent, *args: str(ent.key())), hidden=True)
    list_config.addPlainTextColumn(
        'org_key', 'Organization key', getOrganizationKey, hidden=True)

    list_config.setDefaultSort('last_modified_on', 'desc')

    self._list_config = list_config

  def templatePath(self):
    # Fixed missing space after `return` (was `return'...'`).
    return 'modules/gsoc/admin/_proposals_list.html'

  def context(self):
    """Returns the template context for rendering the list shell."""
    description = (
        'List of proposals submitted into %s' % self.data.url_ndb_org.name)

    list_configuration_response = lists.ListConfigurationResponse(
        self.data, self._list_config, idx=0, description=description)

    return {
        'name': 'proposals_submitted',
        'title': 'PROPOSALS SUBMITTED TO MY ORGS',
        'lists': [list_configuration_response],
    }

  def getListData(self):
    """Returns list rows for this component, or None for other indexes."""
    idx = lists.getListIndex(self.data.request)
    if idx != 0:
      return None

    program = self.data.program

    # Hold all the accepted projects for orgs where this user is a member of
    accepted = []
    # Hold all duplicates for either the entire program or the orgs of the user.
    duplicates = []

    dupQ = GSoCProposalDuplicate.all()
    dupQ.filter('is_duplicate', True)
    dupQ.filter('org', self.data.url_ndb_org.key.to_old_key())
    dupQ.filter('program', program)

    accepted.extend(
        p.key() for p in getProposalsToBeAcceptedForOrg(self.data.url_ndb_org))

    duplicate_entities = dupQ.fetch(1000)
    for dup in duplicate_entities:
      duplicates.extend(dup.duplicates)

    q = GSoCProposal.all()
    q.filter('org', self.data.url_ndb_org.key.to_old_key())
    q.filter('program', program)

    starter = lists.keyStarter
    # TODO(daniel): enable prefetching from ndb models ('org', 'parent')
    # prefetcher = lists.ModelPrefetcher(GSoCProposal, [], parent=True)

    response_builder = lists.RawQueryContentResponseBuilder(
        self.data.request, self._list_config, q, starter, prefetcher=None)

    # accepted/duplicates are forwarded to the column renderers
    # (getStatusOnDashboard receives them as extra arguments).
    return response_builder.build(accepted, duplicates)
class ProposalsPage(base.GSoCRequestHandler):
  """View for proposals for particular org."""

  access_checker = access.PROGRAM_ADMINISTRATOR_ACCESS_CHECKER

  def djangoURLPatterns(self):
    return [
        url(r'admin/proposals/%s$' % url_patterns.ORG, self,
            name='gsoc_proposals_org'),
    ]

  def templatePath(self):
    return 'modules/gsoc/admin/list.html'

  def jsonContext(self, data, check, mutator):
    """Serves the proposal list rows for AJAX requests."""
    content = ProposalsList(data).getListData()
    if not content:
      raise exception.Forbidden(message='You do not have access to this data')
    return content.content()

  def post(self, data, check, mutator):
    """Handler for POST requests."""
    if not ProposalsList(data).post():
      raise exception.Forbidden(message='You cannot change this data')
    return http.HttpResponse()

  def context(self, data, check, mutator):
    """Builds the template context with the proposal list component."""
    return {
        'page_name': 'Proposal page',
        'list': ProposalsList(data),
    }
class ProjectsList(Template):
  """Template for listing all projects of particular org."""

  def __init__(self, request, data):
    # NOTE(review): the `request` parameter is unused here; kept because
    # callers pass it.
    def getOrganization(entity, *args):
      """Helper function to get value of organization column."""
      org_key = GSoCProject.org.get_value_for_datastore(entity)
      return ndb.Key.from_old_key(org_key).get().name

    self.data = data

    list_config = lists.ListConfiguration()
    list_config.addPlainTextColumn('student', 'Student',
        lambda entity, *args: entity.parent().name())
    list_config.addSimpleColumn('title', 'Title')
    list_config.addPlainTextColumn('org', 'Organization', getOrganization)
    # `m` is presumably the prefetched mentors mapping supplied by the
    # list framework (see ListModelPrefetcher below) — TODO confirm.
    list_config.addPlainTextColumn(
        'mentors', 'Mentor',
        lambda entity, m, *args: [m[i].name() for i in entity.mentors])
    list_config.setDefaultPagination(False)
    list_config.setDefaultSort('student')

    self._list_config = list_config

  def context(self):
    """Returns the template context for rendering the list shell."""
    # NOTE(review): the concatenated description reads "that accepted
    # into" — likely missing a word ("were").
    list_configuration_response = lists.ListConfigurationResponse(
        self.data, self._list_config, idx=0,
        description='List of projects under %s that ' \
            'accepted into %s' % (
            self.data.url_ndb_org.name, self.data.program.name))

    return {
        'lists': [list_configuration_response],
    }

  def getListData(self):
    """Returns the list data as requested by the current request.

    If the lists as requested is not supported by this component None is
    returned.
    """
    idx = lists.getListIndex(self.data.request)
    if idx == 0:
      list_query = project_logic.getAcceptedProjectsQuery(
          program=self.data.program, org=self.data.organization)

      starter = lists.keyStarter
      prefetcher = lists.ListModelPrefetcher(
          GSoCProject, ['org'], ['mentors'], parent=True)

      response_builder = lists.RawQueryContentResponseBuilder(
          self.data.request, self._list_config, list_query,
          starter, prefetcher=prefetcher)
      return response_builder.build()
    else:
      return None

  def templatePath(self):
    return "modules/gsoc/admin/_projects_list.html"
class SurveyReminderPage(base.GSoCRequestHandler):
  """Page to send out reminder emails to fill out a Survey."""

  access_checker = access.PROGRAM_ADMINISTRATOR_ACCESS_CHECKER

  def djangoURLPatterns(self):
    return [
        url(r'admin/survey_reminder/%s$' % url_patterns.PROGRAM, self,
            name='gsoc_survey_reminder_admin'),
    ]

  def templatePath(self):
    return 'modules/gsoc/admin/survey_reminder.html'

  def post(self, data, check, mutator):
    """Spawns a reminder task and redirects back with a status message.

    Expects 'key' and 'type' in the POST data identifying the survey.
    """
    post_dict = data.request.POST

    task_params = {
        'program_key': data.program.key().id_or_name(),
        'survey_key': post_dict['key'],
        'survey_type': post_dict['type']
    }
    # The actual email sending runs asynchronously in a taskqueue task.
    task = taskqueue.Task(url=data.redirect.urlOf('spawn_survey_reminders'),
                          params=task_params)
    task.add()

    return http.HttpResponseRedirect(
        data.request.path + '?msg=Reminders are being sent')

  def context(self, data, check, mutator):
    """Lists this program's mentor and student surveys for the template."""
    q = GradingProjectSurvey.all()
    q.filter('scope', data.program)
    mentor_surveys = q.fetch(1000)

    q = ProjectSurvey.all()
    q.filter('scope', data.program)
    student_surveys = q.fetch(1000)

    return {
        'page_name': 'Sending Evaluation Reminders',
        'mentor_surveys': mentor_surveys,
        'student_surveys': student_surveys,
        'msg': data.request.GET.get('msg', '')
    }
class ProjectsListPage(base.GSoCRequestHandler):
  """View that lists all the projects associated with the program."""

  LIST_IDX = 1

  access_checker = access.PROGRAM_ADMINISTRATOR_ACCESS_CHECKER

  def djangoURLPatterns(self):
    return [
        url(r'admin/all_projects/%s$' % url_patterns.PROGRAM, self,
            name='gsoc_projects_list_admin'),
    ]

  def templatePath(self):
    return 'modules/gsoc/admin/list.html'

  def _makeList(self, data):
    """Builds the ProjectList component shown on this page."""
    query = project_logic.getProjectsQuery(program=data.program)
    return projects_list.ProjectList(data, query, idx=self.LIST_IDX)

  def jsonContext(self, data, check, mutator):
    """Serves list rows; forbidden when no matching list was requested."""
    content = self._makeList(data).getListData()
    if not content:
      raise exception.Forbidden(message='You do not have access to this data')
    return content.content()

  def context(self, data, check, mutator):
    return {
        'page_name': 'Projects list page',
        'list': self._makeList(data),
    }
class ManageProjectsListPage(base.GSoCRequestHandler):
  """View that lists all the projects associated with the program and
  redirects admin to manage page."""

  # Index of this list within the page.
  LIST_IDX = 1

  access_checker = access.PROGRAM_ADMINISTRATOR_ACCESS_CHECKER

  def djangoURLPatterns(self):
    return [
        url(r'admin/manage_projects/%s$' % url_patterns.PROGRAM, self,
            name=url_names.GSOC_ADMIN_MANAGE_PROJECTS_LIST),
    ]

  def templatePath(self):
    return 'modules/gsoc/admin/list.html'

  def jsonContext(self, data, check, mutator):
    """Serves project rows whose row action opens the manage page."""
    list_query = project_logic.getProjectsQuery(program=data.program)
    list_content = projects_list.ProjectList(
        data, list_query, idx=self.LIST_IDX,
        row_action=_getManageProjectRowAction).getListData()
    if list_content:
      return list_content.content()
    else:
      raise exception.Forbidden(message='You do not have access to this data')

  def context(self, data, check, mutator):
    list_query = project_logic.getProjectsQuery(program=data.program)
    return {
        'page_name': 'Projects list page',
        'list': projects_list.ProjectList(data, list_query, idx=self.LIST_IDX,
            row_action=_getManageProjectRowAction),
    }
def _getManageProjectRowAction(data):
"""Returns a row action that redirects to the manage project page.
Args:
data: request_data.RequestData object for the current request.
Returns:
A function takes a project entity as its first argument and returns
URL to the manage project page.
"""
return lambda e, *args: links.SOC_LINKER.userId(
e.parent_key(), e.key().id(), urls.UrlNames.PROJECT_MANAGE_ADMIN)
|
{
"content_hash": "e9ff0b5101741bf88182072ee8e24112",
"timestamp": "",
"source": "github",
"line_count": 1301,
"max_line_length": 97,
"avg_line_length": 31.926979246733282,
"alnum_prop": 0.5959505982617906,
"repo_name": "rhyolight/nupic.son",
"id": "99138b3d20eaec08b233450b6573052c3b7d446f",
"size": "42120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/soc/modules/gsoc/views/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "681301"
},
{
"name": "JavaScript",
"bytes": "392600"
},
{
"name": "PHP",
"bytes": "217376"
},
{
"name": "Python",
"bytes": "5162564"
}
],
"symlink_target": ""
}
|
"""
Contains classes for basic HTTP (authenticated) transport implementations.
"""
from suds.transport.http import HttpTransport
from logging import getLogger
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
HTTPBasicAuthHandler)
log = getLogger(__name__)
class HttpAuthenticated(HttpTransport):
    """
    Provides basic http authentication that follows the RFC-2617 specification.
    As defined by specifications, credentials are provided to the server
    upon request (HTTP/1.0 401 Authorization Required) by the server only.
    @ivar pm: The password manager.
    @ivar handler: The authentication handler.
    """

    def __init__(self, **kwargs):
        """
        @param kwargs: Keyword arguments.
            - B{proxy} - An http proxy to be specified on requests.
                 The proxy is defined as {protocol:proxy,}
                    - type: I{dict}
                    - default: {}
            - B{timeout} - Set the url open timeout (seconds).
                    - type: I{float}
                    - default: 90
            - B{username} - The username used for http authentication.
                    - type: I{str}
                    - default: None
            - B{password} - The password used for http authentication.
                    - type: I{str}
                    - default: None
        """
        HttpTransport.__init__(self, **kwargs)
        self.pm = HTTPPasswordMgrWithDefaultRealm()

    def open(self, request):
        self.addcredentials(request)
        return HttpTransport.open(self, request)

    def send(self, request):
        self.addcredentials(request)
        return HttpTransport.send(self, request)

    def addcredentials(self, request):
        # Register credentials with the password manager only when both the
        # username and the password have been configured.
        username, password = self.credentials()
        if username is not None and password is not None:
            self.pm.add_password(None, request.url, username, password)

    def credentials(self):
        return (self.options.username, self.options.password)

    def u2handlers(self):
        # Extend the base transport's handler chain with basic-auth support.
        return HttpTransport.u2handlers(self) + [HTTPBasicAuthHandler(self.pm)]
class WindowsHttpAuthenticated(HttpAuthenticated):
    """
    Provides Windows (NTLM) http authentication.
    @ivar pm: The password manager.
    @ivar handler: The authentication handler.
    @author: Christopher Bess
    """

    def u2handlers(self):
        # NTLM support is optional: fail with a clear message when the
        # python-ntlm3 package is not installed.
        try:
            from ntlm3 import HTTPNtlmAuthHandler
        except ImportError:
            raise Exception("Cannot import python-ntlm3 module")
        ntlm_handler = HTTPNtlmAuthHandler.HTTPNtlmAuthHandler(self.pm)
        return HttpTransport.u2handlers(self) + [ntlm_handler]
|
{
"content_hash": "befb978e934670ff54f3346e3abc0179",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 33.626506024096386,
"alnum_prop": 0.621282694374776,
"repo_name": "ronreiter/interactive-tutorials",
"id": "4bb39a3f75a4bfe55f2c8fc4f499f959699219ba",
"size": "3623",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "suds/transport/https.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "58440"
},
{
"name": "Dockerfile",
"bytes": "304"
},
{
"name": "HTML",
"bytes": "38066"
},
{
"name": "JavaScript",
"bytes": "263446"
},
{
"name": "Makefile",
"bytes": "225"
},
{
"name": "Python",
"bytes": "634473"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
import json
import socket
import urlparse
import datetime
import firefly
import firefox_startup
from pyvirtualdisplay import Display
# Entry point: opens a Firefox profile and runs a small sample crawl,
# printing the visual elements collected for each URL.  (Python 2 code.)
if __name__ == '__main__':
    # quick argument sanity check
    if len(sys.argv) < 3:
        print 'usage: %s profile_name port' % sys.argv[0]
        sys.exit(1)

    # grab the command line args
    profile_name = sys.argv[1]
    profile_port = int(sys.argv[2])

    # turn on headless environment
    #vd = Display(visible=0, size=(640,480))
    #vd.start()

    # open a browsing window
    # NOTE(review): uses sys.argv[1] directly although it was already
    # bound to profile_name above.
    firefox_startup.startup_firefox(sys.argv[1])

    # getting all the classes we need
    # NOTE(review): rebinds the module name `firefly` to a Firefly
    # instance, shadowing the imported module from here on.
    firefly = firefly.Firefly(profile_port)

    ##############################################
    ##                                          ##
    ## Sample crawl (add desired functionality) ##
    ##                                          ##
    ##############################################
    urls = ['www.google.com',
            'www.nytimes.com',
            'www.yahoo.com']

    # Count of consecutive timeouts; the crawl aborts after three in a row.
    errors = 0
    for url in urls:
        # collection receiving consecutive errors
        if errors > 2:
            print "3 Errors in a row. Terminating..."
            break
        try:
            # navigate to the next collection site
            print "Navigating to %s"%url
            # collect the visual elements (will get images as well)
            visual_elements = firefly.get_visual_elements(url)
            visual_elements = visual_elements.decode('ascii', 'ignore')
        except socket.timeout as ex:
            print "Timeout connecting to %s...moving on."%url
            errors += 1
            continue
        # dump the data to stdout
        # materialize as needed (file, database, whatever...)
        print visual_elements
        # reset consecutive error counter
        errors = 0
|
{
"content_hash": "84e28b9a8794a2b0c4d23424f15b3cc1",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 71,
"avg_line_length": 26.742857142857144,
"alnum_prop": 0.5389957264957265,
"repo_name": "ACAHNN/adscape",
"id": "dc60d64baded98cfa3b3001a32c7662058cc9286",
"size": "1872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/cube.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "25650"
},
{
"name": "Python",
"bytes": "9020"
}
],
"symlink_target": ""
}
|
try:
from bashmu.FormatSock import FormatSocket
from bashmu.serverconstants import *
except ImportError:
from .FormatSock import FormatSocket
from .serverconstants import *
from threading import Lock, Condition
import dill
import queue
class WorkerManager:
    """Thread-safe registry of available worker thread slots.

    Each worker appears in the list once per free thread; recruiting a
    worker removes one slot, freeing returns one.
    """

    def __init__(self):
        self.mutex = Lock()
        # One entry per free worker thread (a worker with N threads
        # appears N times).
        self.workers = []
        # Signalled whenever a slot becomes available.
        self.workerwait = Condition(self.mutex)

    def addWorker(self, w):
        """Register a worker, adding one slot per thread it advertises."""
        with self.mutex:
            for _ in range(w.threads):
                self.workers.append(w)
            self.workerwait.notify()

    def removeWorker(self, w):
        """Remove every slot belonging to the given worker."""
        with self.mutex:
            while w in self.workers:
                self.workers.remove(w)

    def recruitWorker(self, w):
        """
        Use a thread from the worker
        :param w: worker to recruit
        """
        with self.mutex:
            if w in self.workers:
                self.workers.remove(w)

    def freeWorker(self, w):
        """
        Return a thread under worker control
        :param w: worker to free
        """
        with self.mutex:
            self.workers.append(w)
            self.workerwait.notify()

    def getBestWorker(self, reqfuncs=list(), reqresources=list()):
        """
        Block until a worker slot is available and return that worker.
        :param reqfuncs: functions required for execution [hash-int] (currently unused)
        :param reqresources: resources required for execution [str] (currently unused)
        """
        with self.mutex:
            while len(self.workers) == 0:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed; any other failure of
                # wait() simply re-checks the loop condition.
                try:
                    self.workerwait.wait()
                except Exception:
                    pass
            return self.workers[0]
class Worker:
    """Server-side proxy for one connected worker process.

    Tracks free worker threads, which function bytecodes the worker has
    already received, and the deferred objects awaiting job results.
    """

    def __init__(self, sock, distserver):
        # Wrap the raw socket in the framed-message protocol.
        self.fsock = FormatSocket(sock)
        # First message from the worker announces its thread count.
        dillstr = self.fsock.recv()
        dillobj = dill.loads(dillstr)
        self.lock = Lock()
        self.cond = Condition(self.lock)
        # Number of currently free execution threads on the worker.
        self.threads = dillobj[THREADS_JSON]
        # Hashes of function bytecode already sent, so code ships once.
        self.functioncache = set()
        # jobid -> (deferobj, callback, ecallback, addjobargs) for jobs
        # dispatched but not yet completed.
        self.deferobjs = {}
        self.jobid = 0
        self.distserver = distserver

    def dispatchJob(self,f,args,kwargs,deferobj,callback,ecallback,addjobargs):
        """Serialize a call to f(*args, **kwargs) and send it to the worker.

        Returns the number of remaining free threads (may go negative;
        presumably the caller is expected to handle oversubscription).
        """
        with self.lock:
            # If all threads actually taken up to client to deal with it
            bytecode = dill.dumps(f)
            keyid = hash(bytecode)
            if keyid not in self.functioncache:
                # First dispatch of this function: include its bytecode.
                dilldict = {FUNCTION_JSON: bytecode,
                            FID_JSON: keyid,
                            ARGS_JSON: args,
                            KWARGS_JSON: kwargs,
                            JOBID_JSON: self.jobid}
                self.functioncache.add(keyid)
            else:
                # Worker already holds the code; refer to it by hash only.
                dilldict = {FID_JSON: keyid,
                            ARGS_JSON: args,
                            KWARGS_JSON: kwargs,
                            JOBID_JSON: self.jobid}
            dillbytes = dill.dumps(dilldict)
            self.fsock.send(dillbytes)
            self.deferobjs[self.jobid] = (deferobj,callback,ecallback,addjobargs)
            self.jobid += 1
            self.threads -= 1
            return self.threads

    def recvandwork(self):
        """Receive one result/error message and resolve the matching job.

        Returns the updated number of free threads.
        """
        # recv happens outside the lock — presumably so dispatching can
        # proceed while waiting for data.
        msg = self.fsock.recv()
        dillobj = dill.loads(msg)
        jobid = dillobj[JOBID_JSON]
        if RESULT_JSON in dillobj:
            result = dillobj[RESULT_JSON]
            with self.lock:
                deferobj, callback, ecallback, addjobargs = self.deferobjs.pop(jobid)
                # NOTE(review): callback runs while holding self.lock; a
                # callback that re-enters this Worker would deadlock.
                callback(result)
                deferobj.__setvalue__(result)
                self.threads += 1
                return self.threads
        elif ERROR_JSON in dillobj:
            error = dillobj[ERROR_JSON]
            with self.lock:
                deferobj, callback, ecallback, addjobargs = self.deferobjs.pop(jobid)
                self.threads += 1
                ecallback(error)
                return self.threads

    def hasdata(self):
        # True when a message is buffered on the socket.
        return self.fsock.hasdata()

    def getunfinishedjobs(self):
        """Return the addjobargs of every job still awaiting a result."""
        jobstoadd = []
        with self.lock:
            for jobid in self.deferobjs.keys():
                deferobj, callback, ecallback, addjobargs = self.deferobjs[jobid]
                jobstoadd.append(addjobargs)
        return jobstoadd

    def close(self):
        self.fsock.close()

    def fileno(self):
        # Exposes the underlying socket's file descriptor (e.g. for select).
        return self.fsock.fileno()
class CheckableQueue(queue.Queue):
    """FIFO queue that also supports len(), membership tests and removal of
    arbitrary items, each performed under the queue's own mutex.

    queue.Queue already stores items in a collections.deque bound to
    ``self.queue`` (``_put`` appends, ``_get`` pops from the left), so the
    previous list-based overrides of ``_init``/``_put``/``_get`` are
    dropped: the inherited deque gives identical FIFO behavior with an
    O(1) ``_get`` instead of the list's O(n) ``pop(0)``.
    """

    def __len__(self):
        """Number of queued items (like qsize(), but usable via len())."""
        with self.mutex:
            return len(self.queue)

    def __contains__(self, item):
        """Return True if item is currently queued."""
        with self.mutex:
            return item in self.queue

    def remove(self, item):
        """Remove the first occurrence of item; raise ValueError if absent."""
        with self.mutex:
            self.queue.remove(item)
|
{
"content_hash": "3fecb8d11b010e5141d2334aaba9eb09",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 85,
"avg_line_length": 29.836477987421382,
"alnum_prop": 0.5427908937605397,
"repo_name": "Renmusxd/Bashmu",
"id": "8f4b3dcd9dd2635992ce1cc1c32d12fae2bd0103",
"size": "4744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bashmu/workermanager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40466"
}
],
"symlink_target": ""
}
|
# Base dependency set for the package; extended below for old interpreters.
install_requires = [
    'Sphinx',
    'invoke',
    'argh',
    'six',
    'future',
    'Babel',
    'unipath',
    'python_dateutil',
    'gitpython',
    'pytest-cov',
]

# `importlib` joined the standard library in Python 2.7; on Python 2.6 the
# import fails and we pull the backport from PyPI instead. Thanks to
# http://stackoverflow.com/questions/9418064
try:
    import importlib
except ImportError:
    install_requires.append('importlib')
# Keyword arguments eventually passed to setuptools.setup().
SETUP_INFO = {
    'name': 'atelier',
    'version': '1.1.8',
    'install_requires': install_requires,
    'scripts': ['scripts/per_project'],
    'description': "A collection of tools for software artists",
    'license': 'Free BSD',
    'test_suite': 'tests',
    'author': 'Luc Saffre',
    'author_email': 'luc@lino-framework.org',
    'url': "http://atelier.lino-framework.org",
    # Shown on PyPI; badges first, then a short feature overview.
    'long_description': """\
.. image:: https://readthedocs.org/projects/atelier/badge/?version=latest
    :target: http://atelier.readthedocs.io/en/latest/?badge=latest
.. image:: https://coveralls.io/repos/github/lino-framework/atelier/badge.svg?branch=master
    :target: https://coveralls.io/github/lino-framework/atelier?branch=master
.. image:: https://travis-ci.org/lino-framework/atelier.svg?branch=master
    :target: https://travis-ci.org/lino-framework/atelier?branch=master
.. image:: https://img.shields.io/pypi/v/atelier.svg
    :target: https://pypi.python.org/pypi/atelier/
.. image:: https://img.shields.io/pypi/l/atelier.svg
    :target: https://pypi.python.org/pypi/atelier/
`atelier` is a collection of tools for managing and maintaining
Python software projects.
It contains:
- some general Python utilities
  (`atelier.utils <http://atelier.lino-framework.org/api/atelier.utils.html>`_)
- a library for generating reStructuredText from Python
  (`atelier.rstgen <http://atelier.lino-framework.org/api/atelier.rstgen.html>`_)
- some Sphinx extensions
  (`atelier.sphinxconf <http://atelier.lino-framework.org/api/atelier.sphinxconf.html>`_)
- a library of invoke commands
  (`atelier.invlib <http://atelier.lino-framework.org/api/atelier.invlib.html>`_)
- a minimalistic project management
  (`atelier.projects <http://atelier.lino-framework.org/api/atelier.projects.html>`_)
The central project homepage is http://atelier.lino-framework.org
""",
    # One trove classifier per line; splitlines() turns the block into a list.
    'classifiers': """\
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Framework :: Sphinx :: Extension
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Natural Language :: English
Operating System :: OS Independent""".splitlines(),
}
# Packages to ship; filter(None, ...) drops the empty entries that
# splitlines() yields for the blank first line of the block.
SETUP_INFO.update(packages=list(filter(None, """
atelier
atelier.sphinxconf
atelier.invlib
""".splitlines())))
# Per-package data files; populated below via add_package_data().
SETUP_INFO.update(package_data=dict())
def add_package_data(package, *patterns):
    """Register extra data-file glob *patterns* for *package*.

    Appends to ``SETUP_INFO['package_data'][package]``, creating the
    entry on first use, and returns the (shared, mutable) pattern list.
    """
    # Renamed from `l`: single-letter `l` is easily confused with `1` (E741).
    pattern_list = SETUP_INFO['package_data'].setdefault(package, [])
    pattern_list.extend(patterns)
    return pattern_list


add_package_data('atelier.sphinxconf', '*.html')
|
{
"content_hash": "ce869d47ca1daa150e0cde99ae730d07",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 91,
"avg_line_length": 35.670731707317074,
"alnum_prop": 0.7172649572649573,
"repo_name": "khchine5/atelier",
"id": "4afc9722ca33d16fe723182fa4db861729cd3795",
"size": "3160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atelier/setup_info.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "584"
},
{
"name": "Python",
"bytes": "160140"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.