| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
from __future__ import absolute_import
from mayavi.modules.vectors import *
|
{
"content_hash": "aad817aed437dafde79a076489b12bfb",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 38,
"avg_line_length": 38,
"alnum_prop": 0.7894736842105263,
"repo_name": "enthought/etsproxy",
"id": "374af0008a35d3dac44c8ccee2712cc824eb5da8",
"size": "91",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/mayavi/modules/vectors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
}
|
"""
WSGI config for hr project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hr.settings")
application = get_wsgi_application()
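# A minimal way to serve this module in production (an illustrative sketch;
# it assumes gunicorn is installed and the project root is on PYTHONPATH):
#
#     gunicorn hr.wsgi:application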
|
{
"content_hash": "25c7b57adfa9a87d6f6d15dfdd841acf",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 23.8125,
"alnum_prop": 0.7637795275590551,
"repo_name": "Miles-Yao/HR",
"id": "20b0450cad720029a55fb8d14ed47eb2433760f9",
"size": "381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hr/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4159"
}
],
"symlink_target": ""
}
|
"""Reproduce an input boolean state."""
import asyncio
import logging
from typing import Iterable, Optional
from homeassistant.const import (
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_OFF,
ATTR_ENTITY_ID,
)
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def _async_reproduce_states(
hass: HomeAssistantType, state: State, context: Optional[Context] = None
) -> None:
"""Reproduce input boolean states."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if state.state not in (STATE_ON, STATE_OFF):
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
if cur_state.state == state.state:
return
service = SERVICE_TURN_ON if state.state == STATE_ON else SERVICE_TURN_OFF
await hass.services.async_call(
DOMAIN,
service,
{ATTR_ENTITY_ID: state.entity_id},
context=context,
blocking=True,
)
async def async_reproduce_states(
hass: HomeAssistantType, states: Iterable[State], context: Optional[Context] = None
) -> None:
"""Reproduce component states."""
await asyncio.gather(
*(_async_reproduce_states(hass, state, context) for state in states)
)
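# Illustrative usage (a sketch; it assumes a running Home Assistant instance
# `hass` and an existing "input_boolean.kitchen" entity, both hypothetical):
#
#     await async_reproduce_states(
#         hass, [State("input_boolean.kitchen", STATE_ON)]
#     )
#
# Each requested state is reproduced concurrently, and entities that are
# already in the requested state are left untouched.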
|
{
"content_hash": "f502b4911dd97a02c90463e2dd217c0e",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 87,
"avg_line_length": 26.19298245614035,
"alnum_prop": 0.6624246483590087,
"repo_name": "Cinntax/home-assistant",
"id": "b8bc18edfac9a782304518334199dff71aad517b",
"size": "1493",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/input_boolean/reproduce_state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17374056"
},
{
"name": "Shell",
"bytes": "6792"
}
],
"symlink_target": ""
}
|
import os
import boto3
from bar import bar
from bar.baz import baz
from foo import foo
_ = os
_ = boto3
_ = bar
_ = baz
_ = foo
|
{
"content_hash": "0ceed00d133db9180fb60c3784fb2df7",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 23,
"avg_line_length": 10.833333333333334,
"alnum_prop": 0.6615384615384615,
"repo_name": "bazelbuild/rules_python",
"id": "2b5b04425795453ee211686bd21e28cf205ddfad",
"size": "130",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "gazelle/testdata/monorepo/coarse_grained/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "78630"
},
{
"name": "Python",
"bytes": "116552"
},
{
"name": "Shell",
"bytes": "676"
},
{
"name": "Starlark",
"bytes": "196289"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class YcalendarValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="ycalendar", parent_name="surface", **kwargs):
super(YcalendarValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop(
"values",
[
"chinese",
"coptic",
"discworld",
"ethiopian",
"gregorian",
"hebrew",
"islamic",
"jalali",
"julian",
"mayan",
"nanakshahi",
"nepali",
"persian",
"taiwan",
"thai",
"ummalqura",
],
),
**kwargs,
)
|
{
"content_hash": "189579d5fbc3714ab29ac86fc6b1e7da",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 81,
"avg_line_length": 31.625,
"alnum_prop": 0.38537549407114624,
"repo_name": "plotly/plotly.py",
"id": "22cb7e6eec75ecd94382cb51eb9530285b7bedee",
"size": "1012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/surface/_ycalendar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""Get all hardware notifications associated with the passed hardware ID."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@click.argument('identifier')
@environment.pass_env
def cli(env, identifier):
"""Get all hardware notifications."""
hardware = SoftLayer.HardwareManager(env.client)
notifications = hardware.get_notifications(identifier)
    table = formatting.KeyValueTable(['Id', 'Domain', 'Hostname', 'Username', 'Email', 'FirstName', 'LastName'])
table.align['Domain'] = 'r'
table.align['Username'] = 'l'
for notification in notifications:
table.add_row([notification['id'],
notification['hardware']['fullyQualifiedDomainName'], notification['hardware']['hostname'],
notification['user']['username'], notification['user']['email'],
notification['user']['firstName'], notification['user']['lastName']])
env.fout(table)
|
{
"content_hash": "cab63400774f63a8194b7a0fd86f9d56",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 114,
"avg_line_length": 35.516129032258064,
"alnum_prop": 0.6775658492279746,
"repo_name": "softlayer/softlayer-python",
"id": "0612dcba7ea98c98c03b095ef83a1510b76c4126",
"size": "1101",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "SoftLayer/CLI/hardware/notifications.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "854"
},
{
"name": "Makefile",
"bytes": "7458"
},
{
"name": "Python",
"bytes": "2657752"
}
],
"symlink_target": ""
}
|
import logging
from unittest import TestCase
from dephp.scanner import lexer, scan_string
from dephp.plyparser import parser, parse_string
from dephp import phpast as ast
from tests._util import log_level
class ExpressionTestCase(TestCase):
def test_assign_with_double_quotes(self):
# PHP Scanner doesn't work like this.
tokens = scan_string('<?php $data = "a";')
expected = ['VARIABLE', 'EQUALS', 'QUOTE', 'ENCAPSED_AND_WHITESPACE',
'QUOTE', 'SEMI']
self.assertEquals(expected, [token.type for token in tokens])
with log_level(logging.INFO):
parsed = parse_string('<?php $data = "a";')
expected = ast.Program([ast.AssignOp(ast.Variable('$data'), '=', ast.String('a'))])
self.assertEquals(expected, parsed)
def test_assign_with_single_quotes(self):
parsed = parse_string('<?php $data = \'a\';')
expected = ast.Program([ast.AssignOp(ast.Variable('$data'), '=', ast.String('a'))])
self.assertEquals(expected, parsed)
def test_parenthesis_new(self):
"""This tends to get shift/reduce-ey with other bracketted
expressions."""
self.markTestIncomplete("don't know the node names yet")
parsed = parse_string('<?php (new stdclass)->prop;')
expected = ast.Program([ast.AssignOp(ast.Variable('$data'), '=', ast.String('a'))])
self.assertEquals(expected, parsed)
parsed = parse_string('<?php (new stdclass(1, 2))->prop;')
expected = ast.Program([ast.AssignOp(ast.Variable('$data'), '=', ast.String('a'))])
self.assertEquals(expected, parsed)
class InternalFunctionsTestCase(TestCase):
def test_isset(self):
parsed = parse_string('<?php isset($a);')
expected = ast.Program([ast.IsSet([ast.Variable('$a')])])
self.assertEquals(expected, parsed)
parsed = parse_string('<?php isset($a, $b);')
expected = ast.Program([ast.IsSet([ast.Variable('$a'), ast.Variable('$b')])])
self.assertEquals(expected, parsed)
def test_empty(self):
parsed = parse_string('<?php empty($a);')
expected = ast.Program([ast.Empty(ast.Variable('$a'))])
self.assertEquals(expected, parsed)
|
{
"content_hash": "8db553e624f185a241af61c9450aaa6f",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 91,
"avg_line_length": 42.15094339622642,
"alnum_prop": 0.6257833482542524,
"repo_name": "bnkr/dephp",
"id": "a53b187e520342dbcf245fbcc69648415e74aa54",
"size": "2234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/parser/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133361"
}
],
"symlink_target": ""
}
|
"""Handles all requests to the conductor service."""
from oslo.config import cfg
from oslo import messaging
from nova import baserpc
from nova.conductor import manager
from nova.conductor import rpcapi
from nova.i18n import _LI, _LW
from nova.openstack.common import log as logging
from nova import utils
conductor_opts = [
cfg.BoolOpt('use_local',
default=False,
help='Perform nova-conductor operations locally'),
cfg.StrOpt('topic',
default='conductor',
help='The topic on which conductor nodes listen'),
cfg.StrOpt('manager',
default='nova.conductor.manager.ConductorManager',
help='Full class name for the Manager for conductor'),
cfg.IntOpt('workers',
help='Number of workers for OpenStack Conductor service. '
'The default will be the number of CPUs available.')
]
conductor_group = cfg.OptGroup(name='conductor',
title='Conductor Options')
CONF = cfg.CONF
CONF.register_group(conductor_group)
CONF.register_opts(conductor_opts, conductor_group)
LOG = logging.getLogger(__name__)
class LocalAPI(object):
"""A local version of the conductor API that does database updates
locally instead of via RPC.
"""
def __init__(self):
# TODO(danms): This needs to be something more generic for
# other/future users of this sort of functionality.
self._manager = utils.ExceptionHelper(manager.ConductorManager())
def wait_until_ready(self, context, *args, **kwargs):
# nothing to wait for in the local case.
pass
def instance_update(self, context, instance_uuid, **updates):
"""Perform an instance update in the database."""
return self._manager.instance_update(context, instance_uuid,
updates, 'compute')
def instance_get_all_by_host(self, context, host, columns_to_join=None):
return self._manager.instance_get_all_by_host(
context, host, None, columns_to_join=columns_to_join)
def instance_get_all_by_host_and_node(self, context, host, node):
return self._manager.instance_get_all_by_host(context, host, node,
None)
def migration_get_in_progress_by_host_and_node(self, context, host, node):
return self._manager.migration_get_in_progress_by_host_and_node(
context, host, node)
def aggregate_metadata_get_by_host(self, context, host,
key='availability_zone'):
return self._manager.aggregate_metadata_get_by_host(context,
host,
key)
def bw_usage_get(self, context, uuid, start_period, mac):
return self._manager.bw_usage_update(context, uuid, mac, start_period,
None, None, None, None, None, False)
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out,
last_refreshed=None, update_cells=True):
return self._manager.bw_usage_update(context, uuid, mac, start_period,
bw_in, bw_out,
last_ctr_in, last_ctr_out,
last_refreshed,
update_cells=update_cells)
def provider_fw_rule_get_all(self, context):
return self._manager.provider_fw_rule_get_all(context)
def block_device_mapping_create(self, context, values):
return self._manager.block_device_mapping_update_or_create(context,
values,
create=True)
def block_device_mapping_update(self, context, bdm_id, values):
values = dict(values)
values['id'] = bdm_id
return self._manager.block_device_mapping_update_or_create(
context, values, create=False)
def block_device_mapping_update_or_create(self, context, values):
return self._manager.block_device_mapping_update_or_create(context,
values,
create=None)
def block_device_mapping_get_all_by_instance(self, context, instance,
legacy=True):
return self._manager.block_device_mapping_get_all_by_instance(
context, instance, legacy)
def vol_get_usage_by_time(self, context, start_time):
return self._manager.vol_get_usage_by_time(context, start_time)
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed=None,
update_totals=False):
return self._manager.vol_usage_update(context, vol_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance, last_refreshed,
update_totals)
def service_get_all(self, context):
return self._manager.service_get_all_by(context, host=None, topic=None,
binary=None)
def service_get_all_by_topic(self, context, topic):
return self._manager.service_get_all_by(context, topic=topic,
host=None, binary=None)
def service_get_all_by_host(self, context, host):
return self._manager.service_get_all_by(context, host=host, topic=None,
binary=None)
def service_get_by_host_and_topic(self, context, host, topic):
return self._manager.service_get_all_by(context, topic, host,
binary=None)
def service_get_by_compute_host(self, context, host):
result = self._manager.service_get_all_by(context, 'compute', host,
binary=None)
# FIXME(comstud): A major revision bump to 2.0 should return a
# single entry, so we should just return 'result' at that point.
return result[0]
def service_get_by_args(self, context, host, binary):
return self._manager.service_get_all_by(context, host=host,
binary=binary, topic=None)
def service_create(self, context, values):
return self._manager.service_create(context, values)
def service_destroy(self, context, service_id):
return self._manager.service_destroy(context, service_id)
def compute_node_create(self, context, values):
return self._manager.compute_node_create(context, values)
def compute_node_update(self, context, node, values, prune_stats=False):
# NOTE(belliott) ignore prune_stats param, it's no longer relevant
return self._manager.compute_node_update(context, node, values)
def compute_node_delete(self, context, node):
return self._manager.compute_node_delete(context, node)
def service_update(self, context, service, values):
return self._manager.service_update(context, service, values)
def task_log_get(self, context, task_name, begin, end, host, state=None):
return self._manager.task_log_get(context, task_name, begin, end,
host, state)
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items=None, message=None):
return self._manager.task_log_begin_task(context, task_name,
begin, end, host,
task_items, message)
def task_log_end_task(self, context, task_name, begin, end, host,
errors, message=None):
return self._manager.task_log_end_task(context, task_name,
begin, end, host,
errors, message)
def notify_usage_exists(self, context, instance, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
return self._manager.notify_usage_exists(
context, instance, current_period, ignore_missing_network_data,
system_metadata, extra_usage_info)
def security_groups_trigger_handler(self, context, event, *args):
return self._manager.security_groups_trigger_handler(context,
event, args)
def security_groups_trigger_members_refresh(self, context, group_ids):
return self._manager.security_groups_trigger_members_refresh(context,
group_ids)
def get_ec2_ids(self, context, instance):
return self._manager.get_ec2_ids(context, instance)
def object_backport(self, context, objinst, target_version):
return self._manager.object_backport(context, objinst, target_version)
class LocalComputeTaskAPI(object):
def __init__(self):
# TODO(danms): This needs to be something more generic for
# other/future users of this sort of functionality.
self._manager = utils.ExceptionHelper(
manager.ComputeTaskManager())
def resize_instance(self, context, instance, extra_instance_updates,
scheduler_hint, flavor, reservations,
clean_shutdown=True):
# NOTE(comstud): 'extra_instance_updates' is not used here but is
# needed for compatibility with the cells_rpcapi version of this
# method.
self._manager.migrate_server(
context, instance, scheduler_hint, live=False, rebuild=False,
flavor=flavor, block_migration=None, disk_over_commit=None,
reservations=reservations, clean_shutdown=clean_shutdown)
def live_migrate_instance(self, context, instance, host_name,
block_migration, disk_over_commit):
scheduler_hint = {'host': host_name}
self._manager.migrate_server(
context, instance, scheduler_hint, True, False, None,
block_migration, disk_over_commit, None)
def build_instances(self, context, instances, image,
filter_properties, admin_password, injected_files,
requested_networks, security_groups, block_device_mapping,
legacy_bdm=True):
utils.spawn_n(self._manager.build_instances, context,
instances=instances, image=image,
filter_properties=filter_properties,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=legacy_bdm)
def unshelve_instance(self, context, instance):
utils.spawn_n(self._manager.unshelve_instance, context,
instance=instance)
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate=False, on_shared_storage=False,
preserve_ephemeral=False, host=None, kwargs=None):
# kwargs unused but required for cell compatibility.
utils.spawn_n(self._manager.rebuild_instance, context,
instance=instance,
new_pass=new_pass,
injected_files=injected_files,
image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata,
bdms=bdms,
recreate=recreate,
on_shared_storage=on_shared_storage,
host=host,
preserve_ephemeral=preserve_ephemeral)
class API(LocalAPI):
"""Conductor API that does updates via RPC to the ConductorManager."""
def __init__(self):
self._manager = rpcapi.ConductorAPI()
self.base_rpcapi = baserpc.BaseAPI(topic=CONF.conductor.topic)
def wait_until_ready(self, context, early_timeout=10, early_attempts=10):
'''Wait until a conductor service is up and running.
This method calls the remote ping() method on the conductor topic until
it gets a response. It starts with a shorter timeout in the loop
(early_timeout) up to early_attempts number of tries. It then drops
back to the globally configured timeout for rpc calls for each retry.
'''
attempt = 0
timeout = early_timeout
# if we show the timeout message, make sure we show a similar
# message saying that everything is now working to avoid
# confusion
has_timedout = False
while True:
# NOTE(danms): Try ten times with a short timeout, and then punt
# to the configured RPC timeout after that
if attempt == early_attempts:
timeout = None
attempt += 1
# NOTE(russellb): This is running during service startup. If we
# allow an exception to be raised, the service will shut down.
# This may fail the first time around if nova-conductor wasn't
# running when this service started.
try:
self.base_rpcapi.ping(context, '1.21 GigaWatts',
timeout=timeout)
if has_timedout:
LOG.info(_LI('nova-conductor connection '
'established successfully'))
break
except messaging.MessagingTimeout:
has_timedout = True
LOG.warning(_LW('Timed out waiting for nova-conductor. '
'Is it running? Or did this service start '
'before nova-conductor? '
'Reattempting establishment of '
'nova-conductor connection...'))
def instance_update(self, context, instance_uuid, **updates):
"""Perform an instance update in the database."""
return self._manager.instance_update(context, instance_uuid,
updates, 'conductor')
class ComputeTaskAPI(object):
"""ComputeTask API that queues up compute tasks for nova-conductor."""
def __init__(self):
self.conductor_compute_rpcapi = rpcapi.ComputeTaskAPI()
def resize_instance(self, context, instance, extra_instance_updates,
scheduler_hint, flavor, reservations,
clean_shutdown=True):
# NOTE(comstud): 'extra_instance_updates' is not used here but is
# needed for compatibility with the cells_rpcapi version of this
# method.
self.conductor_compute_rpcapi.migrate_server(
context, instance, scheduler_hint, live=False, rebuild=False,
flavor=flavor, block_migration=None, disk_over_commit=None,
reservations=reservations, clean_shutdown=clean_shutdown)
def live_migrate_instance(self, context, instance, host_name,
block_migration, disk_over_commit):
scheduler_hint = {'host': host_name}
self.conductor_compute_rpcapi.migrate_server(
context, instance, scheduler_hint, True, False, None,
block_migration, disk_over_commit, None)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping, legacy_bdm=True):
self.conductor_compute_rpcapi.build_instances(context,
instances=instances, image=image,
filter_properties=filter_properties,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=legacy_bdm)
def unshelve_instance(self, context, instance):
self.conductor_compute_rpcapi.unshelve_instance(context,
instance=instance)
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate=False, on_shared_storage=False,
preserve_ephemeral=False, host=None, kwargs=None):
# kwargs unused but required for cell compatibility
self.conductor_compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
injected_files=injected_files,
image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata,
bdms=bdms,
recreate=recreate,
on_shared_storage=on_shared_storage,
preserve_ephemeral=preserve_ephemeral,
host=host)
|
{
"content_hash": "7bcc6c44f6be4ceb9320b9da99c168eb",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 79,
"avg_line_length": 46.59308510638298,
"alnum_prop": 0.5827958216793196,
"repo_name": "mgagne/nova",
"id": "5d5f30ba82d6ff149f502343bd9930f89b0d7766",
"size": "18124",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/conductor/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15421976"
},
{
"name": "Shell",
"bytes": "21612"
}
],
"symlink_target": ""
}
|
"""
Deuce Client - Block API
"""
import hashlib
import json
from stoplight import validate
from deuceclient.common.validation import *
class Block(object):
@staticmethod
def make_id(data):
sha1 = hashlib.sha1()
sha1.update(data)
return sha1.hexdigest().lower()
# TODO: Add a validator for data, ref_count, ref_modified
@validate(project_id=ProjectIdRule,
vault_id=VaultIdRule,
block_id=MetadataBlockIdRuleNoneOkay,
storage_id=StorageBlockIdRuleNoneOkay)
def __init__(self, project_id, vault_id, block_id=None,
storage_id=None, data=None,
ref_count=None, ref_modified=None, block_size=None,
block_orphaned='indeterminate', block_type='metadata'):
# NOTE(TheSriram): By default, the block_type is set to be metadata
# but this can be overridden when instantiating to either be metadata
# or storage
if block_id is None and storage_id is None:
raise ValueError("Both storage_id and block_id cannot be None")
elif block_type not in ('metadata', 'storage'):
raise ValueError(
                'Invalid block_type value: {0}. '
                'Accepted values are metadata '
                'and storage'.format(block_type))
elif block_type == 'metadata' and block_id is None:
raise ValueError(
'block_id cannot be None, if block_type is set to metadata'
)
elif block_type == 'storage' and storage_id is None:
raise ValueError(
'storage_id cannot be None, if block_type is set to storage'
)
else:
self.__properties = {
'project_id': project_id,
'vault_id': vault_id,
'block_id': block_id,
'storage_id': storage_id,
'data': data,
'references': {
'count': ref_count,
'modified': ref_modified,
},
'block_size': block_size,
'block_orphaned': block_orphaned,
'block_type': block_type
}
def serialize(self):
return {
'project_id': self.project_id,
'vault_id': self.vault_id,
'block_id': self.block_id,
'storage_id': self.storage_id,
'references': {
'count': self.ref_count,
'modified': self.ref_modified
},
'block_size': self.__properties['block_size'],
'block_orphaned': self.block_orphaned,
'block_type': self.block_type
}
@staticmethod
def deserialize(serialized_data):
return Block(serialized_data['project_id'],
serialized_data['vault_id'],
block_id=serialized_data['block_id'],
storage_id=serialized_data['storage_id'],
ref_count=serialized_data['references']['count'],
ref_modified=serialized_data['references']['modified'],
block_size=serialized_data['block_size'],
block_orphaned=serialized_data['block_orphaned'],
block_type=serialized_data['block_type'])
def to_json(self):
return json.dumps(self.serialize())
@staticmethod
def from_json(serialized_data):
json_data = json.loads(serialized_data)
return Block.deserialize(json_data)
@property
def project_id(self):
return self.__properties['project_id']
@property
def vault_id(self):
return self.__properties['vault_id']
@property
def block_id(self):
return self.__properties['block_id']
@property
def block_type(self):
return self.__properties['block_type']
@block_id.setter
@validate(value=MetadataBlockIdRuleNoneOkay)
def block_id(self, value):
if self.__properties['block_type'] == 'metadata':
raise ValueError('Cannot update block_id '
'for metadata blocks')
else:
self.__properties['block_id'] = value
@property
def storage_id(self):
return self.__properties['storage_id']
@storage_id.setter
@validate(value=StorageBlockIdRule)
def storage_id(self, value):
if self.__properties['block_type'] == 'storage':
raise ValueError('Cannot update storage_id '
'for storage blocks')
else:
self.__properties['storage_id'] = value
@property
def data(self):
return self.__properties['data']
# TODO: Add a validator
@data.setter
def data(self, value):
self.__properties['data'] = value
def __len__(self):
if self.data is None:
if self.__properties['block_size'] is None:
return 0
else:
return self.__properties['block_size']
else:
return len(self.data)
def set_block_size(self, value):
self.__properties['block_size'] = value
@property
def block_orphaned(self):
return self.__properties['block_orphaned']
@block_orphaned.setter
@validate(value=BoolRule)
def block_orphaned(self, value):
self.__properties['block_orphaned'] = value
@property
def ref_count(self):
return self.__properties['references']['count']
# TODO: Add a validator
@ref_count.setter
def ref_count(self, value):
self.__properties['references']['count'] = value
@property
def ref_modified(self):
return self.__properties['references']['modified']
# TODO: Add a validator
@ref_modified.setter
def ref_modified(self, value):
self.__properties['references']['modified'] = value
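# Illustrative usage (a sketch; the project/vault identifiers below are
# hypothetical and assumed to satisfy the validation rules imported from
# deuceclient.common.validation):
#
#     data = b'hello'
#     block = Block('project-1', 'vault-1',
#                   block_id=Block.make_id(data), data=data)
#     block.block_id   # 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
#     len(block)       # 5, the length of the data payload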
|
{
"content_hash": "34884529dffc9ba999db990ad19c232f",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 77,
"avg_line_length": 31.962162162162162,
"alnum_prop": 0.5574158633519364,
"repo_name": "rackerlabs/deuce-client",
"id": "a5dcf269c3e055b1bfd68ae34fd0cbd3642517df",
"size": "5913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deuceclient/api/block.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "362823"
}
],
"symlink_target": ""
}
|
from murano.dsl import exceptions
import murano.dsl.type_scheme as type_scheme
class PropertyUsages(object):
In = 'In'
Out = 'Out'
InOut = 'InOut'
Runtime = 'Runtime'
Const = 'Const'
Config = 'Config'
All = set([In, Out, InOut, Runtime, Const, Config])
Writable = set([Out, InOut, Runtime])
class Spec(object):
def __init__(self, declaration, owner_class):
self._namespace_resolver = owner_class.namespace_resolver
self._contract = type_scheme.TypeScheme(declaration['Contract'])
self._usage = declaration.get('Usage') or 'In'
self._default = declaration.get('Default')
self._has_default = 'Default' in declaration
if self._usage not in PropertyUsages.All:
raise exceptions.DslSyntaxError(
'Unknown type {0}. Must be one of ({1})'.format(
self._usage, ', '.join(PropertyUsages.All)))
def validate(self, value, this, owner, context,
object_store, default=None):
if default is None:
default = self.default
return self._contract(value, context, this, owner, object_store,
self._namespace_resolver, default)
@property
def default(self):
return self._default
@property
def has_default(self):
return self._has_default
@property
def usage(self):
return self._usage
class PropertySpec(Spec):
pass
class ArgumentSpec(Spec):
pass
|
{
"content_hash": "77e7f379a112e90a9e1f4f9c5a3c2b00",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 72,
"avg_line_length": 28.28301886792453,
"alnum_prop": 0.6077384923282189,
"repo_name": "chenyujie/hybrid-murano",
"id": "5fcebbe4d5d3fc0d34f96c1edc0f68a92a5532e1",
"size": "2113",
"binary": false,
"copies": "3",
"ref": "refs/heads/hybrid-master",
"path": "murano/dsl/typespec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1013"
},
{
"name": "PowerShell",
"bytes": "8634"
},
{
"name": "Python",
"bytes": "1004440"
},
{
"name": "Shell",
"bytes": "6751"
}
],
"symlink_target": ""
}
|
from functools import WRAPPER_ASSIGNMENTS
from django.apps import apps
__all__ = ['is_installed', 'installed_apps']
def is_installed(app_name):
return apps.is_installed(app_name)
def installed_apps():
return [app.name for app in apps.get_app_configs()]
def available_attrs(fn):
return WRAPPER_ASSIGNMENTS
|
{
"content_hash": "68f01deef949d592bae643ee9c499dc8",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 55,
"avg_line_length": 23,
"alnum_prop": 0.7267080745341615,
"repo_name": "rsalmaso/django-cms",
"id": "f65a9709c661a3bc2ea31f2c3f3bafeb6deb6683",
"size": "322",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "cms/utils/compat/dj.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "204223"
},
{
"name": "JavaScript",
"bytes": "1250281"
},
{
"name": "Python",
"bytes": "2386268"
},
{
"name": "SCSS",
"bytes": "137693"
},
{
"name": "Shell",
"bytes": "22511"
}
],
"symlink_target": ""
}
|
import os
import shutil
from unittest import TestCase
from mlimages.gather.imagenet import ImagenetAPI
import testss.env as env
class TestImagenetAPI(TestCase):
def test_gather_wnid(self):
p = env.get_data_folder()
api = ImagenetAPI(p)
api.gather("n11531193")
path = env.get_path("wilding")
self.assertTrue(os.path.isdir(path))
shutil.rmtree(path)
def test_gather_subset(self):
p = env.get_data_folder()
api = ImagenetAPI(p, limit=3)
api.gather("n09289331", include_subset=True)
path = env.get_path("glacier")
self.assertTrue(os.path.isdir(path))
for f in ["alpine_glacier", "continental_glacier", "piedmont_glacier"]:
p = env.get_path("glacier/" + f)
self.assertTrue(os.path.isdir(p))
shutil.rmtree(path)
|
{
"content_hash": "4367e9ba71a540e2a6d6e1ff748e964f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 27.419354838709676,
"alnum_prop": 0.6270588235294118,
"repo_name": "icoxfog417/mlimages",
"id": "57a8a4afd45d54b862e9e801d29d850d7618a5fc",
"size": "850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/gather/imagenet/test_imagenet_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34265"
}
],
"symlink_target": ""
}
|
"""Model class for WMS Resource"""
from django.contrib.gis.db import models
__author__ = 'Christian Christelis <christian@kartoza.com>'
__date__ = '04/2015'
__license__ = "GPL"
__copyright__ = 'kartoza.com'
class NationalCertificateVocational(models.Model):
id = models.AutoField(primary_key=True)
national_certificate_vocational_level = models.IntegerField()
national_certificate_vocational_description = models.CharField(
max_length=255,
blank=True,
null=True)
objects = models.GeoManager()
def __unicode__(self):
return 'National Certificate Vocational Level (%s)' % (
self.national_certificate_vocational_level)
class Meta:
app_label = 'feti'
|
{
"content_hash": "afd23d7b7b75f135f14f7816db26d409",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 67,
"avg_line_length": 28.076923076923077,
"alnum_prop": 0.6712328767123288,
"repo_name": "cchristelis/feti",
"id": "3a88df93d385a90ff8fb5d3a6caab9a6380ad1ea",
"size": "745",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "django_project/feti/models/national_certificate_vocational.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "66178"
},
{
"name": "HTML",
"bytes": "3411827"
},
{
"name": "JavaScript",
"bytes": "525391"
},
{
"name": "Makefile",
"bytes": "16513"
},
{
"name": "PLpgSQL",
"bytes": "9805987"
},
{
"name": "Python",
"bytes": "372712"
},
{
"name": "Shell",
"bytes": "2539"
}
],
"symlink_target": ""
}
|
import sys
from pyasn1 import error
from pyasn1.type import tag
from pyasn1.type import univ
__all__ = ['NumericString', 'PrintableString', 'TeletexString', 'T61String', 'VideotexString',
'IA5String', 'GraphicString', 'VisibleString', 'ISO646String',
'GeneralString', 'UniversalString', 'BMPString', 'UTF8String']
NoValue = univ.NoValue
noValue = univ.noValue
class AbstractCharacterString(univ.OctetString):
"""Creates |ASN.1| schema or value object.
|ASN.1| objects are immutable and duck-type Python 2 :class:`unicode` or Python 3 :class:`str`.
When used in octet-stream context, |ASN.1| type assumes "|encoding|" encoding.
Keyword Args
------------
value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
unicode object (Python 2) or string (Python 3), alternatively string
(Python 2) or bytes (Python 3) representing octet-stream of serialised
unicode string (note `encoding` parameter) or |ASN.1| class instance.
tagSet: :py:class:`~pyasn1.type.tag.TagSet`
Object representing non-default ASN.1 tag(s)
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing non-default ASN.1 subtype constraint(s)
encoding: :py:class:`str`
Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
:class:`str` (Python 3) the payload when |ASN.1| object is used
in octet-stream context.
Raises
------
:py:class:`~pyasn1.error.PyAsn1Error`
On constraint violation or bad initializer.
"""
if sys.version_info[0] <= 2:
def __str__(self):
try:
# `str` is Py2 text representation
return self._value.encode(self.encoding)
except UnicodeEncodeError:
raise error.PyAsn1Error(
"Can't encode string '%s' with codec %s" % (self._value, self.encoding)
)
def __unicode__(self):
return unicode(self._value)
def prettyIn(self, value):
try:
if isinstance(value, unicode):
return value
elif isinstance(value, str):
return value.decode(self.encoding)
elif isinstance(value, (tuple, list)):
return self.prettyIn(''.join([chr(x) for x in value]))
elif isinstance(value, univ.OctetString):
return value.asOctets().decode(self.encoding)
else:
return unicode(value)
except (UnicodeDecodeError, LookupError):
raise error.PyAsn1Error(
"Can't decode string '%s' with codec %s" % (value, self.encoding)
)
def asOctets(self, padding=True):
return str(self)
def asNumbers(self, padding=True):
return tuple([ord(x) for x in str(self)])
else:
def __str__(self):
# `unicode` is Py3 text representation
return str(self._value)
def __bytes__(self):
try:
return self._value.encode(self.encoding)
except UnicodeEncodeError:
raise error.PyAsn1Error(
"Can't encode string '%s' with codec %s" % (self._value, self.encoding)
)
def prettyIn(self, value):
try:
if isinstance(value, str):
return value
elif isinstance(value, bytes):
return value.decode(self.encoding)
elif isinstance(value, (tuple, list)):
return self.prettyIn(bytes(value))
elif isinstance(value, univ.OctetString):
return value.asOctets().decode(self.encoding)
else:
return str(value)
except (UnicodeDecodeError, LookupError):
raise error.PyAsn1Error(
"Can't decode string '%s' with codec %s" % (value, self.encoding)
)
def asOctets(self, padding=True):
return bytes(self)
def asNumbers(self, padding=True):
return tuple(bytes(self))
#
# See OctetString.prettyPrint() for the explanation
#
def prettyOut(self, value):
return value
def prettyPrint(self, scope=0):
# first see if subclass has its own .prettyOut()
value = self.prettyOut(self._value)
if value is not self._value:
return value
return AbstractCharacterString.__str__(self)
def __reversed__(self):
return reversed(self._value)
class NumericString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 18)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class PrintableString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 19)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class TeletexString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 20)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class T61String(TeletexString):
__doc__ = TeletexString.__doc__
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class VideotexString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 21)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class IA5String(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 22)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class GraphicString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 25)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class VisibleString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 26)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class ISO646String(VisibleString):
__doc__ = VisibleString.__doc__
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class GeneralString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 27)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class UniversalString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 28)
)
encoding = "utf-32-be"
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class BMPString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 30)
)
encoding = "utf-16-be"
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class UTF8String(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
)
encoding = "utf-8"
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
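# Illustrative usage on Python 3 (a sketch):
#
#     s = UTF8String('abc')
#     str(s)        # 'abc'
#     s.asOctets()  # b'abc', encoded with the class-level "utf-8" codec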
|
{
"content_hash": "d3769bbe1cacccf92a9069fd4e042cc9",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 99,
"avg_line_length": 33.695238095238096,
"alnum_prop": 0.6411343508573583,
"repo_name": "endlessm/chromium-browser",
"id": "8986b7038979974eec223c4eea91646cb89bf024",
"size": "10771",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "third_party/catapult/third_party/gsutil/third_party/pyasn1/pyasn1/type/char.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import os
import sh
import sys
class Keychain:
identities = []
identities_str = 'need find_identity first'
def __init__(self, name, password=''):
home = os.path.expanduser("~")
# home = os.getenv('HOME')
filename = os.path.join(home, 'Library', 'Keychains', name)
self.filename = filename
self.name = name
self.password = password
# Keychain.list(self.name)
self.securitycmd = sh.Command('/usr/bin/security')
def exists(self):
if os.path.exists(self.filename):
return True
if os.path.exists(self.filename + '-db'):
return True
return False
def delete(self):
self.securitycmd('delete-keychain', self.name,
_out=sys.stdout.buffer, _err=sys.stderr.buffer)
def create(self):
self.securitycmd('create-keychain', '-p', self.password, self.name,
_out=sys.stdout.buffer, _err=sys.stderr.buffer)
def add_to_search_list(self):
self.securitycmd('list-keychain', '-s', self.name, 'login.keychain',
_out=sys.stdout.buffer, _err=sys.stderr.buffer)
@staticmethod
def list():
securitycmd = sh.Command('/usr/bin/security')
securitycmd('list-keychain',
_out=sys.stdout.buffer, _err=sys.stderr.buffer)
def unlock(self):
self.securitycmd('unlock-keychain', '-p', self.password, self.name,
_out=sys.stdout, _err=sys.stderr)
# self.securitycmd('set-keychain-settings', '-u', '-t', 6000, self.name,
self.securitycmd('set-keychain-settings', '-l', self.name,
_out=sys.stdout, _err=sys.stderr)
def sierra_operation(self):
# https://stackoverflow.com/questions/39868578/security-codesign-in-sierra-keychain-ignores-access-control-settings-and-ui-p/40870033#40870033
self.securitycmd('set-key-partition-list',
'-S', 'apple-tool:,apple:,codesign:',
'-s', '-k', self.password, self.name)
def import_certificate(self, p12_filename, p12_password=None):
if not os.path.exists(p12_filename):
raise Exception("{} not exists".format(p12_filename))
cmd = self.securitycmd.bake('import', p12_filename, '-k', self.name,
'-T', '/usr/bin/codesign',
'-T', '/usr/bin/security')
# cmd = self.securitycmd.bake('import', p12_filename, '-k', self.name,
# '-A')
if p12_password is not None:
cmd = cmd.bake('-P', p12_password)
# print(cmd)
cmd(_out=sys.stdout, _err=sys.stderr)
def export(self, p12_filename, p12_password=''):
self.securitycmd('export', '-k', self.name, '-t', 'identities',
'-f', 'pkcs12', "-P", p12_password, '-o', p12_filename)
@staticmethod
def find_identity(keychain=None, valid_only=True):
#
# output = subprocess.check_output(
# ["/usr/bin/security", "find-identity", "-p", "codesigning", "-v",
# self.name])
# for identity_id, certificate_name in re.findall(
# "[ ]+[\d]+\) ([^ ]+) \"(.*)\"", output):
# self.identities.append({'identity_id': identity_id,
# 'certificate_name': certificate_name})
# self.identities_str = output
security = sh.Command('/usr/bin/security')
cmd = security.bake('find-identity', "-p", "codesigning")
if valid_only:
cmd = cmd.bake('-v')
if keychain is not None:
if isinstance(keychain, Keychain):
cmd = cmd.bake(keychain.name)
else:
cmd = cmd.bake(keychain)
cmd(_out=sys.stdout, _err=sys.stderr)
if __name__ == '__main__':
k = Keychain('build.keychain', 'password')
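    # Illustrative flow (a sketch; it requires macOS, and 'dist.p12' with its
    # password below are hypothetical):
    #
    #     if not k.exists():
    #         k.create()
    #     k.unlock()
    #     k.import_certificate('dist.p12', p12_password='p12secret')
    #     Keychain.find_identity(keychain=k)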
|
{
"content_hash": "c5f98bb79173528fe3d99a0eaae026f7",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 150,
"avg_line_length": 38.66019417475728,
"alnum_prop": 0.5434455047714716,
"repo_name": "oylbin/iOSCodeSign",
"id": "392ad7d5aa1ce8978fb5b0055e5056f45faba0b9",
"size": "3982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ioscodesign/keychain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12456"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os
from .wrapped_decorator import signature_safe_contextmanager
from . import core
__all__ = [
'convert_reader_to_recordio_file', 'convert_reader_to_recordio_files'
]
@signature_safe_contextmanager
def create_recordio_writer(filename,
compressor=core.RecordIOWriter.Compressor.Snappy,
max_num_records=1000):
writer = core.RecordIOWriter(filename, compressor, max_num_records)
yield writer
writer.close()
def convert_reader_to_recordio_file(
filename,
reader_creator,
feeder,
compressor=core.RecordIOWriter.Compressor.Snappy,
max_num_records=1000,
feed_order=None):
"""
Convert a Python Reader to a recordio file.
Examples:
>>> import paddle.fluid as fluid
>>> import paddle.dataset.mnist as mnist
>>> import paddle
>>>
>>> tmp_program = fluid.Program()
>>> with fluid.program_guard(tmp_program):
>>> img = fluid.layers.data(name='img', shape=[784])
>>> label = fluid.layers.data(name='label', shape=[1], dtype='int64')
>>> feeder = fluid.DataFeeder(feed_list=[img, label], place=fluid.CPUPlace())
>>> # mnist.recordio will be generated in current directory
>>> fluid.recordio_writer.convert_reader_to_recordio_file(
>>> filename="mnist.recordio",
>>> reader_creator=paddle.batch(mnist.train(), batch_size=32),
>>> feeder=feeder)
Args:
filename(str): The recordio filename.
reader_creator(callable): The Python Reader Creator. See
:ref:`api_guide_python_reader`.
        feeder(DataFeeder): The DataFeeder instance. Used to convert
            :code:`reader_creator` to :code:`lod_tensor`
compressor: Must in fluid.core.RecordIOWriter.Compressor.Snappy or
fluid.core.RecordIOWriter.Compressor.NoCompress. Use :code:`Snappy`
by default.
        max_num_records(int): Maximum number of records in one chunk. Each record
            is one return value from the reader function
feed_order(list): The order of variable names that the reader returns
Returns:
int: the number of record that saved.
"""
if feed_order is None:
feed_order = feeder.feed_names
counter = 0
with create_recordio_writer(filename, compressor,
max_num_records) as writer:
for batch in reader_creator():
res = feeder.feed(batch)
for each in feed_order:
writer.append_tensor(res[each])
writer.complete_append_tensor()
counter += 1
return counter
def convert_reader_to_recordio_files(
filename,
batch_per_file,
reader_creator,
feeder,
compressor=core.RecordIOWriter.Compressor.Snappy,
max_num_records=1000,
feed_order=None):
"""
    Convert a Python reader to many recordio files.
    This API is basically the same as :code:`convert_reader_to_recordio_file`,
    except that it creates many recordio files. Each file contains at
    most :code:`batch_per_file` records.
Please reference
:ref:`api_fluid_recordio_writer_convert_reader_to_recordio_file` for more
details.
"""
if feed_order is None:
feed_order = feeder.feed_names
f_name, f_ext = os.path.splitext(filename)
assert (f_ext == ".recordio")
lines = []
f_idx = 0
counter = 0
for idx, batch in enumerate(reader_creator()):
lines.append(batch)
if idx >= batch_per_file and idx % batch_per_file == 0:
filename = "%s-%05d%s" % (f_name, f_idx, f_ext)
with create_recordio_writer(filename, compressor,
max_num_records) as writer:
for l in lines:
res = feeder.feed(l)
for each in feed_order:
writer.append_tensor(res[each])
writer.complete_append_tensor()
counter += 1
lines = []
f_idx += 1
return counter
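# For example (an illustrative sketch, with reader_creator and feeder as in
# the docstring above), convert_reader_to_recordio_files("mnist.recordio",
# 100, reader_creator, feeder) writes output files named
# "mnist-00000.recordio", "mnist-00001.recordio", ... following the
# "%s-%05d%s" pattern above.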
|
{
"content_hash": "d1fd6296d0bee262bce1c35f641cbd49",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 90,
"avg_line_length": 36.04237288135593,
"alnum_prop": 0.588055490242182,
"repo_name": "baidu/Paddle",
"id": "aa581f23a191639fdc026e7781897d5d996823a9",
"size": "4866",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/recordio_writer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "217842"
},
{
"name": "C++",
"bytes": "2771237"
},
{
"name": "CMake",
"bytes": "113670"
},
{
"name": "Cuda",
"bytes": "424141"
},
{
"name": "M4",
"bytes": "40913"
},
{
"name": "Perl",
"bytes": "11412"
},
{
"name": "Python",
"bytes": "892636"
},
{
"name": "Shell",
"bytes": "64351"
}
],
"symlink_target": ""
}
|
from core import perf_benchmark
from measurements import startup
import page_sets
from telemetry import benchmark
class _StartupCold(perf_benchmark.PerfBenchmark):
"""Measures cold startup time with a clean profile."""
options = {'pageset_repeat': 5}
@classmethod
def Name(cls):
return 'startup'
def CreatePageTest(self, options):
return startup.Startup(cold=True)
class _StartupWarm(perf_benchmark.PerfBenchmark):
"""Measures warm startup time with a clean profile."""
options = {'pageset_repeat': 20}
@classmethod
def Name(cls):
return 'startup'
@classmethod
def ValueCanBeAddedPredicate(cls, _, is_first_result):
return not is_first_result
def CreatePageTest(self, options):
return startup.Startup(cold=False)
@benchmark.Enabled('has tabs')
@benchmark.Disabled('snowleopard') # crbug.com/336913
class StartupColdBlankPage(_StartupCold):
"""Measures cold startup time with a clean profile."""
tag = 'cold'
page_set = page_sets.BlankPageSet
@classmethod
def Name(cls):
return 'startup.cold.blank_page'
@benchmark.Enabled('has tabs')
class StartupWarmBlankPage(_StartupWarm):
"""Measures warm startup time with a clean profile."""
tag = 'warm'
page_set = page_sets.BlankPageSet
@classmethod
def Name(cls):
return 'startup.warm.blank_page'
@benchmark.Enabled('has tabs')
@benchmark.Enabled('win', 'linux', 'mac')
@benchmark.Disabled('reference', 'android') # http://crbug.com/481919
class StartupLargeProfileColdBlankPage(_StartupCold):
"""Measures cold startup time with a large profile."""
tag = 'cold'
page_set = page_sets.BlankPageSetWithLargeProfile
options = {'pageset_repeat': 1}
def __init__(self, max_failures=None):
super(StartupLargeProfileColdBlankPage, self).__init__(max_failures)
def SetExtraBrowserOptions(self, options):
options.browser_startup_timeout = 10 * 60
@classmethod
def Name(cls):
return 'startup.large_profile.cold.blank_page'
@benchmark.Enabled('has tabs')
@benchmark.Enabled('win', 'linux', 'mac')
@benchmark.Disabled('reference', 'android') # http://crbug.com/481919
class StartupLargeProfileWarmBlankPage(_StartupWarm):
"""Measures warm startup time with a large profile."""
tag = 'warm'
page_set = page_sets.BlankPageSetWithLargeProfile
options = {'pageset_repeat': 1}
def __init__(self, max_failures=None):
super(StartupLargeProfileWarmBlankPage, self).__init__(max_failures)
def SetExtraBrowserOptions(self, options):
options.browser_startup_timeout = 10 * 60
@classmethod
def Name(cls):
return 'startup.large_profile.warm.blank_page'
|
{
"content_hash": "cc5e22d82c9ab34924513719d91cd6d3",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 72,
"avg_line_length": 27.904255319148938,
"alnum_prop": 0.7251239039268014,
"repo_name": "SaschaMester/delicium",
"id": "a54f55fd09336019cfb7a63835daff43a80a0c73",
"size": "2786",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tools/perf/benchmarks/startup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "23829"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "4171711"
},
{
"name": "C++",
"bytes": "243066171"
},
{
"name": "CSS",
"bytes": "935112"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27211018"
},
{
"name": "Java",
"bytes": "14285999"
},
{
"name": "JavaScript",
"bytes": "20413885"
},
{
"name": "Makefile",
"bytes": "23496"
},
{
"name": "Objective-C",
"bytes": "1725804"
},
{
"name": "Objective-C++",
"bytes": "9880229"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "178732"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "478406"
},
{
"name": "Python",
"bytes": "8261413"
},
{
"name": "Shell",
"bytes": "482077"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
""" Fit subunits for multiple cells simultaneously.
This script has the extensions of single cell models from earlier
as well as new population subunit models -
most notably the almost convolutional model - where each subunit is
summation of mother subunit and subunit specific modification.
"""
import sys
import os.path
import tensorflow as tf
from absl import app
from absl import flags
from absl import gfile
import cPickle as pickle
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
import scipy.io as sio
from scipy import ndimage
import random
FLAGS = flags.FLAGS
# flags for data location
flags.DEFINE_string('folder_name', 'experiment4',
'folder where to store all the data')
flags.DEFINE_string('save_location',
'/home/bhaishahster/',
                    'where to store logs and outputs?')
flags.DEFINE_string('data_location',
'/home/bhaishahster/data_breakdown/',
'where to take data from?')
# flags for stochastic learning and loading data
# data is split and stored as small .mat files
flags.DEFINE_integer('batchsz', 1000, 'batch size for training')
flags.DEFINE_integer('n_chunks', 216, 'number of data chunks') # should be 216
flags.DEFINE_integer('n_b_in_c', 1, 'number of batches in one chunk of data')
flags.DEFINE_integer('train_len', 216 - 21, 'how much training length to use?')
flags.DEFINE_float('step_sz', 10, 'step size for learning algorithm')
# random number generators initialized
# removes unnecessary data variability while comparing algorithms
flags.DEFINE_integer('np_randseed', 23, 'numpy RNG seed')
flags.DEFINE_integer('randseed', 65, 'python RNG seed')
# flags for model/loss specification
flags.DEFINE_string('model_id', 'relu', 'which model to fit')
flags.DEFINE_string('loss', 'poisson', 'which loss to use?')
flags.DEFINE_string('masked_stimulus', 'False',
'use all pixels or only those inside RF of selected cells?')
flags.DEFINE_string('all_cells', 'True',
'learn model for all cells or a few chosen ones?')
# model specific terms
# subunit grid spacing
flags.DEFINE_float('su_grid_spacing', 3, 'grid spacing')
# useful for models which take a specific number of subunits as input
flags.DEFINE_integer('ratio_SU', 2, 'ratio of subunits/cells')
# useful for convolution-like models
flags.DEFINE_integer('window', 3,
'size of window for each subunit in relu_window model')
flags.DEFINE_integer('stride', 3, 'stride for relu_window')
# some models need regularization of parameters
flags.DEFINE_float('lam_w', 0.0001, 'sparsity regularization of w')
flags.DEFINE_float('lam_a', 0.0001, 'sparsity regularization of a')
FLAGS = flags.FLAGS
# global stimulus variables
stim_train_part = np.array([])
resp_train_part = np.array([])
chunk_order = np.array([])
cells_choose = np.array([])
chosen_mask = np.array([])
def get_test_data():
# the last chunk of data is test data
test_data_chunks = [FLAGS.n_chunks]
for ichunk in test_data_chunks:
filename = FLAGS.data_location + 'Off_par_data_' + str(ichunk) + '.mat'
file_r = gfile.Open(filename, 'r')
data = sio.loadmat(file_r)
stim_part = data['maskedMovdd_part'].T
resp_part = data['Y_part'].T
test_len = stim_part.shape[0]
stim_part = stim_part[:, chosen_mask]
resp_part = resp_part[:, cells_choose]
return stim_part, resp_part, test_len
def get_next_training_batch(iteration):
# Returns a new batch of training data : stimulus and response arrays
# we will use global stimulus and response variables to permute training data
# chunks and store where we are in list of training data
# each chunk might have multiple training batches.
# So go through all batches in a 'chunk' before moving on to the next chunk
global stim_train_part
global resp_train_part
global chunk_order
togo = True
while togo:
if(iteration % FLAGS.n_b_in_c == 0):
# iteration is multiple of number of batches in a chunk means
# finished going through a chunk, load new chunk of data
      ichunk = (iteration / FLAGS.n_b_in_c) % (FLAGS.train_len-1 ) # -1 as the last chunk is used for testing
if (ichunk == 0):
# if starting over the chunks again, shuffle the chunks
        chunk_order = np.random.permutation(np.arange(FLAGS.train_len)) # remove first chunk - weird?
      if chunk_order[ichunk] + 1 != 1: # 1st chunk was weird for the dataset used
filename = FLAGS.data_location + 'Off_par_data_' + str(chunk_order[ichunk] + 1) + '.mat'
file_r = gfile.Open(filename, 'r')
data = sio.loadmat(file_r)
stim_train_part = data['maskedMovdd_part'] # stimulus
resp_train_part = data['Y_part'] # response
ichunk = chunk_order[ichunk] + 1
while stim_train_part.shape[1] < FLAGS.batchsz:
# if the current loaded data is smaller than batch size, load more chunks
if (ichunk > FLAGS.n_chunks):
ichunk = 2
filename = FLAGS.data_location + 'Off_par_data_' + str(ichunk) + '.mat'
file_r = gfile.Open(filename, 'r')
data = sio.loadmat(file_r)
stim_train_part = np.append(stim_train_part, data['maskedMovdd_part'],
axis=1)
resp_train_part = np.append(resp_train_part, data['Y_part'], axis=1)
ichunk = ichunk + 1
ibatch = iteration % FLAGS.n_b_in_c # which section of current chunk to use
try:
stim_train = np.array(stim_train_part[:,ibatch: ibatch + FLAGS.batchsz],
dtype='float32').T
resp_train = np.array(resp_train_part[:,ibatch: ibatch + FLAGS.batchsz],
dtype='float32').T
togo=False
except:
iteration = np.random.randint(1,100000)
      print('Load exception iteration: ' + str(iteration) +
            ' chunk: ' + str(chunk_order[ichunk]) + ' batch: ' + str(ibatch) )
togo=True
stim_train = stim_train[:, chosen_mask]
resp_train = resp_train[:, cells_choose]
return stim_train, resp_train, FLAGS.batchsz
def get_windows():
# use FLAGS to get convolutional 'windows' for convolutional models.
window = FLAGS.window # 2*window +1 is the width and height of windows
n_pix = (2* window + 1) ** 2 # number of pixels in the window
w_mask = np.zeros((2 * window + 1, 2 * window + 1, 1, n_pix))
icnt = 0
# make mask_tf: weight (dimx X dimy X npix) for convolutional layer,
# where each layer is 1 for a particular pixel in window and 0 for others.
# This is used for flattening the pixels in a window,
# so that different weights could be applied to each window
for ix in range(2 * window + 1):
for iy in range(2 * window + 1):
w_mask[ix, iy, 0, icnt] =1
icnt = icnt + 1
mask_tf = tf.constant(np.array(w_mask, dtype='float32'))
# number of windows in x and y dimensions
dimx = np.floor(1 + ((40 - (2 * window + 1))/FLAGS.stride)).astype('int')
dimy = np.floor(1 + ((80 - (2 * window + 1))/FLAGS.stride)).astype('int')
return mask_tf, dimx, dimy, n_pix
def main(argv):
# global variables will be used for getting training data
global cells_choose
global chosen_mask
global chunk_order
# set random seeds: when same algorithm run with different FLAGS,
  # the sequence of random data is the same.
np.random.seed(FLAGS.np_randseed)
random.seed(FLAGS.randseed)
  # initial chunk order (will be re-shuffled every time we go over a chunk)
chunk_order = np.random.permutation(np.arange(FLAGS.n_chunks-1))
# Load data summary
data_filename = FLAGS.data_location + 'data_details.mat'
summary_file = gfile.Open(data_filename, 'r')
data_summary = sio.loadmat(summary_file)
cells = np.squeeze(data_summary['cells'])
# which cells to train subunits for
if FLAGS.all_cells == 'True':
cells_choose = np.array(np.ones(np.shape(cells)), dtype='bool')
else:
cells_choose = (cells ==3287) | (cells ==3318 ) | (cells ==3155) | (cells ==3066)
n_cells = np.sum(cells_choose) # number of cells
# load spikes and relevant stimulus pixels for chosen cells
tot_spks = np.squeeze(data_summary['tot_spks'])
tot_spks_chosen_cells = np.array(tot_spks[cells_choose] ,dtype='float32')
total_mask = np.squeeze(data_summary['totalMaskAccept_log']).T
# chosen_mask = which pixels to learn subunits over
if FLAGS.masked_stimulus == 'True':
chosen_mask = np.array(np.sum(total_mask[cells_choose,:],0)>0, dtype='bool')
else:
chosen_mask = np.array(np.ones(3200).astype('bool'))
stim_dim = np.sum(chosen_mask) # stimulus dimensions
print('\ndataset summary loaded')
# print parameters
print('Save folder name: ' + str(FLAGS.folder_name) +
'\nmodel:' + str(FLAGS.model_id) +
'\nLoss:' + str(FLAGS.loss) +
'\nmasked stimulus:' + str(FLAGS.masked_stimulus) +
'\nall_cells?' + str(FLAGS.all_cells) +
'\nbatch size' + str(FLAGS.batchsz) +
'\nstep size' + str(FLAGS.step_sz) +
'\ntraining length: ' + str(FLAGS.train_len) +
'\nn_cells: '+str(n_cells))
# decide the number of subunits to fit
n_su = FLAGS.ratio_SU*n_cells
# filename for saving file
short_filename = ('_masked_stim=' + str(FLAGS.masked_stimulus) + '_all_cells='+
str(FLAGS.all_cells) + '_loss='+
str(FLAGS.loss) + '_batch_sz='+ str(FLAGS.batchsz) +
'_step_sz'+ str(FLAGS.step_sz) +
'_tlen=' + str(FLAGS.train_len) + '_bg')
with tf.Session() as sess:
    # set up stimulus and response placeholders
stim = tf.placeholder(tf.float32, shape=[None, stim_dim], name='stim')
resp = tf.placeholder(tf.float32, name='resp')
data_len = tf.placeholder(tf.float32, name='data_len')
if FLAGS.loss == 'poisson':
b_init = np.array(0.000001*np.ones(n_cells)) # a very small positive bias needed to avoid log(0) in poisson loss
else:
b_init = np.log((tot_spks_chosen_cells)/(216000. - tot_spks_chosen_cells)) # log-odds, a good initialization for some losses (like logistic)
# different firing rate models
if FLAGS.model_id == 'exp_additive':
# This model was implemented for earlier work.
# firing rate for cell c: lam_c = sum_s exp(w_s.x + a_sc)
# filename
short_filename = ('model=' + str(FLAGS.model_id) + short_filename)
# variables
w = tf.Variable(np.array(0.01 * np.random.randn(stim_dim, n_su),
dtype='float32'), name='w')
a = tf.Variable(np.array(0.01 * np.random.rand(n_cells, 1, n_su),
dtype='float32'), name='a')
# firing rate model
lam = tf.transpose(tf.reduce_sum(tf.exp(tf.matmul(stim, w) + a), 2))
regularization = 0
vars_fit = [w, a]
def proj(): # called after every training step - to project to parameter constraints
pass
if FLAGS.model_id == 'relu':
# firing rate for cell c: lam_c = a_c'.relu(w.x) + b
# we know a>0 and for poisson loss, b>0
# for poisson loss: small b added to prevent lam_c going to 0
# filename
short_filename = ('model=' + str(FLAGS.model_id) +
'_lam_w=' + str(FLAGS.lam_w) + '_lam_a=' +
str(FLAGS.lam_a) + '_nsu=' + str(n_su) + short_filename)
# variables
w = tf.Variable(np.array(0.01 * np.random.randn(stim_dim, n_su),
dtype='float32'), name='w')
a = tf.Variable(np.array(0.01 * np.random.rand(n_su, n_cells),
dtype='float32'), name='a')
b = tf.Variable(np.array(b_init,dtype='float32'), name='b')
# firing rate model
lam = tf.matmul(tf.nn.relu(tf.matmul(stim, w)), a) + b
vars_fit = [w, a] # which variables are learnt
if not FLAGS.loss == 'poisson': # don't learn b for poisson loss
vars_fit = vars_fit + [b]
# regularization of parameters
regularization = (FLAGS.lam_w * tf.reduce_sum(tf.abs(w)) +
FLAGS.lam_a * tf.reduce_sum(tf.abs(a)))
# projection to satisfy constraints
a_pos = tf.assign(a, (a + tf.abs(a))/2)
b_pos = tf.assign(b, (b + tf.abs(b))/2)
def proj():
sess.run(a_pos)
if FLAGS.loss == 'poisson':
sess.run(b_pos)
if FLAGS.model_id == 'relu_window':
# firing rate for cell c: lam_c = a_c'.relu(w.x) + b,
      # where the w_i are defined over small windows that are convolutionally related to each other.
# we know a>0 and for poisson loss, b>0
# for poisson loss: small b added to prevent lam_c going to 0
# filename
short_filename = ('model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) +
'_lam_w=' + str(FLAGS.lam_w) + short_filename )
mask_tf, dimx, dimy, n_pix = get_windows() # get convolutional windows
# variables
w = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32'), name='w')
a = tf.Variable(np.array(np.random.rand(dimx*dimy, n_cells),dtype='float32'), name='a')
b = tf.Variable(np.array(b_init,dtype='float32'), name='b')
vars_fit = [w, a] # which variables are learnt
if not FLAGS.loss == 'poisson': # don't learn b for poisson loss
vars_fit = vars_fit + [b]
# stimulus filtered with convolutional windows
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
stim_masked = tf.nn.conv2d(stim4D, mask_tf,
strides=[1, FLAGS.stride, FLAGS.stride, 1],
padding="VALID" )
stim_wts = tf.nn.relu(tf.reduce_sum(tf.mul(stim_masked, w), 3))
# get firing rate
lam = tf.matmul(tf.reshape(stim_wts, [-1,dimx*dimy]),a) + b
# regularization
regularization = FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w))
# projection to satisfy hard variable constraints
a_pos = tf.assign(a, (a + tf.abs(a))/2)
b_pos = tf.assign(b, (b + tf.abs(b))/2)
def proj():
sess.run(a_pos)
if FLAGS.loss == 'poisson':
sess.run(b_pos)
if FLAGS.model_id == 'relu_window_mother':
# firing rate for cell c: lam_c = a_c'.relu(w.x) + b,
      # where the w_i are defined over small windows that are convolutionally related to each other.
      # w_i = w_mother + w_del_i,
      # where w_mother is common across all 'windows' and w_del is different for different windows.
# we know a>0 and for poisson loss, b>0
# for poisson loss: small b added to prevent lam_c going to 0
# filename
short_filename = ('model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) +
'_lam_w=' + str(FLAGS.lam_w) + short_filename )
mask_tf, dimx, dimy, n_pix = get_windows()
# variables
w_del = tf.Variable(np.array( 0.05*np.random.randn(dimx, dimy, n_pix),
dtype='float32'), name='w_del')
w_mother = tf.Variable(np.array( np.ones((2 * FLAGS.window + 1,
2 * FLAGS.window + 1, 1, 1)),
dtype='float32'), name='w_mother')
a = tf.Variable(np.array(np.random.rand(dimx*dimy, n_cells),
dtype='float32'), name='a')
b = tf.Variable(np.array(b_init,dtype='float32'), name='b')
vars_fit = [w_mother, w_del, a] # which variables to learn
if not FLAGS.loss == 'poisson':
vars_fit = vars_fit + [b]
# stimulus filtered with convolutional windows
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
stim_convolved = tf.reduce_sum( tf.nn.conv2d(stim4D,
w_mother,
strides=[1, FLAGS.stride,
FLAGS.stride, 1],
padding="VALID"),3)
stim_masked = tf.nn.conv2d(stim4D,
mask_tf,
strides=[1, FLAGS.stride, FLAGS.stride, 1],
padding="VALID" )
stim_del = tf.reduce_sum(tf.mul(stim_masked, w_del), 3)
      # activation of different subunits
su_act = tf.nn.relu(stim_del + stim_convolved)
# get firing rate
lam = tf.matmul(tf.reshape(su_act, [-1, dimx*dimy]),a) + b
# regularization
regularization = FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w_del))
# projection to satisfy hard variable constraints
a_pos = tf.assign(a, (a + tf.abs(a))/2)
b_pos = tf.assign(b, (b + tf.abs(b))/2)
def proj():
sess.run(a_pos)
if FLAGS.loss == 'poisson':
sess.run(b_pos)
if FLAGS.model_id == 'relu_window_mother_sfm':
# firing rate for cell c: lam_c = a_sfm_c'.relu(w.x) + b,
# a_sfm_c = softmax(a) : so a cell cannot be connected to all subunits equally well.
      # where the w_i are defined over small windows that are convolutionally related to each other.
      # w_i = w_mother + w_del_i,
      # where w_mother is common across all 'windows' and w_del is different for different windows.
# we know a>0 and for poisson loss, b>0
# for poisson loss: small b added to prevent lam_c going to 0
# filename
short_filename = ('model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) +
'_lam_w=' + str(FLAGS.lam_w) + short_filename)
mask_tf, dimx, dimy, n_pix = get_windows()
# variables
w_del = tf.Variable(np.array( 0.05*np.random.randn(dimx, dimy, n_pix),
dtype='float32'), name='w_del')
w_mother = tf.Variable(np.array( np.ones((2 * FLAGS.window + 1,
2 * FLAGS.window + 1, 1, 1)),
dtype='float32'), name='w_mother')
a = tf.Variable(np.array(np.random.randn(dimx*dimy, n_cells),
dtype='float32'), name='a')
a_sfm = tf.transpose(tf.nn.softmax(tf.transpose(a)))
b = tf.Variable(np.array(b_init,dtype='float32'), name='b')
vars_fit = [w_mother, w_del, a] # which variables to fit
if not FLAGS.loss == 'poisson':
vars_fit = vars_fit + [b]
# stimulus filtered with convolutional windows
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
stim_convolved = tf.reduce_sum(tf.nn.conv2d(stim4D,
w_mother,
strides=[1, FLAGS.stride,
FLAGS.stride, 1],
padding="VALID"),3)
stim_masked = tf.nn.conv2d(stim4D, mask_tf,
strides=[1, FLAGS.stride, FLAGS.stride, 1],
padding="VALID" )
stim_del = tf.reduce_sum(tf.mul(stim_masked, w_del), 3)
      # activation of different subunits
su_act = tf.nn.relu(stim_del + stim_convolved)
# get firing rate
lam = tf.matmul(tf.reshape(su_act, [-1, dimx*dimy]), a_sfm) + b
# regularization
regularization = FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w_del))
# projection to satisfy hard variable constraints
b_pos = tf.assign(b, (b + tf.abs(b))/2)
def proj():
if FLAGS.loss == 'poisson':
sess.run(b_pos)
if FLAGS.model_id == 'relu_window_mother_sfm_exp':
# firing rate for cell c: lam_c = exp(a_sfm_c'.relu(w.x)) + b,
# a_sfm_c = softmax(a) : so a cell cannot be connected to all subunits equally well.
# exponential output NL would cancel the log() in poisson and might get better estimation properties.
      # where the w_i are defined over small windows that are convolutionally related to each other.
      # w_i = w_mother + w_del_i,
      # where w_mother is common across all 'windows' and w_del is different for different windows.
# we know a>0 and for poisson loss, b>0
# for poisson loss: small b added to prevent lam_c going to 0
# filename
short_filename = ('model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) +
'_lam_w=' + str(FLAGS.lam_w) + short_filename)
# get windows
mask_tf, dimx, dimy, n_pix = get_windows()
# declare variables
w_del = tf.Variable(np.array( 0.05*np.random.randn(dimx, dimy, n_pix),
dtype='float32'), name='w_del')
w_mother = tf.Variable(np.array( np.ones((2 * FLAGS.window + 1,
2 * FLAGS.window + 1, 1, 1)),
dtype='float32'), name='w_mother')
a = tf.Variable(np.array(np.random.randn(dimx*dimy, n_cells),
dtype='float32'), name='a')
a_sfm = tf.transpose(tf.nn.softmax(tf.transpose(a)))
b = tf.Variable(np.array(b_init,dtype='float32'), name='b')
vars_fit = [w_mother, w_del, a]
if not FLAGS.loss == 'poisson':
vars_fit = vars_fit + [b]
# filter stimulus
stim4D = tf.expand_dims(tf.reshape(stim, (-1,40,80)), 3)
stim_convolved = tf.reduce_sum( tf.nn.conv2d(stim4D,
w_mother,
strides=[1, FLAGS.stride,
FLAGS.stride, 1],
padding="VALID"),3)
stim_masked = tf.nn.conv2d(stim4D, mask_tf,
strides=[1, FLAGS.stride, FLAGS.stride, 1],
padding="VALID" )
stim_del = tf.reduce_sum(tf.mul(stim_masked, w_del), 3)
# get subunit activation
su_act = tf.nn.relu(stim_del + stim_convolved)
# get cell firing rates
lam = tf.exp(tf.matmul(tf.reshape(su_act, [-1, dimx*dimy]), a_sfm)) + b
# regularization
regularization = FLAGS.lam_w * tf.reduce_sum(tf.nn.l2_loss(w_del))
# projection to satisfy hard variable constraints
b_pos = tf.assign(b, (b + tf.abs(b))/2)
def proj():
if FLAGS.loss == 'poisson':
sess.run(b_pos)
# different loss functions
if FLAGS.loss == 'poisson':
loss_inter = (tf.reduce_sum(lam)/120. -
tf.reduce_sum(resp*tf.log(lam))) / data_len
if FLAGS.loss == 'logistic':
loss_inter = tf.reduce_sum(tf.nn.softplus(-2 * (resp - 0.5)*lam)) / data_len
if FLAGS.loss == 'hinge':
loss_inter = tf.reduce_sum(tf.nn.relu(1 -2 * (resp - 0.5)*lam)) / data_len
loss = loss_inter + regularization # add regularization to get final loss function
# training consists of calling training()
# which performs a train step and
# project parameters to model specific constraints using proj()
train_step = tf.train.AdagradOptimizer(FLAGS.step_sz).minimize(loss,
var_list=
vars_fit)
def training(inp_dict):
sess.run(train_step, feed_dict=inp_dict) # one step of gradient descent
proj() # model specific projection operations
# evaluate loss on given data.
def get_loss(inp_dict):
ls = sess.run(loss,feed_dict = inp_dict)
return ls
# saving details
# make a folder with name derived from parameters of the algorithm
# - it saves checkpoint files and summaries used in tensorboard
parent_folder = FLAGS.save_location + FLAGS.folder_name + '/'
# make folder if it does not exist
if not gfile.IsDirectory(parent_folder):
gfile.MkDir(parent_folder)
FLAGS.save_location = parent_folder + short_filename + '/'
if not gfile.IsDirectory(FLAGS.save_location):
gfile.MkDir(FLAGS.save_location)
save_filename = FLAGS.save_location + short_filename
# create summary writers
# create histogram summary for all parameters which are learnt
for ivar in vars_fit:
tf.histogram_summary(ivar.name, ivar)
# loss summary
l_summary = tf.scalar_summary('loss',loss)
# loss without regularization summary
l_inter_summary = tf.scalar_summary('loss_inter',loss_inter)
# Merge all the summary writer ops into one op (this way,
# calling one op stores all summaries)
merged = tf.merge_all_summaries()
# training and testing has separate summary writers
train_writer = tf.train.SummaryWriter(FLAGS.save_location + 'train',
sess.graph)
test_writer = tf.train.SummaryWriter(FLAGS.save_location + 'test')
## Fitting procedure
print('Start fitting')
sess.run(tf.initialize_all_variables())
saver_var = tf.train.Saver(tf.all_variables(),
keep_checkpoint_every_n_hours=0.05)
load_prev = False
start_iter=0
try:
# restore previous fits if they are available
      # - useful when programs are preempted frequently.
latest_filename = short_filename + '_latest_fn'
restore_file = tf.train.latest_checkpoint(FLAGS.save_location,
latest_filename)
# restore previous iteration count and start from there.
start_iter = int(restore_file.split('/')[-1].split('-')[-1])
saver_var.restore(sess, restore_file) # restore variables
load_prev = True
except:
print('No previous dataset')
if load_prev:
print('Previous results loaded')
else:
print('Variables initialized')
# Finally, do fitting
icnt = 0
# get test data and make test dictionary
stim_test,resp_test,test_length = get_test_data()
fd_test = {stim: stim_test,
resp: resp_test,
data_len: test_length}
for istep in np.arange(start_iter,400000):
print(istep)
      # get training data and make training dictionary
stim_train, resp_train, train_len = get_next_training_batch(istep)
fd_train = {stim: stim_train,
resp: resp_train,
data_len: train_len}
# take training step
training(fd_train)
if istep%10 == 0:
# compute training and testing losses
ls_train = get_loss(fd_train)
ls_test = get_loss(fd_test)
latest_filename = short_filename + '_latest_fn'
saver_var.save(sess, save_filename, global_step=istep,
latest_filename = latest_filename)
# add training summary
summary = sess.run(merged, feed_dict=fd_train)
train_writer.add_summary(summary,istep)
# add testing summary
summary = sess.run(merged, feed_dict=fd_test)
test_writer.add_summary(summary,istep)
print(istep, ls_train, ls_test)
icnt += FLAGS.batchsz
if icnt > 216000-1000:
icnt = 0
tms = np.random.permutation(np.arange(216000-1000))
if __name__ == '__main__':
app.run()
|
{
"content_hash": "1489a8bc0e654c10be8c61c496e09269",
"timestamp": "",
"source": "github",
"line_count": 651,
"max_line_length": 147,
"avg_line_length": 42.01996927803379,
"alnum_prop": 0.5885212940961433,
"repo_name": "googlearchive/rgc-models",
"id": "5c82b5ed8def8cddb3921b3d8b54a2472c7cf104",
"size": "28010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "response_model/python/population_subunits/coarse/fitting/few_cells_tf_refractoring.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1471817"
}
],
"symlink_target": ""
}
|
import abc
from engine import dataseries
from engine.dataseries import bards
from engine import bar
from engine import resamplebase
class AggFunGrouper(resamplebase.Grouper):
def __init__(self, groupDateTime, value, aggfun):
super(AggFunGrouper, self).__init__(groupDateTime)
self.__values = [value]
self.__aggfun = aggfun
def addValue(self, value):
self.__values.append(value)
def getGrouped(self):
return self.__aggfun(self.__values)
class BarGrouper(resamplebase.Grouper):
def __init__(self, groupDateTime, bar_, frequency):
super(BarGrouper, self).__init__(groupDateTime)
self.__open = bar_.getOpen()
self.__high = bar_.getHigh()
self.__low = bar_.getLow()
self.__close = bar_.getClose()
self.__volume = bar_.getVolume()
self.__adjClose = bar_.getAdjClose()
self.__useAdjValue = bar_.getUseAdjValue()
self.__frequency = frequency
def addValue(self, value):
self.__high = max(self.__high, value.getHigh())
self.__low = min(self.__low, value.getLow())
self.__close = value.getClose()
self.__adjClose = value.getAdjClose()
self.__volume += value.getVolume()
def getGrouped(self):
"""Return the grouped value."""
ret = bar.BasicBar(
self.getDateTime(),
self.__open, self.__high, self.__low, self.__close, self.__volume, self.__adjClose,
self.__frequency
)
ret.setUseAdjustedValue(self.__useAdjValue)
return ret
class DSResampler(object):
__metaclass__ = abc.ABCMeta
def initDSResampler(self, dataSeries, frequency):
if not resamplebase.is_valid_frequency(frequency):
raise Exception("Unsupported frequency")
self.__frequency = frequency
self.__grouper = None
self.__range = None
dataSeries.getNewValueEvent().subscribe(self.__onNewValue)
@abc.abstractmethod
def buildGrouper(self, range_, value, frequency):
raise NotImplementedError()
def __onNewValue(self, dataSeries, dateTime, value):
if self.__range is None:
self.__range = resamplebase.build_range(dateTime, self.__frequency)
self.__grouper = self.buildGrouper(self.__range, value, self.__frequency)
elif self.__range.belongs(dateTime):
self.__grouper.addValue(value)
else:
self.appendWithDateTime(self.__grouper.getDateTime(), self.__grouper.getGrouped())
self.__range = resamplebase.build_range(dateTime, self.__frequency)
self.__grouper = self.buildGrouper(self.__range, value, self.__frequency)
def pushLast(self):
if self.__grouper is not None:
self.appendWithDateTime(self.__grouper.getDateTime(), self.__grouper.getGrouped())
self.__grouper = None
self.__range = None
def checkNow(self, dateTime):
if self.__range is not None and not self.__range.belongs(dateTime):
self.appendWithDateTime(self.__grouper.getDateTime(), self.__grouper.getGrouped())
self.__grouper = None
self.__range = None
class ResampledBarDataSeries(bards.BarDataSeries, DSResampler):
"""A BarDataSeries that will build on top of another, higher frequency, BarDataSeries.
Resampling will take place as new values get pushed into the dataseries being resampled.
:param dataSeries: The DataSeries instance being resampled.
:type dataSeries: :class:`engine.dataseries.bards.BarDataSeries`
:param frequency: The grouping frequency in seconds. Must be > 0.
:param maxLen: The maximum number of values to hold.
Once a bounded length is full, when new items are added, a corresponding number of items are discarded
from the opposite end.
:type maxLen: int.
.. note::
* Supported resampling frequencies are:
* Less than bar.Frequency.DAY
* bar.Frequency.DAY
* bar.Frequency.MONTH
"""
def __init__(self, dataSeries, frequency, maxLen=None):
if not isinstance(dataSeries, bards.BarDataSeries):
raise Exception("dataSeries must be a dataseries.bards.BarDataSeries instance")
super(ResampledBarDataSeries, self).__init__(maxLen)
self.initDSResampler(dataSeries, frequency)
def checkNow(self, dateTime):
"""Forces a resample check. Depending on the resample frequency, and the current datetime, a new
value may be generated.
:param dateTime: The current datetime.
:type dateTime: :class:`datetime.datetime`
"""
return super(ResampledBarDataSeries, self).checkNow(dateTime)
def buildGrouper(self, range_, value, frequency):
return BarGrouper(range_.getBeginning(), value, frequency)
class ResampledDataSeries(dataseries.SequenceDataSeries, DSResampler):
def __init__(self, dataSeries, frequency, aggfun, maxLen=None):
super(ResampledDataSeries, self).__init__(maxLen)
self.initDSResampler(dataSeries, frequency)
self.__aggfun = aggfun
def buildGrouper(self, range_, value, frequency):
return AggFunGrouper(range_.getBeginning(), value, self.__aggfun)
|
{
"content_hash": "491bbe3ca1bc1bfadadb7aac14e9d8a7",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 110,
"avg_line_length": 37.59285714285714,
"alnum_prop": 0.6482994489834695,
"repo_name": "Yam-cn/potato",
"id": "bbefa9d3b06945c979d7f3f8491d16cf5d47b713",
"size": "5877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "engine/dataseries/resampled.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "482582"
},
{
"name": "C++",
"bytes": "499680"
},
{
"name": "CSS",
"bytes": "269141"
},
{
"name": "HTML",
"bytes": "3627157"
},
{
"name": "JavaScript",
"bytes": "1343865"
},
{
"name": "PHP",
"bytes": "34371"
},
{
"name": "Python",
"bytes": "1484158"
},
{
"name": "Shell",
"bytes": "1044"
}
],
"symlink_target": ""
}
|
"""Tests for database migrations for the API database.
These are "opportunistic" tests which allow testing against all three databases
(sqlite in memory, mysql, pg) in a properly configured unit test environment.
For the opportunistic testing you need to set up DBs named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost. The
test will then use that DB and username/password combo to run the tests. Refer
to the 'tools/test-setup.sh' for an example of how to configure this.
"""
from unittest import mock
from alembic import command as alembic_api
from alembic import script as alembic_script
from migrate.versioning import api as migrate_api
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
from oslo_log import log as logging
import sqlalchemy
import testtools
from nova.db.api import models
from nova.db import migration
from nova import test
LOG = logging.getLogger(__name__)
class NovaModelsMigrationsSync(test_migrations.ModelsMigrationsSync):
"""Test that the models match the database after migrations are run."""
# Migrations can take a long time, particularly on underpowered CI nodes.
# Give them some breathing room.
TIMEOUT_SCALING_FACTOR = 4
def setUp(self):
super().setUp()
self.engine = enginefacade.writer.get_engine()
def db_sync(self, engine):
with mock.patch.object(migration, '_get_engine', return_value=engine):
migration.db_sync(database='api')
def get_engine(self):
return self.engine
def get_metadata(self):
return models.BASE.metadata
def include_object(self, object_, name, type_, reflected, compare_to):
if type_ == 'table':
# migrate_version is a sqlalchemy-migrate control table and
# isn't included in the model.
if name == 'migrate_version':
return False
# Define a whitelist of tables that will be removed from the DB in
# a later release and don't have a corresponding model anymore.
return name not in models.REMOVED_TABLES
return True
def filter_metadata_diff(self, diff):
# Filter out diffs that shouldn't cause a sync failure.
new_diff = []
for element in diff:
if isinstance(element, list):
# modify_nullable is a list
new_diff.append(element)
else:
# tuple with action as first element. Different actions have
# different tuple structures.
if element[0] == 'add_fk':
fkey = element[1]
tablename = fkey.table.name
column_keys = fkey.column_keys
if (tablename, column_keys) in models.REMOVED_FKEYS:
continue
elif element[0] == 'remove_column':
table = element[2]
column = element[3].name
if (table, column) in models.REMOVED_COLUMNS:
continue
new_diff.append(element)
return new_diff
class TestModelsSyncSQLite(
NovaModelsMigrationsSync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
pass
class TestModelsSyncMySQL(
NovaModelsMigrationsSync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
class TestModelsSyncPostgreSQL(
NovaModelsMigrationsSync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
class NovaModelsMigrationsLegacySync(NovaModelsMigrationsSync):
"""Test that the models match the database after old migrations are run."""
def db_sync(self, engine):
# the 'nova.db.migration.db_sync' method will not use the legacy
# sqlalchemy-migrate-based migration flow unless the database is
# already controlled with sqlalchemy-migrate, so we need to manually
# enable version controlling with this tool to test this code path
repository = migration._find_migrate_repo(database='api')
migrate_api.version_control(
engine, repository, migration.MIGRATE_INIT_VERSION['api'])
# now we can apply migrations as expected and the legacy path will be
# followed
super().db_sync(engine)
class TestModelsLegacySyncSQLite(
NovaModelsMigrationsLegacySync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
pass
class TestModelsLegacySyncMySQL(
NovaModelsMigrationsLegacySync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
class TestModelsLegacySyncPostgreSQL(
NovaModelsMigrationsLegacySync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
class NovaMigrationsWalk(
test_fixtures.OpportunisticDBTestMixin, test.NoDBTestCase,
):
# Migrations can take a long time, particularly on underpowered CI nodes.
# Give them some breathing room.
TIMEOUT_SCALING_FACTOR = 4
def setUp(self):
super().setUp()
self.engine = enginefacade.writer.get_engine()
self.config = migration._find_alembic_conf('api')
self.init_version = migration.ALEMBIC_INIT_VERSION['api']
def _migrate_up(self, connection, revision):
if revision == self.init_version: # no tests for the initial revision
alembic_api.upgrade(self.config, revision)
return
self.assertIsNotNone(
getattr(self, '_check_%s' % revision, None),
(
'API DB Migration %s does not have a test; you must add one'
) % revision,
)
pre_upgrade = getattr(self, '_pre_upgrade_%s' % revision, None)
if pre_upgrade:
pre_upgrade(connection)
alembic_api.upgrade(self.config, revision)
post_upgrade = getattr(self, '_check_%s' % revision, None)
if post_upgrade:
post_upgrade(connection)
_b30f573d3377_removed_columns = {
'access_ip_v4',
'access_ip_v6',
'config_drive',
'display_name',
'image_ref',
'info_cache',
'instance_metadata',
'key_name',
'locked_by',
'progress',
'request_spec_id',
'security_groups',
'task_state',
'user_id',
'vm_state',
}
def _pre_upgrade_b30f573d3377(self, connection):
# we use the inspector here rather than oslo_db.utils.column_exists,
# since the latter will create a new connection
inspector = sqlalchemy.inspect(connection)
columns = [x['name'] for x in inspector.get_columns('build_requests')]
for removed_column in self._b30f573d3377_removed_columns:
self.assertIn(removed_column, columns)
def _check_b30f573d3377(self, connection):
# we use the inspector here rather than oslo_db.utils.column_exists,
# since the latter will create a new connection
inspector = sqlalchemy.inspect(connection)
columns = [x['name'] for x in inspector.get_columns('build_requests')]
for removed_column in self._b30f573d3377_removed_columns:
self.assertNotIn(removed_column, columns)
def test_single_base_revision(self):
"""Ensure we only have a single base revision.
There's no good reason for us to have diverging history, so validate
that only one base revision exists. This will prevent simple errors
        where people forget to specify the base revision. If this fails for your
change, look for migrations that do not have a 'revises' line in them.
"""
script = alembic_script.ScriptDirectory.from_config(self.config)
self.assertEqual(1, len(script.get_bases()))
def test_single_head_revision(self):
"""Ensure we only have a single head revision.
There's no good reason for us to have diverging history, so validate
that only one head revision exists. This will prevent merge conflicts
        adding additional head revision points. If this fails for your change,
look for migrations with the same 'revises' line in them.
"""
script = alembic_script.ScriptDirectory.from_config(self.config)
self.assertEqual(1, len(script.get_heads()))
def test_walk_versions(self):
with self.engine.begin() as connection:
self.config.attributes['connection'] = connection
script = alembic_script.ScriptDirectory.from_config(self.config)
revisions = [x.revision for x in script.walk_revisions()]
# for some reason, 'walk_revisions' gives us the revisions in
# reverse chronological order so we have to invert this
revisions.reverse()
self.assertEqual(revisions[0], self.init_version)
for revision in revisions:
LOG.info('Testing revision %s', revision)
self._migrate_up(connection, revision)
def test_db_version_alembic(self):
engine = enginefacade.writer.get_engine()
with mock.patch.object(migration, '_get_engine', return_value=engine):
migration.db_sync(database='api')
script = alembic_script.ScriptDirectory.from_config(self.config)
head = script.get_current_head()
self.assertEqual(head, migration.db_version(database='api'))
class TestMigrationsWalkSQLite(
NovaMigrationsWalk,
test_fixtures.OpportunisticDBTestMixin,
test.NoDBTestCase,
):
pass
class TestMigrationsWalkMySQL(
NovaMigrationsWalk,
test_fixtures.OpportunisticDBTestMixin,
test.NoDBTestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
class TestMigrationsWalkPostgreSQL(
NovaMigrationsWalk,
test_fixtures.OpportunisticDBTestMixin,
test.NoDBTestCase,
):
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
|
{
"content_hash": "9568fb755395c31e73806596cf3dae5c",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 79,
"avg_line_length": 34.285234899328856,
"alnum_prop": 0.6643828912596652,
"repo_name": "mahak/nova",
"id": "3b9b17aab206b4108218a144ebfca58686f7b350",
"size": "10790",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/unit/db/api/test_migrations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3545"
},
{
"name": "Mako",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "23261880"
},
{
"name": "Shell",
"bytes": "28113"
},
{
"name": "Smarty",
"bytes": "507244"
}
],
"symlink_target": ""
}
|
import logging
from pycsw.core.etree import PARSER
from pycsw import __version__
LOGGER = logging.getLogger(__name__)
class StaticContext(object):
"""core configuration"""
def __init__(self, prefix='csw30'):
"""initializer"""
LOGGER.debug('Initializing static context')
self.version = __version__
self.ogc_schemas_base = 'http://schemas.opengis.net'
self.parser = PARSER
self.languages = {
'en': 'english',
'fr': 'french',
'el': 'greek',
}
self.response_codes = {
'OK': '200 OK',
'NotFound': '404 Not Found',
'InvalidValue': '400 Invalid property value',
'OperationParsingFailed': '400 Bad Request',
'OperationProcessingFailed': '403 Server Processing Failed',
'OperationNotSupported': '400 Not Implemented',
'MissingParameterValue': '400 Bad Request',
'InvalidParameterValue': '400 Bad Request',
'VersionNegotiationFailed': '400 Bad Request',
'InvalidUpdateSequence': '400 Bad Request',
'OptionNotSupported': '400 Not Implemented',
'NoApplicableCode': '400 Internal Server Error'
}
self.namespaces = {
'atom': 'http://www.w3.org/2005/Atom',
'csw': 'http://www.opengis.net/cat/csw/2.0.2',
'csw30': 'http://www.opengis.net/cat/csw/3.0',
'dc': 'http://purl.org/dc/elements/1.1/',
'dct': 'http://purl.org/dc/terms/',
'dif': 'http://gcmd.gsfc.nasa.gov/Aboutus/xml/dif/',
'fes20': 'http://www.opengis.net/fes/2.0',
'fgdc': 'http://www.opengis.net/cat/csw/csdgm',
'gm03': 'http://www.interlis.ch/INTERLIS2.3',
'gmd': 'http://www.isotc211.org/2005/gmd',
'gml': 'http://www.opengis.net/gml',
'ogc': 'http://www.opengis.net/ogc',
'os': 'http://a9.com/-/spec/opensearch/1.1/',
'ows': 'http://www.opengis.net/ows',
'ows11': 'http://www.opengis.net/ows/1.1',
'ows20': 'http://www.opengis.net/ows/2.0',
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'sitemap': 'http://www.sitemaps.org/schemas/sitemap/0.9',
'soapenv': 'http://www.w3.org/2003/05/soap-envelope',
'xlink': 'http://www.w3.org/1999/xlink',
'xs': 'http://www.w3.org/2001/XMLSchema',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance'
}
self.keep_ns_prefixes = [
'csw', 'dc', 'dct', 'gmd', 'gml', 'ows', 'xs'
]
self.md_core_model = {
'typename': 'pycsw:CoreMetadata',
'outputschema': 'http://pycsw.org/metadata',
'mappings': {
'pycsw:Identifier': 'identifier',
# CSW typename (e.g. csw:Record, md:MD_Metadata)
'pycsw:Typename': 'typename',
# schema namespace, i.e. http://www.isotc211.org/2005/gmd
'pycsw:Schema': 'schema',
# origin of resource, either 'local', or URL to web service
'pycsw:MdSource': 'mdsource',
# date of insertion
'pycsw:InsertDate': 'insert_date', # date of insertion
# raw XML metadata
'pycsw:XML': 'xml',
# raw metadata payload, xml to be migrated to this in the future
'pycsw:Metadata': 'metadata',
# raw metadata payload type, xml as default for now
'pycsw:MetadataType': 'metadata_type',
                # bag of metadata elements and attributes ONLY, no XML tags
'pycsw:AnyText': 'anytext',
'pycsw:Language': 'language',
'pycsw:Title': 'title',
'pycsw:Abstract': 'abstract',
'pycsw:Keywords': 'keywords',
'pycsw:KeywordType': 'keywordstype',
'pycsw:Format': 'format',
'pycsw:Source': 'source',
'pycsw:Date': 'date',
'pycsw:Modified': 'date_modified',
'pycsw:Type': 'type',
# geometry, specified in OGC WKT
'pycsw:BoundingBox': 'wkt_geometry',
'pycsw:CRS': 'crs',
'pycsw:AlternateTitle': 'title_alternate',
'pycsw:RevisionDate': 'date_revision',
'pycsw:CreationDate': 'date_creation',
'pycsw:PublicationDate': 'date_publication',
'pycsw:OrganizationName': 'organization',
'pycsw:SecurityConstraints': 'securityconstraints',
'pycsw:ParentIdentifier': 'parentidentifier',
'pycsw:TopicCategory': 'topicategory',
'pycsw:ResourceLanguage': 'resourcelanguage',
'pycsw:GeographicDescriptionCode': 'geodescode',
'pycsw:Denominator': 'denominator',
'pycsw:DistanceValue': 'distancevalue',
'pycsw:DistanceUOM': 'distanceuom',
'pycsw:TempExtent_begin': 'time_begin',
'pycsw:TempExtent_end': 'time_end',
'pycsw:ServiceType': 'servicetype',
'pycsw:ServiceTypeVersion': 'servicetypeversion',
'pycsw:Operation': 'operation',
'pycsw:CouplingType': 'couplingtype',
'pycsw:OperatesOn': 'operateson',
'pycsw:OperatesOnIdentifier': 'operatesonidentifier',
'pycsw:OperatesOnName': 'operatesoname',
'pycsw:Degree': 'degree',
'pycsw:AccessConstraints': 'accessconstraints',
'pycsw:OtherConstraints': 'otherconstraints',
'pycsw:Classification': 'classification',
'pycsw:ConditionApplyingToAccessAndUse': 'conditionapplyingtoaccessanduse',
'pycsw:Lineage': 'lineage',
'pycsw:ResponsiblePartyRole': 'responsiblepartyrole',
'pycsw:SpecificationTitle': 'specificationtitle',
'pycsw:SpecificationDate': 'specificationdate',
'pycsw:SpecificationDateType': 'specificationdatetype',
'pycsw:Creator': 'creator',
'pycsw:Publisher': 'publisher',
'pycsw:Contributor': 'contributor',
'pycsw:Relation': 'relation',
'pycsw:Platform': 'platform',
'pycsw:Instrument': 'instrument',
'pycsw:SensorType': 'sensortype',
'pycsw:CloudCover': 'cloudcover',
'pycsw:Bands': 'bands',
# links: list of dicts with properties: name, description, protocol, url
'pycsw:Links': 'links',
}
}
self.model = None
self.models = {
'csw': {
'operations_order': [
'GetCapabilities', 'DescribeRecord', 'GetDomain',
'GetRecords', 'GetRecordById', 'GetRepositoryItem'
],
'operations': {
'GetCapabilities': {
'methods': {
'get': True,
'post': True,
},
'parameters': {
'sections': {
'values': ['ServiceIdentification', 'ServiceProvider',
'OperationsMetadata', 'Filter_Capabilities']
}
}
},
'DescribeRecord': {
'methods': {
'get': True,
'post': True,
},
'parameters': {
'schemaLanguage': {
'values': ['http://www.w3.org/XML/Schema',
'http://www.w3.org/TR/xmlschema-1/',
'http://www.w3.org/2001/XMLSchema']
},
'typeName': {
'values': ['csw:Record']
},
'outputFormat': {
'values': ['application/xml', 'application/json']
}
}
},
'GetRecords': {
'methods': {
'get': True,
'post': True,
},
'parameters': {
'resultType': {
'values': ['hits', 'results', 'validate']
},
'typeNames': {
'values': ['csw:Record']
},
'outputSchema': {
'values': ['http://www.opengis.net/cat/csw/2.0.2']
},
'outputFormat': {
'values': ['application/xml', 'application/json']
},
'CONSTRAINTLANGUAGE': {
'values': ['FILTER', 'CQL_TEXT']
},
'ElementSetName': {
'values': ['brief', 'summary', 'full']
}
},
'constraints': {
}
},
'GetRecordById': {
'methods': {
'get': True,
'post': True,
},
'parameters': {
'outputSchema': {
'values': ['http://www.opengis.net/cat/csw/2.0.2']
},
'outputFormat': {
'values': ['application/xml', 'application/json']
},
'ElementSetName': {
'values': ['brief', 'summary', 'full']
}
}
},
'GetRepositoryItem': {
'methods': {
'get': True,
'post': False,
},
'parameters': {
}
}
},
'parameters': {
'version': {
'values': ['2.0.2', '3.0.0']
},
'service': {
'values': ['CSW']
}
},
'constraints': {
'MaxRecordDefault': {
'values': ['10']
},
'PostEncoding': {
'values': ['XML', 'SOAP']
},
'XPathQueryables': {
'values': ['allowed']
}
},
'typenames': {
'csw:Record': {
'outputschema': 'http://www.opengis.net/cat/csw/2.0.2',
'queryables': {
'SupportedDublinCoreQueryables': {
# map Dublin Core queryables to core metadata model
'dc:title':
{'dbcol': self.md_core_model['mappings']['pycsw:Title']},
'dct:alternative':
{'dbcol': self.md_core_model['mappings']['pycsw:AlternateTitle']},
'dc:creator':
{'dbcol': self.md_core_model['mappings']['pycsw:Creator']},
'dc:subject':
{'dbcol': self.md_core_model['mappings']['pycsw:Keywords']},
'dct:abstract':
{'dbcol': self.md_core_model['mappings']['pycsw:Abstract']},
'dc:publisher':
{'dbcol': self.md_core_model['mappings']['pycsw:Publisher']},
'dc:contributor':
{'dbcol': self.md_core_model['mappings']['pycsw:Contributor']},
'dct:modified':
{'dbcol': self.md_core_model['mappings']['pycsw:Modified']},
'dc:date':
{'dbcol': self.md_core_model['mappings']['pycsw:Date']},
'dc:type':
{'dbcol': self.md_core_model['mappings']['pycsw:Type']},
'dc:format':
{'dbcol': self.md_core_model['mappings']['pycsw:Format']},
'dc:identifier':
{'dbcol': self.md_core_model['mappings']['pycsw:Identifier']},
'dc:source':
{'dbcol': self.md_core_model['mappings']['pycsw:Source']},
'dc:language':
{'dbcol': self.md_core_model['mappings']['pycsw:Language']},
'dc:relation':
{'dbcol': self.md_core_model['mappings']['pycsw:Relation']},
'dc:rights':
{'dbcol':
self.md_core_model['mappings']['pycsw:AccessConstraints']},
'dct:spatial':
{'dbcol': self.md_core_model['mappings']['pycsw:CRS']},
# bbox and full text map to internal fixed columns
'ows:BoundingBox':
{'dbcol': self.md_core_model['mappings']['pycsw:BoundingBox']},
'csw:AnyText':
{'dbcol': self.md_core_model['mappings']['pycsw:AnyText']},
}
}
}
}
},
'csw30': {
'operations_order': [
'GetCapabilities', 'GetDomain', 'GetRecords',
'GetRecordById', 'GetRepositoryItem'
],
'operations': {
'GetCapabilities': {
'methods': {
'get': True,
'post': True,
},
'parameters': {
'acceptVersions': {
'values': ['2.0.2', '3.0.0']
},
'acceptFormats': {
'values': ['text/xml', 'application/xml']
},
'sections': {
'values': ['ServiceIdentification', 'ServiceProvider',
'OperationsMetadata', 'Filter_Capabilities', 'All']
}
}
},
'GetRecords': {
'methods': {
'get': True,
'post': True,
},
'parameters': {
'typeNames': {
'values': ['csw:Record', 'csw30:Record']
},
'outputSchema': {
'values': ['http://www.opengis.net/cat/csw/3.0']
},
'outputFormat': {
'values': ['application/xml', 'application/json', 'application/atom+xml']
},
'CONSTRAINTLANGUAGE': {
'values': ['FILTER', 'CQL_TEXT']
},
'ElementSetName': {
'values': ['brief', 'summary', 'full']
}
},
'constraints': {
}
},
'GetRecordById': {
'methods': {
'get': True,
'post': True,
},
'parameters': {
'outputSchema': {
'values': ['http://www.opengis.net/cat/csw/3.0']
},
'outputFormat': {
'values': ['application/xml', 'application/json', 'application/atom+xml']
},
'ElementSetName': {
'values': ['brief', 'summary', 'full']
}
}
},
'GetRepositoryItem': {
'methods': {
'get': True,
'post': False,
},
'parameters': {
}
}
},
'parameters': {
'version': {
'values': ['2.0.2', '3.0.0']
},
'service': {
'values': ['CSW']
}
},
'constraints': {
'MaxRecordDefault': {
'values': ['10']
},
'PostEncoding': {
'values': ['XML', 'SOAP']
},
'XPathQueryables': {
'values': ['allowed']
},
'http://www.opengis.net/spec/csw/3.0/conf/OpenSearch': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/GetCapabilities-XML': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/GetRecordById-XML': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/GetRecords-Basic-XML': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/GetRecords-Distributed-XML': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/GetRecords-Distributed-KVP': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/GetRecords-Async-XML': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/GetRecords-Async-KVP': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/GetDomain-XML': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/GetDomain-KVP': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/Transaction': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/Harvest-Basic-XML': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/Harvest-Basic-KVP': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/Harvest-Async-XML': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/Harvest-Async-KVP': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/Harvest-Periodic-XML': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/Harvest-Periodic-KVP': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/Filter-CQL': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/Filter-FES-XML': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/Filter-FES-KVP-Advanced': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/SupportedGMLVersions': {
'values': ['http://www.opengis.net/gml']
},
'http://www.opengis.net/spec/csw/3.0/conf/DefaultSortingAlgorithm': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/CoreQueryables': {
'values': ['TRUE']
},
'http://www.opengis.net/spec/csw/3.0/conf/CoreSortables': {
'values': ['TRUE']
}
},
'typenames': {
'csw:Record': {
'outputschema': 'http://www.opengis.net/cat/csw/3.0',
'queryables': {
'SupportedDublinCoreQueryables': {
# map Dublin Core queryables to core metadata model
'dc:title':
{'dbcol': self.md_core_model['mappings']['pycsw:Title']},
'dct:alternative':
{'dbcol': self.md_core_model['mappings']['pycsw:AlternateTitle']},
'dc:creator':
{'dbcol': self.md_core_model['mappings']['pycsw:Creator']},
'dc:subject':
{'dbcol': self.md_core_model['mappings']['pycsw:Keywords']},
'dct:abstract':
{'dbcol': self.md_core_model['mappings']['pycsw:Abstract']},
'dc:publisher':
{'dbcol': self.md_core_model['mappings']['pycsw:Publisher']},
'dc:contributor':
{'dbcol': self.md_core_model['mappings']['pycsw:Contributor']},
'dct:modified':
{'dbcol': self.md_core_model['mappings']['pycsw:Modified']},
'dc:date':
{'dbcol': self.md_core_model['mappings']['pycsw:Date']},
'dc:type':
{'dbcol': self.md_core_model['mappings']['pycsw:Type']},
'dc:format':
{'dbcol': self.md_core_model['mappings']['pycsw:Format']},
'dc:identifier':
{'dbcol': self.md_core_model['mappings']['pycsw:Identifier']},
'dc:source':
{'dbcol': self.md_core_model['mappings']['pycsw:Source']},
'dc:language':
{'dbcol': self.md_core_model['mappings']['pycsw:Language']},
'dc:relation':
{'dbcol': self.md_core_model['mappings']['pycsw:Relation']},
'dc:rights':
{'dbcol':
self.md_core_model['mappings']['pycsw:AccessConstraints']},
'dct:spatial':
{'dbcol': self.md_core_model['mappings']['pycsw:CRS']},
# bbox and full text map to internal fixed columns
'ows:BoundingBox':
{'dbcol': self.md_core_model['mappings']['pycsw:BoundingBox']},
'csw:AnyText':
{'dbcol': self.md_core_model['mappings']['pycsw:AnyText']},
}
}
}
}
}
}
self.set_model(prefix)
def set_model(self, prefix):
"""sets model given request context"""
self.model = self.models[prefix]
def gen_domains(self):
"""Generate parameter domain model"""
domain = {}
domain['methods'] = {'get': True, 'post': True}
domain['parameters'] = {'ParameterName': {'values': []}}
for operation in self.model['operations'].keys():
for parameter in self.model['operations'][operation]['parameters']:
domain['parameters']['ParameterName']['values'].append('%s.%s' %
(operation, parameter))
return domain
def refresh_dc(self, mappings):
"""Refresh Dublin Core mappings"""
LOGGER.debug('refreshing Dublin Core mappings with %s', str(mappings))
defaults = {
'dc:title': 'pycsw:Title',
'dct:alternative': 'pycsw:AlternateTitle',
'dc:creator': 'pycsw:Creator',
'dc:subject': 'pycsw:Keywords',
'dct:abstract': 'pycsw:Abstract',
'dc:publisher': 'pycsw:Publisher',
'dc:contributor': 'pycsw:Contributor',
'dct:modified': 'pycsw:Modified',
'dc:date': 'pycsw:Date',
'dc:type': 'pycsw:Type',
'dc:format': 'pycsw:Format',
'dc:identifier': 'pycsw:Identifier',
'dc:source': 'pycsw:Source',
'dc:language': 'pycsw:Language',
'dc:relation': 'pycsw:Relation',
'dc:rights': 'pycsw:AccessConstraints',
'dct:spatial': 'pycsw:CRS',
'ows:BoundingBox': 'pycsw:BoundingBox',
'csw:AnyText': 'pycsw:AnyText',
}
for k, val in defaults.items():
for model, params in self.models.items():
queryables = params['typenames']['csw:Record']['queryables']
queryables['SupportedDublinCoreQueryables'][k] = {
'dbcol': mappings['mappings'][val]
}
|
{
"content_hash": "5c5648f5d01b666326dc4c699bd5fc56",
"timestamp": "",
"source": "github",
"line_count": 582,
"max_line_length": 105,
"avg_line_length": 47.10481099656357,
"alnum_prop": 0.3779317891665147,
"repo_name": "ricardogsilva/pycsw",
"id": "155f241d655e46d6b15cb4e012e6732222383e84",
"size": "28727",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pycsw/core/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2909"
},
{
"name": "HTML",
"bytes": "25603"
},
{
"name": "Makefile",
"bytes": "677"
},
{
"name": "Python",
"bytes": "846706"
},
{
"name": "Shell",
"bytes": "129"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_creature_lair_boar_wolf.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "f2f0962e37f6188c643930c182a071bc",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 75,
"avg_line_length": 23.76923076923077,
"alnum_prop": 0.6925566343042071,
"repo_name": "anhstudios/swganh",
"id": "3135f2a1c69c841df8a3c1af0924f98caa35473e",
"size": "454",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/building/poi/shared_creature_lair_boar_wolf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flexget import plugin
from flexget.event import event
from . import series as plugin_series
from . import db
class FilterSeriesPremiere(plugin_series.FilterSeriesBase):
"""
Accept an entry that appears to be the first episode of any series.
Can be configured with any of the options of series plugin
Examples:
series_premiere: yes
series_premiere:
path: ~/Media/TV/_NEW_/.
quality: 720p
timeframe: 12 hours
NOTE: this plugin only looks in the entry title and expects the title
format to start with the series name followed by the episode info. Use
the manipulate plugin to modify the entry title to match this format, if
necessary.
TODO:
- integrate thetvdb to allow refining by genres, etc.
"""
@property
def schema(self):
settings = self.settings_schema
settings['properties']['allow_seasonless'] = {'type': 'boolean'}
settings['properties']['allow_teasers'] = {'type': 'boolean'}
return {'anyOf': [{'type': 'boolean'}, settings]}
# Run after series and metainfo series plugins
@plugin.priority(115)
def on_task_metainfo(self, task, config):
if not config:
# Don't run when we are disabled
return
# Generate the group settings for series plugin
group_settings = {}
allow_seasonless = False
desired_eps = [0, 1]
if isinstance(config, dict):
allow_seasonless = config.pop('allow_seasonless', False)
if not config.pop('allow_teasers', True):
desired_eps = [1]
group_settings = config
group_settings['identified_by'] = 'ep'
# Generate a list of unique series that have premieres
guess_entry = plugin.get('metainfo_series', self).guess_entry
# Make a set of unique series according to series name normalization rules
guessed_series = {}
for entry in task.entries:
if guess_entry(entry, allow_seasonless=allow_seasonless, config=group_settings):
if (
not entry['season_pack']
and entry['series_season'] == 1
and entry['series_episode'] in desired_eps
):
normalized_name = plugin_series.normalize_series_name(entry['series_name'])
db_series = (
task.session.query(db.Series)
.filter(db.Series.name == normalized_name)
.first()
)
if db_series and db_series.in_tasks:
continue
guessed_series.setdefault(normalized_name, entry['series_name'])
# Reject any further episodes in those series
for entry in task.entries:
for series in guessed_series.values():
if entry.get('series_name') == series and (
entry.get('season_pack')
or not (
entry.get('series_season') == 1
and entry.get('series_episode') in desired_eps
)
):
entry.reject('Non premiere episode or season pack in a premiere series')
# Combine settings and series into series plugin config format
allseries = {
'settings': {'series_premiere': group_settings},
'series_premiere': list(guessed_series.values()),
}
# Merge the our config in to the main series config
self.merge_config(task, allseries)
@event('plugin.register')
def register_plugin():
plugin.register(FilterSeriesPremiere, 'series_premiere', api_ver=2)
|
{
"content_hash": "218bca58a541d40d87e39669b9851d48",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 95,
"avg_line_length": 38.46534653465346,
"alnum_prop": 0.5884169884169884,
"repo_name": "tobinjt/Flexget",
"id": "3e416fcf86fbfcc04a0276b3ffb1cce324d69dd4",
"size": "3885",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/components/series/series_premiere.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "Dockerfile",
"bytes": "2338"
},
{
"name": "HTML",
"bytes": "79800"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3492888"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1576"
}
],
"symlink_target": ""
}
|
import os, io
import sys, getopt
import random
import codecs
#sys.stdout = codecs.getwriter('utf8')(sys.stdout)
#sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')
#print sys.stdout.encoding
HeadProperty = ['a','art','ad','conj','prep','pron','int','n','num','v','vi','vt']
def ChAlign(string, length=0):
if length == 0:
return string
slen = len(string)
re = string
if isinstance(string, str):
placeholder = ' '
else:
placeholder = u' '
while slen < length:
re += placeholder
slen += 1
return re
if __name__ == '__main__':
columns = 3
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(sys.argv[1:],"hi:o:c:",["ifile=","ofile=","columns="])
except getopt.GetoptError:
print 'test.py -i <inputfile> -o <outputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'test.py -i <inputfile> -o <outputfile>'
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
elif opt in ("-c", "--columns"):
columns = int(arg)
print 'Input file is "', inputfile
print 'Output file is "', outputfile
WordList = {} # {word:[phonetic, meaning],}
WordList['!abnormal!'] = []
WordPropSort = {'unknown':{},'odd':[]}
for prop in HeadProperty:
WordPropSort[prop] = {}
with io.open(inputfile, 'r', encoding='utf8') as f:
for line in f.readlines():
word = line.strip().rstrip('\n')
if word != '':
splits = word.split('[')
if len(splits) > 1:
wordname = splits[0].strip()
WordList[wordname] = []
subsplits = splits[1].split(']')
if len(subsplits) > 1:
WordList[wordname].append(subsplits[0].strip())
WordList[wordname].append(subsplits[1].strip())
else:
del WordList[wordname]
WordList['!abnormal!'].append(word)
else:
WordList['!abnormal!'].append(word)
for word in WordList:
if word == '!abnormal!':
WordPropSort['odd'] = WordList[word]
continue
meaning = WordList[word][1]
phonetic = WordList[word][0]
issort = False
for prop in reversed(HeadProperty):
strmean = meaning.lstrip('*').strip()
if strmean.startswith(prop):
WordPropSort[prop].update({word : [phonetic,meaning]})
issort = True
break
if issort is not True:
WordPropSort['unknown'].update({word : [phonetic,meaning]})
with io.open(outputfile, 'w', encoding='utf8') as of:
keys = HeadProperty + ['unknown','odd']
for propkey in keys:
of.writelines(u'\n' + propkey + u'\n')
if propkey == 'odd':
for item in WordPropSort[propkey]:
item += u'\n'
of.writelines(item)
# print item
else:
for word in WordPropSort[propkey]:
meaning = WordPropSort[propkey][word][1]
phonetic = WordPropSort[propkey][word][0]
of.writelines('%-50s' % meaning + word + u' [' +phonetic + u']\n')
#print meaning
|
{
"content_hash": "941de2c3a162954e531c88523ca81e9c",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 91,
"avg_line_length": 32.845454545454544,
"alnum_prop": 0.48906725712704124,
"repo_name": "howmind/tools",
"id": "d50efa23d411697af0198ea66d21a3da4206dd78",
"size": "3662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WordsMeaningSplit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7283"
},
{
"name": "Shell",
"bytes": "49502"
}
],
"symlink_target": ""
}
|
import os
import re
from datetime import datetime
import pytz
class AwsNexradFile(object):
"""
This class contains metadata about the remote NEXRAD file on AWS
:var key: AWS key for this NEXRAD file
:vartype key: str
:var last_modified: when the file was last modified on AWS
:vartype last_modified: datetime
:var awspath: filepath on AWS to NEXRAD file
:vartype awspath: str
:var filename: the NEXRAD filename
:vartype filename: str
:var scan_time: volume scan time for the NEXRAD file
:vartype scan_time: datetime
:var radar_id: the four letter radar id (i.e. KTLX)
:vartype radar_id: str
:var filepath: absolute path to the downloaded file on the local system
    :vartype filepath: str
"""
def __init__(self,scandict):
super(AwsNexradFile, self).__init__()
self._scan_time_re = re.compile(r'(....)(\d{4}\d{2}\d{2}_\d{2}\d{2}\d{2}).*')
self.key = scandict.get('Key',None)
self.last_modified = scandict.get('LastModified',None)
self.awspath = None
self.filename = None
self.scan_time = None
self.radar_id = None
if self.key is not None:
self._parse_key()
def _parse_key(self):
self.awspath,self.filename = os.path.split(self.key)
match = self._scan_time_re.match(self.filename)
if match is not None:
self.radar_id = match.group(1)
timestring = match.group(2)
self.scan_time = datetime.strptime(timestring,
'%Y%m%d_%H%M%S').replace(tzinfo=pytz.UTC)
def create_filepath(self, basepath, keep_aws_structure):
"""
        This function creates the file path in preparation for downloading. If keep_aws_structure
is True then subfolders will be created under the basepath with the same structure as the
AWS Nexrad Bucket.
You should not need to call this function as it is done for you on download.
        :param basepath: string - base folder to save files to
        :param keep_aws_structure: boolean - whether or not to use the AWS folder structure
inside the basepath...(year/month/day/radar/)
:return: tuple - directory path and full filepath
"""
if keep_aws_structure:
directorypath = os.path.join(basepath,self.awspath)
filepath = os.path.join(directorypath,self.filename)
else:
directorypath = basepath
filepath = os.path.join(basepath, self.filename)
return directorypath,filepath
def __repr__(self):
return '<AwsNexradFile object - {}>'.format(self.key)
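# Illustrative usage sketch (not part of the original module); the key below is
# a made-up example following the usual NEXRAD level-2 naming scheme.
if __name__ == '__main__':
    example = AwsNexradFile({'Key': '2017/05/12/KTLX/KTLX20170512_120551_V06'})
    print(example.radar_id)    # 'KTLX'
    print(example.scan_time)   # 2017-05-12 12:05:51+00:00
    # keep_aws_structure=True mirrors the year/month/day/radar layout locally
    print(example.create_filepath('/tmp/nexrad', True))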
|
{
"content_hash": "c4e97b97f1a3adeaab155d31c26c837b",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 97,
"avg_line_length": 35.223684210526315,
"alnum_prop": 0.6264475158759806,
"repo_name": "aarande/nexradaws",
"id": "5ec9bc09b9624b91d9afdf83f4601a34d00ba96f",
"size": "2677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nexradaws/resources/awsnexradfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "33233"
}
],
"symlink_target": ""
}
|
import logging
__author__ = 'rolandh'
from pymongo import Connection
#import cjson
import time
from datetime import datetime
from saml2 import time_util
from saml2.cache import ToOld
from saml2.time_util import TIME_FORMAT
logger = logging.getLogger(__name__)
class Cache(object):
def __init__(self, server=None, debug=0, db=None):
if server:
connection = Connection(server)
else:
connection = Connection()
if db:
self._db = connection[db]
else:
self._db = connection.pysaml2
self._cache = self._db.collection
self.debug = debug
def delete(self, subject_id):
self._cache.remove({"subject_id": subject_id})
def get_identity(self, subject_id, entities=None,
check_not_on_or_after=True):
""" Get all the identity information that has been received and
are still valid about the subject.
:param subject_id: The identifier of the subject
        :param entities: The identifiers of the entities whose assertions are
interesting. If the list is empty all entities are interesting.
:return: A 2-tuple consisting of the identity information (a
dictionary of attributes and values) and the list of entities
            whose information has timed out.
"""
res = {}
oldees = []
if not entities:
for item in self._cache.find({"subject_id": subject_id}):
try:
info = self._get_info(item, check_not_on_or_after)
except ToOld:
oldees.append(item["entity_id"])
continue
for key, vals in info["ava"].items():
try:
tmp = set(res[key]).union(set(vals))
res[key] = list(tmp)
except KeyError:
res[key] = vals
else:
for entity_id in entities:
try:
info = self.get(subject_id, entity_id,
check_not_on_or_after)
except ToOld:
oldees.append(entity_id)
continue
for key, vals in info["ava"].items():
try:
tmp = set(res[key]).union(set(vals))
res[key] = list(tmp)
except KeyError:
res[key] = vals
return res, oldees
def _get_info(self, item, check_not_on_or_after=True):
""" Get session information about a subject gotten from a
specified IdP/AA.
:param item: Information stored
:return: The session information as a dictionary
"""
timestamp = item["timestamp"]
if check_not_on_or_after and not time_util.not_on_or_after(timestamp):
raise ToOld()
try:
return item["info"]
except KeyError:
return None
def get(self, subject_id, entity_id, check_not_on_or_after=True):
res = self._cache.find_one({"subject_id": subject_id,
"entity_id": entity_id})
if not res:
return {}
else:
return self._get_info(res, check_not_on_or_after)
def set(self, subject_id, entity_id, info, timestamp=0):
""" Stores session information in the cache. Assumes that the subject_id
is unique within the context of the Service Provider.
:param subject_id: The subject identifier
:param entity_id: The identifier of the entity_id/receiver of an
assertion
:param info: The session info, the assertion is part of this
:param timestamp: A time after which the assertion is not valid.
"""
if isinstance(timestamp, datetime) or isinstance(timestamp,
time.struct_time):
timestamp = time.strftime(TIME_FORMAT, timestamp)
doc = {"subject_id": subject_id,
"entity_id": entity_id,
"info": info,
"timestamp": timestamp}
_ = self._cache.insert(doc)
def reset(self, subject_id, entity_id):
""" Scrap the assertions received from a IdP or an AA about a special
subject.
:param subject_id: The subjects identifier
:param entity_id: The identifier of the entity_id of the assertion
:return:
"""
self._cache.update({"subject_id": subject_id, "entity_id": entity_id},
{"$set": {"info": {}, "timestamp": 0}})
def entities(self, subject_id):
""" Returns all the entities of assertions for a subject, disregarding
        whether the assertions are still valid or not.
:param subject_id: The identifier of the subject
:return: A possibly empty list of entity identifiers
"""
try:
return [i["entity_id"] for i in self._cache.find({"subject_id":
subject_id})]
except ValueError:
return []
def receivers(self, subject_id):
""" Another name for entities() just to make it more logic in the IdP
scenario """
return self.entities(subject_id)
def active(self, subject_id, entity_id):
""" Returns the status of assertions from a specific entity_id.
:param subject_id: The ID of the subject
:param entity_id: The entity ID of the entity_id of the assertion
:return: True or False depending on if the assertion is still
valid or not.
"""
item = self._cache.find_one({"subject_id": subject_id,
"entity_id": entity_id})
try:
return time_util.not_on_or_after(item["timestamp"])
except ToOld:
return False
def subjects(self):
""" Return identifiers for all the subjects that are in the cache.
:return: list of subject identifiers
"""
subj = [i["subject_id"] for i in self._cache.find()]
return list(set(subj))
def update(self, subject_id, entity_id, ava):
""" """
item = self._cache.find_one({"subject_id": subject_id,
"entity_id": entity_id})
info = item["info"]
info["ava"].update(ava)
self._cache.update({"subject_id": subject_id, "entity_id": entity_id},
{"$set": {"info": info}})
def valid_to(self, subject_id, entity_id, newtime):
""" """
self._cache.update({"subject_id": subject_id, "entity_id": entity_id},
{"$set": {"timestamp": newtime}})
def clear(self):
self._cache.remove()
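# Illustrative usage sketch (not part of the original module); the MongoDB
# host, subject/entity identifiers and attribute values are all assumptions.
if __name__ == '__main__':
    cache = Cache(server="localhost", db="pysaml2test")
    valid_until = time_util.in_a_while(hours=1)
    cache.set("subject-1", "https://idp.example.org",
              {"ava": {"mail": ["user@example.org"]}}, valid_until)
    info, timed_out = cache.get_identity("subject-1")
    print(info)       # {'mail': ['user@example.org']}
    print(timed_out)  # []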
|
{
"content_hash": "ea75fa22c79af747e0e27ccd63e7203f",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 80,
"avg_line_length": 34.68181818181818,
"alnum_prop": 0.5402650356778798,
"repo_name": "Runscope/pysaml2",
"id": "2f5d9c45ba0e6f2af741d02d49cb152d73d0c659",
"size": "6889",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/saml2/mdbcache.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "46"
},
{
"name": "Python",
"bytes": "2602966"
},
{
"name": "Shell",
"bytes": "4371"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0009_auto_20210322_1837'),
]
operations = [
migrations.AlterField(
model_name='banner',
name='color',
field=models.CharField(default='hs-yellow', help_text='Bakgrunnsfargen til banneret som en hex-farge. hs-green, hs-yellow og hs-red støttes også som presets.', max_length=10, verbose_name='bannercolor'),
),
migrations.AlterField(
model_name='banner',
name='site',
field=models.CharField(default='*', help_text="Det interne navnet på URL-stien til sidene som banneret skal dukke opp på. Wildcard (*) støttes. F.eks. er '*' ALLE sider, 'inventory:*' er alle lagersider.", max_length=250, verbose_name='bannersider'),
),
migrations.AlterField(
model_name='banner',
name='text',
field=models.TextField(default='Sample Text', help_text='Tekst som vises i banneret.', max_length=1000, verbose_name='bannertext'),
),
migrations.AlterField(
model_name='banner',
name='text_color',
field=models.CharField(default='hs-black', help_text='Tekstfargen på banneret. hs-white og hs-black støttes som presets.', max_length=10, verbose_name='bannertextcolor'),
),
]
|
{
"content_hash": "1e554a2a56b7d805f9c563b7876d6b11",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 262,
"avg_line_length": 45.064516129032256,
"alnum_prop": 0.6227630637079457,
"repo_name": "hackerspace-ntnu/website",
"id": "94835fa7c2ac5b8cca998876136245887e9d7fad",
"size": "1453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/migrations/0010_auto_20210322_1840.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16771"
},
{
"name": "HTML",
"bytes": "235369"
},
{
"name": "JavaScript",
"bytes": "43249"
},
{
"name": "Python",
"bytes": "323186"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import numpy as np
import scipy.misc
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
class Video(object):
def __init__(self, filename, fps=15, bitrate=2000, dump_imgs=True):
self.filename = filename
self.fps = fps
self.bitrate = bitrate
self.dump_imgs = dump_imgs
if which('ffmpeg') is None and which('avconv') is None:
self.disable_video = True
else:
self.disable_video = False
dirpath = os.path.split(filename)[0]
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath)
if dump_imgs:
self.imgs_dir = os.path.splitext(filename)[0]
if not os.path.exists(self.imgs_dir):
os.mkdir(self.imgs_dir)
self.proc = None
self.img_shape = None
self.n_frames = 0
def _start_proc(self):
if len(self.img_shape) == 3 and self.img_shape[2] == 3:
pix_fmt = 'rgb24'
else:
pix_fmt = 'gray'
if which('ffmpeg') is None:
encoder = 'avconv'
else:
encoder = 'ffmpeg'
cmd = [
encoder,
'-y',
'-loglevel', 'error',
'-f', 'rawvideo',
'-vcodec', 'rawvideo',
'-s', '%dx%d' % (self.img_shape[1], self.img_shape[0]),
'-pix_fmt', pix_fmt,
'-r', '%.02f' % self.fps,
'-i', '-', '-an',
self.filename,
]
popen_params = {"stdout": DEVNULL,
"stderr": subprocess.PIPE,
"stdin": subprocess.PIPE}
self.proc = subprocess.Popen(cmd, **popen_params)
def append(self, frame):
if frame.dtype.kind == 'f':
frame -= np.min(frame)
frame /= np.max(frame)/255.
frame = frame.astype(np.uint8)
if frame.shape[0] % 2 != 0:
frame = np.insert(frame, frame.shape[0], 0, axis=0)
if frame.shape[1] % 2 != 0:
frame = np.insert(frame, frame.shape[1], 0, axis=1)
if self.dump_imgs:
img_path = os.path.join(self.imgs_dir, '%.5d.png' % self.n_frames)
scipy.misc.imsave(img_path, frame)
if self.img_shape is None:
self.img_shape = frame.shape
if frame.ndim == 3 and frame.shape[2] not in [1, 3]:
raise ValueError('invalid # of channels: %i' % frame.shape[2])
elif self.img_shape != frame.shape:
raise ValueError('frame shape mismatch')
self.n_frames += 1
if self.disable_video:
return
if self.proc is None:
self._start_proc()
try:
self.proc.stdin.write(frame.tostring())
self.proc.stdin.flush()
except IOError:
raise RuntimeError('Failed writing frame to video:\n %s '
% self.proc.stderr.read())
def __del__(self):
if self.proc is not None:
self.proc.stdin.close()
if self.proc.stderr is not None:
self.proc.stderr.close()
self.proc.wait()
del self.proc
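# Illustrative usage sketch (not part of the original module): write a short
# clip of random-noise frames. The output path and frame size are assumptions.
if __name__ == '__main__':
    vid = Video('out/noise.mp4', fps=15, dump_imgs=False)
    for _ in range(30):
        vid.append(np.random.rand(64, 64, 3))  # float frames get rescaled to uint8
    del vid                                    # closes the ffmpeg/avconv pipe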
|
{
"content_hash": "c6e1b92615c0d93c6f9dc22edb81c1fe",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 78,
"avg_line_length": 32.49122807017544,
"alnum_prop": 0.5134989200863931,
"repo_name": "andersbll/vae_gan",
"id": "e02f2efb7db10b6b060526dc205f975462410d18",
"size": "3704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "video.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74280"
}
],
"symlink_target": ""
}
|
import nengo
from nengo.exceptions import ConfigError
import pytest
from rig import place_and_route as par
from nengo_spinnaker import Simulator, add_spinnaker_params
from nengo_spinnaker.config import CallableParameter
from nengo_spinnaker import node_io
def test_add_spinnaker_params():
"""Test adding SpiNNaker specific parameters to a configuration object."""
# Create a network
with nengo.Network() as net:
n_ft = nengo.Node(lambda t: [t, t**2])
ptn = nengo.Node(size_in=2)
# Setting SpiNNaker-specific options before calling `add_spinnaker_params`
# should fail.
for param, value in [
("function_of_time", True),
("function_of_time_period", 0.5),
]:
with pytest.raises(AttributeError) as excinfo:
setattr(net.config[n_ft], param, value)
assert param in str(excinfo.value)
for param, value in [
("optimize_out", False),
]:
with pytest.raises(AttributeError) as excinfo:
setattr(net.config[ptn], param, value)
for param, value in [
("placer", lambda r, n, m, c: None),
("placer_kwargs", {}),
("allocater", lambda r, n, m, c, p: None),
("allocater_kwargs", {}),
("router", lambda r, n, m, c, p, a: None),
("router_kwargs", {}),
("node_io", None),
("node_io_kwargs", {}),
]:
with pytest.raises(ConfigError) as excinfo:
setattr(net.config[Simulator], param, value)
assert "Simulator" in str(excinfo.value)
# Adding the SpiNNaker parameters should allow all of these to pass
add_spinnaker_params(net.config)
assert net.config[nengo.Node].function_of_time is False
assert net.config[nengo.Node].function_of_time_period is None
assert net.config[nengo.Node].optimize_out is None
assert net.config[Simulator].placer is par.place
assert net.config[Simulator].placer_kwargs == {}
assert net.config[Simulator].allocator is par.allocate
assert net.config[Simulator].allocator_kwargs == {}
assert net.config[Simulator].router is par.route
assert net.config[Simulator].router_kwargs == {}
assert net.config[Simulator].node_io is node_io.Ethernet
assert net.config[Simulator].node_io_kwargs == {}
def test_callable_parameter_validate():
"""Test that the callable parameter fails to validate if passed something
other than a callable.
"""
cp = CallableParameter("test")
with pytest.raises(ValueError) as excinfo:
cp.validate(None, "Not a function")
assert "must be callable" in str(excinfo.value)
cp.validate(None, lambda x: None)
@pytest.mark.xfail(reason="Problems with Parameters")
def test_function_of_time_node():
# Test that function of time can't be marked on Nodes unless they have size
# in == 0
with nengo.Network() as net:
not_f_of_t = nengo.Node(lambda t, x: t**2, size_in=1)
f_of_t = nengo.Node(lambda t: t)
# Modify the config
add_spinnaker_params(net.config)
net.config[f_of_t].function_of_time = True
with pytest.raises(ValueError):
net.config[not_f_of_t].function_of_time = True
# Check the settings are valid
assert not net.config[not_f_of_t].function_of_time
assert net.config[f_of_t].function_of_time
|
{
"content_hash": "60954ccda6516ce76f0b9762a8caca1c",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 79,
"avg_line_length": 34.78350515463917,
"alnum_prop": 0.6437462951985774,
"repo_name": "project-rig/nengo_spinnaker",
"id": "2a7c60861603beed09272706aa6c71e10512c3ce",
"size": "3374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "156127"
},
{
"name": "C++",
"bytes": "4428"
},
{
"name": "Makefile",
"bytes": "3057"
},
{
"name": "Python",
"bytes": "609080"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
def raise_error(apps, schema_editor):
# Test operation in non-atomic migration is not wrapped in transaction
Publisher = apps.get_model('migrations', 'Publisher')
Publisher.objects.create(name='Test Publisher')
raise RuntimeError('Abort migration')
class Migration(migrations.Migration):
atomic = False
operations = [
migrations.CreateModel(
"Publisher",
[
("name", models.CharField(primary_key=True, max_length=255)),
],
),
migrations.RunPython(raise_error),
migrations.CreateModel(
"Book",
[
("title", models.CharField(primary_key=True, max_length=255)),
("publisher", models.ForeignKey("migrations.Publisher", models.SET_NULL, null=True)),
],
),
]
|
{
"content_hash": "b34e6e44767bf2e1feb14ff65ec6f6f2",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 101,
"avg_line_length": 30.870967741935484,
"alnum_prop": 0.58098223615465,
"repo_name": "yephper/django",
"id": "65cd53b497e45760eb16c55c9a03827fbd6d57cc",
"size": "982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/migrations/test_migrations_non_atomic/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
}
|
from unittest.mock import Mock, call, patch
from expecter import expect
from gitman import system
def describe_launch():
@patch('platform.system', Mock(return_value="Windows"))
@patch('gitman.system._launch_windows')
def it_opens_files(startfile):
system.launch("fake/path")
expect(startfile.mock_calls) == [call("fake/path")]
@patch('platform.system', Mock(return_value="fake"))
def it_raises_an_exception_when_platform_is_unknown():
with expect.raises(RuntimeError):
system.launch(None)
|
{
"content_hash": "69f429f8b8be95eb95b3976fe2550e56",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 59,
"avg_line_length": 30.555555555555557,
"alnum_prop": 0.68,
"repo_name": "jacebrowning/gdm",
"id": "44d9e131223d3b073178d866968e768535f7bd0c",
"size": "609",
"binary": false,
"copies": "1",
"ref": "refs/heads/feature/flat_flat-recursive_resolver",
"path": "gitman/tests/test_system.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "9014"
},
{
"name": "Python",
"bytes": "83502"
}
],
"symlink_target": ""
}
|
""" Panel4D: a 4-d dict like collection of panels """
from pandas.core.panelnd import create_nd_panel_factory
from pandas.core.panel import Panel
Panel4D = create_nd_panel_factory(klass_name='Panel4D',
orders=['labels', 'items', 'major_axis',
'minor_axis'],
slices={'labels': 'labels',
'items': 'items',
'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer=Panel,
aliases={'major': 'major_axis',
'minor': 'minor_axis'}, stat_axis=2,
ns=dict(__doc__="""
Panel4D is a 4-Dimensional named container very much like a Panel, but
having 4 named dimensions. It is intended as a test bed for more
N-Dimensional named containers.
Parameters
----------
data : ndarray (labels x items x major x minor), or dict of Panels
labels : Index or array-like : axis=0
items : Index or array-like : axis=1
major_axis : Index or array-like: axis=2
minor_axis : Index or array-like: axis=3
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
"""))
def panel4d_init(self, data=None, labels=None, items=None, major_axis=None,
minor_axis=None, copy=False, dtype=None):
self._init_data(data=data, labels=labels, items=items,
major_axis=major_axis, minor_axis=minor_axis, copy=copy,
dtype=dtype)
Panel4D.__init__ = panel4d_init
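# Illustrative construction sketch (not part of the original module); the axis
# labels below are assumptions.
if __name__ == '__main__':
    import numpy as np
    p4d = Panel4D(np.random.randn(2, 3, 4, 5),
                  labels=['l1', 'l2'],
                  items=['i1', 'i2', 'i3'],
                  major_axis=list(range(4)),
                  minor_axis=list('ABCDE'))
    print(p4d['l1'])  # indexing on the labels axis yields a Panel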
|
{
"content_hash": "171a52962b5a5ec038e64c14d08456db",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 40.68888888888889,
"alnum_prop": 0.523211359912616,
"repo_name": "BigDataforYou/movie_recommendation_workshop_1",
"id": "33bd79195cc779644980fe88daae2e0445015040",
"size": "1831",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/core/panel4d.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "738713"
},
{
"name": "C++",
"bytes": "169366"
},
{
"name": "CSS",
"bytes": "14786"
},
{
"name": "Fortran",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "1408733"
},
{
"name": "JavaScript",
"bytes": "13700"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "19755294"
},
{
"name": "Shell",
"bytes": "3276"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
import sys
sys.path[0:0] = [""]
import unittest
from mongoengine import *
from mongoengine.connection import get_db
__all__ = ("GeoFieldTest", )
class GeoFieldTest(unittest.TestCase):
def setUp(self):
connect(db='mongoenginetest')
self.db = get_db()
def _test_for_expected_error(self, Cls, loc, expected):
try:
Cls(loc=loc).validate()
self.fail('Should not validate the location {0}'.format(loc))
except ValidationError as e:
self.assertEqual(expected, e.to_dict()['loc'])
def test_geopoint_validation(self):
class Location(Document):
loc = GeoPointField()
invalid_coords = [{"x": 1, "y": 2}, 5, "a"]
expected = 'GeoPointField can only accept tuples or lists of (x, y)'
for coord in invalid_coords:
self._test_for_expected_error(Location, coord, expected)
invalid_coords = [[], [1], [1, 2, 3]]
for coord in invalid_coords:
expected = "Value (%s) must be a two-dimensional point" % repr(coord)
self._test_for_expected_error(Location, coord, expected)
invalid_coords = [[{}, {}], ("a", "b")]
for coord in invalid_coords:
expected = "Both values (%s) in point must be float or int" % repr(coord)
self._test_for_expected_error(Location, coord, expected)
def test_point_validation(self):
class Location(Document):
loc = PointField()
invalid_coords = {"x": 1, "y": 2}
expected = 'PointField can only accept a valid GeoJson dictionary or lists of (x, y)'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MadeUp", "coordinates": []}
expected = 'PointField type must be "Point"'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "Point", "coordinates": [1, 2, 3]}
expected = "Value ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [5, "a"]
expected = "PointField can only accept lists of [x, y]"
for coord in invalid_coords:
self._test_for_expected_error(Location, coord, expected)
invalid_coords = [[], [1], [1, 2, 3]]
for coord in invalid_coords:
expected = "Value (%s) must be a two-dimensional point" % repr(coord)
self._test_for_expected_error(Location, coord, expected)
invalid_coords = [[{}, {}], ("a", "b")]
for coord in invalid_coords:
expected = "Both values (%s) in point must be float or int" % repr(coord)
self._test_for_expected_error(Location, coord, expected)
Location(loc=[1, 2]).validate()
Location(loc={
"type": "Point",
"coordinates": [
81.4471435546875,
23.61432859499169
]}).validate()
def test_linestring_validation(self):
class Location(Document):
loc = LineStringField()
invalid_coords = {"x": 1, "y": 2}
expected = 'LineStringField can only accept a valid GeoJson dictionary or lists of (x, y)'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
expected = 'LineStringField type must be "LineString"'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "LineString", "coordinates": [[1, 2, 3]]}
expected = "Invalid LineString:\nValue ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [5, "a"]
expected = "Invalid LineString must contain at least one valid point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[1]]
expected = "Invalid LineString:\nValue (%s) must be a two-dimensional point" % repr(invalid_coords[0])
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[1, 2, 3]]
expected = "Invalid LineString:\nValue (%s) must be a two-dimensional point" % repr(invalid_coords[0])
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[{}, {}]], [("a", "b")]]
for coord in invalid_coords:
expected = "Invalid LineString:\nBoth values (%s) in point must be float or int" % repr(coord[0])
self._test_for_expected_error(Location, coord, expected)
Location(loc=[[1, 2], [3, 4], [5, 6], [1,2]]).validate()
def test_polygon_validation(self):
class Location(Document):
loc = PolygonField()
invalid_coords = {"x": 1, "y": 2}
expected = 'PolygonField can only accept a valid GeoJson dictionary or lists of (x, y)'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
expected = 'PolygonField type must be "Polygon"'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "Polygon", "coordinates": [[[1, 2, 3]]]}
expected = "Invalid Polygon:\nValue ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[5, "a"]]]
expected = "Invalid Polygon:\nBoth values ([5, 'a']) in point must be float or int"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[]]]
expected = "Invalid Polygon must contain at least one valid linestring"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[1, 2, 3]]]
expected = "Invalid Polygon:\nValue ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[{}, {}]], [("a", "b")]]
expected = "Invalid Polygon:\nBoth values ([{}, {}]) in point must be float or int, Both values (('a', 'b')) in point must be float or int"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[1, 2], [3, 4]]]
expected = "Invalid Polygon:\nLineStrings must start and end at the same point"
self._test_for_expected_error(Location, invalid_coords, expected)
Location(loc=[[[1, 2], [3, 4], [5, 6], [1, 2]]]).validate()
def test_multipoint_validation(self):
class Location(Document):
loc = MultiPointField()
invalid_coords = {"x": 1, "y": 2}
expected = 'MultiPointField can only accept a valid GeoJson dictionary or lists of (x, y)'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
expected = 'MultiPointField type must be "MultiPoint"'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MultiPoint", "coordinates": [[1, 2, 3]]}
expected = "Value ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[]]
expected = "Invalid MultiPoint must contain at least one valid point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[1]], [[1, 2, 3]]]
for coord in invalid_coords:
expected = "Value (%s) must be a two-dimensional point" % repr(coord[0])
self._test_for_expected_error(Location, coord, expected)
invalid_coords = [[[{}, {}]], [("a", "b")]]
for coord in invalid_coords:
expected = "Both values (%s) in point must be float or int" % repr(coord[0])
self._test_for_expected_error(Location, coord, expected)
Location(loc=[[1, 2]]).validate()
Location(loc={
"type": "MultiPoint",
"coordinates": [
[1, 2],
[81.4471435546875, 23.61432859499169]
]}).validate()
def test_multilinestring_validation(self):
class Location(Document):
loc = MultiLineStringField()
invalid_coords = {"x": 1, "y": 2}
expected = 'MultiLineStringField can only accept a valid GeoJson dictionary or lists of (x, y)'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
expected = 'MultiLineStringField type must be "MultiLineString"'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MultiLineString", "coordinates": [[[1, 2, 3]]]}
expected = "Invalid MultiLineString:\nValue ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [5, "a"]
expected = "Invalid MultiLineString must contain at least one valid linestring"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[1]]]
expected = "Invalid MultiLineString:\nValue (%s) must be a two-dimensional point" % repr(invalid_coords[0][0])
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[1, 2, 3]]]
expected = "Invalid MultiLineString:\nValue (%s) must be a two-dimensional point" % repr(invalid_coords[0][0])
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[[{}, {}]]], [[("a", "b")]]]
for coord in invalid_coords:
expected = "Invalid MultiLineString:\nBoth values (%s) in point must be float or int" % repr(coord[0][0])
self._test_for_expected_error(Location, coord, expected)
Location(loc=[[[1, 2], [3, 4], [5, 6], [1,2]]]).validate()
def test_multipolygon_validation(self):
class Location(Document):
loc = MultiPolygonField()
invalid_coords = {"x": 1, "y": 2}
expected = 'MultiPolygonField can only accept a valid GeoJson dictionary or lists of (x, y)'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MadeUp", "coordinates": [[]]}
expected = 'MultiPolygonField type must be "MultiPolygon"'
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = {"type": "MultiPolygon", "coordinates": [[[[1, 2, 3]]]]}
expected = "Invalid MultiPolygon:\nValue ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[[5, "a"]]]]
expected = "Invalid MultiPolygon:\nBoth values ([5, 'a']) in point must be float or int"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[[]]]]
expected = "Invalid MultiPolygon must contain at least one valid Polygon"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[[1, 2, 3]]]]
expected = "Invalid MultiPolygon:\nValue ([1, 2, 3]) must be a two-dimensional point"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[[{}, {}]]], [[("a", "b")]]]
expected = "Invalid MultiPolygon:\nBoth values ([{}, {}]) in point must be float or int, Both values (('a', 'b')) in point must be float or int"
self._test_for_expected_error(Location, invalid_coords, expected)
invalid_coords = [[[[1, 2], [3, 4]]]]
expected = "Invalid MultiPolygon:\nLineStrings must start and end at the same point"
self._test_for_expected_error(Location, invalid_coords, expected)
Location(loc=[[[[1, 2], [3, 4], [5, 6], [1, 2]]]]).validate()
def test_indexes_geopoint(self):
"""Ensure that indexes are created automatically for GeoPointFields.
"""
class Event(Document):
title = StringField()
location = GeoPointField()
geo_indicies = Event._geo_indices()
self.assertEqual(geo_indicies, [{'fields': [('location', '2d')]}])
def test_geopoint_embedded_indexes(self):
"""Ensure that indexes are created automatically for GeoPointFields on
embedded documents.
"""
class Venue(EmbeddedDocument):
location = GeoPointField()
name = StringField()
class Event(Document):
title = StringField()
venue = EmbeddedDocumentField(Venue)
geo_indicies = Event._geo_indices()
self.assertEqual(geo_indicies, [{'fields': [('venue.location', '2d')]}])
def test_indexes_2dsphere(self):
"""Ensure that indexes are created automatically for GeoPointFields.
"""
class Event(Document):
title = StringField()
point = PointField()
line = LineStringField()
polygon = PolygonField()
geo_indicies = Event._geo_indices()
self.assertTrue({'fields': [('line', '2dsphere')]} in geo_indicies)
self.assertTrue({'fields': [('polygon', '2dsphere')]} in geo_indicies)
self.assertTrue({'fields': [('point', '2dsphere')]} in geo_indicies)
def test_indexes_2dsphere_embedded(self):
"""Ensure that indexes are created automatically for GeoPointFields.
"""
class Venue(EmbeddedDocument):
name = StringField()
point = PointField()
line = LineStringField()
polygon = PolygonField()
class Event(Document):
title = StringField()
venue = EmbeddedDocumentField(Venue)
geo_indicies = Event._geo_indices()
self.assertTrue({'fields': [('venue.line', '2dsphere')]} in geo_indicies)
self.assertTrue({'fields': [('venue.polygon', '2dsphere')]} in geo_indicies)
self.assertTrue({'fields': [('venue.point', '2dsphere')]} in geo_indicies)
def test_geo_indexes_recursion(self):
class Location(Document):
name = StringField()
location = GeoPointField()
class Parent(Document):
name = StringField()
location = ReferenceField(Location)
Location.drop_collection()
Parent.drop_collection()
list(Parent.objects)
collection = Parent._get_collection()
info = collection.index_information()
self.assertFalse('location_2d' in info)
self.assertEqual(len(Parent._geo_indices()), 0)
self.assertEqual(len(Location._geo_indices()), 1)
def test_geo_indexes_auto_index(self):
# Test just listing the fields
class Log(Document):
location = PointField(auto_index=False)
datetime = DateTimeField()
meta = {
'indexes': [[("location", "2dsphere"), ("datetime", 1)]]
}
self.assertEqual([], Log._geo_indices())
Log.drop_collection()
Log.ensure_indexes()
info = Log._get_collection().index_information()
self.assertEqual(info["location_2dsphere_datetime_1"]["key"],
[('location', '2dsphere'), ('datetime', 1)])
# Test listing explicitly
class Log(Document):
location = PointField(auto_index=False)
datetime = DateTimeField()
meta = {
'indexes': [
{'fields': [("location", "2dsphere"), ("datetime", 1)]}
]
}
self.assertEqual([], Log._geo_indices())
Log.drop_collection()
Log.ensure_indexes()
info = Log._get_collection().index_information()
self.assertEqual(info["location_2dsphere_datetime_1"]["key"],
[('location', '2dsphere'), ('datetime', 1)])
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "e662556bf50a804132d08eda1fc4ccc8",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 152,
"avg_line_length": 41.44358974358974,
"alnum_prop": 0.5967951494153313,
"repo_name": "elephanter/mongoengine",
"id": "8193d87e90635143c39dbc27178a79f1e5271e8e",
"size": "16187",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/fields/geo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1005321"
}
],
"symlink_target": ""
}
|
"""Trigger the GitHub action to build our kits."""
import sys
import requests
repo_owner = sys.argv[1]
# The GitHub URL makes no mention of which workflow to use. It's found based on
# the event_type, which matches the types in the workflow:
#
# on:
# repository_dispatch:
# types:
# - build-kits
#
resp = requests.post(
f"https://api.github.com/repos/{repo_owner}/dispatches",
json={"event_type": "build-kits"},
)
print(f"Status: {resp.status_code}")
print(resp.text)
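# Illustrative note (not part of the original script): the dispatches endpoint
# normally requires authentication, so a variant passing a personal access
# token (the GITHUB_TOKEN variable name is an assumption) might look like:
#
#   import os
#   resp = requests.post(
#       f"https://api.github.com/repos/{repo_owner}/dispatches",
#       json={"event_type": "build-kits"},
#       headers={"Authorization": f"token {os.environ['GITHUB_TOKEN']}"},
#   )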
|
{
"content_hash": "a867cd7ebff01ba1125cc6622cbd5e09",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 21.82608695652174,
"alnum_prop": 0.6673306772908366,
"repo_name": "nedbat/coveragepy",
"id": "0485df10a3c78d83c342075d01776a7e99ad4a5d",
"size": "658",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ci/trigger_build_kits.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "48728"
},
{
"name": "CSS",
"bytes": "37343"
},
{
"name": "HTML",
"bytes": "213879"
},
{
"name": "JavaScript",
"bytes": "48668"
},
{
"name": "Makefile",
"bytes": "9529"
},
{
"name": "Python",
"bytes": "1324579"
},
{
"name": "SCSS",
"bytes": "17425"
},
{
"name": "Shell",
"bytes": "2240"
}
],
"symlink_target": ""
}
|
from kivy.tests.common import GraphicUnitTest
from kivy.input.motionevent import MotionEvent
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.slider import Slider
from kivy.base import EventLoop
class UTMotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
self.sx = args['x']
self.sy = args['y']
self.profile = ['pos']
super(UTMotionEvent, self).depack(args)
class _TestSliderHandle(Slider):
def __init__(self, **kwargs):
super(_TestSliderHandle, self).__init__(**kwargs)
self.sensitivity = 'handle'
class _TestSliderAll(Slider):
def __init__(self, **kwargs):
super(_TestSliderAll, self).__init__(**kwargs)
self.sensitivity = 'all'
class SliderMoveTestCase(GraphicUnitTest):
framecount = 0
def setUp(self):
# kill KV lang logging (too long test)
import kivy.lang.builder as builder
if not hasattr(self, '_trace'):
self._trace = builder.trace
self.builder = builder
builder.trace = lambda *_, **__: None
super(SliderMoveTestCase, self).setUp()
def tearDown(self, *args, **kwargs):
# add the logging back
import kivy.lang.builder as builder
builder.trace = self._trace
super(SliderMoveTestCase, self).tearDown(*args, **kwargs)
def test_slider_move(self):
EventLoop.ensure_window()
win = EventLoop.window
layout = BoxLayout(orientation='vertical')
s_handle = _TestSliderHandle()
s_all = _TestSliderAll()
layout.add_widget(s_handle)
layout.add_widget(s_all)
win.add_widget(layout)
# get widgets ready
EventLoop.idle()
cur1 = s_handle.children[0]
cur2 = s_all.children[0]
h1 = cur1.to_window(*cur1.center)[1]
h2 = h1 - s_handle.cursor_height
h3 = cur2.to_window(*cur2.center)[1]
h4 = h3 - s_all.cursor_height
w1 = cur1.to_window(*cur1.center)[0]
w2 = cur2.to_window(*cur2.center)[0]
wh = win.width / 2.0
dt = 2
# default pos, new pos, slider ID
points = [
[w1, h1, wh, h1, 'handle'],
[w1, h2, wh, h2, 'handle'],
[w2, h3, wh, h3, 'all'],
[w2, h4, wh, h4, 'all'],
]
for point in points:
x, y, nx, ny, id = point
# custom touch
touch = UTMotionEvent("unittest", 1, {
"x": x / float(win.width),
"y": y / float(win.height),
})
# touch down
EventLoop.post_dispatch_input("begin", touch)
if id == 'handle':
# touch on handle
if x == w1 and y == h1:
self.assertAlmostEqual(
s_handle.value, 0.0,
delta=dt
)
# touch in widget area (ignored, previous value)
elif x == w1 and y == h2:
self.assertAlmostEqual(
s_handle.value, 50.0,
delta=dt
)
elif id == 'all':
# touch on handle:
if x == w1 and y == h3:
self.assertAlmostEqual(
s_all.value, 0.0,
delta=dt
)
# touch in widget area
elif x == w1 and y == h4:
self.assertAlmostEqual(
s_all.value, 0.0,
delta=dt
)
# move from default to new pos
touch.move({
"x": nx / float(win.width),
"y": ny / float(win.height)
})
EventLoop.post_dispatch_input("update", touch)
if id == 'handle':
# move from handle to center
if nx == wh and ny == h1:
self.assertAlmostEqual(
s_handle.value, 50.0,
delta=dt
)
# move to center (ignored, previous value)
elif nx == wh and ny == h2:
self.assertAlmostEqual(
s_handle.value, 50.0,
delta=dt
)
elif id == 'all':
# touch on handle:
if nx == wh and ny == h3:
self.assertAlmostEqual(
s_all.value, 50.0,
delta=dt
)
# touch in widget area
elif nx == wh and ny == h4:
self.assertAlmostEqual(
s_all.value, 50.0,
delta=dt
)
# touch up
EventLoop.post_dispatch_input("end", touch)
self.render(layout)
if __name__ == '__main__':
import unittest
unittest.main()
|
{
"content_hash": "f074ecb62c9cb4bca95e59fdc912a355",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 65,
"avg_line_length": 30.265060240963855,
"alnum_prop": 0.464171974522293,
"repo_name": "matham/kivy",
"id": "9c88fe9bc61957402e916d9e89638b4529330570",
"size": "5024",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kivy/tests/test_uix_slider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "324418"
},
{
"name": "C++",
"bytes": "3888"
},
{
"name": "Emacs Lisp",
"bytes": "9838"
},
{
"name": "GLSL",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "4993"
},
{
"name": "Objective-C",
"bytes": "21550"
},
{
"name": "PowerShell",
"bytes": "5375"
},
{
"name": "Python",
"bytes": "4204346"
},
{
"name": "Shell",
"bytes": "25365"
},
{
"name": "Vim script",
"bytes": "2120"
}
],
"symlink_target": ""
}
|
from tests.testcase import TestCase
from edmunds.foundation.applicationmiddleware import ApplicationMiddleware
class TestMiddleware(TestCase):
"""
Test the Middleware
"""
cache = None
def set_up(self):
"""
Set up the test case
"""
super(TestMiddleware, self).set_up()
TestMiddleware.cache = dict()
TestMiddleware.cache['timeline'] = []
def test_registering_application_middleware(self):
"""
Test registering the application middleware
"""
# Check empty
self.assert_not_in('edmunds.applicationmiddleware.middleware', self.app.extensions)
# Register the middleware
self.app.middleware(MyApplicationMiddleware)
# Check if registered
self.assert_equal(1, self.app.extensions['edmunds.applicationmiddleware.middleware'].count(MyApplicationMiddleware))
self.assert_is_instance(self.app.wsgi_app, MyApplicationMiddleware)
self.assert_not_is_instance(self.app.wsgi_app.wsgi_app, MyApplicationMiddleware)
# Try adding it again
self.app.middleware(MyApplicationMiddleware)
# Check if duplicate
self.assert_equal(1, self.app.extensions['edmunds.applicationmiddleware.middleware'].count(MyApplicationMiddleware))
self.assert_is_instance(self.app.wsgi_app, MyApplicationMiddleware)
self.assert_not_is_instance(self.app.wsgi_app.wsgi_app, MyApplicationMiddleware)
# Try adding second one
self.app.middleware(MySecondApplicationMiddleware)
# Check if registered
self.assert_equal(1, self.app.extensions['edmunds.applicationmiddleware.middleware'].count(MyApplicationMiddleware))
self.assert_equal(1, self.app.extensions['edmunds.applicationmiddleware.middleware'].count(MySecondApplicationMiddleware))
self.assert_is_instance(self.app.wsgi_app, MySecondApplicationMiddleware)
self.assert_is_instance(self.app.wsgi_app.wsgi_app, MyApplicationMiddleware)
self.assert_not_is_instance(self.app.wsgi_app.wsgi_app.wsgi_app, MyApplicationMiddleware)
def test_handling_application_middleware(self):
"""
Test handling of application middleware
"""
# Register the middleware
self.app.middleware(MyApplicationMiddleware)
# Add it a second time to make sure it is only called once
self.app.middleware(MyApplicationMiddleware)
# Add route
rule = '/' + self.rand_str(20)
@self.app.route(rule)
def handle_route():
TestMiddleware.cache['timeline'].append('handle_route')
return ''
# Call route
with self.app.test_client() as c:
rv = c.get(rule)
self.assert_equal(2, len(TestMiddleware.cache['timeline']))
self.assert_in(MyApplicationMiddleware.__name__, TestMiddleware.cache['timeline'])
self.assert_equal(0, TestMiddleware.cache['timeline'].index(MyApplicationMiddleware.__name__))
self.assert_in('handle_route', TestMiddleware.cache['timeline'])
self.assert_equal(1, TestMiddleware.cache['timeline'].index('handle_route'))
# Add second middleware
self.app.middleware(MySecondApplicationMiddleware)
# Call route
TestMiddleware.cache = dict()
TestMiddleware.cache['timeline'] = []
with self.app.test_client() as c:
rv = c.get(rule)
self.assert_equal(3, len(TestMiddleware.cache['timeline']))
self.assert_in(MySecondApplicationMiddleware.__name__, TestMiddleware.cache['timeline'])
self.assert_equal(0, TestMiddleware.cache['timeline'].index(MySecondApplicationMiddleware.__name__))
self.assert_in(MyApplicationMiddleware.__name__, TestMiddleware.cache['timeline'])
self.assert_equal(1, TestMiddleware.cache['timeline'].index(MyApplicationMiddleware.__name__))
self.assert_in('handle_route', TestMiddleware.cache['timeline'])
self.assert_equal(2, TestMiddleware.cache['timeline'].index('handle_route'))
class MyApplicationMiddleware(ApplicationMiddleware):
"""
Application Middleware class
"""
def handle(self, environment, start_response):
TestMiddleware.cache['timeline'].append(self.__class__.__name__)
return super(MyApplicationMiddleware, self).handle(environment, start_response)
class MySecondApplicationMiddleware(ApplicationMiddleware):
"""
Second Application Middleware class
"""
def handle(self, environment, start_response):
TestMiddleware.cache['timeline'].append(self.__class__.__name__)
return super(MySecondApplicationMiddleware, self).handle(environment, start_response)
|
{
"content_hash": "9060bafc4bab20f02f1bbedc187e69b1",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 130,
"avg_line_length": 37.61417322834646,
"alnum_prop": 0.6790872932803015,
"repo_name": "LowieHuyghe/edmunds",
"id": "3aa49501aa213a68af65b304f25af2a6e1122b84",
"size": "4778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/foundation/concerns/testmiddleware.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "570304"
}
],
"symlink_target": ""
}
|
from .proc_base import ProcBase
class ProcMounts(ProcBase):
    '''Represents the /proc/[pid]/mounts file.'''
format_str = '{0:15} | {1:25} | {2:15} | {3}'
def __init__(self, pid):
'''
Read file by calling base class constructor
then parse the contents.
'''
super().__init__('/proc/{0}/mounts'.format(pid))
self.mounts = []
self.read()
def read(self):
'''Parses contents of /proc/[pid]/mounts'''
if not self.content:
return
for line in self.content.split('\n'):
tokens = line.split()
if not tokens:
continue
name = None
mount_point = None
fs_type = None
options = None
if tokens[0]:
name = tokens[0]
if tokens[1]:
mount_point = tokens[1]
if tokens[2]:
fs_type = tokens[2]
if tokens[3]:
options = tokens[3]
self.mounts.append((name, mount_point, fs_type, options))
def dump(self):
'''Print information gathered to stdout.'''
super().dump() # Print file header
table_header_str = self.format_str.format(
'Name', 'mount point', 'file type', 'configuration')
print(table_header_str)
print(len(table_header_str) * '-')
for (name, mount, fs_type, config) in self.mounts:
print(self.format_str.format(name, mount, fs_type, config))
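# Illustrative usage sketch (not part of the original module); the package
# import path is assumed from the repository layout.
#
#   from proc_scraper.proc_mounts import ProcMounts
#   ProcMounts('self').dump()   # '/proc/self/mounts': mounts seen by this process
#   ProcMounts(1).dump()        # mount table of pid 1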
|
{
"content_hash": "fff13d96b9354f5fda09b99046b4533b",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 71,
"avg_line_length": 28.943396226415093,
"alnum_prop": 0.5052151238591917,
"repo_name": "EwanC/pyProc",
"id": "727475cee51b6a99151c39b286d4febb522bddad",
"size": "1558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proc_scraper/proc_mounts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21221"
}
],
"symlink_target": ""
}
|
import os.path
from flask import Flask, request, jsonify, send_file
from iiif2 import IIIF, web
PATH = os.path.dirname(os.path.realpath(__file__))
app = Flask(__name__)
@app.route('/<identifier>/info.json')
def info(identifier):
return jsonify(web.info(request.url, resolve(identifier), identifier))
@app.route('/<identifier>/<region>/<size>/<rotation>/<quality>.<fmt>')
def iiif(**kwargs):
params = web.Parse.params(**kwargs)
path = resolve(params.get('identifier'))
with IIIF.render(path, **params) as tile:
return send_file(tile, mimetype=tile.mime)
def resolve(identifier):
"""Resolves a iiif identifier to the resource's path on disk.
This method is specific to this server's architecture.
"""
return os.path.join(PATH, 'images', '%s.jpg' % identifier)
if __name__ == "__main__":
app.run(debug=True)
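# Illustrative note (not part of the original example): with an image stored as
# images/page1.jpg (the name is an assumption), the routes above serve e.g.
#
#   GET /page1/info.json                -> IIIF Image Information document
#   GET /page1/full/300,/0/default.jpg  -> full region scaled to 300px wide
#
# following the {identifier}/{region}/{size}/{rotation}/{quality}.{format}
# pattern of the IIIF Image API.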
|
{
"content_hash": "5ba6d1aebe63dd036ddfc8dc0e22a9a2",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 74,
"avg_line_length": 28.6,
"alnum_prop": 0.6748251748251748,
"repo_name": "mekarpeles/iiif2",
"id": "1ae5323f4e1013964f432baa1576dfe3feb12332",
"size": "858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/flask-iiif/app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "17401"
}
],
"symlink_target": ""
}
|
import io
from ....tests.helper import pytest
import numpy as np
from ..column import Column
from ..diff import (FITSDiff, HeaderDiff, ImageDataDiff, TableDataDiff,
HDUDiff, report_diff_values)
from ..hdu import HDUList, PrimaryHDU, ImageHDU
from ..hdu.table import BinTableHDU
from ..header import Header
from ....tests.helper import catch_warnings
from ....utils.exceptions import AstropyDeprecationWarning
from ....extern.six.moves import range
from ....io import fits
from . import FitsTestCase
class TestDiff(FitsTestCase):
def test_identical_headers(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
assert HeaderDiff(ha, hb).identical
assert HeaderDiff(ha.tostring(), hb.tostring()).identical
with pytest.raises(TypeError):
HeaderDiff(1, 2)
def test_slightly_different_headers(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
assert not HeaderDiff(ha, hb).identical
def test_common_keywords(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
hb['D'] = (5, 'Comment')
assert HeaderDiff(ha, hb).common_keywords == ['A', 'B', 'C']
def test_different_keyword_count(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
del hb['B']
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_count == (3, 2)
# But make sure the common keywords are at least correct
assert diff.common_keywords == ['A', 'C']
def test_different_keywords(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
hb['D'] = (5, 'Comment')
ha['E'] = (6, 'Comment')
ha['F'] = (7, 'Comment')
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keywords == (['E', 'F'], ['D'])
def test_different_keyword_values(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_values == {'C': [(3, 4)]}
def test_different_keyword_comments(self):
ha = Header([('A', 1), ('B', 2), ('C', 3, 'comment 1')])
hb = ha.copy()
hb.comments['C'] = 'comment 2'
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert (diff.diff_keyword_comments ==
{'C': [('comment 1', 'comment 2')]})
def test_different_keyword_values_with_duplicate(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
ha.append(('C', 4))
hb.append(('C', 5))
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_values == {'C': [None, (4, 5)]}
def test_asymmetric_duplicate_keywords(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
ha.append(('A', 2, 'comment 1'))
ha.append(('A', 3, 'comment 2'))
hb.append(('B', 4, 'comment 3'))
hb.append(('C', 5, 'comment 4'))
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_values == {}
assert (diff.diff_duplicate_keywords ==
{'A': (3, 1), 'B': (1, 2), 'C': (1, 2)})
report = diff.report()
assert ("Inconsistent duplicates of keyword 'A' :\n"
" Occurs 3 time(s) in a, 1 times in (b)") in report
def test_floating_point_rtol(self):
ha = Header([('A', 1), ('B', 2.00001), ('C', 3.000001)])
hb = ha.copy()
hb['B'] = 2.00002
hb['C'] = 3.000002
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(2.00001, 2.00002)], 'C': [(3.000001, 3.000002)]})
diff = HeaderDiff(ha, hb, rtol=1e-6)
assert not diff.identical
assert diff.diff_keyword_values == {'B': [(2.00001, 2.00002)]}
diff = HeaderDiff(ha, hb, rtol=1e-5)
assert diff.identical
def test_floating_point_atol(self):
ha = Header([('A', 1), ('B', 1.0), ('C', 0.0)])
hb = ha.copy()
hb['B'] = 1.00001
hb['C'] = 0.000001
diff = HeaderDiff(ha, hb, rtol=1e-6)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(1.0, 1.00001)], 'C': [(0.0, 0.000001)]})
diff = HeaderDiff(ha, hb, rtol=1e-5)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'C': [(0.0, 0.000001)]})
diff = HeaderDiff(ha, hb, atol=1e-6)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(1.0, 1.00001)]})
diff = HeaderDiff(ha, hb, atol=1e-5) # strict inequality
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(1.0, 1.00001)]})
diff = HeaderDiff(ha, hb, rtol=1e-5, atol=1e-5)
assert diff.identical
diff = HeaderDiff(ha, hb, atol=1.1e-5)
assert diff.identical
diff = HeaderDiff(ha, hb, rtol=1e-6, atol=1e-6)
assert not diff.identical
def test_deprecation_tolerance(self):
"""Verify uses of tolerance and rtol.
This test should be removed in the next astropy version."""
ha = Header([('B', 1.0), ('C', 0.1)])
hb = ha.copy()
hb['B'] = 1.00001
hb['C'] = 0.100001
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
diff = HeaderDiff(ha, hb, tolerance=1e-6)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"tolerance" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "rtol" instead.')
assert (diff.diff_keyword_values == {'C': [(0.1, 0.100001)],
'B': [(1.0, 1.00001)]})
assert not diff.identical
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
# `rtol` is always ignored when `tolerance` is provided
diff = HeaderDiff(ha, hb, rtol=1e-6, tolerance=1e-5)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"tolerance" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "rtol" instead.')
assert diff.identical
def test_ignore_blanks(self):
with fits.conf.set_temp('strip_header_whitespace', False):
ha = Header([('A', 1), ('B', 2), ('C', 'A ')])
hb = ha.copy()
hb['C'] = 'A'
assert ha['C'] != hb['C']
diff = HeaderDiff(ha, hb)
# Trailing blanks are ignored by default
assert diff.identical
assert diff.diff_keyword_values == {}
# Don't ignore blanks
diff = HeaderDiff(ha, hb, ignore_blanks=False)
assert not diff.identical
assert diff.diff_keyword_values == {'C': [('A ', 'A')]}
def test_ignore_blank_cards(self):
"""Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/152
Ignore blank cards.
"""
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = Header([('A', 1), ('', ''), ('B', 2), ('', ''), ('C', 3)])
hc = ha.copy()
hc.append()
hc.append()
# We now have a header with interleaved blanks, and a header with end
# blanks, both of which should ignore the blanks
assert HeaderDiff(ha, hb).identical
assert HeaderDiff(ha, hc).identical
assert HeaderDiff(hb, hc).identical
assert not HeaderDiff(ha, hb, ignore_blank_cards=False).identical
assert not HeaderDiff(ha, hc, ignore_blank_cards=False).identical
# Both hb and hc have the same number of blank cards; since order is
# currently ignored, these should still be identical even if blank
# cards are not ignored
assert HeaderDiff(hb, hc, ignore_blank_cards=False).identical
hc.append()
# But now there are different numbers of blanks, so they should not be
# ignored:
assert not HeaderDiff(hb, hc, ignore_blank_cards=False).identical
def test_ignore_keyword_values(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['B'] = 4
hb['C'] = 5
diff = HeaderDiff(ha, hb, ignore_keywords=['*'])
assert diff.identical
diff = HeaderDiff(ha, hb, ignore_keywords=['B'])
assert not diff.identical
assert diff.diff_keyword_values == {'C': [(3, 5)]}
report = diff.report()
assert 'Keyword B has different values' not in report
assert 'Keyword C has different values' in report
# Test case-insensitivity
diff = HeaderDiff(ha, hb, ignore_keywords=['b'])
assert not diff.identical
assert diff.diff_keyword_values == {'C': [(3, 5)]}
def test_ignore_keyword_comments(self):
ha = Header([('A', 1, 'A'), ('B', 2, 'B'), ('C', 3, 'C')])
hb = ha.copy()
hb.comments['B'] = 'D'
hb.comments['C'] = 'E'
diff = HeaderDiff(ha, hb, ignore_comments=['*'])
assert diff.identical
diff = HeaderDiff(ha, hb, ignore_comments=['B'])
assert not diff.identical
assert diff.diff_keyword_comments == {'C': [('C', 'E')]}
report = diff.report()
assert 'Keyword B has different comments' not in report
assert 'Keyword C has different comments' in report
# Test case-insensitivity
diff = HeaderDiff(ha, hb, ignore_comments=['b'])
assert not diff.identical
assert diff.diff_keyword_comments == {'C': [('C', 'E')]}
def test_trivial_identical_images(self):
ia = np.arange(100).reshape((10, 10))
ib = np.arange(100).reshape((10, 10))
diff = ImageDataDiff(ia, ib)
assert diff.identical
assert diff.diff_total == 0
def test_identical_within_relative_tolerance(self):
ia = np.ones((10, 10)) - 0.00001
ib = np.ones((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-4)
assert diff.identical
assert diff.diff_total == 0
def test_identical_within_absolute_tolerance(self):
ia = np.zeros((10, 10)) - 0.00001
ib = np.zeros((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-4)
assert not diff.identical
assert diff.diff_total == 100
diff = ImageDataDiff(ia, ib, atol=1.0e-4)
assert diff.identical
assert diff.diff_total == 0
def test_identical_within_rtol_and_atol(self):
ia = np.zeros((10, 10)) - 0.00001
ib = np.zeros((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-5, atol=1.0e-5)
assert diff.identical
assert diff.diff_total == 0
def test_not_identical_within_rtol_and_atol(self):
ia = np.zeros((10, 10)) - 0.00001
ib = np.zeros((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-5, atol=1.0e-6)
assert not diff.identical
assert diff.diff_total == 100
def test_identical_comp_image_hdus(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/189
        For this test we mostly just care that comparing two compressed images
does not crash, and returns the correct results. Two compressed images
will be considered identical if the decompressed data is the same.
Obviously we test whether or not the same compression was used by
looking for (or ignoring) header differences.
"""
data = np.arange(100.0).reshape((10, 10))
hdu = fits.CompImageHDU(data=data)
hdu.writeto(self.temp('test.fits'))
hdula = fits.open(self.temp('test.fits'))
hdulb = fits.open(self.temp('test.fits'))
diff = FITSDiff(hdula, hdulb)
assert diff.identical
def test_different_dimensions(self):
ia = np.arange(100).reshape((10, 10))
ib = np.arange(100) - 1
# Although ib could be reshaped into the same dimensions, for now the
# data is not compared anyways
diff = ImageDataDiff(ia, ib)
assert not diff.identical
assert diff.diff_dimensions == ((10, 10), (100,))
assert diff.diff_total == 0
report = diff.report()
assert 'Data dimensions differ' in report
assert 'a: 10 x 10' in report
assert 'b: 100' in report
        assert 'No further data comparison performed.' in report
def test_different_pixels(self):
ia = np.arange(100).reshape((10, 10))
ib = np.arange(100).reshape((10, 10))
ib[0, 0] = 10
ib[5, 5] = 20
diff = ImageDataDiff(ia, ib)
assert not diff.identical
assert diff.diff_dimensions == ()
assert diff.diff_total == 2
assert diff.diff_ratio == 0.02
assert diff.diff_pixels == [((0, 0), (0, 10)), ((5, 5), (55, 20))]
def test_identical_tables(self):
c1 = Column('A', format='L', array=[True, False])
c2 = Column('B', format='X', array=[[0], [1]])
c3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [4, 5, 6, 7]])
c4 = Column('D', format='J', bscale=2.0, array=[0, 1])
c5 = Column('E', format='A3', array=['abc', 'def'])
c6 = Column('F', format='E', unit='m', array=[0.0, 1.0])
c7 = Column('G', format='D', bzero=-0.1, array=[0.0, 1.0])
c8 = Column('H', format='C', array=[0.0+1.0j, 2.0+3.0j])
c9 = Column('I', format='M', array=[4.0+5.0j, 6.0+7.0j])
c10 = Column('J', format='PI(2)', array=[[0, 1], [2, 3]])
columns = [c1, c2, c3, c4, c5, c6, c7, c8, c9, c10]
ta = BinTableHDU.from_columns(columns)
tb = BinTableHDU.from_columns([c.copy() for c in columns])
diff = TableDataDiff(ta.data, tb.data)
assert diff.identical
assert len(diff.common_columns) == 10
assert (diff.common_column_names ==
set(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']))
assert diff.diff_ratio == 0
assert diff.diff_total == 0
def test_diff_empty_tables(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/178
Ensure that diffing tables containing empty data doesn't crash.
"""
c1 = Column('D', format='J')
c2 = Column('E', format='J')
thdu = BinTableHDU.from_columns([c1, c2], nrows=0)
hdula = fits.HDUList([thdu])
hdulb = fits.HDUList([thdu])
diff = FITSDiff(hdula, hdulb)
assert diff.identical
def test_ignore_table_fields(self):
c1 = Column('A', format='L', array=[True, False])
c2 = Column('B', format='X', array=[[0], [1]])
c3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [4, 5, 6, 7]])
c4 = Column('B', format='X', array=[[1], [0]])
c5 = Column('C', format='4I', dim='(2, 2)',
array=[[1, 2, 3, 4], [5, 6, 7, 8]])
ta = BinTableHDU.from_columns([c1, c2, c3])
tb = BinTableHDU.from_columns([c1, c4, c5])
diff = TableDataDiff(ta.data, tb.data, ignore_fields=['B', 'C'])
assert diff.identical
# The only common column should be c1
assert len(diff.common_columns) == 1
assert diff.common_column_names == set(['a'])
assert diff.diff_ratio == 0
assert diff.diff_total == 0
def test_different_table_field_names(self):
ca = Column('A', format='L', array=[True, False])
cb = Column('B', format='L', array=[True, False])
cc = Column('C', format='L', array=[True, False])
ta = BinTableHDU.from_columns([ca, cb])
tb = BinTableHDU.from_columns([ca, cc])
diff = TableDataDiff(ta.data, tb.data)
assert not diff.identical
assert len(diff.common_columns) == 1
assert diff.common_column_names == set(['a'])
assert diff.diff_column_names == (['B'], ['C'])
assert diff.diff_ratio == 0
assert diff.diff_total == 0
report = diff.report()
assert 'Extra column B of format L in a' in report
assert 'Extra column C of format L in b' in report
def test_different_table_field_counts(self):
"""
Test tables with some common columns, but different number of columns
overall.
"""
ca = Column('A', format='L', array=[True, False])
cb = Column('B', format='L', array=[True, False])
cc = Column('C', format='L', array=[True, False])
ta = BinTableHDU.from_columns([cb])
tb = BinTableHDU.from_columns([ca, cb, cc])
diff = TableDataDiff(ta.data, tb.data)
assert not diff.identical
assert diff.diff_column_count == (1, 3)
assert len(diff.common_columns) == 1
assert diff.common_column_names == set(['b'])
assert diff.diff_column_names == ([], ['A', 'C'])
assert diff.diff_ratio == 0
assert diff.diff_total == 0
report = diff.report()
assert ' Tables have different number of columns:' in report
assert ' a: 1\n b: 3' in report
def test_different_table_rows(self):
"""
Test tables that are otherwise identical but one has more rows than the
other.
"""
ca1 = Column('A', format='L', array=[True, False])
cb1 = Column('B', format='L', array=[True, False])
ca2 = Column('A', format='L', array=[True, False, True])
cb2 = Column('B', format='L', array=[True, False, True])
ta = BinTableHDU.from_columns([ca1, cb1])
tb = BinTableHDU.from_columns([ca2, cb2])
diff = TableDataDiff(ta.data, tb.data)
assert not diff.identical
assert diff.diff_column_count == ()
assert len(diff.common_columns) == 2
assert diff.diff_rows == (2, 3)
assert diff.diff_values == []
report = diff.report()
assert 'Table rows differ' in report
assert 'a: 2' in report
assert 'b: 3' in report
        assert 'No further data comparison performed.' in report
def test_different_table_data(self):
"""
Test diffing table data on columns of several different data formats
and dimensions.
"""
ca1 = Column('A', format='L', array=[True, False])
ca2 = Column('B', format='X', array=[[0], [1]])
ca3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [4, 5, 6, 7]])
ca4 = Column('D', format='J', bscale=2.0, array=[0.0, 2.0])
ca5 = Column('E', format='A3', array=['abc', 'def'])
ca6 = Column('F', format='E', unit='m', array=[0.0, 1.0])
ca7 = Column('G', format='D', bzero=-0.1, array=[0.0, 1.0])
ca8 = Column('H', format='C', array=[0.0+1.0j, 2.0+3.0j])
ca9 = Column('I', format='M', array=[4.0+5.0j, 6.0+7.0j])
ca10 = Column('J', format='PI(2)', array=[[0, 1], [2, 3]])
cb1 = Column('A', format='L', array=[False, False])
cb2 = Column('B', format='X', array=[[0], [0]])
cb3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [5, 6, 7, 8]])
cb4 = Column('D', format='J', bscale=2.0, array=[2.0, 2.0])
cb5 = Column('E', format='A3', array=['abc', 'ghi'])
cb6 = Column('F', format='E', unit='m', array=[1.0, 2.0])
cb7 = Column('G', format='D', bzero=-0.1, array=[2.0, 3.0])
cb8 = Column('H', format='C', array=[1.0+1.0j, 2.0+3.0j])
cb9 = Column('I', format='M', array=[5.0+5.0j, 6.0+7.0j])
cb10 = Column('J', format='PI(2)', array=[[1, 2], [3, 4]])
ta = BinTableHDU.from_columns([ca1, ca2, ca3, ca4, ca5, ca6, ca7,
ca8, ca9, ca10])
tb = BinTableHDU.from_columns([cb1, cb2, cb3, cb4, cb5, cb6, cb7,
cb8, cb9, cb10])
diff = TableDataDiff(ta.data, tb.data, numdiffs=20)
assert not diff.identical
# The column definitions are the same, but not the column values
assert diff.diff_columns == ()
assert diff.diff_values[0] == (('A', 0), (True, False))
assert diff.diff_values[1] == (('B', 1), ([1], [0]))
assert diff.diff_values[2][0] == ('C', 1)
assert (diff.diff_values[2][1][0] == [[4, 5], [6, 7]]).all()
assert (diff.diff_values[2][1][1] == [[5, 6], [7, 8]]).all()
assert diff.diff_values[3] == (('D', 0), (0, 2.0))
assert diff.diff_values[4] == (('E', 1), ('def', 'ghi'))
assert diff.diff_values[5] == (('F', 0), (0.0, 1.0))
assert diff.diff_values[6] == (('F', 1), (1.0, 2.0))
assert diff.diff_values[7] == (('G', 0), (0.0, 2.0))
assert diff.diff_values[8] == (('G', 1), (1.0, 3.0))
assert diff.diff_values[9] == (('H', 0), (0.0+1.0j, 1.0+1.0j))
assert diff.diff_values[10] == (('I', 0), (4.0+5.0j, 5.0+5.0j))
assert diff.diff_values[11][0] == ('J', 0)
assert (diff.diff_values[11][1][0] == [0, 1]).all()
assert (diff.diff_values[11][1][1] == [1, 2]).all()
assert diff.diff_values[12][0] == ('J', 1)
assert (diff.diff_values[12][1][0] == [2, 3]).all()
assert (diff.diff_values[12][1][1] == [3, 4]).all()
assert diff.diff_total == 13
assert diff.diff_ratio == 0.65
report = diff.report()
assert ('Column A data differs in row 0:\n'
' a> True\n'
' b> False') in report
assert ('...and at 13 more indices.\n'
' Column D data differs in row 0:') in report
assert ('13 different table data element(s) found (65.00% different)'
in report)
assert report.count('more indices') == 1
def test_identical_files_basic(self):
"""Test identicality of two simple, extensionless files."""
a = np.arange(100).reshape((10, 10))
hdu = PrimaryHDU(data=a)
hdu.writeto(self.temp('testa.fits'))
hdu.writeto(self.temp('testb.fits'))
diff = FITSDiff(self.temp('testa.fits'), self.temp('testb.fits'))
assert diff.identical
report = diff.report()
# Primary HDUs should contain no differences
assert 'Primary HDU' not in report
assert 'Extension HDU' not in report
assert 'No differences found.' in report
a = np.arange(10)
ehdu = ImageHDU(data=a)
diff = HDUDiff(ehdu, ehdu)
assert diff.identical
report = diff.report()
assert 'No differences found.' in report
def test_partially_identical_files1(self):
"""
Test files that have some identical HDUs but a different extension
count.
"""
a = np.arange(100).reshape((10, 10))
phdu = PrimaryHDU(data=a)
ehdu = ImageHDU(data=a)
hdula = HDUList([phdu, ehdu])
hdulb = HDUList([phdu, ehdu, ehdu])
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdu_count == (2, 3)
# diff_hdus should be empty, since the third extension in hdulb
# has nothing to compare against
assert diff.diff_hdus == []
report = diff.report()
assert 'Files contain different numbers of HDUs' in report
assert 'a: 2\n b: 3' in report
assert 'No differences found between common HDUs' in report
def test_partially_identical_files2(self):
"""
Test files that have some identical HDUs but one different HDU.
"""
a = np.arange(100).reshape((10, 10))
phdu = PrimaryHDU(data=a)
ehdu = ImageHDU(data=a)
ehdu2 = ImageHDU(data=(a + 1))
hdula = HDUList([phdu, ehdu, ehdu])
hdulb = HDUList([phdu, ehdu2, ehdu])
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdu_count == ()
assert len(diff.diff_hdus) == 1
assert diff.diff_hdus[0][0] == 1
hdudiff = diff.diff_hdus[0][1]
assert not hdudiff.identical
assert hdudiff.diff_extnames == ()
assert hdudiff.diff_extvers == ()
assert hdudiff.diff_extension_types == ()
assert hdudiff.diff_headers.identical
assert hdudiff.diff_data is not None
datadiff = hdudiff.diff_data
assert isinstance(datadiff, ImageDataDiff)
assert not datadiff.identical
assert datadiff.diff_dimensions == ()
assert (datadiff.diff_pixels ==
[((0, y), (y, y + 1)) for y in range(10)])
assert datadiff.diff_ratio == 1.0
assert datadiff.diff_total == 100
report = diff.report()
# Primary HDU and 2nd extension HDU should have no differences
assert 'Primary HDU' not in report
assert 'Extension HDU 2' not in report
assert 'Extension HDU 1' in report
assert 'Headers contain differences' not in report
assert 'Data contains differences' in report
for y in range(10):
assert 'Data differs at [{}, 1]'.format(y + 1) in report
assert '100 different pixels found (100.00% different).' in report
def test_partially_identical_files3(self):
"""
Test files that have some identical HDUs but a different extension
name.
"""
phdu = PrimaryHDU()
ehdu = ImageHDU(name='FOO')
hdula = HDUList([phdu, ehdu])
ehdu = BinTableHDU(name='BAR')
ehdu.header['EXTVER'] = 2
ehdu.header['EXTLEVEL'] = 3
hdulb = HDUList([phdu, ehdu])
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdus[0][0] == 1
hdu_diff = diff.diff_hdus[0][1]
assert hdu_diff.diff_extension_types == ('IMAGE', 'BINTABLE')
assert hdu_diff.diff_extnames == ('FOO', 'BAR')
assert hdu_diff.diff_extvers == (1, 2)
assert hdu_diff.diff_extlevels == (1, 3)
report = diff.report()
assert 'Extension types differ' in report
assert 'a: IMAGE\n b: BINTABLE' in report
assert 'Extension names differ' in report
assert 'a: FOO\n b: BAR' in report
assert 'Extension versions differ' in report
assert 'a: 1\n b: 2' in report
assert 'Extension levels differ' in report
        assert 'a: 1\n b: 3' in report
def test_diff_nans(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/204"""
# First test some arrays that should be equivalent....
arr = np.empty((10, 10), dtype=np.float64)
arr[:5] = 1.0
arr[5:] = np.nan
arr2 = arr.copy()
table = np.rec.array([(1.0, 2.0), (3.0, np.nan), (np.nan, np.nan)],
names=['cola', 'colb']).view(fits.FITS_rec)
table2 = table.copy()
assert ImageDataDiff(arr, arr2).identical
assert TableDataDiff(table, table2).identical
# Now let's introduce some differences, where there are nans and where
# there are not nans
arr2[0][0] = 2.0
arr2[5][0] = 2.0
table2[0][0] = 2.0
table2[1][1] = 2.0
diff = ImageDataDiff(arr, arr2)
assert not diff.identical
assert diff.diff_pixels[0] == ((0, 0), (1.0, 2.0))
assert diff.diff_pixels[1][0] == (5, 0)
assert np.isnan(diff.diff_pixels[1][1][0])
assert diff.diff_pixels[1][1][1] == 2.0
diff = TableDataDiff(table, table2)
assert not diff.identical
assert diff.diff_values[0] == (('cola', 0), (1.0, 2.0))
assert diff.diff_values[1][0] == ('colb', 1)
assert np.isnan(diff.diff_values[1][1][0])
assert diff.diff_values[1][1][1] == 2.0
def test_diff_types(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4122
"""
f = io.StringIO()
a = 1.0
b = '1.0'
report_diff_values(f, a, b)
out = f.getvalue()
assert out.lstrip('u') == " (float) a> 1.0\n (str) b> '1.0'\n ? + +\n"
def test_float_comparison(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/21
"""
f = io.StringIO()
a = np.float32(0.029751372)
b = np.float32(0.029751368)
report_diff_values(f, a, b)
out = f.getvalue()
# This test doesn't care about what the exact output is, just that it
# did show a difference in their text representations
assert 'a>' in out
assert 'b>' in out
def test_file_output_from_path_string(self):
outpath = self.temp('diff_output.txt')
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
report_as_string = diffobj.report()
assert open(outpath).read() == report_as_string
def test_file_output_overwrite_safety(self):
outpath = self.temp('diff_output.txt')
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
with pytest.raises(IOError):
diffobj.report(fileobj=outpath)
def test_file_output_overwrite_success(self):
outpath = self.temp('diff_output.txt')
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
report_as_string = diffobj.report()
diffobj.report(fileobj=outpath, overwrite=True)
assert open(outpath).read() == report_as_string, ("overwritten output "
"file is not identical to report string")
def test_file_output_overwrite_vs_clobber(self):
"""Verify uses of clobber and overwrite."""
outpath = self.temp('diff_output.txt')
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
report_as_string = diffobj.report()
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
diffobj.report(fileobj=outpath, clobber=True)
assert len(warning_lines) == 0
# assert warning_lines[0].category == AstropyDeprecationWarning
# assert (str(warning_lines[0].message) == '"clobber" was '
# 'deprecated in version 1.3 and will be removed in a '
# 'future version. Use argument "overwrite" instead.')
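# Illustrative usage sketch (not part of the test suite above): a minimal
# example of how FITSDiff is typically driven against real files.  The file
# names and the ignored keyword are placeholders chosen for the example.
if __name__ == '__main__':
    from astropy.io import fits
    example_diff = fits.FITSDiff('fileA.fits', 'fileB.fits', rtol=1e-6,
                                 ignore_keywords=['DATE'])
    if example_diff.identical:
        print('Files are identical within the given tolerances.')
    else:
        print(example_diff.report())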
|
{
"content_hash": "87582a4cbae5751f8a45880fb9139d00",
"timestamp": "",
"source": "github",
"line_count": 814,
"max_line_length": 94,
"avg_line_length": 38.342751842751845,
"alnum_prop": 0.5456729999038801,
"repo_name": "joergdietrich/astropy",
"id": "790000b54a4e0905709a132c7468cbe9d0ee5b75",
"size": "31276",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astropy/io/fits/tests/test_diff.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "366874"
},
{
"name": "C++",
"bytes": "1825"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Jupyter Notebook",
"bytes": "62553"
},
{
"name": "Python",
"bytes": "7616749"
},
{
"name": "Shell",
"bytes": "425"
},
{
"name": "TeX",
"bytes": "778"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import os
import sys
from nose.tools import eq_, ok_
from fabric.state import env, output
from fabric.context_managers import (cd, settings, lcd, hide, shell_env, quiet,
warn_only, prefix, path)
from fabric.operations import run, local
import six
from six import BytesIO as StringIO
if six.PY3:
from .utils import mock_streams, FabricTest
from .server import server
else:
from utils import mock_streams, FabricTest
from server import server
#
# cd()
#
def test_error_handling():
"""
cd cleans up after itself even in case of an exception
"""
class TestException(Exception):
pass
try:
with cd('somewhere'):
raise TestException('Houston, we have a problem.')
except TestException:
pass
finally:
with cd('else'):
eq_(env.cwd, 'else')
def test_cwd_with_absolute_paths():
"""
cd() should append arg if non-absolute or overwrite otherwise
"""
existing = '/some/existing/path'
additional = 'another'
absolute = '/absolute/path'
with settings(cwd=existing):
with cd(absolute):
eq_(env.cwd, absolute)
with cd(additional):
eq_(env.cwd, existing + '/' + additional)
#
# prefix
#
def test_nested_prefix():
"""
prefix context managers can be created outside of the with block and nested
"""
cm1 = prefix('1')
cm2 = prefix('2')
with cm1:
with cm2:
eq_(env.command_prefixes, ['1', '2'])
#
# hide/show
#
def test_hide_show_exception_handling():
"""
hide()/show() should clean up OK if exceptions are raised
"""
try:
with hide('stderr'):
# now it's False, while the default is True
eq_(output.stderr, False)
raise Exception
except Exception:
# Here it should be True again.
# If it's False, this means hide() didn't clean up OK.
eq_(output.stderr, True)
#
# settings()
#
def test_setting_new_env_dict_key_should_work():
"""
Using settings() with a previously nonexistent key should work correctly
"""
key = 'thisshouldnevereverexistseriouslynow'
value = 'a winner is you'
with settings(**{key: value}):
ok_(key in env)
ok_(key not in env)
def test_settings():
"""
settings() should temporarily override env dict with given key/value pair
"""
env.testval = "outer value"
with settings(testval="inner value"):
eq_(env.testval, "inner value")
eq_(env.testval, "outer value")
def test_settings_with_multiple_kwargs():
"""
settings() should temporarily override env dict with given key/value pairS
"""
env.testval1 = "outer 1"
env.testval2 = "outer 2"
with settings(testval1="inner 1", testval2="inner 2"):
eq_(env.testval1, "inner 1")
eq_(env.testval2, "inner 2")
eq_(env.testval1, "outer 1")
eq_(env.testval2, "outer 2")
def test_settings_with_other_context_managers():
"""
    settings() should take other context managers, and use them with other
    overridden key/value pairs.
"""
env.testval1 = "outer 1"
prev_lcwd = env.lcwd
with settings(lcd("here"), testval1="inner 1"):
eq_(env.testval1, "inner 1")
ok_(env.lcwd.endswith("here")) # Should be the side-effect of adding cd to settings
    eq_(env.testval1, "outer 1")
eq_(env.lcwd, prev_lcwd)
def test_settings_clean_revert():
"""
settings(clean_revert=True) should only revert values matching input values
"""
env.modified = "outer"
env.notmodified = "outer"
with settings(
modified="inner",
notmodified="inner",
inner_only="only",
clean_revert=True
):
eq_(env.modified, "inner")
eq_(env.notmodified, "inner")
eq_(env.inner_only, "only")
env.modified = "modified internally"
eq_(env.modified, "modified internally")
ok_("inner_only" not in env)
#
# shell_env()
#
def test_shell_env():
"""
shell_env() sets the shell_env attribute in the env dict
"""
with shell_env(KEY="value"):
eq_(env.shell_env['KEY'], 'value')
eq_(env.shell_env, {})
class TestQuietAndWarnOnly(FabricTest):
@server()
@mock_streams('both')
def test_quiet_hides_all_output(self):
return True
# Sanity test - normally this is not empty
run("ls /simple")
ok_(sys.stdout.getvalue())
# Reset
sys.stdout = StringIO()
# Real test
with quiet():
run("ls /simple")
# Empty output
ok_(not sys.stdout.getvalue())
# Reset
sys.stdout = StringIO()
# Kwarg test
run("ls /simple", quiet=True)
ok_(not sys.stdout.getvalue())
@server(responses={'barf': [
"this is my stdout",
"this is my stderr",
1
]})
def test_quiet_sets_warn_only_to_true(self):
# Sanity test to ensure environment
with settings(warn_only=False):
with quiet():
eq_(run("barf").return_code, 1)
# Kwarg test
eq_(run("barf", quiet=True).return_code, 1)
@server(responses={'hrm': ["", "", 1]})
@mock_streams('both')
def test_warn_only_is_same_as_settings_warn_only(self):
with warn_only():
eq_(run("hrm").failed, True)
@server()
@mock_streams('both')
def test_warn_only_does_not_imply_hide_everything(self):
with warn_only():
run("ls /simple")
assert sys.stdout.getvalue().strip() != ""
# path() (distinct from shell_env)
class TestPathManager(FabricTest):
def setup(self):
super(TestPathManager, self).setup()
self.real = os.environ.get('PATH')
def via_local(self):
with hide('everything'):
return local("echo $PATH", capture=True)
def test_lack_of_path_has_default_local_path(self):
"""
No use of 'with path' == default local $PATH
"""
eq_(self.real, self.via_local())
def test_use_of_path_appends_by_default(self):
"""
'with path' appends by default
"""
with path('foo'):
eq_(self.via_local(), self.real + ":foo")
|
{
"content_hash": "44caf7acdc7253db6941be191efde94d",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 91,
"avg_line_length": 25.5609756097561,
"alnum_prop": 0.5888994910941476,
"repo_name": "pashinin/fabric",
"id": "9c3ffaca69bd48a2c5e7f2cc7f6cd82d27841f89",
"size": "6288",
"binary": false,
"copies": "1",
"ref": "refs/heads/p33",
"path": "tests/tes2t_context_managers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "15260"
},
{
"name": "Makefile",
"bytes": "2993"
},
{
"name": "Python",
"bytes": "443280"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
import multiprocessing
import pathos.multiprocessing as multi
def pdSafe(s):
    '''Transform name into a Pandas-safe name (i.e., dot-notation-accessible).'''
    # str.translate(None, deletechars) only exists on Python 2; plain
    # replace() works on both Python 2 and 3.
    for ch in '\\/ ?!@#$%^&*()-+=`~|][{}<>,':
        s = s.replace(ch, '')
    s = s.replace('.', '_')
    return s
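# Example (illustrative only): pdSafe('net.profit ($)/yr') returns
# 'net_profityr', which can then be used with DataFrame attribute access,
# e.g. df.net_profityr.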
#def applyParallel(groupedDF, func, cpucount=None, chunksize=None):
# '''User Pietro Battiston's solution from
# http://stackoverflow.com/questions/26187759/parallelize-apply-after-pandas-groupby
# '''
# if cpucount is None:
# cpucount = multiprocessing.cpu_count()
# # Python 3 only?: with multiprocessing.Pool(cpucount) as pool:
# pool = multiprocessing.Pool(cpucount)
# #try:
# ret_list = pool.map(func, [group for name, group in groupedDF], chunksize=chunksize)
# #except:
# # pool.terminate()
# # raise
# return pd.concat(ret_list)
def applyParallel(groupedDF, func, cpucount=None, chunksize=None, nice=False):
'''Combination of user Pietro Battiston's solution from
http://stackoverflow.com/a/29281494
and Mike McKerns answer at http://stackoverflow.com/a/21345423
This requires that the func return a DataFrame (or indexed Series, if
that's even possible?)
'''
if cpucount is None:
if nice:
# Be nice, taking over most but not all of available physical CPU's
cpucount = int(np.floor(multiprocessing.cpu_count()*0.85))
else:
# Be greedy, trying to take over *all* available CPU's
cpucount = int(np.ceil(multiprocessing.cpu_count()))
    # Create the pool once and let the context manager handle start-up and
    # cleanup; constructing a second ProcessingPool inside the block only
    # leaks an unmanaged pool.
    with multi.ProcessingPool(cpucount) as pool:
ret_list = pool.imap(func,
[group for name, group in groupedDF],
chunksize=chunksize)
#if isinstance(ret_list[0], pd.Series):
# outDF = pd.concat(ret_list, axis=1).T
# return outDF
# #outDF.index
#else:
return pd.concat(ret_list)
def applymapParallel(groupedDF, func, cpucount=None, chunksize=None):
'''Combination of user Pietro Battiston's solution from
http://stackoverflow.com/a/29281494
and Mike McKerns answer at http://stackoverflow.com/a/21345423
This differs from applyParallel in that the func should only return a
scalar or vector (unindexed) result.
TODO: make this work (only roughed out thus far!)
'''
raise NotImplementedError('This function has yet to be fully fleshed out.')
def metafunc(func):
def dropinfunc(idx_grp):
return {idx_grp[0]: func(idx_grp[1])}
if cpucount is None:
#cpucount = int(np.ceil(multiprocessing.cpu_count()/2.0*0.75))
cpucount = int(np.ceil(multiprocessing.cpu_count()))
with multi.ProcessingPool(cpucount) as pool:
pool = multi.ProcessingPool(cpucount)
# The following MUST be one of the ordered pool methods (either `map`
# or `imap`), # or else reattaching an index will be arbitrary
ret_list = pool.map(dropinfunc, [idx_grp for idx_grp in groupedDF], chunksize=chunksize)
if isinstance(ret_list[0], pd.Series):
outDF = pd.concat(ret_list, axis=1).T
# TODO: give it an index!
elif isinstance(ret_list[0], pd.DataFrame):
return pd.concat(ret_list)
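# Illustrative usage sketch: a minimal demonstration of applyParallel.  The
# column names and the doubling function are made up for the example; the
# only requirement is that the mapped function returns a DataFrame per group.
if __name__ == '__main__':
    df = pd.DataFrame({'grp': ['a', 'a', 'b', 'b'], 'val': [1, 2, 3, 4]})
    def _double(groupDF):
        out = groupDF.copy()
        out['val'] *= 2
        return out
    print(applyParallel(df.groupby('grp'), _double, cpucount=2))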
|
{
"content_hash": "64d91f08be1e2bc2080dabf79c9725f3",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 96,
"avg_line_length": 38.55813953488372,
"alnum_prop": 0.6504825090470446,
"repo_name": "jllanfranchi/pygeneric",
"id": "e3a0b1fd16c7dbdc5c38959cee71eb05ff0612be",
"size": "3347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandasUtils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "18292"
},
{
"name": "Python",
"bytes": "313385"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.backend.jvm.tasks.jvm_compile.execution_graph import (ExecutionFailure, ExecutionGraph,
Job, JobExistsError,
NoRootJobError, UnknownJobError)
class ImmediatelyExecutingPool(object):
def submit_async_work(self, work):
work.func(*work.args_tuples[0])
class PrintLogger(object):
def error(self, msg):
print(msg)
def debug(self, msg):
print(msg)
def passing_fn():
pass
def raising_fn():
raise Exception("I'm an error")
class ExecutionGraphTest(unittest.TestCase):
def setUp(self):
self.jobs_run = []
def execute(self, exec_graph):
exec_graph.execute(ImmediatelyExecutingPool(), PrintLogger())
def job(self, name, fn, dependencies, on_success=None, on_failure=None):
def recording_fn():
self.jobs_run.append(name)
fn()
return Job(name, recording_fn, dependencies, on_success, on_failure)
def test_single_job(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [])])
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A"])
def test_single_dependency(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B"]),
self.job("B", passing_fn, [])])
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["B", "A"])
def test_simple_binary_tree(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B", "C"]),
self.job("B", passing_fn, []),
self.job("C", passing_fn, [])])
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["B", "C", "A"])
def test_simple_linear_dependencies(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B"]),
self.job("B", passing_fn, ["C"]),
self.job("C", passing_fn, [])])
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["C", "B", "A"])
def test_simple_unconnected(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, []),
self.job("B", passing_fn, []),
])
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A", "B"])
def test_simple_unconnected_tree(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B"]),
self.job("B", passing_fn, []),
self.job("C", passing_fn, []),
])
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["B", "C", "A"])
def test_dependee_depends_on_dependency_of_its_dependency(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B", "C"]),
self.job("B", passing_fn, ["C"]),
self.job("C", passing_fn, []),
])
self.execute(exec_graph)
self.assertEqual(["C", "B", "A"], self.jobs_run)
def test_one_failure_raises_exception(self):
exec_graph = ExecutionGraph([self.job("A", raising_fn, [])])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual("Failed jobs: A", str(cm.exception))
def test_failure_of_dependency_does_not_run_dependents(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["F"]),
self.job("F", raising_fn, [])])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual(["F"], self.jobs_run)
self.assertEqual("Failed jobs: F", str(cm.exception))
def test_failure_of_dependency_does_not_run_second_order_dependents(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B"]),
self.job("B", passing_fn, ["F"]),
self.job("F", raising_fn, [])])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual(["F"], self.jobs_run)
self.assertEqual("Failed jobs: F", str(cm.exception))
def test_failure_of_one_leg_of_tree_does_not_cancel_other(self):
# TODO do we want this behavior, or do we want to fail fast on the first failed job?
exec_graph = ExecutionGraph([self.job("B", passing_fn, []),
self.job("F", raising_fn, ["B"]),
self.job("A", passing_fn, ["B"])])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual(["B", "F", "A"], self.jobs_run)
self.assertEqual("Failed jobs: F", str(cm.exception))
def test_failure_of_disconnected_job_does_not_cancel_non_dependents(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, []),
self.job("F", raising_fn, [])])
with self.assertRaises(ExecutionFailure):
self.execute(exec_graph)
self.assertEqual(["A", "F"], self.jobs_run)
def test_cycle_in_graph_causes_failure(self):
with self.assertRaises(NoRootJobError) as cm:
ExecutionGraph([self.job("A", passing_fn, ["B"]),
self.job("B", passing_fn, ["A"])])
self.assertEqual(
"Unexecutable graph: All scheduled jobs have dependencies. "
"There must be a circular dependency.",
str(cm.exception))
def test_non_existent_dependency_causes_failure(self):
with self.assertRaises(UnknownJobError) as cm:
ExecutionGraph([self.job("A", passing_fn, []),
self.job("B", passing_fn, ["Z"])])
self.assertEqual("Unexecutable graph: Undefined dependencies u'Z'", str(cm.exception))
def test_on_success_callback_raises_error(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [], on_success=raising_fn)])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual("Error in on_success for A: I'm an error", str(cm.exception))
def test_on_failure_callback_raises_error(self):
exec_graph = ExecutionGraph([self.job("A", raising_fn, [], on_failure=raising_fn)])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual("Error in on_failure for A: I'm an error", str(cm.exception))
def test_same_key_scheduled_twice_is_error(self):
with self.assertRaises(JobExistsError) as cm:
ExecutionGraph([self.job("Same", passing_fn, []),
self.job("Same", passing_fn, [])])
self.assertEqual("Unexecutable graph: Job already scheduled u'Same'", str(cm.exception))
|
{
"content_hash": "5de9f05573f67394e25b46ef9fb9b4ea",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 98,
"avg_line_length": 35.260416666666664,
"alnum_prop": 0.5921713441654357,
"repo_name": "digwanderlust/pants",
"id": "96375b1be138f7938d0887a21dc6d69f90a997fc",
"size": "6917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/tasks/test_execution_graph.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11139"
},
{
"name": "GAP",
"bytes": "4818"
},
{
"name": "HTML",
"bytes": "68162"
},
{
"name": "Java",
"bytes": "310901"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "7038"
},
{
"name": "Python",
"bytes": "3049918"
},
{
"name": "Scala",
"bytes": "77693"
},
{
"name": "Shell",
"bytes": "47201"
},
{
"name": "Thrift",
"bytes": "2824"
}
],
"symlink_target": ""
}
|
''' Silta STM32F407 Discovery Bridge
--- Supported Pins
I2C:
* PB6 - I2C1 SCL
* PB7 - I2C1 SDA (optional)
* PB8 - I2C1 SCL (optional)
* PB9 - I2C1 SDA
SPI:
* PA5 - SPI1 SCK
* PA6 - SPI1 MISO
* PA7 - SPI1 MOSI
ADC:
* PA0 - ADC1_0
* PA1 - ADC1_1
* PA2 - ADC1_2
* PA3 - ADC1_3
* PA4 - ADC1_4 (Will disable DAC)
* PA5 - ADC1_5 (Will disable DAC)
* PA6 - ADC1_6
* PA7 - ADC1_7
* PB0 - ADC1_8
* PB1 - ADC1_9
* PC0 - ADC1_10
* PC1 - ADC1_11
* PC2 - ADC1_12
* PC3 - ADC1_13
* PC4 - ADC1_14
* PC5 - ADC1_15
DAC:
* PA4 - DAC1
* PA5 - DAC2
PWM:
NOTE: PWM is currently locked at 10ms period, mainly for use with servos.
* PE5
* PE6
GPIO:
Most other pins in ports A-E should work as GPIOs
Notable/useful ones:
* PD12 - Green LED
* PD13 - Orange LED
* PD14 - Red LED
'''
import re
import string
import serial
PIN_RE = re.compile(r'P([A-E])([0-9]+)', re.IGNORECASE)
def _get_pin(raw_str):
''' Parse and validate pin definition
Args:
raw_str: string of the form PXY
Returns:
str, int
'''
match = PIN_RE.search(raw_str)
if not match:
raise ValueError(
'Invalid pin definition. Pins are defined as '
'PXY where X is A-E and Y is 0-15 (e.g. PB5)')
port, pin = match.groups()
pin = int(pin)
if pin > 15:
raise ValueError('Invalid pin. Should be a number from 0-15')
return port, pin
class bridge(object):
''' Silta STM32F407 Discovery Bridge '''
__pinModes = {
'input': 'in',
'output': 'outpp',
'output-od': 'outod',
'analog': 'analog'
}
__pullModes = {
'up': 'pullup',
'down': 'pulldown',
'none': 'nopull'
}
__adcs = {}
__dacs = {
'PA4': 0,
'PA5': 1
}
__pwms = {
'PE5': 0,
'PE6': 1
}
__ADC_MAX_VOLTAGE = 3.0
__ADC_MAX_VAL = 4095
__DAC_MAX_VOLTAGE = 3.0
__DAC_MAX_VAL = 4095
DEBUG = False
# Hardcoding until we can read values back from device
__CMD_MAX_STR_LEN = 4095
__SPI_MAX_BYTES = 1024
__I2C_MAX_BYTES = 1024
    PIN = [1 << i for i in range(16)]  # one bit per pin 0-15
def __init__(self, serial_device, baud_rate=None):
''' Initialize Silta STM32F407 Bridge
Args:
USB serial device path (e.g. /dev/ttyACMX)
'''
self.stream = None
self.lastcspin = None
try:
self.stream = serial.Serial()
self.stream.port = serial_device
self.stream.timeout = 0.1
if baud_rate:
self.stream.baudrate = baud_rate
self.stream.open()
except OSError:
raise IOError('could not open ' + serial_device)
if self.stream:
self.stream.flush()
# Flush any remaining data in the silta's buffer
self.__send_cmd('\n')
# Get device serial number and save it
line = self.__send_cmd('sn\n')
result = line.strip().split(' ')
if result[0] == 'OK':
self.serial_number = ''.join(result[1:])
else:
self.serial_number = None
print('Warning: Could not read device serial number.')
print('You might want to update firmware on your board')
            # Get device firmware version and save it
line = self.__send_cmd('version\n')
result = line.strip().split(' ')
if result[0] == 'OK':
self.firmware_version = result[1]
else:
self.firmware_version = None
print('Warning: Could not read device firmware version.')
print('You might want to update firmware on your board')
def close(self):
''' Disconnect from USB-serial device. '''
self.stream.close()
# Send terminal command and wait for response
def __send_cmd(self, cmd):
if (len(cmd) + 1) > self.__CMD_MAX_STR_LEN:
raise RuntimeError('Command string too long')
self.stream.write('{}\n'.format(cmd).encode())
if self.DEBUG is True:
print('CMD : {}'.format(cmd))
line = self.stream.readline()
if self.DEBUG is True:
print('RESP: {}'.format(line))
return line.decode()
def i2c_speed(self, speed):
''' Alias of i2c1_speed method '''
return self.i2c1_speed(speed)
# Set I2C Speed
def i2c1_speed(self, speed):
''' Set I2C speed in Hz. '''
cmd = 'config i2cspeed ' + str(speed)
line = self.__send_cmd(cmd)
result = line.strip().split(' ')
if result[0] == 'OK':
return True
else:
return False
# Set I2C pins
def i2c1_pins(self, pins):
''' Set I2C1 pins on GPIOB
Args:
pins: A 32 bit integer, bitwise selection of pins to use for i2c1
peripheral
Example:
Select PB6 and PB9:
my_bridge.i2c1_pins(my_bridge.PIN[6] + my_bridge.PIN[9])
Select PB7 and PB8:
my_bridge.i2c1_pins(my_bridge.PIN[7] | my_bridge.PIN[8])
OR
my_bridge.i2c1_pins(128 | 256)
OR
my_bridge.i2c1_pins(0x180)
'''
cmd = 'config i2cpins ' + str(pins)
line = self.__send_cmd(cmd)
result = line.strip().split(' ')
if result[0] == 'OK':
return True
else:
return False
def i2c(self, addr, rlen, wbytes=[]):
''' Alias of i2c1 method '''
return self.i2c1(addr, rlen, wbytes)
# I2C Transaction (wbytes is a list of bytes to tx)
def i2c1(self, addr, rlen, wbytes=[]):
''' I2C Transaction (write-then-read)
Args:
addr: 8 bit I2C address
rlen: Number of bytes to read
wbytes: List of bytes to write
Return value:
Integer with error code
or
List with read bytes (or empty list if write-only command)
'''
if len(wbytes) > self.__I2C_MAX_BYTES:
raise ValueError('wbytes too long. Max:', self.__I2C_MAX_BYTES)
if rlen > self.__I2C_MAX_BYTES:
raise ValueError('rlen too long. Max:', self.__I2C_MAX_BYTES)
rbytes = []
cmd = 'i2c ' + format(addr, '02X') + ' ' + str(rlen)
for byte in wbytes:
cmd += format(byte, ' 02X')
line = self.__send_cmd(cmd)
result = line.strip().split(' ')
if result[0] == 'OK':
for byte in result[1:]:
rbytes.append(int(byte, 16))
else:
rbytes = int(result[1])
return rbytes
# Set the spi CS line to use on the next transaction
def __set_spi_cs(self, cspin):
''' Select SPI chip select pin for next transaction '''
# Only configure CS if we haven't already
if self.lastcspin != cspin:
self.lastcspin = cspin
port, pin = _get_pin(cspin)
cmd = 'spics ' + port + ' ' + str(pin)
line = self.__send_cmd(cmd)
result = line.strip().split(' ')
if result[0] != 'OK':
raise ValueError('Unable to configure SPI CS pin')
# SPI Transaction (wbytes is list of bytes)
def spi(self, cspin, wbytes=[]):
''' SPI Transaction
Args:
cspin: Chip/Slave select pin for transaction
wbytes: List of bytes to write out
Returns:
Integer error code
or
List of read bytes
'''
if len(wbytes) > self.__SPI_MAX_BYTES:
raise ValueError('wbytes too long. Max:', self.__SPI_MAX_BYTES)
rbytes = []
# Make sure the CS pin is selected
self.__set_spi_cs(cspin)
cmd = 'spi'
for byte in wbytes:
cmd += format(byte, ' 02X')
line = self.__send_cmd(cmd)
result = line.strip().split(' ')
if result[0] == 'OK':
for byte in result[1:]:
rbytes.append(int(byte, 16))
else:
rbytes = int(result[1])
return rbytes
def spicfg(self, speed, cpol, cpha):
''' SPI Configuration
Args:
speed: SPI Speed in Hz
Supported speeds: 42000000, 21000000, 10500000, 5250000,
2625000, 1312500, 656250, 328125
cpol: Clock polarity
cpha: Clock phase
Returns:
True for success
or
False for failure
'''
cmd = 'spicfg {} {} {}'.format(speed, int(cpol) & 1, int(cpha) & 1)
line = self.__send_cmd(cmd)
result = line.strip().split(' ')
if result[0] == 'OK':
return True
else:
return False
# Configure GPIO as input/output/etc
def gpiocfg(self, name, mode='input', pull=None):
''' GPIO Configuration
Args:
name: Pin name with format P<port><pin> (e.g. PA3, PD11, PB0)
mode: Pin mode
Available modes:
input - Digital Input
output - Push-pull output
output-od - Open drain output
analog - Analog input
pull: Pull-resistor
None (default) - No pull
up - Pull-up
down - Pull-down
'''
port, pin = _get_pin(name)
if mode not in self.__pinModes:
            raise ValueError('Invalid pin mode. Valid modes: <' +
                             '|'.join(self.__pinModes.keys()) + '>')
        if pull is not None and pull not in self.__pullModes:
            raise ValueError('Invalid pull mode. Valid modes: <' +
                             '|'.join(self.__pullModes.keys()) + '>')
cmd = 'gpiocfg ' + port + ' ' + str(pin) + ' ' + self.__pinModes[mode]
if pull is not None:
cmd += ' ' + self.__pullModes[pull]
line = self.__send_cmd(cmd)
result = line.strip().split(' ')
if result[0] != 'OK':
print("Error configuring pin")
# Read/write gpio value
def gpio(self, name, value=None):
''' Read/Write GPIO (Digital only for now)
Args:
name: Pin name (e.g. PA3, PD11, PB0)
value: (If setting) - 0 or 1
Returns:
            None - if set was successful
None - if get failed
Integer - pin value
'''
port, pin = _get_pin(name)
cmd = 'gpio ' + port + ' ' + str(pin)
if value is not None:
cmd += ' ' + str(value)
line = self.__send_cmd(cmd)
result = line.strip().split(' ')
if result[0] == 'OK':
if value is not None:
return
else:
return int(result[1])
else:
return None
# 'Private' function to get an ADC number from a port + pin combination
def __adc_get_num(self, name):
''' Get ADC number from pin name '''
port, pin = _get_pin(name)
cmd = 'adcnum ' + port + ' ' + str(pin)
line = self.__send_cmd(cmd)
result = line.strip().split(' ')
if result[0] == 'OK':
self.__adcs[name] = int(result[1])
else:
self.__adcs[name] = None
# Read adc pin
def adc(self, name):
''' Read ADC pin
Args:
name: Pin name
Returns:
None - if read failed
float - Pin value in volts
'''
name = name.upper()
# Get adc number from port+pin and save it
if name not in self.__adcs:
self.__adc_get_num(name)
if self.__adcs[name] is None:
raise ValueError('Not an ADC pin')
cmd = 'adc ' + str(self.__adcs[name])
line = self.__send_cmd(cmd)
result = line.strip().split(' ')
if result[0] == 'OK':
return int(result[1]) * self.__ADC_MAX_VOLTAGE/self.__ADC_MAX_VAL
else:
return None
def dac_enable(self):
''' Enable DACs
Returns:
None - Failed setting DAC value
True - Value set successfully
'''
line = self.__send_cmd('dacenable')
result = line.strip().split(' ')
if result[0] == 'OK':
return True
else:
return None
# Set DAC output for pin
def dac(self, name, voltage):
''' Set DAC Output
Args:
name: DAC pin
voltage: Voltage setting for pin
Returns:
None - Failed setting DAC value
True - Value set successfully
'''
name = name.upper()
if name not in self.__dacs:
raise ValueError('Not a DAC pin')
if voltage > self.__DAC_MAX_VOLTAGE:
voltage = self.__DAC_MAX_VOLTAGE
dac_val = int(voltage/self.__DAC_MAX_VOLTAGE * self.__DAC_MAX_VAL)
line = self.__send_cmd('dac {} {}'.format(self.__dacs[name], dac_val))
result = line.strip().split(' ')
if result[0] == 'OK':
return True
else:
return None
# Set PWM output for pin
def pwm(self, name, duty_cycle):
''' Set PWM Output
Args:
name: PWM pin
duty_cycle: Value from 0-1
Returns:
None - Failed setting PWM value
True - Value set successfully
'''
name = name.upper()
if name not in self.__pwms:
raise ValueError('Not a PWM pin')
if duty_cycle < 0 or duty_cycle > 1:
raise ValueError('Duty cycle must be between 0 and 1')
period = 10000
val = int(period * duty_cycle)
line = self.__send_cmd('pwm {} {}'.format(self.__pwms[name], val))
result = line.strip().split(' ')
if result[0] == 'OK':
return True
else:
return None
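# Illustrative usage sketch.  The serial device path, the I2C address/register
# and the pin choices below are assumptions made for the example; adjust them
# for your own setup.
if __name__ == '__main__':
    b = bridge('/dev/ttyACM0')
    b.gpiocfg('PD12', mode='output')    # Green LED on the Discovery board
    b.gpio('PD12', 1)                   # Turn the LED on
    print('PA0 reads {} V'.format(b.adc('PA0')))
    # Write register address 0x00, then read two bytes back from a
    # hypothetical I2C device at (8-bit) address 0x90
    print(b.i2c(0x90, 2, [0x00]))
    b.pwm('PE5', 0.15)                  # 15% duty: ~1.5 ms pulse of the 10 ms period
    b.close()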
|
{
"content_hash": "c7ee340714eec557f32fbf2a4ffce555",
"timestamp": "",
"source": "github",
"line_count": 560,
"max_line_length": 79,
"avg_line_length": 25.401785714285715,
"alnum_prop": 0.49729349736379613,
"repo_name": "alvarop/silta",
"id": "0dba9155c0a7f1beec4d4145378ff18e71292484",
"size": "14225",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sw/silta/stm32f407.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "3937527"
},
{
"name": "HTML",
"bytes": "204656"
},
{
"name": "Makefile",
"bytes": "9030"
},
{
"name": "Python",
"bytes": "34464"
}
],
"symlink_target": ""
}
|
import io
import json
import os
import unittest
from . import flag
from .fhirdate import FHIRDate
class FlagTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Flag", js["resourceType"])
return flag.Flag(js)
def testFlag1(self):
inst = self.instantiate_from("flag-example-encounter.json")
self.assertIsNotNone(inst, "Must have instantiated a Flag instance")
self.implFlag1(inst)
js = inst.as_json()
self.assertEqual("Flag", js["resourceType"])
inst2 = flag.Flag(js)
self.implFlag1(inst2)
def implFlag1(self, inst):
self.assertEqual(inst.category.coding[0].code, "infection")
self.assertEqual(inst.category.coding[0].display, "Infection Control Level")
self.assertEqual(inst.category.coding[0].system, "http://example.org/local")
self.assertEqual(inst.code.coding[0].code, "l3")
self.assertEqual(inst.code.coding[0].display, "Follow Level 3 Protocol")
self.assertEqual(inst.code.coding[0].system, "http://example.org/local/if1")
self.assertEqual(inst.id, "example-encounter")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Follow Infection Control Level 3 Protocol</div>")
self.assertEqual(inst.text.status, "generated")
def testFlag2(self):
inst = self.instantiate_from("flag-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Flag instance")
self.implFlag2(inst)
js = inst.as_json()
self.assertEqual("Flag", js["resourceType"])
inst2 = flag.Flag(js)
self.implFlag2(inst2)
def implFlag2(self, inst):
self.assertEqual(inst.category.coding[0].code, "admin")
self.assertEqual(inst.category.coding[0].display, "Admin")
self.assertEqual(inst.category.coding[0].system, "http://example.org/local")
self.assertEqual(inst.category.text, "admin")
self.assertEqual(inst.code.coding[0].code, "bigdog")
self.assertEqual(inst.code.coding[0].display, "Big dog")
self.assertEqual(inst.code.coding[0].system, "http://example.org/local")
self.assertEqual(inst.code.text, "Patient has a big dog at his home. Always always wear a suit of armor or take other active counter-measures")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier[0].value, "12345")
self.assertEqual(inst.period.end.date, FHIRDate("2016-12-01").date)
self.assertEqual(inst.period.end.as_json(), "2016-12-01")
self.assertEqual(inst.period.start.date, FHIRDate("2015-01-17").date)
self.assertEqual(inst.period.start.as_json(), "2015-01-17")
self.assertEqual(inst.status, "inactive")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Large Dog warning for Peter Patient</div>")
self.assertEqual(inst.text.status, "generated")
|
{
"content_hash": "d91ee370f170dd3d64a5b18da30c21b4",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 151,
"avg_line_length": 47.5735294117647,
"alnum_prop": 0.6593508500772798,
"repo_name": "all-of-us/raw-data-repository",
"id": "63f53a57f519c1d9f12ae17310dda2c472cae7d5",
"size": "3361",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/lib_fhir/fhirclient_3_0_0/models/flag_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
}
|
"""Support for RFXtrx lights."""
import logging
import voluptuous as vol
from homeassistant.components import rfxtrx
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
Light,
)
from homeassistant.const import CONF_NAME
from homeassistant.helpers import config_validation as cv
from . import (
CONF_AUTOMATIC_ADD,
CONF_DEVICES,
CONF_FIRE_EVENT,
CONF_SIGNAL_REPETITIONS,
DEFAULT_SIGNAL_REPETITIONS,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_DEVICES, default={}): {
cv.string: vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
}
)
},
vol.Optional(CONF_AUTOMATIC_ADD, default=False): cv.boolean,
vol.Optional(
CONF_SIGNAL_REPETITIONS, default=DEFAULT_SIGNAL_REPETITIONS
): vol.Coerce(int),
}
)
SUPPORT_RFXTRX = SUPPORT_BRIGHTNESS
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the RFXtrx platform."""
import RFXtrx as rfxtrxmod
lights = rfxtrx.get_devices_from_config(config, RfxtrxLight)
add_entities(lights)
def light_update(event):
"""Handle light updates from the RFXtrx gateway."""
if (
not isinstance(event.device, rfxtrxmod.LightingDevice)
or not event.device.known_to_be_dimmable
):
return
new_device = rfxtrx.get_new_device(event, config, RfxtrxLight)
if new_device:
add_entities([new_device])
rfxtrx.apply_received_command(event)
# Subscribe to main RFXtrx events
if light_update not in rfxtrx.RECEIVED_EVT_SUBSCRIBERS:
rfxtrx.RECEIVED_EVT_SUBSCRIBERS.append(light_update)
class RfxtrxLight(rfxtrx.RfxtrxDevice, Light):
"""Representation of a RFXtrx light."""
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_RFXTRX
def turn_on(self, **kwargs):
"""Turn the light on."""
brightness = kwargs.get(ATTR_BRIGHTNESS)
if brightness is None:
self._brightness = 255
self._send_command("turn_on")
else:
self._brightness = brightness
_brightness = brightness * 100 // 255
self._send_command("dim", _brightness)
|
{
"content_hash": "cdad2fc2918006e66e9a942cc890b574",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 77,
"avg_line_length": 28.138297872340427,
"alnum_prop": 0.6287334593572779,
"repo_name": "fbradyirl/home-assistant",
"id": "d2d2e842c0aa8c2511cec6a42b9142fe9607c24b",
"size": "2645",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/rfxtrx/light.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
}
|
"""Base test cases for all neutron tests.
"""
import contextlib
import gc
import logging as std_logging
import os
import os.path
import random
import weakref
import eventlet.timeout
import fixtures
import mock
from oslo_concurrency.fixture import lockutils
from oslo_config import cfg
from oslo_messaging import conffixture as messaging_conffixture
from oslo_utils import strutils
import six
import testtools
from neutron.agent.linux import external_process
from neutron.callbacks import manager as registry_manager
from neutron.callbacks import registry
from neutron.common import config
from neutron.common import rpc as n_rpc
from neutron.db import agentschedulers_db
from neutron import manager
from neutron import policy
from neutron.tests import fake_notifier
from neutron.tests import post_mortem_debug
from neutron.tests import tools
CONF = cfg.CONF
CONF.import_opt('state_path', 'neutron.common.config')
LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
ROOTDIR = os.path.dirname(__file__)
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
return os.path.join(ETCDIR, *p)
def fake_use_fatal_exceptions(*args):
return True
def fake_consume_in_threads(self):
return []
def get_rand_name(max_length=None, prefix='test'):
"""Return a random string.
    The string will start with 'prefix' and will be exactly 'max_length'
    characters long. If 'max_length' is None, then exactly 8 random
    characters, each hexadecimal, will be added. If len(prefix) >= max_length,
    ValueError will be raised to indicate the problem.
"""
if max_length:
length = max_length - len(prefix)
if length <= 0:
raise ValueError("'max_length' must be bigger than 'len(prefix)'.")
suffix = ''.join(str(random.randint(0, 9)) for i in range(length))
else:
suffix = hex(random.randint(0x10000000, 0x7fffffff))[2:]
return prefix + suffix
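# Example (illustrative): get_rand_name(12, 'test-') could return
# 'test-4719305' (the 5-character prefix plus 7 random digits, 12 characters
# in total), while get_rand_name(prefix='port') appends 8 random hexadecimal
# characters, e.g. 'port5f3a91c2'.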
def bool_from_env(key, strict=False, default=False):
value = os.environ.get(key)
return strutils.bool_from_string(value, strict=strict, default=default)
def get_test_timeout(default=0):
    return int(os.environ.get('OS_TEST_TIMEOUT', default))
class AttributeDict(dict):
"""
Provide attribute access (dict.key) to dictionary values.
"""
def __getattr__(self, name):
"""Allow attribute access for all keys in the dict."""
if name in self:
return self[name]
raise AttributeError(_("Unknown attribute '%s'.") % name)
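# Example (illustrative): AttributeDict({'name': 'net0', 'mtu': 1500}).mtu
# returns 1500, while accessing an absent key (e.g. `.missing`) raises
# AttributeError rather than KeyError.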
class DietTestCase(testtools.TestCase):
"""Same great taste, less filling.
BaseTestCase is responsible for doing lots of plugin-centric setup
that not all tests require (or can tolerate). This class provides
only functionality that is common across all tests.
"""
def setUp(self):
super(DietTestCase, self).setUp()
# Configure this first to ensure pm debugging support for setUp()
debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER')
if debugger:
self.addOnException(post_mortem_debug.get_exception_handler(
debugger))
# Make sure we see all relevant deprecation warnings when running tests
self.useFixture(tools.WarningsFixture())
if bool_from_env('OS_DEBUG'):
_level = std_logging.DEBUG
else:
_level = std_logging.INFO
capture_logs = bool_from_env('OS_LOG_CAPTURE')
if not capture_logs:
std_logging.basicConfig(format=LOG_FORMAT, level=_level)
self.log_fixture = self.useFixture(
fixtures.FakeLogger(
format=LOG_FORMAT,
level=_level,
nuke_handlers=capture_logs,
))
test_timeout = get_test_timeout()
if test_timeout == -1:
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
# If someone does use tempfile directly, ensure that it's cleaned up
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.addCleanup(mock.patch.stopall)
if bool_from_env('OS_STDOUT_CAPTURE'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if bool_from_env('OS_STDERR_CAPTURE'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.addOnException(self.check_for_systemexit)
self.orig_pid = os.getpid()
def check_for_systemexit(self, exc_info):
if isinstance(exc_info[1], SystemExit):
if os.getpid() != self.orig_pid:
# Subprocess - let it just exit
raise
# This makes sys.exit(0) still a failure
self.force_failure = True
@contextlib.contextmanager
def assert_max_execution_time(self, max_execution_time=5):
with eventlet.timeout.Timeout(max_execution_time, False):
yield
return
self.fail('Execution of this test timed out')
def assertOrderedEqual(self, expected, actual):
expect_val = self.sort_dict_lists(expected)
actual_val = self.sort_dict_lists(actual)
self.assertEqual(expect_val, actual_val)
def sort_dict_lists(self, dic):
for key, value in six.iteritems(dic):
if isinstance(value, list):
dic[key] = sorted(value)
elif isinstance(value, dict):
dic[key] = self.sort_dict_lists(value)
return dic
def assertDictSupersetOf(self, expected_subset, actual_superset):
"""Checks that actual dict contains the expected dict.
After checking that the arguments are of the right type, this checks
that each item in expected_subset is in, and matches, what is in
actual_superset. Separate tests are done, so that detailed info can
be reported upon failure.
"""
if not isinstance(expected_subset, dict):
self.fail("expected_subset (%s) is not an instance of dict" %
type(expected_subset))
if not isinstance(actual_superset, dict):
self.fail("actual_superset (%s) is not an instance of dict" %
type(actual_superset))
for k, v in expected_subset.items():
self.assertIn(k, actual_superset)
self.assertEqual(v, actual_superset[k],
"Key %(key)s expected: %(exp)r, actual %(act)r" %
{'key': k, 'exp': v, 'act': actual_superset[k]})
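    # Illustrative sketch (editorial addition) of assertDictSupersetOf:
    #
    #   self.assertDictSupersetOf({'a': 1}, {'a': 1, 'b': 2})  # passes
    #   self.assertDictSupersetOf({'a': 1}, {'a': 2, 'b': 2})  # fails: value mismatch for 'a'
    #   self.assertDictSupersetOf({'c': 3}, {'a': 1})          # fails: 'c' not in superset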
class ProcessMonitorFixture(fixtures.Fixture):
"""Test fixture to capture and cleanup any spawn process monitor."""
def _setUp(self):
self.old_callable = (
external_process.ProcessMonitor._spawn_checking_thread)
p = mock.patch("neutron.agent.linux.external_process.ProcessMonitor."
"_spawn_checking_thread",
new=lambda x: self.record_calls(x))
p.start()
self.instances = []
self.addCleanup(self.stop)
def stop(self):
for instance in self.instances:
instance.stop()
def record_calls(self, instance):
self.old_callable(instance)
self.instances.append(instance)
class BaseTestCase(DietTestCase):
@staticmethod
def config_parse(conf=None, args=None):
"""Create the default configurations."""
# neutron.conf includes rpc_backend which needs to be cleaned up
if args is None:
args = []
args += ['--config-file', etcdir('neutron.conf')]
if conf is None:
config.init(args=args)
else:
conf(args)
def setUp(self):
super(BaseTestCase, self).setUp()
# suppress all but errors here
capture_logs = bool_from_env('OS_LOG_CAPTURE')
self.useFixture(
fixtures.FakeLogger(
name='neutron.api.extensions',
format=LOG_FORMAT,
level=std_logging.ERROR,
nuke_handlers=capture_logs,
))
self.useFixture(lockutils.ExternalLockFixture())
cfg.CONF.set_override('state_path', self.get_default_temp_dir().path)
self.addCleanup(CONF.reset)
self.useFixture(ProcessMonitorFixture())
self.useFixture(fixtures.MonkeyPatch(
'neutron.common.exceptions.NeutronException.use_fatal_exceptions',
fake_use_fatal_exceptions))
self.useFixture(fixtures.MonkeyPatch(
'oslo_config.cfg.find_config_files',
lambda project=None, prog=None, extension=None: []))
self.setup_rpc_mocks()
self.setup_config()
self.setup_test_registry_instance()
policy.init()
self.addCleanup(policy.reset)
def get_new_temp_dir(self):
"""Create a new temporary directory.
:returns fixtures.TempDir
"""
return self.useFixture(fixtures.TempDir())
def get_default_temp_dir(self):
"""Create a default temporary directory.
Returns the same directory during the whole test case.
:returns fixtures.TempDir
"""
if not hasattr(self, '_temp_dir'):
self._temp_dir = self.get_new_temp_dir()
return self._temp_dir
def get_temp_file_path(self, filename, root=None):
"""Returns an absolute path for a temporary file.
If root is None, the file is created in default temporary directory. It
also creates the directory if it's not initialized yet.
If root is not None, the file is created inside the directory passed as
root= argument.
:param filename: filename
:type filename: string
:param root: temporary directory to create a new file in
:type root: fixtures.TempDir
:returns absolute file path string
"""
root = root or self.get_default_temp_dir()
return root.join(filename)
def setup_rpc_mocks(self):
# don't actually start RPC listeners when testing
self.useFixture(fixtures.MonkeyPatch(
'neutron.common.rpc.Connection.consume_in_threads',
fake_consume_in_threads))
self.useFixture(fixtures.MonkeyPatch(
'oslo_messaging.Notifier', fake_notifier.FakeNotifier))
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_driver = 'fake'
# NOTE(russellb) We want all calls to return immediately.
self.messaging_conf.response_timeout = 0
self.useFixture(self.messaging_conf)
self.addCleanup(n_rpc.clear_extra_exmods)
n_rpc.add_extra_exmods('neutron.test')
self.addCleanup(n_rpc.cleanup)
n_rpc.init(CONF)
def setup_test_registry_instance(self):
"""Give a private copy of the registry to each test."""
self._callback_manager = registry_manager.CallbacksManager()
mock.patch.object(registry, '_get_callback_manager',
return_value=self._callback_manager).start()
def setup_config(self, args=None):
"""Tests that need a non-default config can override this method."""
self.config_parse(args=args)
def config(self, **kw):
"""Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the fixtures cleanup process.
"""
group = kw.pop('group', None)
for k, v in six.iteritems(kw):
CONF.set_override(k, v, group)
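    # Illustrative sketch (editorial addition): typical uses of config().
    # The option and group names below are hypothetical examples.
    #
    #   self.config(debug=True)                         # overrides CONF.debug
    #   self.config(report_interval=10, group='AGENT')  # overrides CONF.AGENT.report_interval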
def setup_coreplugin(self, core_plugin=None):
cp = PluginFixture(core_plugin)
self.useFixture(cp)
self.patched_dhcp_periodic = cp.patched_dhcp_periodic
def setup_notification_driver(self, notification_driver=None):
self.addCleanup(fake_notifier.reset)
if notification_driver is None:
notification_driver = [fake_notifier.__name__]
cfg.CONF.set_override("notification_driver", notification_driver)
class PluginFixture(fixtures.Fixture):
def __init__(self, core_plugin=None):
super(PluginFixture, self).__init__()
self.core_plugin = core_plugin
def _setUp(self):
self.dhcp_periodic_p = mock.patch(
'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.'
'start_periodic_dhcp_agent_status_check')
self.patched_dhcp_periodic = self.dhcp_periodic_p.start()
# Plugin cleanup should be triggered last so that
# test-specific cleanup has a chance to release references.
self.addCleanup(self.cleanup_core_plugin)
if self.core_plugin is not None:
cfg.CONF.set_override('core_plugin', self.core_plugin)
def cleanup_core_plugin(self):
"""Ensure that the core plugin is deallocated."""
nm = manager.NeutronManager
if not nm.has_instance():
return
# TODO(marun) Fix plugins that do not properly initialize notifiers
agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {}
# Perform a check for deallocation only if explicitly
# configured to do so since calling gc.collect() after every
# test increases test suite execution time by ~50%.
check_plugin_deallocation = (
bool_from_env('OS_CHECK_PLUGIN_DEALLOCATION'))
if check_plugin_deallocation:
plugin = weakref.ref(nm._instance.plugin)
nm.clear_instance()
if check_plugin_deallocation:
gc.collect()
# TODO(marun) Ensure that mocks are deallocated?
if plugin() and not isinstance(plugin(), mock.Base):
raise AssertionError(
'The plugin for this test was not deallocated.')
|
{
"content_hash": "3132cd3912b27e30af01dd7ad6389a2e",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 79,
"avg_line_length": 34.58536585365854,
"alnum_prop": 0.636459802538787,
"repo_name": "eonpatapon/neutron",
"id": "d9b6e0b6e131260cd0cb900a6f53d97de8e29b50",
"size": "14794",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/tests/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7388312"
},
{
"name": "Shell",
"bytes": "12912"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('radio', '0050_city'),
]
operations = [
migrations.AddField(
model_name='city',
name='ems_service',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ems_service', to='radio.Agency'),
),
migrations.AddField(
model_name='city',
name='fire_service',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='fire_service', to='radio.Agency'),
),
migrations.AddField(
model_name='city',
name='police_service',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='police_service', to='radio.Agency'),
),
]
|
{
"content_hash": "089053b5a844e8bd477c8abe5d522ae6",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 154,
"avg_line_length": 35,
"alnum_prop": 0.6216748768472906,
"repo_name": "ScanOC/trunk-player",
"id": "332fcdd552fbd5079efdef461a7e32c232869f06",
"size": "1088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radio/migrations/0051_auto_20180107_1729.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5493"
},
{
"name": "Dockerfile",
"bytes": "768"
},
{
"name": "HTML",
"bytes": "47516"
},
{
"name": "JavaScript",
"bytes": "22401"
},
{
"name": "Python",
"bytes": "167619"
},
{
"name": "Shell",
"bytes": "5505"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import *
from django.forms import CheckboxSelectMultiple
from .forms import *
from comunicacion.lugar.models import *
from nested_inline.admin import NestedStackedInline, NestedModelAdmin, NestedTabularInline
from django.utils.translation import ugettext_lazy as _
# Register your models here.
class Pregunta_1_Inline(NestedTabularInline):
model = Pregunta_1
    extra = 1
    can_delete = True
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
def formfield_for_manytomany(self, db_field, request, **kwargs):
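        # Editorial note: the admin change-form URL is parsed to recover the
        # Entrevista primary key (path segment 4, e.g. /admin/<app>/entrevista/<pk>/),
        # so the Municipio choices can be limited to the departamentos already
        # selected on that Entrevista; on the "add" form no pk exists yet, so
        # an empty queryset is used instead.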
urlactual=request.get_full_path()
urlactual=urlactual.split('/')
if urlactual[4]!='add':
_identrevista=int(urlactual[4])
try:
a = Entrevista.objects.get(id=_identrevista)
if db_field.name == 'ubicacion':
kwargs["queryset"] = Municipio.objects.filter(departamento__id__in=[x.id for x in a.departamento.all()])
except Exception, e:
pass
else:
kwargs["queryset"] = Municipio.objects.filter(departamento__id='0')
return super(Pregunta_1_Inline, self).formfield_for_manytomany(db_field, request, **kwargs)
class Pregunta_2_Inline(NestedTabularInline):
model = Pregunta_2
extra = 1
max_num = 4
can_delete = True
class Pregunta_3_Inline(NestedTabularInline):
model = Pregunta_3
max_num = 1
can_delete = False
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
class Pregunta_4_Inline(NestedTabularInline):
model = Pregunta_4
extra = 1
can_delete = True
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
class Pregunta_5a_Inline(NestedTabularInline):
model = Pregunta_5a
form = Pregunta_5aForm
extra = 1
can_delete = True
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
def formfield_for_manytomany(self, db_field, request, **kwargs):
urlactual=request.get_full_path()
urlactual=urlactual.split('/')
if urlactual[4]!='add':
_identrevista=int(urlactual[4])
try:
a = Entrevista.objects.get(id=_identrevista)
if db_field.name == 'ubicacion':
kwargs["queryset"] = Municipio.objects.filter(departamento__id__in=[x.id for x in a.departamento.all()])
except Exception, e:
pass
else:
kwargs["queryset"] = Municipio.objects.filter(departamento__id='0')
return super(Pregunta_5a_Inline, self).formfield_for_manytomany(db_field, request, **kwargs)
class Pregunta_5c_nestedInline(NestedTabularInline):
model = Pregunta_5c_nested
extra = 1
max_num = 5
fk_name = 'pregunta_5c'
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
class Pregunta_5c_Inline(NestedTabularInline):
model = Pregunta_5c
inlines = [Pregunta_5c_nestedInline]
# form = Pregunta_5cForm
max_num = 2
can_delete = False
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == 'innovacion':
urlactual=request.get_full_path()
urlactual=urlactual.split('/')
if urlactual[4]!='add':
_identrevista=int(urlactual[4])
kwargs["queryset"] = Pregunta_5a.objects.filter(prioritizado='1',entrevistado__pk=_identrevista)
else:
kwargs["queryset"] = Pregunta_5a.objects.filter(prioritizado='2')
return super(Pregunta_5c_Inline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class Pregunta_5d_Inline(NestedTabularInline):
model = Pregunta_5d
# form = Pregunta_5dForm
max_num = 2
extra = 2
can_delete = False
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == 'innovacion':
urlactual=request.get_full_path()
urlactual=urlactual.split('/')
if urlactual[4]!='add':
_identrevista=int(urlactual[4])
kwargs["queryset"] = Pregunta_5a.objects.filter(prioritizado='1',entrevistado__pk=_identrevista)
else:
kwargs["queryset"] = Pregunta_5a.objects.filter(prioritizado='2')
return super(Pregunta_5d_Inline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class Pregunta_5e_Inline(NestedTabularInline):
model = Pregunta_5e
# form = Pregunta_5eForm
max_num = 2
extra = 2
can_delete = False
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == 'innovacion':
urlactual=request.get_full_path()
urlactual=urlactual.split('/')
if urlactual[4]!='add':
_identrevista=int(urlactual[4])
kwargs["queryset"] = Pregunta_5a.objects.filter(prioritizado='1',entrevistado__pk=_identrevista)
else:
kwargs["queryset"] = Pregunta_5a.objects.filter(prioritizado='2')
return super(Pregunta_5e_Inline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class Pregunta_6a_Inline(NestedTabularInline):
model = Pregunta_6a
form = Pregunta_6aForm
extra = 1
can_delete = True
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
def formfield_for_manytomany(self, db_field, request, **kwargs):
urlactual=request.get_full_path()
urlactual=urlactual.split('/')
if urlactual[4]!='add':
_identrevista=int(urlactual[4])
try:
a = Entrevista.objects.get(id=_identrevista)
if db_field.name == 'ubicacion':
kwargs["queryset"] = Municipio.objects.filter(departamento__id__in=[x.id for x in a.departamento.all()])
except Exception, e:
pass
else:
kwargs["queryset"] = Municipio.objects.filter(departamento__id='0')
return super(Pregunta_6a_Inline, self).formfield_for_manytomany(db_field, request, **kwargs)
class Pregunta_6c_nestedInline(NestedTabularInline):
model = Pregunta_6c_nested
extra = 1
max_num = 5
fk_name = 'pregunta_6c'
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
class Pregunta_6c_Inline(NestedTabularInline):
model = Pregunta_6c
inlines = [Pregunta_6c_nestedInline]
max_num = 2
extra = 2
can_delete = False
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == 'innovacion':
urlactual=request.get_full_path()
urlactual=urlactual.split('/')
if urlactual[4]!='add':
_identrevista=int(urlactual[4])
kwargs["queryset"] = Pregunta_6a.objects.filter(prioritizado='1',entrevistado__pk=_identrevista)
else:
kwargs["queryset"] = Pregunta_6a.objects.filter(prioritizado='2')
return super(Pregunta_6c_Inline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class Pregunta_6d_Inline(NestedTabularInline):
model = Pregunta_6d
max_num = 2
extra = 2
can_delete = False
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == 'innovacion':
urlactual=request.get_full_path()
urlactual=urlactual.split('/')
if urlactual[4]!='add':
_identrevista=int(urlactual[4])
kwargs["queryset"] = Pregunta_6a.objects.filter(prioritizado='1',entrevistado__pk=_identrevista)
else:
kwargs["queryset"] = Pregunta_6a.objects.filter(prioritizado='2')
return super(Pregunta_6d_Inline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class Pregunta_6e_Inline(NestedTabularInline):
model = Pregunta_6e
max_num = 2
extra = 2
can_delete = False
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == 'innovacion':
urlactual=request.get_full_path()
urlactual=urlactual.split('/')
if urlactual[4]!='add':
_identrevista=int(urlactual[4])
kwargs["queryset"] = Pregunta_6a.objects.filter(prioritizado='1',entrevistado__pk=_identrevista)
else:
kwargs["queryset"] = Pregunta_6a.objects.filter(prioritizado='2')
return super(Pregunta_6e_Inline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class Pregunta_7a_Inline(NestedTabularInline):
model = Pregunta_7a
extra = 1
can_delete = True
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
def formfield_for_manytomany(self, db_field, request, **kwargs):
urlactual=request.get_full_path()
urlactual=urlactual.split('/')
if urlactual[4]!='add':
_identrevista=int(urlactual[4])
try:
a = Entrevista.objects.get(id=_identrevista)
if db_field.name == 'ubicacion':
kwargs["queryset"] = Municipio.objects.filter(departamento__id__in=[x.id for x in a.departamento.all()])
except Exception, e:
pass
else:
kwargs["queryset"] = Municipio.objects.filter(departamento__id='0')
return super(Pregunta_7a_Inline, self).formfield_for_manytomany(db_field, request, **kwargs)
class Pregunta_7b_Inline(NestedTabularInline):
model = Pregunta_7b
max_num = 1
can_delete = False
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
class Pregunta_8_Inline(NestedTabularInline):
model = Pregunta_8
extra = 1
can_delete = True
fields = (('organizacion','territorio1'),('periodo1','profundidad1'),('tema'))
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
class Pregunta_9_Inline(NestedTabularInline):
model = Pregunta_9
extra = 7
max_num = 7
can_delete = False
# fieldsets = [
# (None, {'fields' : ('tema','prioridad','papel')}),
# ('Auto-evaluación de la capacidad de la organización', {'fields' : ('conocimiento','experiencia')}),
# ]
class Pregunta_11_Inline(NestedTabularInline):
model = Pregunta_11
extra = 7
max_num = 7
can_delete = False
class EntrevistaAdmin(NestedModelAdmin):
def queryset(self, request):
if request.user.is_superuser:
return Entrevista.objects.all()
return Entrevista.objects.filter(usuario=request.user)
def save_model(self, request, obj, form, change):
obj.usuario = request.user
obj.save()
exclude = ('usuario',)
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
fieldsets = [
(_('Informacion de la persona entrevistada'), {'fields' : (('nombre','posicion','email','organizacion','pais','departamento','telefono'),('fecha1','alcance1','tipo_estudio',))}),
]
list_display = ('nombre','posicion','organizacion','fecha1','tipo_estudio')
list_filter = ('pais',)
inlines = [Pregunta_1_Inline, Pregunta_2_Inline, Pregunta_3_Inline, Pregunta_4_Inline,
Pregunta_5a_Inline, Pregunta_5c_Inline, Pregunta_5d_Inline, Pregunta_5e_Inline,
Pregunta_6a_Inline, Pregunta_6c_Inline,Pregunta_6d_Inline,Pregunta_6e_Inline,
Pregunta_7a_Inline,Pregunta_7b_Inline,Pregunta_8_Inline,Pregunta_9_Inline,Pregunta_11_Inline]
class Media:
js = ('analisis/js/custom.js',)
css = {
'all': ('analisis/css/admin.css',)
}
# def formfield_for_manytomany(self, db_field, request, **kwargs):
# urlactual=request.get_full_path()
# urlactual=urlactual.split('/')
# if urlactual[4]=='add':
# kwargs["queryset"] = Departamento.objects.filter(pais='0')
# return super(EntrevistaAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
def formfield_for_manytomany(self, db_field, request, **kwargs):
urlactual=request.get_full_path()
urlactual=urlactual.split('/')
if urlactual[4]!='add':
_identrevista=int(urlactual[4])
try:
a = Entrevista.objects.get(id=_identrevista)
if db_field.name == 'departamento':
kwargs["queryset"] = Departamento.objects.filter(pais=a.pais)
except Exception, e:
pass
return super(EntrevistaAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
# else:
# kwargs["queryset"] = Departamento.objects.filter(pais='0')
admin.site.register(Entrevista,EntrevistaAdmin)
|
{
"content_hash": "dc150b3dc719dc71a59924f15715a3b0",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 180,
"avg_line_length": 32.92602739726028,
"alnum_prop": 0.7066067565318689,
"repo_name": "ErickMurillo/ciat_plataforma",
"id": "6b88e015e0ac9a71e17bb80e32bcc47ce5028fc4",
"size": "12044",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "analysis/analysis/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "468415"
},
{
"name": "HTML",
"bytes": "1672727"
},
{
"name": "JavaScript",
"bytes": "1492281"
},
{
"name": "Python",
"bytes": "3447877"
}
],
"symlink_target": ""
}
|
"""
Created on Wed Oct 4 17:54:07 2017
@author: slerp4
"""
# import keras deep learning essentials
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.preprocessing import image
from keras.models import Model
import numpy as np
# import other api
from timeit import default_timer as timer
def vgg16_feature_extractor(img_path='dog.jpg'):
# load complete VGG-16 cnn model with weights trained on ImageNet
base_model = VGG16(weights='imagenet')
    # the model below is VGG-16 truncated after the fully-connected layer
    # 'fc2', i.e. only the final 'predictions' (softmax) layer is excluded
model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc2').output)
time_start = timer()
    # Load the input image and preprocess it
# img_path = 'dog.jpg' is default test image
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
    # Extract features: get the output via model.predict()
features = model.predict(x)
time_end = timer()
# Print results
print('\nDone! Features of dimension {} are extracted from 2nd last FC layer.'.format(features.shape))
print('\nTime elapsed: {}'.format(time_end-time_start))
return features
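# Minimal usage sketch (editorial addition). It assumes a local test image
# named 'dog.jpg' (as in the default argument above) and that Keras with the
# VGG16 ImageNet weights is available.
if __name__ == '__main__':
    feats = vgg16_feature_extractor('dog.jpg')
    # features from the 'fc2' layer come back with shape (1, 4096)
    print('Sample of extracted features: {}'.format(feats[0, :5]))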
|
{
"content_hash": "26702a13ad12e44c338dcd2775435197",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 106,
"avg_line_length": 31.692307692307693,
"alnum_prop": 0.7046925566343042,
"repo_name": "ywang037/delta-ntu-slerp4",
"id": "48d880400ceaab53630bf4f0940616915e4c9dea",
"size": "1283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keras_implementation/my_lib/vgg16_feature_extraction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73665"
}
],
"symlink_target": ""
}
|
class MarshmallowSQLAlchemyError(Exception):
"""Base exception class from which all exceptions related to
marshmallow-sqlalchemy inherit.
"""
pass
class ModelConversionError(MarshmallowSQLAlchemyError):
"""Raised when an error occurs in converting a SQLAlchemy construct
to a marshmallow object.
"""
pass
|
{
"content_hash": "6a0777376f362f5b0516d0014d72f53e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 71,
"avg_line_length": 30.727272727272727,
"alnum_prop": 0.742603550295858,
"repo_name": "jmcarp/marshmallow-sqlalchemy",
"id": "638124b4b47e0ffd0c5ec1e5eb3b8bb7c9af1b9d",
"size": "363",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "marshmallow_sqlalchemy/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32009"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.template import Context, Template, TemplateDoesNotExist
from django.test import TestCase
from .models import Story
class MultipleIncludeTest(TestCase):
def setUp(self):
self.story1 = Story.objects.create(title='story1', category_slug='category1')
self.story2 = Story.objects.create(title='story2', category_slug='category2')
self.story3 = Story.objects.create(title='story3', category_slug='category3')
def render(self, template_content, context=None):
if context is None:
context = {}
t = Template(template_content)
c = Context(context)
return t.render(c)
def test_simple_include(self):
template_content = '{% load multiple_include %}{% multiple_include "story_category1.html" %}'
rendered = self.render(template_content)
self.assertEqual(rendered, 'story_category1.html')
def test_constant_include(self):
template_content = '{% load multiple_include %}{% multiple_include "story_category1.html" "story_category2.html" %}'
rendered = self.render(template_content)
self.assertEqual(rendered, 'story_category1.html')
template_content = '{% load multiple_include %}{% multiple_include "story_category2.html" "story_category1.html" %}'
rendered = self.render(template_content)
self.assertEqual(rendered, 'story_category2.html')
def test_fallback_include(self):
template_content = '{% load multiple_include %}{% multiple_include "story_category3.html" "story_default.html" %}'
rendered = self.render(template_content)
self.assertEqual(rendered, 'story_default.html')
template_content = '{% load multiple_include %}{% multiple_include "story_category3.html" %}'
# should fail
self.assertRaises(TemplateDoesNotExist, self.render, template_content)
def test_include(self):
template_content = '{{ "story_"|add:object.category_slug|add:".html" }}'
rendered = self.render(template_content, {'object': self.story1})
self.assertEqual(rendered, 'story_category1.html')
template_content = '{% load multiple_include %}{% with "story_"|add:object.category_slug|add:".html" as template_name %}{% multiple_include template_name "story_default.html" %}{% endwith %}'
rendered = self.render(template_content, {'object': self.story1})
self.assertEqual(rendered, 'story_category1.html')
template_content = '{% load multiple_include %}{% multiple_include "story_"|add:object.category_slug|add:".html" "story_default.html" %}'
rendered = self.render(template_content, {'object': self.story1})
self.assertEqual(rendered, 'story_category1.html')
rendered = self.render(template_content, {'object': self.story3})
self.assertEqual(rendered, 'story_default.html')
def test_with(self):
template_content = '{% load multiple_include %}{% multiple_include "story_title.html" with story=object %}'
rendered = self.render(template_content, {'object': self.story1})
self.assertEqual(rendered, 'story1')
def test_only(self):
template_content = '{% load multiple_include %}{% multiple_include "story_title.html" %}'
rendered = self.render(template_content, {'story': self.story1})
self.assertEqual(rendered, 'story1')
template_content = '{% load multiple_include %}{% multiple_include "story_title.html" only %}'
rendered = self.render(template_content, {'story': self.story1})
self.assertEqual(rendered, '')
|
{
"content_hash": "5f7526f069d6c3bd9aca19e3627fe9ed",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 199,
"avg_line_length": 46.47435897435897,
"alnum_prop": 0.6692413793103448,
"repo_name": "fcurella/django-multiple-include",
"id": "4504c3cf6d4bd5c85a79d10576ac47dbdc990fc2",
"size": "3625",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "multiple_include/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12868"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ringapp', '0017_auto_20150121_1839'),
]
operations = [
migrations.AlterModelTable(
name='comminvariance',
table='comm_property_metaproperty',
),
migrations.AlterModelTable(
name='invariance',
table='property_metaproperty',
),
]
|
{
"content_hash": "a82ecff02680158c4c1876a051816557",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 47,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.5945378151260504,
"repo_name": "rschwiebert/RingApp",
"id": "f9df674b4e85a1b27a792e9c33ef534d1f88992b",
"size": "500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ringapp/migrations/0018_auto_20150121_1841.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "645"
},
{
"name": "HTML",
"bytes": "112733"
},
{
"name": "JavaScript",
"bytes": "1823"
},
{
"name": "Procfile",
"bytes": "40"
},
{
"name": "Python",
"bytes": "275458"
}
],
"symlink_target": ""
}
|
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.exc import DatabaseError
from sqlalchemy.sql import func
from sqlalchemy.sql.expression import union_all
from socket import inet_aton, inet_ntoa
from struct import unpack, pack, error as struct_error
from passlib.hash import bcrypt_sha256
import datetime
import hashlib
import json
def sha512(string):
return hashlib.sha512(string).hexdigest()
def ip2long(ip):
return unpack('!i', inet_aton(ip))[0]
def long2ip(ip_int):
try:
return inet_ntoa(pack('!i', ip_int))
except struct_error:
# Backwards compatibility with old CTFd databases
return inet_ntoa(pack('!I', ip_int))
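# Round-trip sketch (editorial addition): ip2long/long2ip convert between the
# dotted-quad form and the 32-bit integer stored in the database.
#
#   ip2long('127.0.0.1')  -> 2130706433
#   long2ip(2130706433)   -> '127.0.0.1'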
def get_standings(admin=False, count=None):
standings = []
q = db.session.query(Solves).count()
    if q > 0:
score = db.func.sum(Challenges.value).label('score')
date = db.func.max(Solves.date).label('date')
scores = db.session.query(Solves.userid.label('userid'), score, date).join(Challenges).group_by(Solves.userid)
awards = db.session.query(Awards.userid.label('userid'), db.func.sum(Awards.value).label('score'),
db.func.max(Awards.date).label('date')) \
.group_by(Awards.userid)
results = union_all(scores, awards).alias('results')
sumscores = db.session.query(results.columns.userid, db.func.sum(results.columns.score).label('score'),
db.func.max(results.columns.date).label('date')) \
.group_by(results.columns.userid).subquery()
        if admin:
            standings_query = db.session.query(Users.teamid.label('teamid'),
                                               Teams.name.label('name'),
                                               Teams.banned,
                                               db.func.sum(sumscores.columns.score).label('score')) \
                .join(sumscores, Users.id == sumscores.columns.userid) \
                .join(Teams, Users.teamid == Teams.id) \
                .filter(Users.banned == False) \
                .order_by(score.desc(), sumscores.columns.date) \
                .group_by(Teams.name)
else:
standings_query = db.session.query(Users.teamid.label('teamid'),
Teams.name.label('name'),
db.func.sum(sumscores.columns.score).label('score')) \
.join(sumscores, Users.id == sumscores.columns.userid) \
.join(Teams, Users.teamid == Teams.id) \
.filter(Teams.banned == False) \
.order_by(score.desc(), sumscores.columns.date) \
.group_by(Teams.name)
if count is None:
standings = standings_query.all()
else:
standings = standings_query.limit(count).all()
db.session.close()
return standings
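# Editorial note: each standings row above is (teamid, name, score) -- with an
# extra Teams.banned column in the admin variant -- ordered by total score
# (challenge solves plus awards), with earlier last-solve dates breaking ties.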
db = SQLAlchemy()
class Pages(db.Model):
id = db.Column(db.Integer, primary_key=True)
route = db.Column(db.String(80), unique=True)
html = db.Column(db.Text)
def __init__(self, route, html):
self.route = route
self.html = html
def __repr__(self):
return "<Pages {0} for challenge {1}>".format(self.tag, self.chal)
class Containers(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
buildfile = db.Column(db.Text)
def __init__(self, name, buildfile):
self.name = name
self.buildfile = buildfile
def __repr__(self):
return "<Container ID:(0) {1}>".format(self.id, self.name)
class Challenges(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
description = db.Column(db.Text)
value = db.Column(db.Integer)
category = db.Column(db.String(80))
flags = db.Column(db.Text)
hidden = db.Column(db.Boolean)
def __init__(self, name, description, value, category, flags):
self.name = name
self.description = description
self.value = value
self.category = category
self.flags = json.dumps(flags)
def __repr__(self):
return '<chal %r>' % self.name
class Awards(db.Model):
id = db.Column(db.Integer, primary_key=True)
userid = db.Column(db.Integer, db.ForeignKey('users.id'))
name = db.Column(db.String(80))
description = db.Column(db.Text)
date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
value = db.Column(db.Integer)
category = db.Column(db.String(80))
icon = db.Column(db.Text)
def __init__(self, userid, name, value):
self.userid = userid
self.name = name
self.value = value
def __repr__(self):
return '<award %r>' % self.name
class Tags(db.Model):
id = db.Column(db.Integer, primary_key=True)
chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))
tag = db.Column(db.String(80))
def __init__(self, chal, tag):
self.chal = chal
self.tag = tag
def __repr__(self):
return "<Tag {0} for challenge {1}>".format(self.tag, self.chal)
class Files(db.Model):
id = db.Column(db.Integer, primary_key=True)
chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))
location = db.Column(db.Text)
def __init__(self, chal, location):
self.chal = chal
self.location = location
def __repr__(self):
return "<File {0} for challenge {1}>".format(self.location, self.chal)
class Keys(db.Model):
id = db.Column(db.Integer, primary_key=True)
chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))
key_type = db.Column(db.Integer)
flag = db.Column(db.Text)
def __init__(self, chal, flag, key_type):
self.chal = chal
self.flag = flag
self.key_type = key_type
def __repr__(self):
return self.flag
class Teams(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), unique=True)
captain = db.Column(db.Integer, db.ForeignKey('users.id'))
website = db.Column(db.String(128))
affiliation = db.Column(db.String(128))
country = db.Column(db.String(32))
bracket = db.Column(db.String(32))
banned = db.Column(db.Boolean, default=False)
def __init__(self, name, captain):
self.name = name
self.captain = captain
def __repr__(self):
return '<Team %r>' % self.name
def score(self):
users = Users.query.filter_by(teamid=self.id).all()
return sum(user.score() for user in users)
def place(self):
standings = get_standings()
# http://codegolf.stackexchange.com/a/4712
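        # Editorial note: the slice below picks an ordinal suffix out of
        # "tsnrhtdd" ([0::4]='th', [1::4]='st', [2::4]='nd', [3::4]='rd').
        # For i=1: k=1, the index expression is 1, so the result is '1st';
        # for i=11..13 the teens check forces index 0, giving 'th'.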
try:
i = [y[0] for y in standings].index(self.id) + 1
k = i % 10
return "%d%s" % (i, "tsnrhtdd"[(i / 10 % 10 != 1) * (k < 4) * k::4])
except ValueError:
return 0
class Users(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), unique=True)
email = db.Column(db.String(128), unique=True)
password = db.Column(db.String(128))
banned = db.Column(db.Boolean, default=False)
share = db.Column(db.Boolean, default=True)
verified = db.Column(db.Boolean, default=False)
admin = db.Column(db.Boolean, default=False)
joined = db.Column(db.DateTime, default=datetime.datetime.utcnow)
teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))
def __init__(self, name, email, password):
self.name = name
self.email = email
self.password = bcrypt_sha256.encrypt(str(password))
def __repr__(self):
return '<User %r>' % self.name
def score(self):
score = db.func.sum(Challenges.value).label('score')
team = db.session.query(Solves.userid, score).join(Users).join(Challenges).filter(Users.banned == False, Users.id == self.id).group_by(Solves.userid).first()
award_score = db.func.sum(Awards.value).label('award_score')
award = db.session.query(award_score).filter_by(userid=self.id).first()
if team:
return int(team.score or 0) + int(award.award_score or 0)
else:
return 0
def place(self):
score = db.func.sum(Challenges.value).label('score')
quickest = db.func.max(Solves.date).label('quickest')
teams = db.session.query(Solves.userid).join(Users).join(Challenges).filter(Users.banned == False).group_by(Solves.userid).order_by(score.desc(), quickest).all()
#http://codegolf.stackexchange.com/a/4712
try:
i = teams.index((self.id,)) + 1
k = i % 10
return "%d%s" % (i, "tsnrhtdd"[(i / 10 % 10 != 1) * (k < 4) * k::4])
except ValueError:
return 0
class Solves(db.Model):
__table_args__ = (db.UniqueConstraint('chalid', 'userid'), {})
id = db.Column(db.Integer, primary_key=True)
chalid = db.Column(db.Integer, db.ForeignKey('challenges.id'))
userid = db.Column(db.Integer, db.ForeignKey('users.id'))
ip = db.Column(db.Integer)
flag = db.Column(db.Text)
date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
user = db.relationship('Users', foreign_keys="Solves.userid", lazy='joined')
chal = db.relationship('Challenges', foreign_keys="Solves.chalid", lazy='joined')
# value = db.Column(db.Integer)
def __init__(self, chalid, userid, ip, flag):
self.ip = ip2long(ip)
self.chalid = chalid
self.userid = userid
self.flag = flag
# self.value = value
def __repr__(self):
return '<solves %r>' % self.chal
class WrongKeys(db.Model):
id = db.Column(db.Integer, primary_key=True)
chalid = db.Column(db.Integer, db.ForeignKey('challenges.id'))
userid = db.Column(db.Integer, db.ForeignKey('users.id'))
date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
flag = db.Column(db.Text)
chal = db.relationship('Challenges', foreign_keys="WrongKeys.chalid", lazy='joined')
def __init__(self, userid, chalid, flag):
self.userid = userid
self.chalid = chalid
self.flag = flag
def __repr__(self):
return '<wrong %r>' % self.flag
class Tracking(db.Model):
id = db.Column(db.Integer, primary_key=True)
ip = db.Column(db.BigInteger)
user = db.Column(db.Integer, db.ForeignKey('users.id'))
date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
def __init__(self, ip, user):
self.ip = ip2long(ip)
self.user = user
def __repr__(self):
return '<ip %r>' % self.user
class Config(db.Model):
id = db.Column(db.Integer, primary_key=True)
key = db.Column(db.Text)
value = db.Column(db.Text)
def __init__(self, key, value):
self.key = key
self.value = value
|
{
"content_hash": "f6a1d3bb5931d29a74a56683d957da65",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 169,
"avg_line_length": 34.24842767295598,
"alnum_prop": 0.5971903406482416,
"repo_name": "RITC3/RC3_CTFD",
"id": "7998b97b08326a53a31afe2ca149e65f514766d0",
"size": "10891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CTFd/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7231"
},
{
"name": "HTML",
"bytes": "172560"
},
{
"name": "JavaScript",
"bytes": "35241"
},
{
"name": "Python",
"bytes": "124137"
},
{
"name": "Shell",
"bytes": "564"
}
],
"symlink_target": ""
}
|
import unittest
import logging
from nose.tools import * # flake8: noqa (PEP8 asserts)
from framework.auth.core import Auth
from website import settings
import website.search.search as search
from website.search import elastic_search
from website.search.util import build_query
from website.search_migration.migrate import migrate
from tests.base import OsfTestCase
from tests.test_features import requires_search
from tests.factories import (
UserFactory, ProjectFactory, NodeFactory,
UnregUserFactory, UnconfirmedUserFactory
)
@requires_search
class SearchTestCase(OsfTestCase):
def tearDown(self):
super(SearchTestCase, self).tearDown()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
def setUp(self):
super(SearchTestCase, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
def query(term):
results = search.search(build_query(term), index=elastic_search.INDEX)
return results
def query_user(name):
term = 'category:user AND "{}"'.format(name)
return query(term)
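# Example (editorial addition): query_user('David Bowie') searches with the
# term 'category:user AND "David Bowie"'.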
@requires_search
class TestUserUpdate(SearchTestCase):
def setUp(self):
super(TestUserUpdate, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = UserFactory(fullname='David Bowie')
def test_new_user(self):
# Verify that user has been added to Elastic Search
docs = query_user(self.user.fullname)['results']
assert_equal(len(docs), 1)
def test_new_user_unconfirmed(self):
user = UnconfirmedUserFactory()
docs = query_user(user.fullname)['results']
assert_equal(len(docs), 0)
token = user.get_confirmation_token(user.username)
user.confirm_email(token)
user.save()
docs = query_user(user.fullname)['results']
assert_equal(len(docs), 1)
def test_change_name(self):
"""Add a user, change her name, and verify that only the new name is
found in search.
"""
user = UserFactory(fullname='Barry Mitchell')
fullname_original = user.fullname
user.fullname = user.fullname[::-1]
user.save()
docs_original = query_user(fullname_original)['results']
assert_equal(len(docs_original), 0)
docs_current = query_user(user.fullname)['results']
assert_equal(len(docs_current), 1)
def test_disabled_user(self):
"""Test that disabled users are not in search index"""
user = UserFactory(fullname='Bettie Page')
user.save()
# Ensure user is in search index
assert_equal(len(query_user(user.fullname)['results']), 1)
# Disable the user
user.is_disabled = True
user.save()
# Ensure user is not in search index
assert_equal(len(query_user(user.fullname)['results']), 0)
def test_merged_user(self):
user = UserFactory(fullname='Annie Lennox')
merged_user = UserFactory(fullname='Lisa Stansfield')
user.save()
merged_user.save()
assert_equal(len(query_user(user.fullname)['results']), 1)
assert_equal(len(query_user(merged_user.fullname)['results']), 1)
user.merge_user(merged_user)
assert_equal(len(query_user(user.fullname)['results']), 1)
assert_equal(len(query_user(merged_user.fullname)['results']), 0)
def test_employment(self):
user = UserFactory(fullname='Helga Finn')
user.save()
institution = 'Finn\'s Fine Filers'
docs = query_user(institution)['results']
assert_equal(len(docs), 0)
user.jobs.append({
'institution': institution,
'title': 'The Big Finn',
})
user.save()
docs = query_user(institution)['results']
assert_equal(len(docs), 1)
def test_education(self):
user = UserFactory(fullname='Henry Johnson')
user.save()
institution = 'Henry\'s Amazing School!!!'
docs = query_user(institution)['results']
assert_equal(len(docs), 0)
user.schools.append({
'institution': institution,
'degree': 'failed all classes',
})
user.save()
docs = query_user(institution)['results']
assert_equal(len(docs), 1)
def test_name_fields(self):
names = ['Bill Nye', 'William', 'the science guy', 'Sanford', 'the Great']
user = UserFactory(fullname=names[0])
user.given_name = names[1]
user.middle_names = names[2]
user.family_name = names[3]
user.suffix = names[4]
user.save()
docs = [query_user(name)['results'] for name in names]
assert_equal(sum(map(len, docs)), len(docs)) # 1 result each
assert_true(all([user._id == doc[0]['id'] for doc in docs]))
@requires_search
class TestProject(SearchTestCase):
def setUp(self):
super(TestProject, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = UserFactory(fullname='John Deacon')
self.project = ProjectFactory(title='Red Special', creator=self.user)
def test_new_project_private(self):
"""Verify that a private project is not present in Elastic Search.
"""
docs = query(self.project.title)['results']
assert_equal(len(docs), 0)
def test_make_public(self):
"""Make project public, and verify that it is present in Elastic
Search.
"""
self.project.set_privacy('public')
docs = query(self.project.title)['results']
assert_equal(len(docs), 1)
@requires_search
class TestPublicNodes(SearchTestCase):
def setUp(self):
super(TestPublicNodes, self).setUp()
self.user = UserFactory(usename='Doug Bogie')
self.title = 'Red Special'
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(
title=self.title,
creator=self.user,
is_public=True,
)
self.component = NodeFactory(
parent=self.project,
title=self.title,
creator=self.user,
is_public=True
)
self.registration = ProjectFactory(
title=self.title,
creator=self.user,
is_public=True,
is_registration=True
)
def test_make_private(self):
"""Make project public, then private, and verify that it is not present
in search.
"""
self.project.set_privacy('private')
docs = query('category:project AND ' + self.title)['results']
assert_equal(len(docs), 0)
self.component.set_privacy('private')
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 0)
self.registration.set_privacy('private')
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 0)
def test_public_parent_title(self):
self.project.set_title('hello & world', self.consolidate_auth)
self.project.save()
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 1)
assert_equal(docs[0]['parent_title'], 'hello & world')
assert_true(docs[0]['parent_url'])
def test_make_parent_private(self):
"""Make parent of component, public, then private, and verify that the
component still appears but doesn't link to the parent in search.
"""
self.project.set_privacy('private')
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 1)
assert_equal(docs[0]['parent_title'], '-- private project --')
assert_false(docs[0]['parent_url'])
def test_delete_project(self):
"""
"""
self.component.remove_node(self.consolidate_auth)
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 0)
self.project.remove_node(self.consolidate_auth)
docs = query('category:project AND ' + self.title)['results']
assert_equal(len(docs), 0)
def test_change_title(self):
"""
"""
title_original = self.project.title
self.project.set_title(
'Blue Ordinary', self.consolidate_auth, save=True)
docs = query('category:project AND ' + title_original)['results']
assert_equal(len(docs), 0)
docs = query('category:project AND ' + self.project.title)['results']
assert_equal(len(docs), 1)
def test_add_tags(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
self.project.add_tag(tag, self.consolidate_auth, save=True)
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 1)
def test_remove_tag(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
self.project.add_tag(tag, self.consolidate_auth, save=True)
self.project.remove_tag(tag, self.consolidate_auth, save=True)
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
def test_update_wiki(self):
"""Add text to a wiki page, then verify that project is found when
searching for wiki text.
"""
wiki_content = {
'home': 'Hammer to fall',
'swag': '#YOLO'
}
for key, value in wiki_content.items():
docs = query(value)['results']
assert_equal(len(docs), 0)
self.project.update_node_wiki(
key, value, self.consolidate_auth,
)
docs = query(value)['results']
assert_equal(len(docs), 1)
def test_clear_wiki(self):
"""Add wiki text to page, then delete, then verify that project is not
found when searching for wiki text.
"""
wiki_content = 'Hammer to fall'
self.project.update_node_wiki(
'home', wiki_content, self.consolidate_auth,
)
self.project.update_node_wiki('home', '', self.consolidate_auth)
docs = query(wiki_content)['results']
assert_equal(len(docs), 0)
def test_add_contributor(self):
"""Add a contributor, then verify that project is found when searching
for contributor.
"""
user2 = UserFactory(fullname='Adam Lambert')
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
self.project.add_contributor(user2, save=True)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_remove_contributor(self):
"""Add and remove a contributor, then verify that project is not found
when searching for contributor.
"""
user2 = UserFactory(fullname='Brian May')
self.project.add_contributor(user2, save=True)
self.project.remove_contributor(user2, self.consolidate_auth)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
def test_hide_contributor(self):
user2 = UserFactory(fullname='Brian May')
self.project.add_contributor(user2)
self.project.set_visible(user2, False, save=True)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
self.project.set_visible(user2, True, save=True)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_wrong_order_search(self):
title_parts = self.title.split(' ')
title_parts.reverse()
title_search = ' '.join(title_parts)
docs = query(title_search)['results']
assert_equal(len(docs), 3)
def test_tag_aggregation(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
self.project.add_tag(tag, self.consolidate_auth, save=True)
docs = query(self.title)['tags']
assert len(docs) == 3
for doc in docs:
assert doc['key'] in tags
@requires_search
class TestAddContributor(SearchTestCase):
"""Tests of the search.search_contributor method
"""
def setUp(self):
super(TestAddContributor, self).setUp()
self.name1 = 'Roger1 Taylor1'
self.name2 = 'John2 Deacon2'
self.user = UserFactory(fullname=self.name1)
def test_unreg_users_dont_show_in_search(self):
unreg = UnregUserFactory()
contribs = search.search_contributor(unreg.fullname)
assert_equal(len(contribs['users']), 0)
def test_unreg_users_do_show_on_projects(self):
unreg = UnregUserFactory(fullname='Robert Paulson')
self.project = ProjectFactory(
title='Glamour Rock',
creator=unreg,
is_public=True,
)
results = query(unreg.fullname)['results']
assert_equal(len(results), 1)
def test_search_fullname(self):
"""Verify that searching for full name yields exactly one result.
"""
contribs = search.search_contributor(self.name1)
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name2)
assert_equal(len(contribs['users']), 0)
def test_search_firstname(self):
"""Verify that searching for first name yields exactly one result.
"""
contribs = search.search_contributor(self.name1.split(' ')[0])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name2.split(' ')[0])
assert_equal(len(contribs['users']), 0)
def test_search_partial(self):
"""Verify that searching for part of first name yields exactly one
result.
"""
contribs = search.search_contributor(self.name1.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name2.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 0)
class TestSearchExceptions(OsfTestCase):
"""
Verify that the correct exception is thrown when the connection is lost
"""
@classmethod
def setUpClass(cls):
logging.getLogger('website.project.model').setLevel(logging.CRITICAL)
super(TestSearchExceptions, cls).setUpClass()
if settings.SEARCH_ENGINE == 'elastic':
cls._es = search.search_engine.es
search.search_engine.es = None
@classmethod
def tearDownClass(cls):
super(TestSearchExceptions, cls).tearDownClass()
if settings.SEARCH_ENGINE == 'elastic':
search.search_engine.es = cls._es
def test_connection_error(self):
# Ensures that saving projects/users doesn't break as a result of connection errors
self.user = UserFactory(usename='Doug Bogie')
self.project = ProjectFactory(
title="Tom Sawyer",
creator=self.user,
is_public=True,
)
self.user.save()
self.project.save()
class TestSearchMigration(SearchTestCase):
# Verify that the correct indices are created/deleted during migration
@classmethod
def tearDownClass(cls):
super(TestSearchMigration, cls).tearDownClass()
search.create_index(settings.ELASTIC_INDEX)
def setUp(self):
super(TestSearchMigration, self).setUp()
self.es = search.search_engine.es
search.delete_index(settings.ELASTIC_INDEX)
search.create_index(settings.ELASTIC_INDEX)
self.user = UserFactory(fullname='David Bowie')
self.project = ProjectFactory(
title=settings.ELASTIC_INDEX,
creator=self.user,
is_public=True
)
def test_first_migration_no_delete(self):
migrate(delete=False, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys()[0], settings.ELASTIC_INDEX)
def test_multiple_migrations_no_delete(self):
for n in xrange(1, 21):
migrate(delete=False, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys()[0], settings.ELASTIC_INDEX)
def test_first_migration_with_delete(self):
migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys()[0], settings.ELASTIC_INDEX)
def test_multiple_migrations_with_delete(self):
for n in xrange(1, 21, 2):
migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys()[0], settings.ELASTIC_INDEX)
migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n + 1)]['aliases'].keys()[0], settings.ELASTIC_INDEX)
assert not var.get(settings.ELASTIC_INDEX + '_v{}'.format(n))
|
{
"content_hash": "3d6d68ac01607488c58264b5c33e2ec5",
"timestamp": "",
"source": "github",
"line_count": 518,
"max_line_length": 121,
"avg_line_length": 34.465250965250966,
"alnum_prop": 0.6164230101383521,
"repo_name": "revanthkolli/osf.io",
"id": "0c9aeb20c0aa8254d8c8cbfeb4fa3f5011298d6e",
"size": "17853",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "tests/test_elastic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "84010"
},
{
"name": "HTML",
"bytes": "16454"
},
{
"name": "JavaScript",
"bytes": "971670"
},
{
"name": "Mako",
"bytes": "470511"
},
{
"name": "Python",
"bytes": "2641374"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
}
|
"""service-management describe command."""
from googlecloudsdk.api_lib.service_management import services_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.service_management import arg_parsers
from googlecloudsdk.command_lib.service_management import common_flags
class Describe(base.DescribeCommand):
"""Describes a service given a service name."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
common_flags.producer_service_flag(suffix='to describe').AddToParser(parser)
def Run(self, args):
"""Run 'service-management describe'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
The response from the Get API call.
"""
messages = services_util.GetMessagesModule()
client = services_util.GetClientInstance()
service = arg_parsers.GetServiceNameFromArg(args.service)
request = messages.ServicemanagementServicesGetRequest(
serviceName=service,)
return client.services.Get(request)
def Collection(self):
return services_util.SERVICES_COLLECTION
|
{
"content_hash": "130f3ad7f40894f127f5443bc5557502",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 80,
"avg_line_length": 30.818181818181817,
"alnum_prop": 0.7315634218289085,
"repo_name": "KaranToor/MA450",
"id": "8b60631f649bb85dc5a9db1c32866092a610c7af",
"size": "1952",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/surface/service_management/describe.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
}
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="adafruit-circuitpython-hid",
use_scm_version=True,
setup_requires=["setuptools_scm"],
description="CircuitPython helper library for simulating HID devices.",
long_description=long_description,
long_description_content_type="text/x-rst",
# The project's main homepage.
url="https://github.com/adafruit/Adafruit_CircuitPython_HID",
# Author details
author="Adafruit Industries",
author_email="circuitpython@adafruit.com",
install_requires=["Adafruit-Blinka"],
# Choose your license
license="MIT",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries",
"Topic :: System :: Hardware",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
# What does your project relate to?
keywords="adafruit hid human interface device keyboard mouse keycode keypad"
"hardware micropython circuitpython",
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=["adafruit_hid"],
)
|
{
"content_hash": "998cb33390417b0909504a1e3891485e",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 80,
"avg_line_length": 34.716981132075475,
"alnum_prop": 0.6885869565217392,
"repo_name": "dhalbert/Adafruit_CircuitPython_HID",
"id": "0278044bca9e8428765af242919bd90137229d70",
"size": "1937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43706"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2017-2022, Vanessa Sochat
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from expfactory.logger import bot
from expfactory.defaults import EXPFACTORY_LIBRARY
import requests
def main(args, parser, subparser):
response = requests.get(EXPFACTORY_LIBRARY)
if response.status_code == 200:
library = response.json()
bot.info("Experiments")
rows = []
for experiment in library:
rows.append([experiment["name"], experiment["github"]])
bot.table(rows)
|
{
"content_hash": "85e26f860ea36b364d5716309aa263d6",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 39.673469387755105,
"alnum_prop": 0.7731481481481481,
"repo_name": "expfactory/expfactory",
"id": "5be9c3b1a67a33694d9c8a6c2f92f8549ba34f7c",
"size": "1944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expfactory/cli/list.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11293"
},
{
"name": "Dockerfile",
"bytes": "2058"
},
{
"name": "HTML",
"bytes": "53042"
},
{
"name": "JavaScript",
"bytes": "8431"
},
{
"name": "Python",
"bytes": "171261"
},
{
"name": "Shell",
"bytes": "13400"
},
{
"name": "Singularity",
"bytes": "2154"
},
{
"name": "TeX",
"bytes": "24350"
}
],
"symlink_target": ""
}
|
import os
from oslo_concurrency import processutils
from nova import exception as nova_exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
SOURCE_PROTOCOL = 'quobyte'
SOURCE_TYPE = 'file'
DRIVER_CACHE = 'none'
DRIVER_IO = 'native'
def mount_volume(volume, mnt_base, configfile=None):
"""Wraps execute calls for mounting a Quobyte volume"""
fileutils.ensure_tree(mnt_base)
command = ['mount.quobyte', volume, mnt_base]
if configfile:
command.extend(['-c', configfile])
LOG.debug('Mounting volume %s at mount point %s ...',
volume,
mnt_base)
# Run mount command but do not fail on already mounted exit code
utils.execute(*command, check_exit_code=[0, 4])
LOG.info(_LI('Mounted volume: %s'), volume)
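# A minimal usage sketch (the volume name and paths below are illustrative only):
#   mount_volume('myvolume', '/mnt/quobyte/myvolume', configfile='/etc/quobyte/client.cfg')
# ensures the mount point directory exists and then runs:
#   mount.quobyte myvolume /mnt/quobyte/myvolume -c /etc/quobyte/client.cfg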
def umount_volume(mnt_base):
"""Wraps execute calls for unmouting a Quobyte volume"""
try:
utils.execute('umount.quobyte', mnt_base)
except processutils.ProcessExecutionError as exc:
if 'Device or resource busy' in exc.message:
LOG.error(_LE("The Quobyte volume at %s is still in use."),
mnt_base)
else:
LOG.exception(_LE("Couldn't unmount the Quobyte Volume at %s"),
mnt_base)
def validate_volume(mnt_base):
"""Wraps execute calls for checking validity of a Quobyte volume"""
command = ['getfattr', "-n", "quobyte.info", mnt_base]
try:
utils.execute(*command)
except processutils.ProcessExecutionError as exc:
msg = (_("The mount %(mount_path)s is not a valid"
" Quobyte volume. Error: %(exc)s")
% {'mount_path': mnt_base, 'exc': exc})
raise nova_exception.NovaException(msg)
if not os.access(mnt_base, os.W_OK | os.X_OK):
msg = (_LE("Volume is not writable. Please broaden the file"
" permissions. Mount: %s") % mnt_base)
raise nova_exception.NovaException(msg)
|
{
"content_hash": "abf1eb624b933dc9627f6de055444163",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 75,
"avg_line_length": 33.01538461538462,
"alnum_prop": 0.6374650512581547,
"repo_name": "sajeeshcs/nested_quota_final",
"id": "a4ed046cc02fd5e482475a37fcd922f71062fd17",
"size": "2778",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/virt/libvirt/quobyte.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "5941"
},
{
"name": "Python",
"bytes": "15636233"
},
{
"name": "Shell",
"bytes": "17729"
},
{
"name": "XML",
"bytes": "45372"
}
],
"symlink_target": ""
}
|
"""
util tests
"""
import os
import stat
import sys
import time
import shutil
import tempfile
import pytest
from mock import Mock, patch
from pip.exceptions import HashMismatch, HashMissing, InstallationError
from pip.utils import (egg_link_path, get_installed_distributions,
untar_file, unzip_file, rmtree, normalize_path)
from pip.utils.hashes import Hashes, MissingHashes
from pip.operations.freeze import freeze_excludes
from pip._vendor.six import BytesIO
class Tests_EgglinkPath:
"util.egg_link_path() tests"
def setup(self):
project = 'foo'
self.mock_dist = Mock(project_name=project)
self.site_packages = 'SITE_PACKAGES'
self.user_site = 'USER_SITE'
self.user_site_egglink = os.path.join(
self.user_site,
'%s.egg-link' % project
)
self.site_packages_egglink = os.path.join(
self.site_packages,
'%s.egg-link' % project,
)
# patches
from pip import utils
self.old_site_packages = utils.site_packages
self.mock_site_packages = utils.site_packages = 'SITE_PACKAGES'
self.old_running_under_virtualenv = utils.running_under_virtualenv
self.mock_running_under_virtualenv = utils.running_under_virtualenv = \
Mock()
self.old_virtualenv_no_global = utils.virtualenv_no_global
self.mock_virtualenv_no_global = utils.virtualenv_no_global = Mock()
self.old_user_site = utils.user_site
self.mock_user_site = utils.user_site = self.user_site
from os import path
self.old_isfile = path.isfile
self.mock_isfile = path.isfile = Mock()
def teardown(self):
from pip import utils
utils.site_packages = self.old_site_packages
utils.running_under_virtualenv = self.old_running_under_virtualenv
utils.virtualenv_no_global = self.old_virtualenv_no_global
utils.user_site = self.old_user_site
from os import path
path.isfile = self.old_isfile
def eggLinkInUserSite(self, egglink):
return egglink == self.user_site_egglink
def eggLinkInSitePackages(self, egglink):
return egglink == self.site_packages_egglink
# ####################### #
# # egglink in usersite # #
# ####################### #
def test_egglink_in_usersite_notvenv(self):
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = False
self.mock_isfile.side_effect = self.eggLinkInUserSite
assert egg_link_path(self.mock_dist) == self.user_site_egglink
def test_egglink_in_usersite_venv_noglobal(self):
self.mock_virtualenv_no_global.return_value = True
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.side_effect = self.eggLinkInUserSite
assert egg_link_path(self.mock_dist) is None
def test_egglink_in_usersite_venv_global(self):
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.side_effect = self.eggLinkInUserSite
assert egg_link_path(self.mock_dist) == self.user_site_egglink
# ####################### #
# # egglink in sitepkgs # #
# ####################### #
def test_egglink_in_sitepkgs_notvenv(self):
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = False
self.mock_isfile.side_effect = self.eggLinkInSitePackages
assert egg_link_path(self.mock_dist) == self.site_packages_egglink
def test_egglink_in_sitepkgs_venv_noglobal(self):
self.mock_virtualenv_no_global.return_value = True
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.side_effect = self.eggLinkInSitePackages
assert egg_link_path(self.mock_dist) == self.site_packages_egglink
def test_egglink_in_sitepkgs_venv_global(self):
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.side_effect = self.eggLinkInSitePackages
assert egg_link_path(self.mock_dist) == self.site_packages_egglink
# ################################## #
# # egglink in usersite & sitepkgs # #
# ################################## #
def test_egglink_in_both_notvenv(self):
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = False
self.mock_isfile.return_value = True
assert egg_link_path(self.mock_dist) == self.user_site_egglink
def test_egglink_in_both_venv_noglobal(self):
self.mock_virtualenv_no_global.return_value = True
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.return_value = True
assert egg_link_path(self.mock_dist) == self.site_packages_egglink
def test_egglink_in_both_venv_global(self):
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.return_value = True
assert egg_link_path(self.mock_dist) == self.site_packages_egglink
# ############## #
# # no egglink # #
# ############## #
def test_noegglink_in_sitepkgs_notvenv(self):
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = False
self.mock_isfile.return_value = False
assert egg_link_path(self.mock_dist) is None
def test_noegglink_in_sitepkgs_venv_noglobal(self):
self.mock_virtualenv_no_global.return_value = True
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.return_value = False
assert egg_link_path(self.mock_dist) is None
def test_noegglink_in_sitepkgs_venv_global(self):
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.return_value = False
assert egg_link_path(self.mock_dist) is None
@patch('pip.utils.dist_in_usersite')
@patch('pip.utils.dist_is_local')
@patch('pip.utils.dist_is_editable')
class Tests_get_installed_distributions:
"""test util.get_installed_distributions"""
workingset = [
Mock(test_name="global"),
Mock(test_name="editable"),
Mock(test_name="normal"),
Mock(test_name="user"),
]
workingset_stdlib = [
Mock(test_name='normal', key='argparse'),
Mock(test_name='normal', key='wsgiref')
]
workingset_freeze = [
Mock(test_name='normal', key='pip'),
Mock(test_name='normal', key='setuptools'),
Mock(test_name='normal', key='distribute')
]
def dist_is_editable(self, dist):
return dist.test_name == "editable"
def dist_is_local(self, dist):
return dist.test_name != "global" and dist.test_name != 'user'
def dist_in_usersite(self, dist):
return dist.test_name == "user"
@patch('pip._vendor.pkg_resources.working_set', workingset)
def test_editables_only(self, mock_dist_is_editable,
mock_dist_is_local,
mock_dist_in_usersite):
mock_dist_is_editable.side_effect = self.dist_is_editable
mock_dist_is_local.side_effect = self.dist_is_local
mock_dist_in_usersite.side_effect = self.dist_in_usersite
dists = get_installed_distributions(editables_only=True)
assert len(dists) == 1, dists
assert dists[0].test_name == "editable"
@patch('pip._vendor.pkg_resources.working_set', workingset)
def test_exclude_editables(self, mock_dist_is_editable,
mock_dist_is_local,
mock_dist_in_usersite):
mock_dist_is_editable.side_effect = self.dist_is_editable
mock_dist_is_local.side_effect = self.dist_is_local
mock_dist_in_usersite.side_effect = self.dist_in_usersite
dists = get_installed_distributions(include_editables=False)
assert len(dists) == 1
assert dists[0].test_name == "normal"
@patch('pip._vendor.pkg_resources.working_set', workingset)
def test_include_globals(self, mock_dist_is_editable,
mock_dist_is_local,
mock_dist_in_usersite):
mock_dist_is_editable.side_effect = self.dist_is_editable
mock_dist_is_local.side_effect = self.dist_is_local
mock_dist_in_usersite.side_effect = self.dist_in_usersite
dists = get_installed_distributions(local_only=False)
assert len(dists) == 4
@patch('pip._vendor.pkg_resources.working_set', workingset)
def test_user_only(self, mock_dist_is_editable,
mock_dist_is_local,
mock_dist_in_usersite):
mock_dist_is_editable.side_effect = self.dist_is_editable
mock_dist_is_local.side_effect = self.dist_is_local
mock_dist_in_usersite.side_effect = self.dist_in_usersite
dists = get_installed_distributions(local_only=False,
user_only=True)
assert len(dists) == 1
assert dists[0].test_name == "user"
@pytest.mark.skipif("sys.version_info >= (2,7)")
@patch('pip._vendor.pkg_resources.working_set', workingset_stdlib)
def test_py26_excludes(self, mock_dist_is_editable,
mock_dist_is_local,
mock_dist_in_usersite):
mock_dist_is_editable.side_effect = self.dist_is_editable
mock_dist_is_local.side_effect = self.dist_is_local
mock_dist_in_usersite.side_effect = self.dist_in_usersite
dists = get_installed_distributions()
assert len(dists) == 1
assert dists[0].key == 'argparse'
@pytest.mark.skipif("sys.version_info < (2,7)")
@patch('pip._vendor.pkg_resources.working_set', workingset_stdlib)
def test_gte_py27_excludes(self, mock_dist_is_editable,
mock_dist_is_local,
mock_dist_in_usersite):
mock_dist_is_editable.side_effect = self.dist_is_editable
mock_dist_is_local.side_effect = self.dist_is_local
mock_dist_in_usersite.side_effect = self.dist_in_usersite
dists = get_installed_distributions()
assert len(dists) == 0
@patch('pip._vendor.pkg_resources.working_set', workingset_freeze)
def test_freeze_excludes(self, mock_dist_is_editable,
mock_dist_is_local,
mock_dist_in_usersite):
mock_dist_is_editable.side_effect = self.dist_is_editable
mock_dist_is_local.side_effect = self.dist_is_local
mock_dist_in_usersite.side_effect = self.dist_in_usersite
dists = get_installed_distributions(skip=freeze_excludes)
assert len(dists) == 0
class TestUnpackArchives(object):
"""
    test_tar.tgz/test_tar.zip have the following content, engineered to
    confirm 3 things:
1) confirm that reg files, dirs, and symlinks get unpacked
2) permissions are not preserved (and go by the 022 umask)
3) reg files with *any* execute perms, get chmod +x
file.txt 600 regular file
symlink.txt 777 symlink to file.txt
script_owner.sh 700 script where owner can execute
script_group.sh 610 script where group can execute
script_world.sh 601 script where world can execute
dir 744 directory
dir/dirfile 622 regular file
"""
def setup(self):
self.tempdir = tempfile.mkdtemp()
self.old_mask = os.umask(0o022)
self.symlink_expected_mode = None
def teardown(self):
os.umask(self.old_mask)
shutil.rmtree(self.tempdir, ignore_errors=True)
def mode(self, path):
return stat.S_IMODE(os.stat(path).st_mode)
def confirm_files(self):
        # expectations based on 022 umask set above and the unpack logic that
# sets execute permissions, not preservation
for fname, expected_mode, test in [
('file.txt', 0o644, os.path.isfile),
('symlink.txt', 0o644, os.path.isfile),
('script_owner.sh', 0o755, os.path.isfile),
('script_group.sh', 0o755, os.path.isfile),
('script_world.sh', 0o755, os.path.isfile),
('dir', 0o755, os.path.isdir),
(os.path.join('dir', 'dirfile'), 0o644, os.path.isfile)]:
path = os.path.join(self.tempdir, fname)
if path.endswith('symlink.txt') and sys.platform == 'win32':
# no symlinks created on windows
continue
assert test(path), path
if sys.platform == 'win32':
# the permissions tests below don't apply in windows
# due to os.chmod being a noop
continue
mode = self.mode(path)
assert mode == expected_mode, (
"mode: %s, expected mode: %s" % (mode, expected_mode)
)
def test_unpack_tgz(self, data):
"""
Test unpacking a *.tgz, and setting execute permissions
"""
test_file = data.packages.join("test_tar.tgz")
untar_file(test_file, self.tempdir)
self.confirm_files()
# Check the timestamp of an extracted file
file_txt_path = os.path.join(self.tempdir, 'file.txt')
mtime = time.gmtime(os.stat(file_txt_path).st_mtime)
assert mtime[0:6] == (2013, 8, 16, 5, 13, 37), mtime
def test_unpack_zip(self, data):
"""
Test unpacking a *.zip, and setting execute permissions
"""
test_file = data.packages.join("test_zip.zip")
unzip_file(test_file, self.tempdir)
self.confirm_files()
class Failer:
def __init__(self, duration=1):
self.succeed_after = time.time() + duration
def call(self, *args, **kw):
"""Fail with OSError self.max_fails times"""
if time.time() < self.succeed_after:
raise OSError("Failed")
def test_rmtree_retries(tmpdir, monkeypatch):
"""
Test pip.utils.rmtree will retry failures
"""
monkeypatch.setattr(shutil, 'rmtree', Failer(duration=1).call)
rmtree('foo')
def test_rmtree_retries_for_3sec(tmpdir, monkeypatch):
"""
Test pip.utils.rmtree will retry failures for no more than 3 sec
"""
monkeypatch.setattr(shutil, 'rmtree', Failer(duration=5).call)
with pytest.raises(OSError):
rmtree('foo')
class Test_normalize_path(object):
# Technically, symlinks are possible on Windows, but you need a special
# permission bit to create them, and Python 2 doesn't support it anyway, so
# it's easiest just to skip this test on Windows altogether.
@pytest.mark.skipif("sys.platform == 'win32'")
def test_resolve_symlinks(self, tmpdir):
print(type(tmpdir))
print(dir(tmpdir))
orig_working_dir = os.getcwd()
os.chdir(tmpdir)
try:
d = os.path.join('foo', 'bar')
f = os.path.join(d, 'file1')
os.makedirs(d)
with open(f, 'w'): # Create the file
pass
os.symlink(d, 'dir_link')
os.symlink(f, 'file_link')
assert normalize_path(
'dir_link/file1', resolve_symlinks=True
) == os.path.join(tmpdir, f)
assert normalize_path(
'dir_link/file1', resolve_symlinks=False
) == os.path.join(tmpdir, 'dir_link', 'file1')
assert normalize_path(
'file_link', resolve_symlinks=True
) == os.path.join(tmpdir, f)
assert normalize_path(
'file_link', resolve_symlinks=False
) == os.path.join(tmpdir, 'file_link')
finally:
os.chdir(orig_working_dir)
class TestHashes(object):
"""Tests for pip.utils.hashes"""
def test_success(self, tmpdir):
"""Make sure no error is raised when at least one hash matches.
Test check_against_path because it calls everything else.
"""
file = tmpdir / 'to_hash'
file.write('hello')
hashes = Hashes({
'sha256': ['2cf24dba5fb0a30e26e83b2ac5b9e29e'
'1b161e5c1fa7425e73043362938b9824'],
'sha224': ['wrongwrong'],
'md5': ['5d41402abc4b2a76b9719d911017c592']})
hashes.check_against_path(file)
def test_failure(self):
"""Hashes should raise HashMismatch when no hashes match."""
hashes = Hashes({'sha256': ['wrongwrong']})
with pytest.raises(HashMismatch):
hashes.check_against_file(BytesIO(b'hello'))
def test_missing_hashes(self):
"""MissingHashes should raise HashMissing when any check is done."""
with pytest.raises(HashMissing):
MissingHashes().check_against_file(BytesIO(b'hello'))
def test_unknown_hash(self):
"""Hashes should raise InstallationError when it encounters an unknown
hash."""
hashes = Hashes({'badbad': ['dummy']})
with pytest.raises(InstallationError):
hashes.check_against_file(BytesIO(b'hello'))
def test_non_zero(self):
"""Test that truthiness tests tell whether any known-good hashes
exist."""
assert Hashes({'sha256': 'dummy'})
assert not Hashes()
assert not Hashes({})
|
{
"content_hash": "94d71e3ad65fcd8f0b831d4d06cd27db",
"timestamp": "",
"source": "github",
"line_count": 449,
"max_line_length": 79,
"avg_line_length": 39.29398663697105,
"alnum_prop": 0.6140679022841921,
"repo_name": "willingc/pip",
"id": "8c3ec2477028a8be61f462115a87a8e2c18cd2fa",
"size": "17643",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "tests/unit/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "860358"
},
{
"name": "Shell",
"bytes": "2201"
}
],
"symlink_target": ""
}
|
from django.conf import settings
class UrlUtils():
@staticmethod
def addPathToOwnURL(path):
path = str(path)
return "https://"+settings.ROOT_USERNAME+":"+settings.ROOT_PASSWORD+"@"+settings.VTAM_IP+":"+settings.VTAM_PORT+path
@staticmethod
def getOwnCallbackURL():
return UrlUtils.addPathToOwnURL("/xmlrpc/agent")
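# With the settings referenced above, getOwnCallbackURL() resolves to a URL of
# the form https://<ROOT_USERNAME>:<ROOT_PASSWORD>@<VTAM_IP>:<VTAM_PORT>/xmlrpc/agent,
# where the placeholders stand for the corresponding Django settings values.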
|
{
"content_hash": "655ad98a2cc42bc6e468b358bff0c675",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 118,
"avg_line_length": 25.384615384615383,
"alnum_prop": 0.7363636363636363,
"repo_name": "ict-felix/stack",
"id": "3a7e8543400151fb89d002d2f9076fe66486d47c",
"size": "464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vt_manager_kvm/src/python/vt_manager_kvm/utils/UrlUtils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "337811"
},
{
"name": "Elixir",
"bytes": "17243"
},
{
"name": "Emacs Lisp",
"bytes": "1098"
},
{
"name": "Groff",
"bytes": "1735"
},
{
"name": "HTML",
"bytes": "660363"
},
{
"name": "Java",
"bytes": "18362"
},
{
"name": "JavaScript",
"bytes": "838960"
},
{
"name": "Makefile",
"bytes": "11581"
},
{
"name": "Perl",
"bytes": "5416"
},
{
"name": "Python",
"bytes": "8073455"
},
{
"name": "Shell",
"bytes": "259720"
}
],
"symlink_target": ""
}
|
import os
import sys
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from h2o.automl import H2OAutoML
from h2o.exceptions import H2OValueError
from tests import pyunit_utils as pu
def import_dataset(seed=0):
df = h2o.import_file(path=pu.locate("smalldata/prostate/prostate.csv"))
target = "CAPSULE"
df[target] = df[target].asfactor()
fr = df.split_frame(ratios=[.8,.1], seed=seed)
return pu.ns(train=fr[0], valid=fr[1], test=fr[2], target=target, target_idx=1)
def test_params_can_be_set_as_attributes():
aml = H2OAutoML()
aml.max_models = 4
aml.seed = 42
aml.nfolds = 0
ds = import_dataset()
aml.train(y=ds.target, training_frame=ds.train, validation_frame=ds.valid)
assert aml.leaderboard.nrows == aml.max_models == 4
assert aml.project_name is not None
def test_params_are_validated_in_setter():
aml = H2OAutoML()
try:
aml.nfolds = 1
assert False, "should have raised"
except AssertionError as e:
assert aml.nfolds == 5, "nfolds should have remained to default value"
assert "nfolds set to 1; use nfolds >=2 if you want cross-validated metrics and Stacked Ensembles or use nfolds = 0 to disable." == str(e)
aml.nfolds = 3
assert aml.nfolds == 3
def test_non_train_params_are_frozen_after_first_train():
aml = H2OAutoML(max_models=2, nfolds=3, seed=42, keep_cross_validation_predictions=True)
ds = import_dataset()
aml.train(y=ds.target, training_frame=ds.train, validation_frame=ds.valid)
assert aml.leaderboard.nrows == aml.max_models+1 == 3 # only 1 SE as we have only one type of models
assert aml.leaderboard.columns[1] == 'auc'
try:
aml.nfolds = 0
assert False, "should have raised"
except H2OValueError as e:
assert "Param ``nfolds`` can not be modified after the first call to ``train``." == str(e)
assert aml.nfolds == 3
try:
aml.seed = 24
assert False, "should have raised"
except H2OValueError as e:
assert "Param ``seed`` can not be modified after the first call to ``train``." == str(e)
assert aml.seed == 42
assert aml.sort_metric == 'AUTO'
aml.sort_metric = 'logloss'
aml.train(y=ds.target, training_frame=ds.train, validation_frame=ds.valid)
print(aml.leaderboard)
assert aml.leaderboard.nrows == (aml.max_models+1)*2 == 6
assert aml.leaderboard.columns[1] == 'logloss'
pu.run_tests([
test_params_can_be_set_as_attributes,
test_params_are_validated_in_setter,
test_non_train_params_are_frozen_after_first_train,
])
|
{
"content_hash": "3c5533552c0bea79750601f4ecdc023c",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 146,
"avg_line_length": 34.103896103896105,
"alnum_prop": 0.6565118050266565,
"repo_name": "michalkurka/h2o-3",
"id": "4a12626a242a94a98adbcccc61c17949a824194b",
"size": "2626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_algos/automl/pyunit_automl_params_attributes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12629"
},
{
"name": "CSS",
"bytes": "231770"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "Dockerfile",
"bytes": "10302"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "166480"
},
{
"name": "HCL",
"bytes": "15007"
},
{
"name": "HTML",
"bytes": "251906"
},
{
"name": "HiveQL",
"bytes": "3965"
},
{
"name": "Java",
"bytes": "11932863"
},
{
"name": "JavaScript",
"bytes": "89484"
},
{
"name": "Jupyter Notebook",
"bytes": "13867219"
},
{
"name": "Makefile",
"bytes": "50635"
},
{
"name": "Python",
"bytes": "6801044"
},
{
"name": "R",
"bytes": "3223113"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "33647"
},
{
"name": "Shell",
"bytes": "186559"
},
{
"name": "TeX",
"bytes": "634412"
}
],
"symlink_target": ""
}
|
"""Implicit plotting module for SymPy
The module implements a data series called ImplicitSeries which is used by
``Plot`` class to plot implicit plots for different backends. The module,
by default, implements plotting using interval arithmetic. It switches to a
fall back algorithm if the expression cannot be plotted using interval
arithmetic. It is also possible to force the fall back algorithm for all
plots.
Boolean combinations of expressions cannot be plotted by the fall back
algorithm.
See Also
========
sympy.plotting.plot
References
==========
- Jeffrey Allen Tupper. Reliable Two-Dimensional Graphing Methods for
Mathematical Formulae with Two Free Variables.
- Jeffrey Allen Tupper. Graphing Equations with Generalized Interval
Arithmetic. Master's thesis. University of Toronto, 1996
"""
from __future__ import print_function, division
from .plot import BaseSeries, Plot
from .experimental_lambdify import experimental_lambdify, vectorized_lambdify
from .intervalmath import interval
from sympy.core.relational import (Equality, GreaterThan, LessThan,
Relational, StrictLessThan, StrictGreaterThan)
from sympy import Eq, Tuple, sympify, Symbol, Dummy
from sympy.external import import_module
from sympy.logic.boolalg import BooleanFunction
from sympy.polys.polyutils import _sort_gens
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import flatten
import warnings
class ImplicitSeries(BaseSeries):
""" Representation for Implicit plot """
is_implicit = True
def __init__(self, expr, var_start_end_x, var_start_end_y,
has_equality, use_interval_math, depth, nb_of_points):
super(ImplicitSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_raster
self.has_equality = has_equality # If the expression has equality, i.e.
#Eq, Greaterthan, LessThan.
self.nb_of_points = nb_of_points
self.use_interval_math = use_interval_math
self.depth = 4 + depth
def __str__(self):
return ('Implicit equation: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_raster(self):
func = experimental_lambdify((self.var_x, self.var_y), self.expr,
use_interval=True)
xinterval = interval(self.start_x, self.end_x)
yinterval = interval(self.start_y, self.end_y)
try:
temp = func(xinterval, yinterval)
except AttributeError:
if self.use_interval_math:
warnings.warn("Adaptive meshing could not be applied to the"
" expression. Using uniform meshing.")
self.use_interval_math = False
if self.use_interval_math:
return self._get_raster_interval(func)
else:
return self._get_meshes_grid()
def _get_raster_interval(self, func):
""" Uses interval math to adaptively mesh and obtain the plot"""
k = self.depth
interval_list = []
#Create initial 32 divisions
np = import_module('numpy')
xsample = np.linspace(self.start_x, self.end_x, 33)
ysample = np.linspace(self.start_y, self.end_y, 33)
#Add a small jitter so that there are no false positives for equality.
# Ex: y==x becomes True for x interval(1, 2) and y interval(1, 2)
#which will draw a rectangle.
jitterx = (np.random.rand(
len(xsample)) * 2 - 1) * (self.end_x - self.start_x) / 2**20
jittery = (np.random.rand(
len(ysample)) * 2 - 1) * (self.end_y - self.start_y) / 2**20
xsample += jitterx
ysample += jittery
xinter = [interval(x1, x2) for x1, x2 in zip(xsample[:-1],
xsample[1:])]
yinter = [interval(y1, y2) for y1, y2 in zip(ysample[:-1],
ysample[1:])]
interval_list = [[x, y] for x in xinter for y in yinter]
plot_list = []
#recursive call refinepixels which subdivides the intervals which are
#neither True nor False according to the expression.
def refine_pixels(interval_list):
""" Evaluates the intervals and subdivides the interval if the
expression is partially satisfied."""
temp_interval_list = []
plot_list = []
for intervals in interval_list:
                #Unpack the x and y intervals describing this box
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
                #Classify the box: discard it if the expression is known to be
                #False or invalid anywhere in it, keep it if the expression
                #holds throughout, and subdivide it below if it is undecided.
if func_eval[1] is False or func_eval[0] is False:
pass
elif func_eval == (True, True):
plot_list.append([intervalx, intervaly])
elif func_eval[1] is None or func_eval[0] is None:
#Subdivide
avgx = intervalx.mid
avgy = intervaly.mid
a = interval(intervalx.start, avgx)
b = interval(avgx, intervalx.end)
c = interval(intervaly.start, avgy)
d = interval(avgy, intervaly.end)
temp_interval_list.append([a, c])
temp_interval_list.append([a, d])
temp_interval_list.append([b, c])
temp_interval_list.append([b, d])
return temp_interval_list, plot_list
while k >= 0 and len(interval_list):
interval_list, plot_list_temp = refine_pixels(interval_list)
plot_list.extend(plot_list_temp)
k = k - 1
#Check whether the expression represents an equality
#If it represents an equality, then none of the intervals
#would have satisfied the expression due to floating point
#differences. Add all the undecided values to the plot.
if self.has_equality:
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
if func_eval[1] and func_eval[0] is not False:
plot_list.append([intervalx, intervaly])
return plot_list, 'fill'
def _get_meshes_grid(self):
"""Generates the mesh for generating a contour.
In the case of equality, ``contour`` function of matplotlib can
be used. In other cases, matplotlib's ``contourf`` is used.
"""
equal = False
if isinstance(self.expr, Equality):
expr = self.expr.lhs - self.expr.rhs
equal = True
elif isinstance(self.expr, (GreaterThan, StrictGreaterThan)):
expr = self.expr.lhs - self.expr.rhs
elif isinstance(self.expr, (LessThan, StrictLessThan)):
expr = self.expr.rhs - self.expr.lhs
else:
raise NotImplementedError("The expression is not supported for "
"plotting in uniform meshed plot.")
np = import_module('numpy')
xarray = np.linspace(self.start_x, self.end_x, self.nb_of_points)
yarray = np.linspace(self.start_y, self.end_y, self.nb_of_points)
x_grid, y_grid = np.meshgrid(xarray, yarray)
func = vectorized_lambdify((self.var_x, self.var_y), expr)
z_grid = func(x_grid, y_grid)
z_grid[np.ma.where(z_grid < 0)] = -1
z_grid[np.ma.where(z_grid > 0)] = 1
if equal:
return xarray, yarray, z_grid, 'contour'
else:
return xarray, yarray, z_grid, 'contourf'
@doctest_depends_on(modules=('matplotlib',))
def plot_implicit(expr, x_var=None, y_var=None, **kwargs):
"""A plot function to plot implicit equations / inequalities.
Arguments
=========
- ``expr`` : The equation / inequality that is to be plotted.
- ``x_var`` (optional) : symbol to plot on x-axis or tuple giving symbol
and range as ``(symbol, xmin, xmax)``
- ``y_var`` (optional) : symbol to plot on y-axis or tuple giving symbol
and range as ``(symbol, ymin, ymax)``
If neither ``x_var`` nor ``y_var`` are given then the free symbols in the
expression will be assigned in the order they are sorted.
The following keyword arguments can also be used:
- ``adaptive``. Boolean. The default value is set to True. It has to be
set to False if you want to use a mesh grid.
- ``depth`` integer. The depth of recursion for adaptive mesh grid.
Default value is 0. Takes value in the range (0, 4).
- ``points`` integer. The number of points if adaptive mesh grid is not
used. Default value is 200.
- ``title`` string .The title for the plot.
- ``xlabel`` string. The label for the x-axis
- ``ylabel`` string. The label for the y-axis
plot_implicit, by default, uses interval arithmetic to plot functions. If
the expression cannot be plotted using interval arithmetic, it defaults to
    generating a contour using a mesh grid with a fixed number of points. By
setting adaptive to False, you can force plot_implicit to use the mesh
grid. The mesh grid method can be effective when adaptive plotting using
    interval arithmetic fails to plot with small line width.
Examples:
=========
Plot expressions:
>>> from sympy import plot_implicit, cos, sin, symbols, Eq, And
>>> x, y = symbols('x y')
Without any ranges for the symbols in the expression
>>> p1 = plot_implicit(Eq(x**2 + y**2, 5))
With the range for the symbols
>>> p2 = plot_implicit(Eq(x**2 + y**2, 3),
... (x, -3, 3), (y, -3, 3))
With depth of recursion as argument.
>>> p3 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -4, 4), (y, -4, 4), depth = 2)
Using mesh grid and not using adaptive meshing.
>>> p4 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2), adaptive=False)
Using mesh grid with number of points as input.
>>> p5 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2),
... adaptive=False, points=400)
Plotting regions.
>>> p6 = plot_implicit(y > x**2)
Plotting Using boolean conjunctions.
>>> p7 = plot_implicit(And(y > x, y > -x))
"""
has_equality = False # Represents whether the expression contains an Equality,
#GreaterThan or LessThan
def arg_expand(bool_expr):
"""
Recursively expands the arguments of an Boolean Function
"""
for arg in bool_expr.args:
if isinstance(arg, BooleanFunction):
arg_expand(arg)
elif isinstance(arg, Relational):
arg_list.append(arg)
arg_list = []
if isinstance(expr, BooleanFunction):
arg_expand(expr)
#Check whether there is an equality in the expression provided.
if any(isinstance(e, (Equality, GreaterThan, LessThan))
for e in arg_list):
has_equality = True
elif not isinstance(expr, Relational):
expr = Eq(expr, 0)
has_equality = True
elif isinstance(expr, (Equality, GreaterThan, LessThan)):
has_equality = True
xyvar = [i for i in (x_var, y_var) if i is not None]
free_symbols = expr.free_symbols
range_symbols = Tuple(*flatten(xyvar)).free_symbols
undeclared = free_symbols - range_symbols
if len(free_symbols & range_symbols) > 2:
raise NotImplementedError("Implicit plotting is not implemented for "
"more than 2 variables")
#Create default ranges if the range is not provided.
default_range = Tuple(-5, 5)
def _range_tuple(s):
if isinstance(s, Symbol):
return Tuple(s) + default_range
if len(s) == 3:
return Tuple(*s)
raise ValueError('symbol or `(symbol, min, max)` expected but got %s' % s)
if len(xyvar) == 0:
xyvar = list(_sort_gens(free_symbols))
var_start_end_x = _range_tuple(xyvar[0])
x = var_start_end_x[0]
if len(xyvar) != 2:
if x in undeclared or not undeclared:
xyvar.append(Dummy('f(%s)' % x.name))
else:
xyvar.append(undeclared.pop())
var_start_end_y = _range_tuple(xyvar[1])
use_interval = kwargs.pop('adaptive', True)
nb_of_points = kwargs.pop('points', 300)
depth = kwargs.pop('depth', 0)
#Check whether the depth is greater than 4 or less than 0.
if depth > 4:
depth = 4
elif depth < 0:
depth = 0
series_argument = ImplicitSeries(expr, var_start_end_x, var_start_end_y,
has_equality, use_interval, depth,
nb_of_points)
show = kwargs.pop('show', True)
#set the x and y limits
kwargs['xlim'] = tuple(float(x) for x in var_start_end_x[1:])
kwargs['ylim'] = tuple(float(y) for y in var_start_end_y[1:])
# set the x and y labels
kwargs.setdefault('xlabel', var_start_end_x[0].name)
kwargs.setdefault('ylabel', var_start_end_y[0].name)
p = Plot(series_argument, **kwargs)
if show:
p.show()
return p
|
{
"content_hash": "a9bfcfbaee2923c43a6fcc6b88a57e2d",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 83,
"avg_line_length": 38.54293628808864,
"alnum_prop": 0.5929279862009487,
"repo_name": "dennisss/sympy",
"id": "660085ef7dbf3d1761457142711f89485766d652",
"size": "13914",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sympy/plotting/plot_implicit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import json
from datetime import datetime, timedelta
# django
from django.views.generic import View
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
# local django
from user.decorators import is_health_professional
from prescription.models import Prescription
from dashboardHealthProfessional import constants
class ChartData(View):
"""
Responsible for obtaining data for the chart.
"""
@method_decorator(login_required)
@method_decorator(is_health_professional)
def dispatch(self, *args, **kwargs):
return super(ChartData, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
if request.is_ajax():
list_date = []
health_professional = request.user.healthprofessional
for count in range(7, -1, -1):
chart_item = {}
date_ago = datetime.today() - timedelta(days=count)
# Set initial date first hour
actual_date = datetime(date_ago.year, date_ago.month, date_ago.day)
prescription_count = Prescription.objects.filter(date__year=actual_date.year,
date__month=actual_date.month,
date__day=actual_date.day,
health_professional=health_professional).count()
# Checks whether the date in question is the current date.
if count:
chart_item['name'] = actual_date.strftime('%A')
else:
chart_item['name'] = constants.TODAY
chart_item['quantity'] = prescription_count
list_date.append(chart_item)
result = json.dumps(list_date)
mimetype = 'application/json'
return HttpResponse(result, mimetype)
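# The JSON payload handed back to the AJAX caller is a list of eight entries,
# one per day, shaped roughly like the following (day names depend on the
# current date, and the last entry is labelled with constants.TODAY):
#   [{"name": "Monday", "quantity": 3}, ..., {"name": "Today", "quantity": 1}]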
|
{
"content_hash": "8e91ca94dfdd7f5b58280f94f76a9f0a",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 113,
"avg_line_length": 38.16981132075472,
"alnum_prop": 0.5803262481463174,
"repo_name": "fga-gpp-mds/2017.2-Receituario-Medico",
"id": "2d7745f777bdb8e386739f547e60451d6128cce5",
"size": "2042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "medical_prescription/dashboardHealthProfessional/views/chart_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2123328"
},
{
"name": "CoffeeScript",
"bytes": "102158"
},
{
"name": "HTML",
"bytes": "2703462"
},
{
"name": "JavaScript",
"bytes": "7544427"
},
{
"name": "Makefile",
"bytes": "1369"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PowerShell",
"bytes": "471"
},
{
"name": "Python",
"bytes": "627321"
},
{
"name": "Ruby",
"bytes": "1030"
},
{
"name": "Shell",
"bytes": "3774"
}
],
"symlink_target": ""
}
|
from aux import version
from datetime import datetime
import ConfigParser, os
import json
import logging
class LogController(object):
summary = dict()
config = None
def __init__(self, config):
self.config = config
self.loggers = dict()
self.log_directory = config.options.log_directory
self.log_console_level = config.options.log_level
self.log_file_level = config.options.log_level
if config.options.log_console_level is not None:
self.log_console_level = config.options.log_console_level
if config.options.log_file_level is not None:
self.log_file_level = config.options.log_file_level
self.log_verbose = config.options.verbose
self.log_result_server = config.options.log_server
logdir = os.path.join(self.log_directory,
datetime.strftime(datetime.now(), "%Y%m%d-%H%M%S%f"))
if not os.path.exists(logdir):
os.makedirs(logdir)
logging.basicConfig(level=self.log_file_level,
format='%(asctime)s:%(name)s:%(levelname)s:%(message)s',
filename='%s/all.log' % logdir,
filemode='w')
for loggername in ['runtime', 'protocol', 'script']:
self.loggers[loggername] = self.__new_logger(loggername, logdir)
if self.log_verbose:
self.pprint_header_on_init()
self.summary['logs'] = logdir
self.summary['success'] = True
self.runtime.debug('Config options :\n%s' % self.config.options)
self.runtime.debug('Config arguments :\n %s' % self.config.args)
def __getattr__(self, attr):
if self.loggers.get(attr, None) is not None:
return self.loggers.get(attr)
else:
emsg = "%s object has no attribute '%s'" % (self.__class__.__name__, attr)
raise AttributeError(emsg)
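    # Because of __getattr__ above, the loggers created in __init__ are reachable
    # as attributes, e.g. controller.runtime.info('...') or
    # controller.protocol.debug('...') (the variable name is illustrative).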
def __new_logger(self, loggername, logdir):
new_logger = logging.getLogger(loggername)
fh = logging.FileHandler(filename=os.path.join(logdir,
'%s.log' % (loggername)))
fh.setLevel(self.log_file_level)
ch = logging.StreamHandler()
ch.setLevel(self.log_console_level)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s:%(message)s',
'%H:%M:%S')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
new_logger.addHandler(fh)
new_logger.addHandler(ch)
new_logger.debug('Initiated')
return new_logger
def post_to_server(self):
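        # NOTE: this method relies on an ``http`` helper (providing basic() and
        # post()) that is not imported anywhere in this module; the import
        # appears to be missing from the original source.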
serverendpoint = self.config.options.log_server
json_data = {'started' : str(self.summary.get('started')),
'ended' : str(self.summary.get('ended')),
'test' : self.summary.get('test'),
'success' : self.summary.get('success', False),
'testsubject' : str(self.summary.get('testsubject')),
'externalref': self.summary.get('externalref'),
'tester' : 'auxscript',
'logfolder' : self.summary.get('logfolder')}
headers = {'Host': '192.168.0.135:8080', #TODO: derive from logserverpath
'User-Agent':'Aux/0.1 (X11;Ubuntu;Linux x86_64;rv:24.0)',
'Cache-Control': 'no-cache'}
headers.update(http.basic( ('tester', 'tester')))
result = http.post(serverendpoint,
headers=headers,
body=json.dumps(json_data))
def pprint_header_on_init(self):
if self.log_verbose:
print "-"*70
self.runtime.info("Options : %s" % (self.config.options))
self.runtime.info("Args : %s" % (self.config.args))
def pprint_summary_on_exit(self):
if self.config.options.log_server is not None:
try:
                self.post_to_server()
except:
pass
if self.log_verbose:
print "-"*70
print "- AUX %s - Summary" % version()
print "-"*70
for key in self.summary.keys():
print "- %s: %s" % (key, self.summary[key])
print "-"*70
|
{
"content_hash": "7ce580573a432cad2c02da3b9fede54b",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 87,
"avg_line_length": 42.47115384615385,
"alnum_prop": 0.540638442381707,
"repo_name": "bischjer/auxiliary",
"id": "5cfcf255d865ab81150ef74cf35bc95b3f085002",
"size": "4417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aux/logger/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "342555"
},
{
"name": "Shell",
"bytes": "103"
}
],
"symlink_target": ""
}
|
from django.conf.urls import re_path as url
from corehq.apps.app_manager.views import (
download_app_strings,
download_index,
download_media_profile,
download_media_suite,
download_odk_media_profile,
download_odk_profile,
download_practice_user_restore,
download_profile,
download_suite,
download_xform,
)
urlpatterns = [
url(r'^$', download_index, {}, 'download_index'),
url(r'^suite.xml$', download_suite, {}, 'download_suite'),
url(r'^media_suite.xml$', download_media_suite, {}, 'download_media_suite'),
url(r'^profile.xml$', download_profile, {}, 'download_profile'),
url(r'^media_profile.xml$', download_media_profile, {}, 'download_media_profile'),
url(r'^profile.ccpr$', download_odk_profile, {}, 'download_odk_profile'),
url(r'^media_profile.ccpr$', download_odk_media_profile, {}, 'download_odk_media_profile'),
url(r'^practice_user_restore.xml$', download_practice_user_restore, {}, 'download_practice_user_restore'),
url(r'^(?P<lang>[\w-]+)/app_strings.txt$', download_app_strings, {}, 'download_app_strings'),
url(r'^modules-(?P<module_id>\d+)/forms-(?P<form_id>\d+).xml$', download_xform, {}, 'download_xform'),
]
|
{
"content_hash": "850805c455ae2d60ebf63052d6897ca2",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 110,
"avg_line_length": 44.81481481481482,
"alnum_prop": 0.6628099173553719,
"repo_name": "dimagi/commcare-hq",
"id": "0ca2f8eb140ec4cccd5be4d109ba6f608b7793f5",
"size": "1210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/app_manager/download_urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
import os
import re
import random
import hashlib
import hmac
from string import letters
import webapp2
import jinja2
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape = True)
def render_str(template, **params):
t = jinja_env.get_template(template)
return t.render(params)
secret = '6R%=[`OG|G)9'
def make_secure_val(val):
return '%s|%s' % (val, hmac.new(secret, val).hexdigest())
def check_secure_val(secure_val):
val = secure_val.split('|')[0]
if secure_val == make_secure_val(val):
return val
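# A quick round trip with the helpers above (the value is illustrative):
#   token = make_secure_val('42')     # '42|<hmac hexdigest>'
#   check_secure_val(token)           # returns '42'
#   check_secure_val('42|tampered')   # returns None (signature mismatch)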
##### user stuff
def make_salt(length = 5):
return ''.join(random.choice(letters) for x in xrange(length))
def make_pw_hash(name, pw, salt = None):
if not salt:
salt = make_salt()
h = hashlib.sha256(name + pw + salt).hexdigest()
return '%s,%s' % (salt, h)
def valid_pw(name, password, h):
salt = h.split(',')[0]
return h == make_pw_hash(name, password, salt)
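# Round trip of the salted hashing above (the credentials are illustrative):
#   h = make_pw_hash('bob', 'hunter2')   # 'salt,sha256(bob + hunter2 + salt)'
#   valid_pw('bob', 'hunter2', h)        # True  (salt is recovered from h)
#   valid_pw('bob', 'wrong', h)          # False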
def users_key(group = 'default'):
return db.Key.from_path('users', group)
def blog_key(name = 'default'):
"""Facilitates multiple blogs, and selects this blog as default."""
return db.Key.from_path('blogs', name)
##Models##
class User(db.Model):
"""'User' model"""
name = db.StringProperty(required = True)
pw_hash = db.StringProperty(required = True)
email = db.StringProperty()
@classmethod
def by_id(cls, uid):
return cls.get_by_id(uid, parent = users_key())
@classmethod
def by_name(cls, name):
u = cls.all().filter('name =', name).get()
return u
@classmethod
def register(cls, name, pw, email = None):
pw_hash = make_pw_hash(name, pw)
return cls(parent = users_key(),
name = name,
pw_hash = pw_hash,
email = email)
@classmethod
def login(cls, name, pw):
u = cls.by_name(name)
if u and valid_pw(name, pw, u.pw_hash):
return u
class Post(db.Model):
"""'Post' model"""
subject = db.StringProperty(required = True)
content = db.TextProperty(required = True)
created = db.DateTimeProperty(auto_now_add = True)
author = db.StringProperty(User)
last_modified = db.DateTimeProperty(auto_now = True)
created_by = db.TextProperty()
user_id = db.IntegerProperty(required = True)
likes = db.IntegerProperty(default = 0)
liked_by = db.ListProperty(str)
@classmethod
def by_post_name(cls, name):
        u = cls.all().filter('name =', name).get()
return u
@classmethod
def by_name(cls, name):
        u = db.GqlQuery('select * from User where name = :1', name)
return u
def render(self):
self._render_text = self.content.replace('\n', '<br>')
return render_str("post.html", p = self)
@property
def comments(self):
return Comment.all().filter('post_id = ', int(self.key().id()))
class Like(db.Model):
"""'Like' model"""
user_id = db.IntegerProperty(required=True)
post_id = db.IntegerProperty(required=True)
def getUserName(self):
user = User.by_id(self.user_id)
return user.name
class Comment(db.Model):
"""'Comment' model"""
user_id = db.IntegerProperty(required = True)
post_id = db.IntegerProperty(required = True)
comment = db.TextProperty(required = True)
created = db.DateTimeProperty(auto_now_add=True)
last_modified = db.DateTimeProperty(auto_now=True)
def getUserName(self):
user = User.by_id(self.user_id)
return user.name
##Handlers##
class BlogHandler(webapp2.RequestHandler):
"""BlogHandler.
Logs in user and sets a secure cookie; logs user out; initializes RequestHandler.
"""
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
params['user'] = self.user
return render_str(template, **params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def set_secure_cookie(self, name, val):
cookie_val = make_secure_val(val)
self.response.headers.add_header(
'Set-Cookie',
'%s=%s; Path=/' % (name, cookie_val))
def read_secure_cookie(self, name):
cookie_val = self.request.cookies.get(name)
return cookie_val and check_secure_val(cookie_val)
def login(self, user):
self.set_secure_cookie('user_id', str(user.key().id()))
def logout(self):
self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
def initialize(self, *a, **kw):
webapp2.RequestHandler.initialize(self, *a, **kw)
uid = self.read_secure_cookie('user_id')
self.user = uid and User.by_id(int(uid))
def render_post(response, post):
response.out.write('<b>' + post.subject + '</b><br>')
response.out.write(post.content)
class MainPage(BlogHandler):
"""MainPage handler.
Opens main page and directs user to blog page.
"""
def get(self):
self.write("Hello, and welcome to Randy's Blog. To get started, add /blog to the url.")
class BlogFront(BlogHandler):
"""BlogFront handler.
Renders blog front page, including last 10 posts in descending order by time. Also renders 'signup', 'login', and 'New Post' links.
"""
def get(self):
posts = db.GqlQuery('select * from Post order by created desc limit 10')
self.render('front.html', posts = posts)
class PostPage(BlogHandler):
"""PostPage handler.
    Ensures that the requested post actually exists, then renders permalink.html, the page containing the single post.
"""
def get(self, post_id):
key = db.Key.from_path('Post', int(post_id), parent=blog_key())
post = db.get(key)
if not post:
self.error(404)
return
self.render('permalink.html', post = post)
class NewPost(BlogHandler):
"""'NewPost' handler.
    Ensures that (a) the poster is logged in; (b) that the post belongs to the logged-in individual; (c) that the post contains a subject and some content.
"""
def get(self):
if self.user:
self.render('newpost.html')
else:
self.redirect('/login')
def post(self):
if not self.user:
return self.redirect('/blog')
subject = self.request.get('subject')
content = self.request.get('content')
if subject and content:
p = Post(parent = blog_key(), subject = subject, content = content, created_by = self.user.name, user_id = self.user.key().id())
p.put()
self.redirect('/blog/%s' % str(p.key().id()))
else:
error = "subject and content, please!"
self.render('newpost.html', subject=subject, content=content, error=error)
class LikePost(BlogHandler):
"""'Like' handler.
    Ensures that (a) the liker is logged in; (b) the liker is not the author of the post; (c) the liker only likes the post once. Increments the number of "Likes".
"""
def get(self, post_id):
if not self.user:
self.redirect("/login?error=You must be logged in to Like a post")
else:
key= db.Key.from_path('Post', int(post_id), parent=blog_key())
post = db.get(key)
if not post:
return self.redirect('/blog')
author = post.created_by
current_user = self.user.name
if author == current_user or current_user in post.liked_by:
self.redirect("/blog?error=Cannot Like your own post; you may only Like a post once.")
else:
post.likes=post.likes + 1
post.liked_by.append(current_user)
post.put()
self.redirect('/blog')
class UnlikePost(BlogHandler):
"""'Unlike' handler
    Ensures that the unliker does not own the post and that the unliker has not already unliked the post. Decrements the number of likes, and removes the unliker from the list of likers.
"""
def get(self, post_id):
key = db.Key.from_path('Post', int(post_id), parent=blog_key())
post = db.get(key)
if not post:
return self.redirect('blog')
        if not self.user:
            return self.redirect('/login')
        author = post.created_by
        current_user = self.user.name
if author and self.user.key().id() == post.user_id:
self.redirect("/blog?error=Cannot unlike your own post.")
elif post.likes <= 0:
self.redirect("/blog?error=Cannot unlike this post again.")
else:
            l = Like.all().filter('user_id =', self.user.key().id()).filter('post_id =', post.key().id()).get()
if l:
post.likes -= 1
post.liked_by.remove(current_user)
post.put()
self.redirect('/blog')
class NewComment(BlogHandler):
"""'NewComment' handler.
Ensures that the commenter is logged in and that the post exists. Renders newcomment.html page. If comment exists and is legitimate, renders updated permalink.html page.
"""
def get(self, post_id):
if not self.user:
self.redirect('/login')
return
post = Post.get_by_id(int(post_id), parent=blog_key())
if not post:
return self.redirect('blog')
subject = post.subject
content = post.content
self.render('newcomment.html', subject=subject, content=content, pkey=post.key())
def post(self, post_id):
key = db.Key.from_path('Post', int(post_id), parent=blog_key())
post = db.get(key)
if not post:
self.error(404)
return
if not self.user:
return self.redirect('/login')
comment = self.request.get('comment')
if comment:
c = Comment(comment=comment, post_id=int(post_id), user_id=self.user.key().id())
c.put()
self.redirect('/blog/%s' % str(post_id))
else:
error = "Comment Required"
self.render('permalink.html', post=post, error=error)
class EditComment(BlogHandler):
"""'EditComment' handler.
    Ensures that the comment editor is logged in, that the comment actually exists, and that the comment editor owns the comment. Renders the editcomment.html page, ensures the submitted comment is not empty, and updates the comment.
"""
def get(self, comment_id):
key = db.Key.from_path('Comment', int(comment_id))
comment = db.get(key)
if not comment:
            return self.redirect('/blog')
if not self.user:
self.redirect('/login')
elif comment.user_id == self.user.key().id():
self.render('editcomment.html', comment=comment, post=comment)
else:
return self.redirect("/blog?error=You may edit only your own comment.")
def post(self, comment_id):
if self.user:
key = db.Key.from_path('Comment', int(comment_id))
comment = db.get(key)
if not comment:
return self.redirect('/blog')
if comment.user_id != self.user.key().id():
return self.redirect ("/blog?error=You may edit only your own comment.")
content = self.request.get('comment')
if not content:
return self.redirect('/blog/editcomment/%s' % str(comment.key().id()))
comment.comment = content
comment.put()
self.redirect('/blog')
else:
return self.redirect('/login')
class DeleteComment(BlogHandler):
"""'DeleteComment' handler.
Ensures that the comment deleter is logged in, and that the comment deleter actually owns the comment. Renders deletecomment.html page. Deletes comment and returns to '/blog' page.
"""
def get(self, comment_id):
key = db.Key.from_path('Comment', int(comment_id))
comment = db.get(key)
if not comment:
            return self.redirect('/blog')
if not self.user:
return self.redirect('/login')
elif comment.user_id == self.user.key().id():
self.render('deletecomment.html', comment=comment, post=comment)
else:
return self.redirect("/blog?error=You may delete only your own comment")
def post(self, comment_id):
if self.user:
key = db.Key.from_path('Comment', int(comment_id))
            comment = db.get(key)
            if not comment:
                return self.redirect('/blog')
            if comment.user_id != self.user.key().id():
return self.redirect('/blog')
comment.delete()
self.redirect('/blog')
else:
return self.redirect('/login')
class DeletePost(BlogHandler):
"""'DeletePost' handler.
Ensures that deleter is logged in, and that deleter owns the post. Deletes post and returns to '/blog' page.
"""
def get(self, post_id):
key = db.Key.from_path('Post', int(post_id), parent=blog_key())
post = db.get(key)
if not post:
            return self.redirect('/blog')
if not self.user:
return self.redirect('/login')
elif post.user_id == self.user.key().id():
self.render('deletepost.html', post=post)
else:
return self.redirect('/blog?error=You may delete only your own post')
def post(self, post_id):
if self.user:
key = db.Key.from_path('Post', int(post_id), parent=blog_key())
post = db.get(key)
if not post:
                return self.redirect('/blog')
if not self.user:
return self.redirect('/signup')
if post.user_id != self.user.key().id():
return self.redirect('/blog')
post.delete()
self.redirect('/blog')
else:
return self.redirect('/login')
class EditPost(BlogHandler):
"""'EditPost' handler.
Ensures that the post editor is logged in, and that the post editor owns the post. Renders editpost.html page. Ensures that editpost.html page contains subject and content information. Updates post subject and content on permalink page and '/blog' page.
"""
def get(self, post_id):
key = db.Key.from_path('Post', int(post_id), parent=blog_key())
post = db.get(key)
if not post:
            return self.redirect('/blog')
if not self.user:
self.redirect('/login')
elif post.user_id == self.user.key().id():
self.render('editpost.html', post=post)
else:
return self.redirect('/blog?error=You may edit only your own post')
def post(self, post_id):
key = db.Key.from_path('Post', int(post_id), parent=blog_key())
post = db.get(key)
if not post:
            return self.redirect('/blog')
if not self.user:
return self.redirect('/signup')
subject = self.request.get('subject')
content = self.request.get('content')
if post.user_id != self.user.key().id():
return self.redirect('/blog')
if subject and content:
post.subject = subject
post.content = content
post.put()
self.redirect('/blog')
else:
error = "Subject and Content, Please."
self.render('newpost.html', subject=subject, content=content,
error=error)
class Signup(BlogHandler):
"""'Signup' handler.
Renders signup-form.html. Ensures submitted username, password, and optional email are valid, and that passwords match. Stores parameters.
"""
def get(self):
self.render('signup-form.html')
def post(self):
have_error = False
self.username = self.request.get('username')
self.password = self.request.get('password')
self.verify = self.request.get('verify')
self.email = self.request.get('email')
params = dict(username = self.username,
email = self.email)
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
def valid_username(username):
return username and USER_RE.match(username)
if not valid_username(self.username):
params['error_username'] = "That's not a valid username."
have_error = True
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
return password and PASS_RE.match(password)
if not valid_password(self.password):
params['error_password'] = "That wasn't a valid password."
have_error = True
elif self.password != self.verify:
params['error_verify'] = "Your passwords didn't match."
have_error = True
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
return not email or EMAIL_RE.match(email)
if not valid_email(self.email):
params['error_email'] = "That's not a valid email."
have_error = True
if have_error:
self.render('signup-form.html', **params)
else:
self.done()
def done(self, *a, **kw):
raise NotImplementedError
class Register(Signup):
"""'Register' handler.
Registers new user after making sure user does not already exist; renders welcome.html page.
"""
def done(self):
u = User.by_name(self.username)
if u:
return self.redirect("/blog?error=That user already exists")
else:
u = User.register(self.username, self.password, self.email)
u.put()
self.login(u)
self.redirect('/welcome')
class Login(BlogHandler):
"""'Login' handler.
Renders login-form.html, checks username and password for validity, logs user in and returns to updated '/blog' page.
"""
def get(self):
self.render('login-form.html')
def post(self):
username = self.request.get('username')
password = self.request.get('password')
u = User.login(username, password)
if u:
self.login(u)
self.redirect('/blog')
else:
msg = "Invalid login"
self.render('login-form.html', error = msg)
class Logout(BlogHandler):
"""'Logout' handler.
Logs logged-in user out and returns to '/blog' page.
"""
def get(self):
self.logout()
self.redirect('/blog')
class Welcome(BlogHandler):
"""'Welcome' handler.
    Renders the welcome.html page, welcoming the newly signed-up user.
"""
def get(self):
if self.user:
self.render('welcome.html', username = self.user.name)
else:
self.redirect('/signup')
##Routers##
app = webapp2.WSGIApplication([('/', MainPage),
('/blog/?', BlogFront),
('/blog/([0-9]+)', PostPage),
('/blog/newpost', NewPost),
('/blog/likepost/([0-9]+)', LikePost),
('/blog/unlikepost/([0-9]+)', UnlikePost),
('/blog/newcomment/([0-9]+)', NewComment),
('/blog/editcomment/([0-9]+)', EditComment),
('/blog/deletecomment/([0-9]+)', DeleteComment),
('/signup', Register),
('/login', Login),
('/logout', Logout),
('/blog/editpost/([0-9]+)', EditPost),
('/blog/deletepost/([0-9]+)', DeletePost),
('/welcome', Welcome),
],
debug=True)
|
{
"content_hash": "78d7963e1d124b90c8313e966b181bbf",
"timestamp": "",
"source": "github",
"line_count": 657,
"max_line_length": 260,
"avg_line_length": 30.30441400304414,
"alnum_prop": 0.5755399296835761,
"repo_name": "randyhoffner/FSND-Proj-3",
"id": "c2144aefdce7cd240f40b111b318417af19f1742",
"size": "19910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2073"
},
{
"name": "HTML",
"bytes": "5774"
},
{
"name": "Python",
"bytes": "19910"
}
],
"symlink_target": ""
}
|
from bluesky.plans import rel_scan
from bluesky.callbacks import LiveTable, LivePlot
subs = [LiveTable(['dcm_b', 'xray_eye3_stats1_total', 'xray_eye3_stats2_total']),
LivePlot('xray_eye3_stats1_total', 'dcm_b')]
print(dcm.b.read())
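# rel_scan steps dcm.b through 3 points spanning -0.1 to +0.1 relative to its
# current position and should return it to the starting value afterwards; the
# read() calls before and after are there to confirm that.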
RE(rel_scan([xray_eye3], dcm.b, -.1, .1, 3), subs)
print(dcm.b.read())
|
{
"content_hash": "2d825c0fa02399fff0f9712b14c5a0fa",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 82,
"avg_line_length": 34.888888888888886,
"alnum_prop": 0.6751592356687898,
"repo_name": "NSLS-II-CHX/ipython_ophyd",
"id": "96a7ed6c4b7aab5504a3d68bda964913fefaadb7",
"size": "315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profile_collection/acceptance_tests/02-dscan_dcm.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "144"
},
{
"name": "JavaScript",
"bytes": "1763"
},
{
"name": "Python",
"bytes": "372492"
},
{
"name": "Roff",
"bytes": "8152"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
from senlin.common import utils
from senlin.events import base
from senlin.objects import notification as nobj
class MessageEvent(base.EventBackend):
"""Message driver for event dumping"""
@classmethod
def _notify_cluster_action(cls, ctx, level, cluster, action, **kwargs):
action_name = cls._get_action_name(action)
priority = utils.level_from_number(level).lower()
publisher = nobj.NotificationPublisher(
host=cfg.CONF.host, binary='senlin-engine')
publisher.obj_set_defaults()
phase = kwargs.get('phase')
event_type = nobj.EventType(
object='cluster', action=action_name, phase=phase)
payload = nobj.ClusterActionPayload(cluster, action)
notification = nobj.ClusterActionNotification(
context=ctx, priority=priority, publisher=publisher,
event_type=event_type, payload=payload)
notification.emit(ctx)
@classmethod
def _notify_node_action(cls, ctx, level, node, action, **kwargs):
action_name = cls._get_action_name(action)
priority = utils.level_from_number(level).lower()
publisher = nobj.NotificationPublisher(
host=cfg.CONF.host, binary='senlin-engine')
publisher.obj_set_defaults()
phase = kwargs.get('phase')
event_type = nobj.EventType(
object='node', action=action_name, phase=phase)
payload = nobj.NodeActionPayload(node, action)
notification = nobj.NodeActionNotification(
context=ctx, priority=priority, publisher=publisher,
event_type=event_type, payload=payload)
notification.emit(ctx)
@classmethod
def dump(cls, level, action, **kwargs):
"""Dump the provided event into message queue.
:param level: An integer as defined by python logging module.
:param action: An action object for the current operation.
:param dict kwargs: Other keyword arguments for the operation.
"""
ctx = action.context
entity = action.entity
etype = cls._check_entity(entity)
if etype == 'CLUSTER':
cls._notify_cluster_action(ctx, level, entity, action, **kwargs)
else:
cls._notify_node_action(ctx, level, entity, action, **kwargs)
|
{
"content_hash": "87a90eb11dcd28821e90f80b0084efbb",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 76,
"avg_line_length": 40.87719298245614,
"alnum_prop": 0.6510729613733905,
"repo_name": "openstack/senlin",
"id": "352422b9ee02f1243dd7a72c650ed9f34db485c4",
"size": "2879",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "senlin/events/message.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "69788"
},
{
"name": "Python",
"bytes": "3755028"
},
{
"name": "Shell",
"bytes": "24272"
}
],
"symlink_target": ""
}
|
"""Constructs a VM."""
# Verify that both ways of hierarchical imports work.
from helpers import common
import helpers.extra.common2
def GenerateConfig(evaluation_context):
"""Generates config of a VM."""
return """
resources:
- name: %s
type: compute.v1.instance
properties:
description: %s
machineSize: %s
""" % (common.GenerateMachineName("myFrontend", "prod"),
evaluation_context.imports[
evaluation_context.properties["description-file"]],
helpers.extra.common2.GenerateMachineSize())
|
{
"content_hash": "0cc855f27825ffa1254982dac0df3732",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 62,
"avg_line_length": 26.7,
"alnum_prop": 0.704119850187266,
"repo_name": "jackgr/helm",
"id": "2ea1fb8edc7c02c9a5db992012696b16227c0fc3",
"size": "534",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cmd/expandybird/test/templates/python_template_with_inlinedfile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "443319"
},
{
"name": "HTML",
"bytes": "11038"
},
{
"name": "Makefile",
"bytes": "9330"
},
{
"name": "Python",
"bytes": "85426"
},
{
"name": "Ruby",
"bytes": "1755"
},
{
"name": "Shell",
"bytes": "20475"
}
],
"symlink_target": ""
}
|
from contentbase.json_renderer import json_renderer
from contentbase.util import get_root_request
from elasticsearch import Elasticsearch
from elasticsearch.connection import Urllib3HttpConnection
from elasticsearch.serializer import SerializationError
from pyramid.settings import (
asbool,
aslist,
)
from .interfaces import (
APP_FACTORY,
ELASTIC_SEARCH,
INDEXER,
)
import json
import sys
PY2 = sys.version_info.major == 2
def includeme(config):
settings = config.registry.settings
settings.setdefault('contentbase.elasticsearch.index', 'contentbase')
config.add_request_method(datastore, 'datastore', reify=True)
addresses = aslist(settings['elasticsearch.server'])
config.registry[ELASTIC_SEARCH] = Elasticsearch(
addresses,
serializer=PyramidJSONSerializer(json_renderer),
connection_class=TimedUrllib3HttpConnection,
retry_on_timeout=True,
)
config.include('.cached_views')
if not config.registry.settings.get('indexer'):
config.include('.esstorage')
config.include('.indexer')
if asbool(settings.get('indexer')) and not PY2:
config.include('.mpindexer')
def datastore(request):
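    # Sub-requests inherit the parent's datastore. 'edit' frames always read
    # from the database; plain GET/HEAD requests may be routed to
    # Elasticsearch based on the query string, the X-Datastore header, or the
    # collection_datastore setting.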
if request.__parent__ is not None:
return request.__parent__.datastore
datastore = 'database'
if request.params.get('frame') == 'edit':
return datastore
if request.method in ('HEAD', 'GET'):
datastore = request.params.get('datastore') or \
request.headers.get('X-Datastore') or \
request.registry.settings.get('collection_datastore', 'elasticsearch')
return datastore
class PyramidJSONSerializer(object):
mimetype = 'application/json'
def __init__(self, renderer):
self.renderer = renderer
def loads(self, s):
try:
return json.loads(s)
except (ValueError, TypeError) as e:
raise SerializationError(s, e)
def dumps(self, data):
# don't serialize strings
if isinstance(data, (type(''), type(u''))):
return data
try:
return self.renderer.dumps(data)
except (ValueError, TypeError) as e:
raise SerializationError(data, e)
class TimedUrllib3HttpConnection(Urllib3HttpConnection):
stats_count_key = 'es_count'
stats_time_key = 'es_time'
def stats_record(self, duration):
request = get_root_request()
if request is None:
return
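        # Accumulate the number of Elasticsearch calls and their total
        # duration (in microseconds) on the root request's stats dict.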
duration = int(duration * 1e6)
stats = request._stats
stats[self.stats_count_key] = stats.get(self.stats_count_key, 0) + 1
stats[self.stats_time_key] = stats.get(self.stats_time_key, 0) + duration
def log_request_success(self, method, full_url, path, body, status_code, response, duration):
self.stats_record(duration)
return super(TimedUrllib3HttpConnection, self).log_request_success(
method, full_url, path, body, status_code, response, duration)
def log_request_fail(self, method, full_url, body, duration, status_code=None, exception=None):
self.stats_record(duration)
return super(TimedUrllib3HttpConnection, self).log_request_fail(
method, full_url, body, duration, status_code, exception)
|
{
"content_hash": "4d21998cee751db0f38538d99596b154",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 99,
"avg_line_length": 32.34653465346535,
"alnum_prop": 0.6685032139577594,
"repo_name": "kidaa/encoded",
"id": "afa186d80645cfe6cb164b49c6ad8863e92df6f4",
"size": "3267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/contentbase/elasticsearch/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "741"
},
{
"name": "CSS",
"bytes": "124947"
},
{
"name": "Cucumber",
"bytes": "17721"
},
{
"name": "HTML",
"bytes": "371787"
},
{
"name": "JavaScript",
"bytes": "550757"
},
{
"name": "Python",
"bytes": "863661"
},
{
"name": "Ruby",
"bytes": "992"
},
{
"name": "Shell",
"bytes": "2814"
}
],
"symlink_target": ""
}
|
from numpy.testing import TestCase
from parameterized import parameterized
from .. import stripe_painter as sp
class StripePainterTest(TestCase):
def setUp(self):
self.painter = sp.StripePainter()
@parameterized.expand(
[("BECBBDDEEBABDCADEAAEABCACBDBEECDEDEACACCBEDABEDADD", 26),
("RGBGR", 3), ("RGRG", 3), ("ABACADA", 4), ("BRGBGR", 4),
("AABBCCDDCCBBAABBCCDD", 7)]
)
def test_min_strokes(self, stripes, expected):
self.assertEqual(self.painter.min_strokes(stripes), expected)
|
{
"content_hash": "1c33a3c5e79401d85a268cbe95e18d00",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 69,
"avg_line_length": 30.11111111111111,
"alnum_prop": 0.6752767527675276,
"repo_name": "aliciawyy/dmining",
"id": "45de8c3b24b673222dab027ea40da9fee56caba4",
"size": "542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "puzzle/tests/test_stripe_painter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "74603"
}
],
"symlink_target": ""
}
|
import json
import logging
import os
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import CreateView, DetailView, RedirectView, TemplateView, View
from django.views.generic.edit import FormView, UpdateView
from .forms import *
from .models import Account
# Get an instance of a logger
logger = logging.getLogger(__name__)
class AccountRedirectView(View):
@method_decorator(login_required)
def get(self, request):
user = request.user
return HttpResponseRedirect(reverse('account_detail', args=(user.username,)))
class AccountDetailView(DetailView):
model = Account
template_name = 'authentication/detailVCard.html'
def get_context_data(self, **kwargs):
context = super(AccountDetailView, self).get_context_data(**kwargs)
context['referer'] = self.request.META.get('HTTP_REFERER')
context['is_owner'] = (self.object == self.request.user)
return context
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(AccountDetailView, self).dispatch(request, *args, **kwargs)
class AccountThumbnailView(RedirectView):
permanent = False
query_string = True
def get_redirect_url(self, *args, **kwargs):
user = get_object_or_404(Account, slug=kwargs['slug'])
return user.get_thumbnail_url()
class AccountTinyView(RedirectView):
permanent = False
query_string = True
def get_redirect_url(self, *args, **kwargs):
user = get_object_or_404(Account, slug=kwargs['slug'])
return user.get_tiny_url()
class AccountUpdateView(UpdateView):
model = Account
form_class = AccountUpdateForm
template_name = 'form.html'
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.time_zone = form.cleaned_data.get('time_zone')
self.object.save()
# Update session as well
self.request.session['django_timezone'] = str(form.cleaned_data.get('time_zone'))
return HttpResponseRedirect(self.get_success_url())
def get_context_data(self, **kwargs):
context = super(AccountUpdateView, self).get_context_data(**kwargs)
context['referer'] = self.request.META.get('HTTP_REFERER')
return context
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(AccountUpdateView, self).dispatch(request, *args, **kwargs)
|
{
"content_hash": "1870553313042d1488cc2d03c015fc2a",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 89,
"avg_line_length": 30.88888888888889,
"alnum_prop": 0.706115107913669,
"repo_name": "dkarchmer/django-aws-template",
"id": "3342817045f0c41ef16be783fb6425490bbf4f64",
"size": "2780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/apps/authentication/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1669"
},
{
"name": "HTML",
"bytes": "16162"
},
{
"name": "JavaScript",
"bytes": "3484"
},
{
"name": "Python",
"bytes": "93513"
},
{
"name": "SCSS",
"bytes": "1584"
},
{
"name": "Shell",
"bytes": "5398"
}
],
"symlink_target": ""
}
|
"""Strongly connected components.
"""
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils.decorators import not_implemented_for
__authors__ = "\n".join(['Eben Kenah',
                         'Aric Hagberg (hagberg@lanl.gov)',
                         'Christopher Ellison',
'Ben Edwards (bedwards@cs.unm.edu)'])
__all__ = ['number_strongly_connected_components',
'strongly_connected_components',
'strongly_connected_component_subgraphs',
'is_strongly_connected',
'strongly_connected_components_recursive',
'kosaraju_strongly_connected_components',
'condensation']
@not_implemented_for('undirected')
def strongly_connected_components(G):
"""Generate nodes in strongly connected components of graph.
Parameters
----------
G : NetworkX Graph
       A directed graph.
Returns
-------
comp : generator of sets
A generator of sets of nodes, one for each strongly connected
component of G.
Raises
------
NetworkXNotImplemented:
If G is undirected.
Examples
--------
Generate a sorted list of strongly connected components, largest first.
>>> G = nx.cycle_graph(4, create_using=nx.DiGraph())
>>> nx.add_cycle(G, [10, 11, 12])
>>> [len(c) for c in sorted(nx.strongly_connected_components(G),
... key=len, reverse=True)]
[4, 3]
If you only want the largest component, it's more efficient to
use max instead of sort.
>>> largest = max(nx.strongly_connected_components(G), key=len)
See Also
--------
connected_components,
weakly_connected_components
Notes
-----
Uses Tarjan's algorithm with Nuutila's modifications.
Nonrecursive version of algorithm.
References
----------
.. [1] Depth-first search and linear graph algorithms, R. Tarjan
SIAM Journal of Computing 1(2):146-160, (1972).
.. [2] On finding the strongly connected components in a directed graph.
E. Nuutila and E. Soisalon-Soinen
       Information Processing Letters 49(1): 9-14, (1994).
"""
preorder = {}
lowlink = {}
scc_found = {}
scc_queue = []
i = 0 # Preorder counter
for source in G:
if source not in scc_found:
queue = [source]
while queue:
v = queue[-1]
if v not in preorder:
i = i + 1
preorder[v] = i
done = 1
v_nbrs = G[v]
for w in v_nbrs:
if w not in preorder:
queue.append(w)
done = 0
break
if done == 1:
lowlink[v] = preorder[v]
for w in v_nbrs:
if w not in scc_found:
if preorder[w] > preorder[v]:
lowlink[v] = min([lowlink[v], lowlink[w]])
else:
lowlink[v] = min([lowlink[v], preorder[w]])
queue.pop()
if lowlink[v] == preorder[v]:
scc_found[v] = True
scc = {v}
while scc_queue and preorder[scc_queue[-1]] > preorder[v]:
k = scc_queue.pop()
scc_found[k] = True
scc.add(k)
yield scc
else:
scc_queue.append(v)
@not_implemented_for('undirected')
def kosaraju_strongly_connected_components(G, source=None):
"""Generate nodes in strongly connected components of graph.
Parameters
----------
G : NetworkX Graph
       A directed graph.
Returns
-------
comp : generator of sets
       A generator of sets of nodes, one for each strongly connected
component of G.
Raises
------
NetworkXNotImplemented:
If G is undirected.
Examples
--------
Generate a sorted list of strongly connected components, largest first.
>>> G = nx.cycle_graph(4, create_using=nx.DiGraph())
>>> nx.add_cycle(G, [10, 11, 12])
>>> [len(c) for c in sorted(nx.kosaraju_strongly_connected_components(G),
... key=len, reverse=True)]
[4, 3]
If you only want the largest component, it's more efficient to
use max instead of sort.
>>> largest = max(nx.kosaraju_strongly_connected_components(G), key=len)
See Also
--------
connected_components
weakly_connected_components
Notes
-----
Uses Kosaraju's algorithm.
"""
with nx.utils.reversed(G):
post = list(nx.dfs_postorder_nodes(G, source=source))
seen = set()
while post:
r = post.pop()
if r in seen:
continue
c = nx.dfs_preorder_nodes(G, r)
new = {v for v in c if v not in seen}
yield new
seen.update(new)
@not_implemented_for('undirected')
def strongly_connected_components_recursive(G):
"""Generate nodes in strongly connected components of graph.
Recursive version of algorithm.
Parameters
----------
G : NetworkX Graph
       A directed graph.
Returns
-------
comp : generator of sets
A generator of sets of nodes, one for each strongly connected
component of G.
Raises
------
NetworkXNotImplemented:
If G is undirected
Examples
--------
Generate a sorted list of strongly connected components, largest first.
>>> G = nx.cycle_graph(4, create_using=nx.DiGraph())
>>> nx.add_cycle(G, [10, 11, 12])
>>> [len(c) for c in sorted(nx.strongly_connected_components_recursive(G),
... key=len, reverse=True)]
[4, 3]
If you only want the largest component, it's more efficient to
use max instead of sort.
>>> largest = max(nx.strongly_connected_components_recursive(G), key=len)
See Also
--------
connected_components
Notes
-----
Uses Tarjan's algorithm with Nuutila's modifications.
References
----------
.. [1] Depth-first search and linear graph algorithms, R. Tarjan
SIAM Journal of Computing 1(2):146-160, (1972).
.. [2] On finding the strongly connected components in a directed graph.
E. Nuutila and E. Soisalon-Soinen
       Information Processing Letters 49(1): 9-14, (1994).
"""
def visit(v, cnt):
root[v] = cnt
visited[v] = cnt
cnt += 1
stack.append(v)
for w in G[v]:
if w not in visited:
for c in visit(w, cnt):
yield c
if w not in component:
root[v] = min(root[v], root[w])
if root[v] == visited[v]:
component[v] = root[v]
tmpc = {v} # hold nodes in this component
while stack[-1] != v:
w = stack.pop()
component[w] = root[v]
tmpc.add(w)
stack.remove(v)
yield tmpc
visited = {}
component = {}
root = {}
cnt = 0
stack = []
for source in G:
if source not in visited:
for c in visit(source, cnt):
yield c
@not_implemented_for('undirected')
def strongly_connected_component_subgraphs(G, copy=True):
"""Generate strongly connected components as subgraphs.
Parameters
----------
G : NetworkX Graph
A directed graph.
copy : boolean, optional
if copy is True, Graph, node, and edge attributes are copied to
the subgraphs.
Returns
-------
comp : generator of graphs
A generator of graphs, one for each strongly connected component of G.
Examples
--------
Generate a sorted list of strongly connected components, largest first.
>>> G = nx.cycle_graph(4, create_using=nx.DiGraph())
>>> nx.add_cycle(G, [10, 11, 12])
>>> [len(Gc) for Gc in sorted(nx.strongly_connected_component_subgraphs(G),
... key=len, reverse=True)]
[4, 3]
If you only want the largest component, it's more efficient to
use max instead of sort.
>>> Gc = max(nx.strongly_connected_component_subgraphs(G), key=len)
See Also
--------
connected_component_subgraphs
weakly_connected_component_subgraphs
"""
for comp in strongly_connected_components(G):
if copy:
yield G.subgraph(comp).copy()
else:
yield G.subgraph(comp)
@not_implemented_for('undirected')
def number_strongly_connected_components(G):
"""Return number of strongly connected components in graph.
Parameters
----------
G : NetworkX graph
A directed graph.
Returns
-------
n : integer
Number of strongly connected components
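    Examples
    --------
    For example, a 3-cycle with one extra node hanging off it should give
    two components:
    >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 3)])
    >>> nx.number_strongly_connected_components(G)
    2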
See Also
--------
connected_components
Notes
-----
For directed graphs only.
"""
return len(list(strongly_connected_components(G)))
@not_implemented_for('undirected')
def is_strongly_connected(G):
"""Test directed graph for strong connectivity.
Parameters
----------
G : NetworkX Graph
A directed graph.
Returns
-------
connected : bool
True if the graph is strongly connected, False otherwise.
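    Examples
    --------
    A directed cycle can reach every node from every other node, so it
    should test as strongly connected:
    >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 0)])
    >>> nx.is_strongly_connected(G)
    True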
See Also
--------
strongly_connected_components
Notes
-----
For directed graphs only.
"""
if len(G) == 0:
raise nx.NetworkXPointlessConcept(
"""Connectivity is undefined for the null graph.""")
return len(list(strongly_connected_components(G))[0]) == len(G)
@not_implemented_for('undirected')
def condensation(G, scc=None):
"""Returns the condensation of G.
The condensation of G is the graph with each of the strongly connected
components contracted into a single node.
Parameters
----------
G : NetworkX DiGraph
A directed graph.
scc: list or generator (optional, default=None)
Strongly connected components. If provided, the elements in
`scc` must partition the nodes in `G`. If not provided, it will be
calculated as scc=nx.strongly_connected_components(G).
Returns
-------
C : NetworkX DiGraph
The condensation graph C of G. The node labels are integers
corresponding to the index of the component in the list of
strongly connected components of G. C has a graph attribute named
'mapping' with a dictionary mapping the original nodes to the
nodes in C to which they belong. Each node in C also has a node
attribute 'members' with the set of original nodes in G that
form the SCC that the node in C represents.
Raises
------
NetworkXNotImplemented:
If G is not directed
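    Examples
    --------
    Two 2-cycles joined by a single edge should condense to a two-node
    acyclic graph:
    >>> G = nx.DiGraph([(1, 2), (2, 1), (2, 3), (3, 4), (4, 3)])
    >>> C = nx.condensation(G)
    >>> len(C)
    2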
Notes
-----
After contracting all strongly connected components to a single node,
the resulting graph is a directed acyclic graph.
"""
if scc is None:
scc = nx.strongly_connected_components(G)
mapping = {}
members = {}
C = nx.DiGraph()
for i, component in enumerate(scc):
members[i] = component
mapping.update((n, i) for n in component)
number_of_components = i + 1
C.add_nodes_from(range(number_of_components))
C.add_edges_from((mapping[u], mapping[v]) for u, v in G.edges()
if mapping[u] != mapping[v])
# Add a list of members (ie original nodes) to each node (ie scc) in C.
nx.set_node_attributes(C, 'members', members)
# Add mapping dict as graph attribute
C.graph['mapping'] = mapping
return C
|
{
"content_hash": "16ec8641b1a98a1531284aab2ae25ad6",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 82,
"avg_line_length": 28.189125295508273,
"alnum_prop": 0.5662529352566253,
"repo_name": "Sixshaman/networkx",
"id": "c2a91606b7fc3d6a83e99e563ab08fbf3ff04cf3",
"size": "11948",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "networkx/algorithms/components/strongly_connected.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "PowerShell",
"bytes": "3311"
},
{
"name": "Python",
"bytes": "3238984"
}
],
"symlink_target": ""
}
|
"""Test the cli module."""
|
{
"content_hash": "57f48b208e1d6b9983d881b3413fa3bd",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 26,
"avg_line_length": 27,
"alnum_prop": 0.5925925925925926,
"repo_name": "nstoik/farm_monitor",
"id": "40b8857c4cbe62e29b3c78bd020b58bf67cc6d2b",
"size": "27",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "database/tests/cli/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "18259"
},
{
"name": "HCL",
"bytes": "1534"
},
{
"name": "HTML",
"bytes": "611"
},
{
"name": "JavaScript",
"bytes": "268"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "242717"
},
{
"name": "Shell",
"bytes": "2937"
},
{
"name": "TypeScript",
"bytes": "18970"
},
{
"name": "Vue",
"bytes": "14394"
}
],
"symlink_target": ""
}
|
"""OpenStackClient plugin for Workflow service."""
import logging
from osc_lib import utils
LOG = logging.getLogger(__name__)
DEFAULT_WORKFLOW_API_VERSION = '2'
API_VERSION_OPTION = 'os_workflow_api_version'
API_NAME = 'workflow_engine'
API_VERSIONS = {
'2': 'mistralclient.api.v2.client.Client',
}
def make_client(instance):
"""Returns a workflow_engine service client."""
version = instance._api_version[API_NAME]
workflow_client = utils.get_client_class(
API_NAME,
version,
API_VERSIONS)
LOG.debug('Instantiating workflow engine client: %s', workflow_client)
mistral_url = instance.get_endpoint_for_service_type(
'workflowv2',
interface='publicURL'
)
client = workflow_client(mistral_url=mistral_url, session=instance.session)
return client
def build_option_parser(parser):
"""Hook to add global options."""
parser.add_argument(
'--os-workflow-api-version',
metavar='<workflow-api-version>',
default=utils.env(
'OS_WORKFLOW_API_VERSION',
default=DEFAULT_WORKFLOW_API_VERSION),
help='Workflow API version, default=' +
DEFAULT_WORKFLOW_API_VERSION +
' (Env: OS_WORKFLOW_API_VERSION)')
return parser
|
{
"content_hash": "deea3371d4eaaa09a74f84c2cc1e461a",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 79,
"avg_line_length": 26.183673469387756,
"alnum_prop": 0.6500389711613406,
"repo_name": "StackStorm/python-mistralclient",
"id": "f1869b427469c877c815d2bf450321246da14a6b",
"size": "1848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistralclient/osc/plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "427266"
},
{
"name": "Shell",
"bytes": "6899"
}
],
"symlink_target": ""
}
|
"""Add a unique constraint on tenant_id and ha_vr_id of l3 ha router
Revision ID: efcfc172aca4
Revises: efcfc171aca4
Create Date: 2014-06-27 18:35:28.148680
"""
revision = 'efcfc172aca4'
down_revision = 'efcfc171aca4'
migration_for_plugins = [
'*'
]
from alembic import op
from neutron.db import migration
TABLE_NAME = 'routers'
UC_NAME = 'uniq_l3router0tenant_id0ha_vr_id'
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_unique_constraint(
name=UC_NAME,
source=TABLE_NAME,
local_cols=['tenant_id', 'ha_vr_id']
)
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_constraint(
name=UC_NAME,
table_name=TABLE_NAME,
type_='unique'
)
|
{
"content_hash": "8f9f207c9054633b7da0210ecaf86b2a",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 71,
"avg_line_length": 20.522727272727273,
"alnum_prop": 0.6733111849390919,
"repo_name": "CingHu/neutron-ustack",
"id": "1496ea88de5affaa03c8d3e602bf52cf7649c2a5",
"size": "1564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/db/migration/alembic_migrations/versions/efcfc172aca4_router_havrid_unique.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1109"
},
{
"name": "Python",
"bytes": "11544804"
},
{
"name": "Shell",
"bytes": "29485"
}
],
"symlink_target": ""
}
|
from ....testing import assert_equal
from ..maths import ApplyMask
def test_ApplyMask_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=2,
),
internal_datatype=dict(argstr='-dt %s',
position=1,
),
mask_file=dict(argstr='-mas %s',
mandatory=True,
position=4,
),
nan2zeros=dict(argstr='-nan',
position=3,
),
out_file=dict(argstr='%s',
genfile=True,
hash_files=False,
position=-2,
),
output_datatype=dict(argstr='-odt %s',
position=-1,
),
output_type=dict(),
terminal_output=dict(nohash=True,
),
)
inputs = ApplyMask.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ApplyMask_outputs():
output_map = dict(out_file=dict(),
)
outputs = ApplyMask.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
{
"content_hash": "8cd76d2de497a01940f5e4ced8d267c5",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 78,
"avg_line_length": 24.203703703703702,
"alnum_prop": 0.6082631981637338,
"repo_name": "FCP-INDI/nipype",
"id": "d3745676623fb83eb58c7b193761f3619f37a108",
"size": "1361",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "nipype/interfaces/fsl/tests/test_auto_ApplyMask.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2063"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "5280923"
},
{
"name": "Shell",
"bytes": "1958"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
from datetime import datetime, timedelta
from django.test import TestCase
from couchforms.models import XFormInstance, XFormArchived
from couchforms.signals import xform_archived, xform_unarchived
from couchforms import fetch_and_wrap_form
class TestFormArchiving(TestCase):
def testArchive(self):
form = XFormInstance(
form={'foo': 'bar'}
)
form.save()
form.put_attachment(name='form.xml', content='<data/>')
self.assertEqual("XFormInstance", form.doc_type)
self.assertEqual(0, len(form.history))
lower_bound = datetime.utcnow() - timedelta(seconds=1)
form.archive(user='mr. librarian')
upper_bound = datetime.utcnow() + timedelta(seconds=1)
form = fetch_and_wrap_form(form._id)
self.assertEqual('XFormArchived', form.doc_type)
self.assertTrue(isinstance(form, XFormArchived))
[archival] = form.history
self.assertTrue(lower_bound <= archival.date <= upper_bound)
self.assertEqual('archive', archival.operation)
self.assertEqual('mr. librarian', archival.user)
lower_bound = datetime.utcnow() - timedelta(seconds=1)
form.unarchive(user='mr. researcher')
upper_bound = datetime.utcnow() + timedelta(seconds=1)
form = fetch_and_wrap_form(form._id)
self.assertEqual('XFormInstance', form.doc_type)
self.assertTrue(isinstance(form, XFormInstance))
[archival, restoration] = form.history
self.assertTrue(lower_bound <= restoration.date <= upper_bound)
self.assertEqual('unarchive', restoration.operation)
self.assertEqual('mr. researcher', restoration.user)
def testSignal(self):
global archive_counter, restore_counter
archive_counter = 0
restore_counter = 0
def count_archive(**kwargs):
global archive_counter
archive_counter += 1
def count_unarchive(**kwargs):
global restore_counter
restore_counter += 1
xform_archived.connect(count_archive)
xform_unarchived.connect(count_unarchive)
form = XFormInstance(form={'foo': 'bar'})
form.save()
form.put_attachment(name='form.xml', content='<data/>')
self.assertEqual(0, archive_counter)
self.assertEqual(0, restore_counter)
form.archive()
self.assertEqual(1, archive_counter)
self.assertEqual(0, restore_counter)
form.unarchive()
self.assertEqual(1, archive_counter)
self.assertEqual(1, restore_counter)
|
{
"content_hash": "fbd9164716f57f7ca7a941a0657ff6e8",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 71,
"avg_line_length": 35.397260273972606,
"alnum_prop": 0.6486068111455109,
"repo_name": "puttarajubr/commcare-hq",
"id": "d03e47ca5df6a5869aeadc438030473ef341778b",
"size": "2584",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "corehq/ex-submodules/couchforms/tests/test_archive.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
}
|
import logging
# Non-stdlib imports
from pylons import c
from ming.utils import LazyProperty
from ming.orm.ormsession import ThreadLocalORMSession
# Pyforge-specific imports
import allura.tasks.repo_tasks
from allura.lib import helpers as h
from allura import model as M
from allura.controllers.repository import RepoRootController, RefsController, CommitsController
from allura.controllers.repository import MergeRequestsController, RepoRestController
from allura.lib.repository import RepositoryApp
# Local imports
from . import model as GM
from . import version
from .controllers import BranchBrowser
log = logging.getLogger(__name__)
class ForgeGitApp(RepositoryApp):
'''This is the Git app for PyForge'''
__version__ = version.__version__
    tool_label = 'Git'
    ordinal = 2
    forkable = True
    default_branch_name = 'ref/master'
def __init__(self, project, config):
super(ForgeGitApp, self).__init__(project, config)
self.root = RepoRootController()
self.api_root = RepoRestController()
self.root.ref = RefsController(BranchBrowser)
self.root.ci = CommitsController()
setattr(self.root, 'merge-requests', MergeRequestsController())
@LazyProperty
def repo(self):
return GM.Repository.query.get(app_config_id=self.config._id)
def install(self, project):
'''Create repo object for this tool'''
super(ForgeGitApp, self).install(project)
repo = GM.Repository(
name=self.config.options.mount_point + '.git',
tool='git',
status='initing')
ThreadLocalORMSession.flush_all()
cloned_from_project_id = self.config.options.get('cloned_from_project_id')
cloned_from_repo_id = self.config.options.get('cloned_from_repo_id')
init_from_url = self.config.options.get('init_from_url')
if cloned_from_project_id is not None:
cloned_from = GM.Repository.query.get(_id=cloned_from_repo_id)
allura.tasks.repo_tasks.clone.post(
cloned_from_path=cloned_from.full_fs_path,
cloned_from_name=cloned_from.app.config.script_name(),
cloned_from_url=cloned_from.full_fs_path)
elif init_from_url:
allura.tasks.repo_tasks.clone.post(
cloned_from_path=None,
cloned_from_name=None,
cloned_from_url=init_from_url)
else:
allura.tasks.repo_tasks.init.post()
|
{
"content_hash": "5c361dcf82580aba7ae9cb92e2315bb8",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 95,
"avg_line_length": 36.92537313432836,
"alnum_prop": 0.6709781729991916,
"repo_name": "leotrubach/sourceforge-allura",
"id": "ff5c0bc5d2c73eb2ff0adf6ae3d1383368e26163",
"size": "2490",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "ForgeGit/forgegit/git_main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "D",
"bytes": "2985957"
},
{
"name": "JavaScript",
"bytes": "650950"
},
{
"name": "Puppet",
"bytes": "2677"
},
{
"name": "Python",
"bytes": "1866436"
},
{
"name": "Ruby",
"bytes": "4109"
},
{
"name": "Shell",
"bytes": "6636"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CodeFile'
db.create_table(u'survey_admin_codefile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('code', self.gf('django.db.models.fields.TextField')()),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal(u'survey_admin', ['CodeFile'])
def backwards(self, orm):
# Deleting model 'CodeFile'
db.delete_table(u'survey_admin_codefile')
models = {
u'survey_admin.codefile': {
'Meta': {'object_name': 'CodeFile'},
'code': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['survey_admin']
|
{
"content_hash": "81bea11c838ce95eebbecc3688ee547a",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 107,
"avg_line_length": 37,
"alnum_prop": 0.5976833976833977,
"repo_name": "sbreslav/mimic",
"id": "cd1b82fdeefb4fc11f3a4f3d012a8b49659bbbd8",
"size": "1319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/survey_admin/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "506"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "260"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "1522"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "98503"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "806"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "2034"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "2075070"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "1550"
},
{
"name": "JavaScript",
"bytes": "23452334"
},
{
"name": "Julia",
"bytes": "202"
},
{
"name": "LSL",
"bytes": "2124"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "981"
},
{
"name": "Makefile",
"bytes": "7335"
},
{
"name": "Matlab",
"bytes": "4"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "666"
},
{
"name": "PHP",
"bytes": "351"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "678"
},
{
"name": "PowerShell",
"bytes": "418"
},
{
"name": "Python",
"bytes": "258796"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Ruby",
"bytes": "531"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "1157"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "1345"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Visual Basic",
"bytes": "916"
},
{
"name": "XQuery",
"bytes": "114"
}
],
"symlink_target": ""
}
|
__author__ = 'Eric Bidelman <ebidel@>'
import logging
import json
import webapp2
PUSH_MANIFEST = 'push_manifest.json'
manifest_cache = {} # filename -> list of push URL mapping.
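# push_manifest.json is expected to map each URL to push to an optional
# priority (0-7) or null, e.g. {"/js/app.js": 1, "/css/app.css": null}.
# (Illustrative paths only; the real entries live in the manifest file.)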
def use_push_manifest(filename):
global manifest_cache
push_urls = {}
# Read file only if it's not in memory.
if filename in manifest_cache:
push_urls = manifest_cache[filename]['push_urls']
else:
try:
with open(filename) as f:
push_urls = json.loads(f.read())
manifest_cache[filename] = {'push_urls': push_urls} # cache it.
except IOError as e:
logging.error("Error reading %s: %s" % (filename, e.strerror))
return push_urls
class PushHandler(webapp2.RequestHandler):
"""Base handler for constructing Link rel=preload header."""
push_urls = use_push_manifest(PUSH_MANIFEST)
# def __init__(self, request, response):
# self.initialize(request, response)
def _generate_associate_content_header(self, urls=None):
"""Constructs a value for the X-Associated-Content header.
The format of the header value is a comma-separated list of double-quoted
URLs, each of which may optionally be followed by a colon and a SPDY
priority number (from 0 to 7 inclusive). URL needs to be a full absolute
URL. Whitespace between tokens is optional, and is ignored if present.
For example:
X-Associated-Content: "https://www.example.com/styles/foo.css",
"/scripts/bar.js?q=4":2, "https://www.example.com/images/baz.png": 5,
"https://www.example.com/generate_image.php?w=32&h=24"
App Engine supports this header for now. Link: rel=preload is the standard
and you should use that to be compliant with the HTTP2 spec.
Args:
url: A dict of url: priority mappings to use in the header.
Returns:
Comma separated string for the X-Associated-Content header.
"""
if urls is None:
urls = self.push_urls
host = self.request.host_url
associate_content = []
for url,v in urls.iteritems():
url = '%s%s' % (host, str(url)) # Construct absolute URLs.
if v is None:
associate_content.append('"%s"' % url)
else:
associate_content.append('"%s":%s' % (url, str(v)))
headers = list(set(associate_content)) # remove duplicates
return ','.join(headers)
def _generate_link_preload_headers(self, urls=None):
"""Constructs a value for the Link: rel=preload header.
The format of the preload header is described in the spec
http://w3c.github.io/preload/:
Link: <https://example.com/font.woff>; rel=preload;
Args:
url: A list of urls to use in the header.
Returns:
A list of Link header values.
"""
if urls is None:
urls = self.push_urls
host = self.request.host_url
preload_links = []
for url in urls:
url = '%s%s' % (host, str(url)) # Construct absolute URLs.
preload_links.append('<%s>; rel="preload"' % url)
headers = list(set(preload_links)) # remove duplicates
    # TODO: check that implementations support a single Link header
    # with comma-separated values.
return headers # ','.join(headers)
"""
Example:
@http2push.push()
def get(self):
pass
@http2push.push('push_manifest.json') # Use a custom manifest.
def get(self):
pass
?nopush on the URL prevents the header from being included.
"""
def push(manifest=PUSH_MANIFEST):
def decorator(handler):
push_urls = use_push_manifest(manifest)
def wrapper(*args, **kwargs):
instance = args[0]
# nopush URL param prevents the Link header from being included.
if instance.request.get('nopush', None) is None and len(push_urls):
# Send X-Associated-Content header.
instance.response.headers.add_header('X-Associated-Content',
instance._generate_associate_content_header(push_urls))
preload_headers = instance._generate_link_preload_headers(push_urls)
if type(preload_headers) is list:
for h in preload_headers:
instance.response.headers.add_header('Link', h)
else:
instance.response.headers.add_header('Link', preload_headers)
return handler(*args, **kwargs)
return wrapper
return decorator
|
{
"content_hash": "6507aa631dbc606165efe7e3aa146581",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 79,
"avg_line_length": 28.804054054054053,
"alnum_prop": 0.6586910626319493,
"repo_name": "StickmanVentures/ginger",
"id": "91ec0f340170c33fa5f71825f6967b78a326d7fe",
"size": "4863",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "http2push.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10332"
},
{
"name": "JavaScript",
"bytes": "33754"
},
{
"name": "Python",
"bytes": "5949"
}
],
"symlink_target": ""
}
|
from .inputs import write_molcas_input
from .output import parse_molcas, Output, Orb, HDF
|
{
"content_hash": "94ca7556f02d2d539b9f2f3ecac1768c",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 50,
"avg_line_length": 45,
"alnum_prop": 0.7888888888888889,
"repo_name": "exa-analytics/exatomic",
"id": "b846fa18ffa1681e307ad210981a234c9cdb7feb",
"size": "228",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "exatomic/molcas/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "762"
},
{
"name": "JavaScript",
"bytes": "121101"
},
{
"name": "Jupyter Notebook",
"bytes": "13176"
},
{
"name": "Python",
"bytes": "1084816"
},
{
"name": "Shell",
"bytes": "711"
},
{
"name": "TypeScript",
"bytes": "953"
}
],
"symlink_target": ""
}
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Firefox()
driver.get("https://www.crunchbase.com/funding-rounds")
first_name = driver.find_element_by_id('first_name')
last_name = driver.find_element_by_id('last_name')
email = driver.find_element_by_id('email')
city = driver.find_element_by_id('city')
first_name.send_keys('Zubair')
last_name.send_keys('Hussain')
email.send_keys('abc@hotmail.com')
city.send_keys('islamabad')
driver.find_element_by_class_name('btn-lg').click()
title = driver.title
print title
driver.close()
|
{
"content_hash": "615ef8809e7b348ca2f0c7c3e9fd33ec",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 55,
"avg_line_length": 26.458333333333332,
"alnum_prop": 0.7196850393700788,
"repo_name": "ZubairHussain/Crunchbase-Startups-Analysis",
"id": "8a91a4368f3100899463245d14dd65d3d561bb7f",
"size": "635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CrunchBase Scraper/Selenium/scraper.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "384393"
},
{
"name": "HTML",
"bytes": "91943"
},
{
"name": "JavaScript",
"bytes": "165780"
},
{
"name": "Jupyter Notebook",
"bytes": "321122"
},
{
"name": "Python",
"bytes": "17264"
}
],
"symlink_target": ""
}
|
import unittest
from cors import constants
from cors import cors_handler
class TestCorsRequest(unittest.TestCase):
def test_defaultInstance(self):
req = cors_handler.CorsRequest()
self.assertIsNone(req.http_method)
self.assertIsNone(req.origin)
self.assertIsNone(req.request_method)
self.assertIsNone(req.request_headers)
self.assertFalse(req.is_cors)
self.assertFalse(req.is_preflight)
def test_setMethod(self):
req = cors_handler.CorsRequest('GET')
self.assertEquals('GET', req.http_method)
def test_headers(self):
headers = {}
headers[constants.ORIGIN] = 'http://github.com'
headers[constants.ACCESS_CONTROL_REQUEST_METHOD] = 'GET'
headers[constants.ACCESS_CONTROL_REQUEST_HEADERS] = 'Header1, Header2'
req = cors_handler.CorsRequest('GET', headers)
self.assertEquals('http://github.com', req.origin)
self.assertEquals('GET', req.request_method)
self.assertEquals(['Header1', 'Header2'], req.request_headers)
def test_isCors(self):
headers = {}
headers[constants.ORIGIN] = 'http://github.com'
req = cors_handler.CorsRequest('GET', headers)
self.assertTrue(req.is_cors)
self.assertFalse(req.is_preflight)
def test_isPreflight(self):
headers = {}
headers[constants.ORIGIN] = 'http://github.com'
headers[constants.ACCESS_CONTROL_REQUEST_METHOD] = 'PUT'
req = cors_handler.CorsRequest('OPTIONS', headers)
self.assertTrue(req.is_cors)
self.assertTrue(req.is_preflight)
def test_notPreflight1(self):
headers = {}
headers[constants.ORIGIN] = 'http://github.com'
headers[constants.ACCESS_CONTROL_REQUEST_METHOD] = 'PUT'
req = cors_handler.CorsRequest('GET', headers)
self.assertTrue(req.is_cors)
self.assertFalse(req.is_preflight)
def test_notPreflight2(self):
headers = {}
headers['Origin'] = 'http://github.com'
req = cors_handler.CorsRequest('OPTIONS', headers)
self.assertTrue(req.is_cors)
self.assertFalse(req.is_preflight)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "cac3acc06cfa0a7c42972e076c2369d2",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 78,
"avg_line_length": 34.734375,
"alnum_prop": 0.6437246963562753,
"repo_name": "monsur/cors-python",
"id": "9a28c93a58e7d94cbc01e733f191d8568a5b4f44",
"size": "2223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cors_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34661"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
}
|
"""Tests for test_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
from deeplab.evaluation import test_utils
class TestUtilsTest(absltest.TestCase):
def test_read_test_image(self):
image_array = test_utils.read_test_image('team_pred_class.png')
self.assertSequenceEqual(image_array.shape, (231, 345, 4))
def test_reads_segmentation_with_color_map(self):
rgb_to_semantic_label = {(0, 0, 0): 0, (0, 0, 255): 1, (255, 0, 0): 23}
labels = test_utils.read_segmentation_with_rgb_color_map(
'team_pred_class.png', rgb_to_semantic_label)
input_image = test_utils.read_test_image('team_pred_class.png')
np.testing.assert_array_equal(
labels == 0,
np.logical_and(input_image[:, :, 0] == 0, input_image[:, :, 2] == 0))
np.testing.assert_array_equal(labels == 1, input_image[:, :, 2] == 255)
np.testing.assert_array_equal(labels == 23, input_image[:, :, 0] == 255)
def test_reads_gt_segmentation(self):
instance_label_to_semantic_label = {
0: 0,
47: 1,
97: 1,
133: 1,
150: 1,
174: 1,
198: 23,
215: 1,
244: 1,
255: 1,
}
instances, classes = test_utils.panoptic_segmentation_with_class_map(
'team_gt_instance.png', instance_label_to_semantic_label)
expected_label_shape = (231, 345)
self.assertSequenceEqual(instances.shape, expected_label_shape)
self.assertSequenceEqual(classes.shape, expected_label_shape)
np.testing.assert_array_equal(instances == 0, classes == 0)
np.testing.assert_array_equal(instances == 198, classes == 23)
np.testing.assert_array_equal(
np.logical_and(instances != 0, instances != 198), classes == 1)
if __name__ == '__main__':
absltest.main()
|
{
"content_hash": "c9f7b1a2b3bce9e7980e527df872462d",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 77,
"avg_line_length": 32.016949152542374,
"alnum_prop": 0.6363155108523028,
"repo_name": "alexgorban/models",
"id": "9e9bed37e4bf721304e60d7fa12e6cfa9c4b7ef8",
"size": "2605",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "research/deeplab/evaluation/test_utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1619012"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33316"
},
{
"name": "Jupyter Notebook",
"bytes": "454746"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "16363107"
},
{
"name": "Shell",
"bytes": "144095"
},
{
"name": "Starlark",
"bytes": "148029"
}
],
"symlink_target": ""
}
|
import os
import hashlib
from django.db import models
from django.core.files.storage import FileSystemStorage
from django.conf import settings
from django.utils.encoding import force_bytes, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
try:
from cms.models.pluginmodel import CMSPlugin
except ImportError:
CMSPlugin = None
from jsonfield import JSONField
class OverwriteStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
# FIXME: max_length is ignored
if self.exists(name):
full_path = self.path(name)
os.remove(full_path)
return name
def chart_file_path(instance=None, filename=None):
tmppath = ['charts']
root, ext = os.path.splitext(filename)
filehash = hashlib.md5(force_bytes(instance.slug) +
force_bytes(instance.pk)).hexdigest()
filehash_start = filehash[:2]
tmppath.append(filehash_start)
filehash_middle = filehash[2:4]
tmppath.append(filehash_middle)
tmppath.append(str(instance.pk))
new_filename = '%s.svg' % instance.slug
tmppath.append(new_filename)
return os.path.join(*tmppath)
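# For illustration (values are hypothetical): a Chart with slug 'unemployment'
# and pk 42 whose md5 digest of slug+pk starts with 'ab12...' would be stored at
# 'charts/ab/12/42/unemployment.svg' - the two hash-prefix directories keep any
# single directory from accumulating too many files.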
@python_2_unicode_compatible
class Chart(models.Model):
title = models.CharField(max_length=255)
slug = models.SlugField()
creator = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)
created_at = models.DateTimeField(default=timezone.now)
is_public = models.BooleanField(default=False)
csv_data = models.TextField(blank=True)
chart_settings = JSONField(blank=True)
svg = models.FileField(null=True, blank=True,
upload_to=chart_file_path,
storage=OverwriteStorage())
width = models.IntegerField(null=True, blank=True)
height = models.IntegerField(null=True, blank=True)
customised = models.BooleanField(default=False)
class Meta:
verbose_name = _('chart')
verbose_name_plural = _('charts')
ordering = ('-created_at',)
def __str__(self):
return self.title
if CMSPlugin is not None:
@python_2_unicode_compatible
class DisplayChartPlugin(CMSPlugin):
"""
CMS Plugin for displaying custom entries
"""
chart = models.ForeignKey(Chart)
def __str__(self):
return _('Chart %s') % self.chart
|
{
"content_hash": "68a8bd8a1cf85e8f507bad9547785835",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 74,
"avg_line_length": 29.97530864197531,
"alnum_prop": 0.6692751235584844,
"repo_name": "correctiv/correctiv-chartbuilder",
"id": "d983bbbcdf6f46477a0f238a434624008aa867db",
"size": "2454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chartbuilder/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "69399"
},
{
"name": "HTML",
"bytes": "4421"
},
{
"name": "JavaScript",
"bytes": "1645879"
},
{
"name": "Python",
"bytes": "10686"
}
],
"symlink_target": ""
}
|
'''Trains two recurrent neural networks based upon a story and a question.
The resulting merged vector is then queried to answer a range of bAbI tasks.
The results are comparable to those for an LSTM model provided in Weston et al.:
"Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks"
http://arxiv.org/abs/1502.05698
Task Number | FB LSTM Baseline | LSTM | LSTM w/ LN
--- | --- | --- | ---
QA1 - Single Supporting Fact | 50 | 52.0 | 58.0
For the resources related to the bAbI project, refer to:
https://research.facebook.com/researchers/1543934539189348
'''
from __future__ import print_function
from functools import reduce
import re
import tarfile
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.utils.data_utils import get_file
from keras import layers
from keras.models import Model
from keras.preprocessing.sequence import pad_sequences
from layer_norm_layers import *
def tokenize(sent):
'''Return the tokens of a sentence including punctuation.
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
'''
    return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]
def parse_stories(lines, only_supporting=False):
'''Parse stories provided in the bAbi tasks format
If only_supporting is true, only the sentences that support the answer are kept.
'''
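    # A sketch of the raw bAbI line format this parser expects (taken from the
    # QA1 task; question lines carry tab-separated question, answer and
    # supporting-fact ids):
    #   1 Mary moved to the bathroom.
    #   2 John went to the hallway.
    #   3 Where is Mary?\tbathroom\t1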
data = []
story = []
for line in lines:
line = line.decode('utf-8').strip()
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
if '\t' in line:
q, a, supporting = line.split('\t')
q = tokenize(q)
substory = None
if only_supporting:
# Only select the related substory
supporting = map(int, supporting.split())
substory = [story[i - 1] for i in supporting]
else:
# Provide all the substories
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else:
sent = tokenize(line)
story.append(sent)
return data
def get_stories(f, only_supporting=False, max_length=None):
'''Given a file name, read the file, retrieve the stories, and then convert the sentences into a single story.
If max_length is supplied, any stories longer than max_length tokens will be discarded.
'''
data = parse_stories(f.readlines(), only_supporting=only_supporting)
flatten = lambda data: reduce(lambda x, y: x + y, data)
data = [(flatten(story), q, answer) for story, q, answer in data if not max_length or len(flatten(story)) < max_length]
return data
def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
X = []
Xq = []
Y = []
for story, query, answer in data:
x = [word_idx[w] for w in story]
xq = [word_idx[w] for w in query]
y = np.zeros(len(word_idx) + 1) # let's not forget that index 0 is reserved
y[word_idx[answer]] = 1
X.append(x)
Xq.append(xq)
Y.append(y)
return pad_sequences(X, maxlen=story_maxlen), pad_sequences(Xq, maxlen=query_maxlen), np.array(Y)
RNN = LayerNormLSTM # layers.LSTM
EMBED_HIDDEN_SIZE = 50
SENT_HIDDEN_SIZE = 100
QUERY_HIDDEN_SIZE = 100
BATCH_SIZE = 32
EPOCHS = 40
print('RNN / Embed / Sent / Query = {}, {}, {}, {}'.format(RNN, EMBED_HIDDEN_SIZE, SENT_HIDDEN_SIZE, QUERY_HIDDEN_SIZE))
try:
path = get_file('babi-tasks-v1-2.tar.gz', origin='https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz')
except Exception:
print('Error downloading dataset, please download it manually:\n'
'$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz\n'
'$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz')
raise
tar = tarfile.open(path)
# Default QA1 with 1000 samples
challenge = 'tasks_1-20_v1-2/en/qa1_single-supporting-fact_{}.txt'
# QA1 with 10,000 samples
# challenge = 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt'
# QA2 with 1000 samples
# challenge = 'tasks_1-20_v1-2/en/qa2_two-supporting-facts_{}.txt'
# QA2 with 10,000 samples
# challenge = 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt'
train = get_stories(tar.extractfile(challenge.format('train')))
test = get_stories(tar.extractfile(challenge.format('test')))
vocab = sorted(reduce(lambda x, y: x | y, (set(story + q + [answer]) for story, q, answer in train + test)))
# Reserve 0 for masking via pad_sequences
vocab_size = len(vocab) + 1
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
story_maxlen = max(map(len, (x for x, _, _ in train + test)))
query_maxlen = max(map(len, (x for _, x, _ in train + test)))
X, Xq, Y = vectorize_stories(train, word_idx, story_maxlen, query_maxlen)
tX, tXq, tY = vectorize_stories(test, word_idx, story_maxlen, query_maxlen)
print('vocab = {}'.format(vocab))
print('X.shape = {}'.format(X.shape))
print('Xq.shape = {}'.format(Xq.shape))
print('Y.shape = {}'.format(Y.shape))
print('story_maxlen, query_maxlen = {}, {}'.format(story_maxlen, query_maxlen))
print('Build model...')
sentence = layers.Input(shape=(story_maxlen,), dtype='int32')
encoded_sentence = layers.Embedding(vocab_size, EMBED_HIDDEN_SIZE)(sentence)
encoded_sentence = layers.Dropout(0.3)(encoded_sentence)
question = layers.Input(shape=(query_maxlen,), dtype='int32')
encoded_question = layers.Embedding(vocab_size, EMBED_HIDDEN_SIZE)(question)
encoded_question = layers.Dropout(0.3)(encoded_question)
encoded_question = RNN(EMBED_HIDDEN_SIZE)(encoded_question)
encoded_question = layers.RepeatVector(story_maxlen)(encoded_question)
merged = layers.add([encoded_sentence, encoded_question])
merged = RNN(EMBED_HIDDEN_SIZE)(merged)
merged = layers.Dropout(0.3)(merged)
preds = layers.Dense(vocab_size, activation='softmax')(merged)
model = Model([sentence, question], preds)
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
print('Training')
model.fit([X, Xq], Y, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_split=0.05)
loss, acc = model.evaluate([tX, tXq], tY, batch_size=BATCH_SIZE)
print('Test loss / test accuracy = {:.4f} / {:.4f}'.format(loss, acc))
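# A minimal inference sketch (commented out so the script's behaviour is
# unchanged): a single story/question pair can be vectorized with the same
# word index and fed to model.predict to recover the answer word.
# vocab_inv = dict((i, w) for w, i in word_idx.items())
# story_vec, query_vec, _ = vectorize_stories([test[0]], word_idx, story_maxlen, query_maxlen)
# probs = model.predict([story_vec, query_vec])
# print('Predicted answer:', vocab_inv[np.argmax(probs[0])])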
|
{
"content_hash": "1b9cae6eaf89f6d616c200db366c8b70",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 123,
"avg_line_length": 38.83734939759036,
"alnum_prop": 0.6533271288971615,
"repo_name": "DingKe/nn_playground",
"id": "4d3c0ad77ace95803776dcf9660f4d7dda8f4c26",
"size": "6447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "layernorm/babi_lnlstm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "16779"
},
{
"name": "Python",
"bytes": "181660"
}
],
"symlink_target": ""
}
|
"""Implementation of DPLL algorithm
Features:
- Clause learning
- Watch literal scheme
- VSIDS heuristic
References:
- http://en.wikipedia.org/wiki/DPLL_algorithm
"""
from __future__ import print_function, division
from collections import defaultdict
from heapq import heappush, heappop
from sympy.core import Symbol
from sympy import Predicate
from sympy.logic.boolalg import conjuncts, to_cnf, to_int_repr
def dpll_satisfiable(expr):
"""
Check satisfiability of a propositional sentence.
It returns a model rather than True when it succeeds
Examples
========
>>> from sympy import symbols
>>> from sympy.abc import A, B
>>> from sympy.logic.algorithms.dpll2 import dpll_satisfiable
>>> dpll_satisfiable(A & ~B)
{A: True, B: False}
>>> dpll_satisfiable(A & ~A)
False
"""
symbols = list(expr.atoms(Symbol, Predicate))
symbols_int_repr = range(1, len(symbols) + 1)
clauses = conjuncts(to_cnf(expr))
clauses_int_repr = to_int_repr(clauses, symbols)
solver = SATSolver(clauses_int_repr, symbols_int_repr, set())
result = solver._find_model()
if not result:
return result
# Uncomment to confirm the solution is valid (hitting set for the clauses)
#else:
#for cls in clauses_int_repr:
#assert solver.var_settings.intersection(cls)
return dict((symbols[abs(lit) - 1], lit > 0) for lit in solver.var_settings)
class SATSolver(object):
"""
Class for representing a SAT solver capable of
finding a model to a boolean theory in conjunctive
normal form.
"""
def __init__(self, clauses, variables, var_settings, heuristic='vsids',
clause_learning='none', INTERVAL=500):
self.var_settings = var_settings
self.heuristic = heuristic
self.is_unsatisfied = False
self._unit_prop_queue = []
self.update_functions = []
self.INTERVAL = INTERVAL
self._initialize_variables(variables)
self._initialize_clauses(clauses)
if 'vsids' == heuristic:
self._vsids_init()
self.heur_calculate = self._vsids_calculate
self.heur_lit_assigned = self._vsids_lit_assigned
self.heur_lit_unset = self._vsids_lit_unset
self.heur_clause_added = self._vsids_clause_added
# Note: Uncomment this if/when clause learning is enabled
#self.update_functions.append(self._vsids_decay)
else:
raise NotImplementedError
if 'simple' == clause_learning:
self.add_learned_clause = self._simple_add_learned_clause
            self.compute_conflict = self._simple_compute_conflict
            self.update_functions.append(self._simple_clean_clauses)
elif 'none' == clause_learning:
self.add_learned_clause = lambda x: None
self.compute_conflict = lambda: None
else:
raise NotImplementedError
# Create the base level
self.levels = [Level(0)]
        self._current_level.var_settings = var_settings
# Keep stats
self.num_decisions = 0
self.num_learned_clauses = 0
self.original_num_clauses = len(self.clauses)
def _initialize_variables(self, variables):
"""Set up the variable data structures needed."""
self.sentinels = defaultdict(set)
self.occurrence_count = defaultdict(int)
self.variable_set = [False] * (len(variables) + 1)
def _initialize_clauses(self, clauses):
"""Set up the clause data structures needed.
For each clause, the following changes are made:
- Unit clauses are queued for propagation right away.
- Non-unit clauses have their first and last literals set as sentinels.
- The number of clauses a literal appears in is computed.
"""
self.clauses = []
for cls in clauses:
self.clauses.append(list(cls))
for i in range(len(self.clauses)):
# Handle the unit clauses
if 1 == len(self.clauses[i]):
self._unit_prop_queue.append(self.clauses[i][0])
continue
self.sentinels[self.clauses[i][0]].add(i)
self.sentinels[self.clauses[i][-1]].add(i)
for lit in self.clauses[i]:
self.occurrence_count[lit] += 1
def _find_model(self):
"""Main DPLL loop.
Variables are chosen successively, and assigned to be either
True or False. If a solution is not found with this setting,
the opposite is chosen and the search continues. The solver
halts when every variable has a setting.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> SATSolver([set([-1]), set([1])], set([1]), set([]))._find_model()
False
>>> SATSolver([set([1]), set([-2])], set([-2, 3]), set([]))._find_model()
True
"""
# We use this variable to keep track of if we should flip a
# variable setting in successive rounds
flip_var = False
# Check if unit prop says the theory is unsat right off the bat
self._simplify()
if self.is_unsatisfied:
return False
# While the theory still has clauses remaining
while True:
# Perform cleanup / fixup at regular intervals
if self.num_decisions % self.INTERVAL == 0:
for func in self.update_functions:
func()
if flip_var:
# We have just backtracked and we are trying to opposite literal
flip_var = False
lit = self._current_level.decision
else:
# Pick a literal to set
lit = self.heur_calculate()
self.num_decisions += 1
# Stopping condition for a satisfying theory
if 0 == lit:
return True
# Start the new decision level
self.levels.append(Level(lit))
# Assign the literal, updating the clauses it satisfies
self._assign_literal(lit)
# _simplify the theory
self._simplify()
# Check if we've made the theory unsat
if self.is_unsatisfied:
self.is_unsatisfied = False
# We unroll all of the decisions until we can flip a literal
while self._current_level.flipped:
self._undo()
# If we've unrolled all the way, the theory is unsat
if 1 == len(self.levels):
return False
# Detect and add a learned clause
self.add_learned_clause(self.compute_conflict())
# Try the opposite setting of the most recent decision
flip_lit = -self._current_level.decision
self._undo()
self.levels.append(Level(flip_lit, flipped=True))
flip_var = True
########################
# Helper Methods #
########################
@property
def _current_level(self):
"""The current decision level data structure
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([set([1]), set([2])], set([1, 2]), set([]))
>>> l._find_model()
True
>>> l._current_level.decision
0
>>> l._current_level.flipped
False
>>> l._current_level.var_settings
set([1, 2])
"""
return self.levels[-1]
def _clause_sat(self, cls):
"""Check if a clause is satisfied by the current variable setting.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([set([1]), set([-1])], set([1]), set([]))
>>> l._find_model()
False
>>> l._clause_sat(0)
False
>>> l._clause_sat(1)
True
"""
for lit in self.clauses[cls]:
if lit in self.var_settings:
return True
return False
def _is_sentinel(self, lit, cls):
"""Check if a literal is a sentinel of a given clause.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([set([2, -3]), set([1]), set([3, -3]), set([2, -2]),
... set([3, -2])], set([1, 2, 3]), set([]))
>>> l._find_model()
True
>>> l._is_sentinel(2, 3)
True
>>> l._is_sentinel(-3, 1)
False
"""
return cls in self.sentinels[lit]
def _assign_literal(self, lit):
"""Make a literal assignment.
The literal assignment must be recorded as part of the current
decision level. Additionally, if the literal is marked as a
sentinel of any clause, then a new sentinel must be chosen. If
this is not possible, then unit propagation is triggered and
another literal is added to the queue to be set in the future.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([set([2, -3]), set([1]), set([3, -3]), set([2, -2]),
... set([3, -2])], set([1, 2, 3]), set([]))
>>> l._find_model()
True
>>> l.var_settings
set([-3, -2, 1])
>>> l = SATSolver([set([2, -3]), set([1]), set([3, -3]), set([2, -2]),
... set([3, -2])], set([1, 2, 3]), set([]))
>>> l._assign_literal(-1)
>>> l._find_model()
False
>>> l.var_settings
set([-1])
"""
self.var_settings.add(lit)
self._current_level.var_settings.add(lit)
self.variable_set[abs(lit)] = True
self.heur_lit_assigned(lit)
sentinel_list = list(self.sentinels[-lit])
for cls in sentinel_list:
if not self._clause_sat(cls):
other_sentinel = None
for newlit in self.clauses[cls]:
if newlit != -lit:
if self._is_sentinel(newlit, cls):
other_sentinel = newlit
elif not self.variable_set[abs(newlit)]:
self.sentinels[-lit].remove(cls)
self.sentinels[newlit].add(cls)
other_sentinel = None
break
                # No unset replacement sentinel was found, so the clause has
                # become unit: queue its remaining sentinel for unit propagation
if other_sentinel:
self._unit_prop_queue.append(other_sentinel)
def _undo(self):
"""
_undo the changes of the most recent decision level.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([set([2, -3]), set([1]), set([3, -3]), set([2, -2]),
... set([3, -2])], set([1, 2, 3]), set([]))
>>> l._find_model()
True
>>> level = l._current_level
>>> level.decision, level.var_settings, level.flipped
(-3, set([-3, -2]), False)
>>> l._undo()
>>> level = l._current_level
>>> level.decision, level.var_settings, level.flipped
(0, set([1]), False)
"""
# Undo the variable settings
for lit in self._current_level.var_settings:
self.var_settings.remove(lit)
self.heur_lit_unset(lit)
self.variable_set[abs(lit)] = False
# Pop the level off the stack
self.levels.pop()
#########################
# Propagation #
#########################
"""
Propagation methods should attempt to soundly simplify the boolean
theory, and return True if any simplification occurred and False
otherwise.
"""
def _simplify(self):
"""Iterate over the various forms of propagation to simplify the theory.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([set([2, -3]), set([1]), set([3, -3]), set([2, -2]),
... set([3, -2])], set([1, 2, 3]), set([]))
>>> l.variable_set
[False, False, False, False]
>>> l.sentinels
{-3: set([0, 2]), -2: set([3, 4]), 2: set([0, 3]), 3: set([2, 4])}
>>> l._simplify()
>>> l.variable_set
[False, True, False, False]
>>> l.sentinels
{-3: set([0, 2]), -2: set([3, 4]), -1: set(), 2: set([0, 3]),
...3: set([2, 4])}
"""
changed = True
while changed:
changed = False
changed |= self._unit_prop()
changed |= self._pure_literal()
def _unit_prop(self):
"""Perform unit propagation on the current theory."""
result = len(self._unit_prop_queue) > 0
while self._unit_prop_queue:
next_lit = self._unit_prop_queue.pop()
if -next_lit in self.var_settings:
self.is_unsatisfied = True
self._unit_prop_queue = []
return False
else:
self._assign_literal(next_lit)
return result
def _pure_literal(self):
"""Look for pure literals and assign them when found."""
return False
#########################
# Heuristics #
#########################
def _vsids_init(self):
"""Initialize the data structures needed for the VSIDS heuristic."""
self.lit_heap = []
self.lit_scores = {}
for var in range(1, len(self.variable_set)):
self.lit_scores[var] = float(-self.occurrence_count[var])
self.lit_scores[-var] = float(-self.occurrence_count[-var])
heappush(self.lit_heap, (self.lit_scores[var], var))
heappush(self.lit_heap, (self.lit_scores[-var], -var))
def _vsids_decay(self):
"""Decay the VSIDS scores for every literal.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([set([2, -3]), set([1]), set([3, -3]), set([2, -2]),
... set([3, -2])], set([1, 2, 3]), set([]))
>>> l.lit_scores
{-3: -2.0, -2: -2.0, -1: 0.0, 1: 0.0, 2: -2.0, 3: -2.0}
>>> l._vsids_decay()
>>> l.lit_scores
{-3: -1.0, -2: -1.0, -1: 0.0, 1: 0.0, 2: -1.0, 3: -1.0}
"""
# We divide every literal score by 2 for a decay factor
# Note: This doesn't change the heap property
for lit in self.lit_scores.keys():
self.lit_scores[lit] /= 2.0
def _vsids_calculate(self):
"""
VSIDS Heuristic Calculation
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([set([2, -3]), set([1]), set([3, -3]), set([2, -2]),
... set([3, -2])], set([1, 2, 3]), set([]))
>>> l.lit_heap
[(-2.0, -3), (-2.0, 2), (-2.0, -2), (0.0, 1), (-2.0, 3), (0.0, -1)]
>>> l._vsids_calculate()
-3
>>> l.lit_heap
[(-2.0, -2), (-2.0, 2), (0.0, -1), (0.0, 1), (-2.0, 3)]
"""
if len(self.lit_heap) == 0:
return 0
# Clean out the front of the heap as long the variables are set
while self.variable_set[abs(self.lit_heap[0][1])]:
heappop(self.lit_heap)
if len(self.lit_heap) == 0:
return 0
return heappop(self.lit_heap)[1]
def _vsids_lit_assigned(self, lit):
"""Handle the assignment of a literal for the VSIDS heuristic."""
pass
def _vsids_lit_unset(self, lit):
"""Handle the unsetting of a literal for the VSIDS heuristic.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([set([2, -3]), set([1]), set([3, -3]), set([2, -2]),
... set([3, -2])], set([1, 2, 3]), set([]))
>>> l.lit_heap
[(-2.0, -3), (-2.0, 2), (-2.0, -2), (0.0, 1), (-2.0, 3), (0.0, -1)]
>>> l._vsids_lit_unset(2)
>>> l.lit_heap
[(-2.0, -3), (-2.0, -2), (-2.0, -2), (-2.0, 2), (-2.0, 3), (0.0, -1),
...(-2.0, 2), (0.0, 1)]
"""
var = abs(lit)
heappush(self.lit_heap, (self.lit_scores[var], var))
heappush(self.lit_heap, (self.lit_scores[-var], -var))
def _vsids_clause_added(self, cls):
"""Handle the addition of a new clause for the VSIDS heuristic.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([set([2, -3]), set([1]), set([3, -3]), set([2, -2]),
... set([3, -2])], set([1, 2, 3]), set([]))
>>> l.num_learned_clauses
0
>>> l.lit_scores
{-3: -2.0, -2: -2.0, -1: 0.0, 1: 0.0, 2: -2.0, 3: -2.0}
>>> l._vsids_clause_added(set([2, -3]))
>>> l.num_learned_clauses
1
>>> l.lit_scores
{-3: -1.0, -2: -2.0, -1: 0.0, 1: 0.0, 2: -1.0, 3: -2.0}
"""
self.num_learned_clauses += 1
for lit in cls:
self.lit_scores[lit] += 1
########################
# Clause Learning #
########################
def _simple_add_learned_clause(self, cls):
"""Add a new clause to the theory.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([set([2, -3]), set([1]), set([3, -3]), set([2, -2]),
... set([3, -2])], set([1, 2, 3]), set([]))
>>> l.num_learned_clauses
0
>>> l.clauses
[[2, -3], [1], [3, -3], [2, -2], [3, -2]]
>>> l.sentinels
{-3: set([0, 2]), -2: set([3, 4]), 2: set([0, 3]), 3: set([2, 4])}
>>> l._simple_add_learned_clause([3])
>>> l.clauses
[[2, -3], [1], [3, -3], [2, -2], [3, -2], [3]]
>>> l.sentinels
{-3: set([0, 2]), -2: set([3, 4]), 2: set([0, 3]), 3: set([2, 4, 5])}
"""
cls_num = len(self.clauses)
self.clauses.append(cls)
for lit in cls:
self.occurrence_count[lit] += 1
self.sentinels[cls[0]].add(cls_num)
self.sentinels[cls[-1]].add(cls_num)
self.heur_clause_added(cls)
def _simple_compute_conflict(self):
""" Build a clause representing the fact that at least one decision made
so far is wrong.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([set([2, -3]), set([1]), set([3, -3]), set([2, -2]),
... set([3, -2])], set([1, 2, 3]), set([]))
>>> l._find_model()
True
>>> l._simple_compute_conflict()
[3]
"""
return [-(level.decision) for level in self.levels[1:]]
def _simple_clean_clauses(self):
"""Clean up learned clauses."""
pass
class Level(object):
"""
Represents a single level in the DPLL algorithm, and contains
enough information for a sound backtracking procedure.
"""
def __init__(self, decision, flipped=False):
self.decision = decision
self.var_settings = set()
self.flipped = flipped
|
{
"content_hash": "beba809bf7c2ded6935e87ed67a1bc8b",
"timestamp": "",
"source": "github",
"line_count": 607,
"max_line_length": 81,
"avg_line_length": 31.914332784184513,
"alnum_prop": 0.512543877761718,
"repo_name": "kmacinnis/sympy",
"id": "c07e89d1134bb0de86d3ff93235962ce6104484d",
"size": "19372",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sympy/logic/algorithms/dpll2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13573973"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "1284"
},
{
"name": "TeX",
"bytes": "8790"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import calendar
import datetime
import re
import sys
try:
from urllib import parse as urllib_parse
except ImportError: # Python 2
import urllib as urllib_parse
import urlparse
urllib_parse.urlparse = urlparse.urlparse
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils import six
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_text(urllib_parse.quote(force_str(url), force_str(safe)))
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_text(urllib_parse.quote_plus(force_str(url), force_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_text(urllib_parse.unquote(force_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_text(urllib_parse.unquote_plus(force_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return urllib_parse.urlencode(
[(force_str(k),
[force_str(i) for i in v] if isinstance(v, (list,tuple)) else force_str(v))
for k, v in query],
doseq)
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s GMT' % rfcdate[:25]
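# Worked example: http_date(0) formats the Unix epoch as
# 'Thu, 01 Jan 1970 00:00:00 GMT', and cookie_date(0) as
# 'Thu, 01-Jan-1970 00:00:00 GMT'.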
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Returns an integer expressed in seconds since the epoch, in UTC.
"""
# emails.Util.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])
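# Worked example: parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT') (the RFC 2616
# example date) returns 784111777 seconds since the epoch; the RFC850 form
# 'Sunday, 06-Nov-94 08:49:37 GMT' parses to the same value.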
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
    # base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int to avoid
# returning a long (#15067). The long type was removed in Python 3.
if not six.PY3 and value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
if i < 0:
raise ValueError("Negative base36 conversion input.")
if not six.PY3:
if not isinstance(i, six.integer_types):
raise TypeError("Non-integer base36 conversion input.")
if i > sys.maxint:
raise ValueError("Base36 conversion input too large.")
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i // j])
i = i % j
factor -= 1
return ''.join(base36)
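# Small worked examples of the base36 helpers:
#   int_to_base36(0)  == '0'
#   int_to_base36(36) == '10'
#   base36_to_int('z') == 35 and base36_to_int('10') == 36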
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
return etags
def quote_etag(etag):
"""
Wraps a string in double quotes escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urllib_parse.urlparse(url1), urllib_parse.urlparse(url2)
try:
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
except ValueError:
return False
def is_safe_url(url, host=None):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host).
Always returns ``False`` on an empty url.
"""
if not url:
return False
netloc = urllib_parse.urlparse(url)[1]
return not netloc or netloc == host
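# For example, is_safe_url('/accounts/profile/', host='example.com') is True
# (no netloc, so the redirect cannot leave the site), while
# is_safe_url('http://evil.example.org/', host='example.com') is False.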
|
{
"content_hash": "acb0d92da45dc93954b6ec61b4b6413a",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 95,
"avg_line_length": 34.47540983606557,
"alnum_prop": 0.6371849738468854,
"repo_name": "eltonsantos/django",
"id": "f4911b4ec0015b2d42e4ce6c4de55ec2c410a206",
"size": "8412",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/utils/http.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Very quick and dirty module to do basic GPIO stuff on /sys/class/gpio/ (to which normal users can be given access with relative safety,
/dev/mem, which RPi.GPIO uses is definitely root-only -land)"""
import os
#TODO: Steal the epoll idea from https://github.com/derekstavis/python-sysfs-gpio/blob/master/src/sysfs/gpio.py
FAKE = False
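# A minimal usage sketch (channel 17 is an arbitrary example; the calling user
# needs write access under /sys/class/gpio):
#   import usergpio, time
#   usergpio.set_value(17, 1)      # export, force 'out' and drive the pin high
#   time.sleep(1)
#   usergpio.set_value(17, 0)      # drive it low again
#   state = usergpio.read_value(17)  # forces the pin to 'in' and reads it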
def channel_controlpath(channel):
"""Get the /sys/class/gpio/ -path for the given channel"""
return "/sys/class/gpio/gpio%d" % int(channel)
def conditional_export(channel):
"""Call export if channel is not exported"""
if os.path.isdir(channel_controlpath(channel)):
return True
return export(channel)
def export(channel):
"""Exports the given channel to /sys/class/gpio/"""
if FAKE:
print("export(%s)" % channel)
return False
with open("/sys/class/gpio/export", 'w') as f:
f.write(str(channel))
return True
def set_direction(channel, direction):
"""Set direction ('in' or 'out') of the channel"""
conditional_export(channel)
path = "%s/direction" % channel_controlpath(channel)
if FAKE:
print("set_direction(%s, %s)" % (channel, direction))
return False
with open(path, 'w') as f:
f.write(direction)
return True
def set_value(channel, value):
"""Set value for the channel (forces it to 'out' direction first)"""
set_direction(channel, 'out')
path = "%s/value" % channel_controlpath(channel)
#print("path=%s value=%s" % (path, value))
if FAKE:
print("set_value(%s, %s)" % (channel, value))
return False
with open(path, 'w') as f:
f.write(str(value))
return True
def read_value(channel):
"""UNTESTED: Reads the value from given channel (forces it to 'in' direction first)"""
set_direction(channel, 'in')
path = "%s/value" % channel_controlpath(channel)
if FAKE:
print("read_value(%s)" % channel)
return False
with open(path, 'r') as f:
ret = f.readline()
return ret
if __name__ == '__main__':
import sys
if len(sys.argv) < 3:
print "Usage (for output): usergpio.py channel state"
sys.exit(1)
set_value(int(sys.argv[1]), int(sys.argv[2]))
|
{
"content_hash": "670ba3abe03505b280c2057d0f78774e",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 139,
"avg_line_length": 32.31884057971015,
"alnum_prop": 0.6322869955156951,
"repo_name": "jautero/nfc_lock",
"id": "6b8e9cc6be7be6dcc46ee7eb026086f428695ddf",
"size": "2276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/usergpio.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "568370"
},
{
"name": "C++",
"bytes": "240651"
},
{
"name": "Eagle",
"bytes": "310741"
},
{
"name": "Makefile",
"bytes": "6036"
},
{
"name": "Python",
"bytes": "18555"
},
{
"name": "Shell",
"bytes": "854"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from dateutil.parser import parse
import inspect
import itertools
import json
import pytz
import re
from requests.exceptions import HTTPError
import six
import sys
from onecodex.exceptions import (
MethodNotSupported,
OneCodexException,
PermissionDenied,
ServerError,
)
from onecodex.models.helpers import (
check_bind,
generate_potion_sort_clause,
generate_potion_keyword_where,
)
from onecodex.vendored.potion_client.converter import PotionJSONEncoder
from onecodex.vendored.potion_client.resource import Resource
DEFAULT_PAGE_SIZE = 200
class ResourceList(object):
"""Wrapper around lists of onecodex-wrapped potion objects.
Parameters
----------
_resource : `list`
A list of potion objects, which are generally stored in `OneCodexBase._resource`.
oc_model : `OneCodexBase`
A class which inherits from `OneCodexBase`, for example, `models.Tags`.
Notes
-----
In OneCodexBase, when attributes are lists (e.g., `Samples.tags`), actions performed on the
returned lists are not passed through to the underlying potion object's list. This class passes
those actions through, and will generally act like a list.
See https://github.com/onecodex/onecodex/issues/40
"""
def _update(self):
self._res_list = [self._oc_model(x) for x in self._resource]
def _check_valid_resource(self, other, check_for_dupes=True):
try:
other = iter(other)
except TypeError:
other = [other]
other_ids = []
for o in other:
if not isinstance(o, self._oc_model):
raise ValueError(
"Expected object of type '{}', got '{}'".format(
self._oc_model.__name__, type(o).__name__
)
)
other_ids.append(o.id)
if check_for_dupes:
# duplicates are not allowed
self_ids = [s.id for s in self._resource]
if len(set(self_ids + other_ids)) != len(self_ids + other_ids):
raise OneCodexException(
"{} cannot contain duplicate objects".format(self.__class__.__name__)
)
def __init__(self, _resource, oc_model, **kwargs):
if not issubclass(oc_model, OneCodexBase):
raise ValueError(
"Expected object of type '{}', got '{}'".format(
OneCodexBase.__name__, oc_model.__name__
)
)
# turn potion Resource objects into OneCodex objects
self._resource = _resource
self._oc_model = oc_model
self._kwargs = kwargs
self._update()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
# two ResourceLists are equal if they refer to the same underlying Resource
return id(self._resource) == id(other._resource)
def __contains__(self, other):
return other.__hash__() in [x.__hash__() for x in self._res_list]
@property
def __repr__(self):
return self._res_list.__repr__
@property
def __len__(self):
return self._res_list.__len__
def __getitem__(self, x):
wrapped = self._res_list[x]
if isinstance(wrapped, list):
return self.__class__(self._resource[x], self._oc_model, **self._kwargs)
else:
return wrapped
def __setitem__(self, k, v):
self._check_valid_resource(v)
self._resource[k] = v._resource
self._update()
def __delitem__(self, x):
del self._resource[x]
self._update()
@property
def __iter__(self):
return self._res_list.__iter__
@property
def __reversed__(self):
return self._res_list.__reversed__
def __add__(self, other):
if not isinstance(other, self.__class__):
raise TypeError(
'can only concatenate {} (not "{}") to {}'.format(
self.__class__.__name__, type(other), self.__class__.__name__
)
)
new_obj = self.copy()
new_obj.extend(other._res_list)
return new_obj
def append(self, x):
self._check_valid_resource(x)
self._resource.append(x._resource)
self._update()
def clear(self):
self._resource.clear()
self._res_list.clear()
def copy(self):
new_obj = self.__class__(self._resource[:], self._oc_model, **self._kwargs)
return new_obj
def count(self, x):
# assume that ResourceList objects are identical if they share the same underlying resource
self._check_valid_resource(x, check_for_dupes=False)
n = 0
for res_obj in self._resource:
if res_obj == x._resource:
n += 1
return n
def extend(self, iterable):
self._check_valid_resource(iterable)
self._resource.extend([x._resource for x in iterable])
self._update()
def index(self, x):
# assume that ResourceList objects are identical if they share the same underlying resource
self._check_valid_resource(x, check_for_dupes=False)
for res_obj_idx, res_obj in enumerate(self._resource):
if res_obj == x._resource:
return res_obj_idx
raise ValueError("{} is not in list".format(x))
def insert(self, idx, x):
self._check_valid_resource(x)
self._resource.insert(idx, x._resource)
self._update()
def pop(self):
self._resource.pop()
return self._res_list.pop()
def remove(self, x):
del self._resource[self.index(x)]
self._update()
class OneCodexBase(object):
"""Parent of all the One Codex objects that wraps the Potion-Client API."""
def __init__(self, _resource=None, **kwargs):
# FIXME: allow setting properties via kwargs?
# FIXME: get a resource from somewhere instead of setting to None (lots of stuff assumes
# non-None) if we have a class.resource?
if _resource is not None:
if not isinstance(_resource, Resource):
raise TypeError("Use the .get() method to fetch an individual resource.")
self._resource = _resource
elif hasattr(self.__class__, "_resource"):
for key, val in kwargs.items():
# This modifies kwargs in place to be the underlying
# Potion-Client resource
if isinstance(val, OneCodexBase):
kwargs[key] = val._resource
self._resource = self.__class__._resource(**kwargs)
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.id)
def _repr_html_(self):
return self._resource._repr_html_()
def __dir__(self):
# this only gets called on instances, so we're okay to add all the properties because
# this won't appear when you call, e.g. dir(ocx.Samples)
fields = [
str(f) if f != "$uri" else "id" for f in self.__class__._resource._schema["properties"]
]
# this might be a little too clever, but we mask out class methods/fxns from the instances
base_object_names = []
for name, obj in inspect.getmembers(self.__class__):
if inspect.isfunction(obj): # .save() and .delete() are functions in Py3
base_object_names.append(name)
if inspect.ismethod(obj) and obj.__self__ is not self.__class__:
base_object_names.append(name)
return fields + base_object_names
def __getattr__(self, key):
if hasattr(self, "_resource") and hasattr(self.__class__, "_resource"):
schema_key = key if key != "id" else "$uri"
schema = self.__class__._resource._schema["properties"].get(schema_key)
if schema is not None:
value = getattr(self._resource, key)
if isinstance(value, Resource):
# convert potion resources into wrapped ones
resource_path = value._uri.rsplit("/", 1)[0]
return _model_lookup[resource_path](_resource=value)
elif isinstance(value, list):
if schema["items"]["type"] == "object":
# convert lists of potion resources into wrapped ones
compiled_re = re.compile(schema["items"]["properties"]["$ref"]["pattern"])
# if the list we're returning is empty, we can't just infer what type of
# object belongs in this list from its contents. to account for this, we'll
# instead try to match the object's URI to those in our lookup table
for route, obj in _model_lookup.items():
if compiled_re.match("{}/dummy_lookup".format(route)):
return ResourceList(value, obj)
raise OneCodexException(
"No object found for {}".format(compiled_re.pattern)
)
else:
# otherwise, just return a regular list
return value
else:
if key == "id":
# undo the bad coercion from potion_client/resource.py#L111
if value is None:
return None
else:
return str(value)
if schema.get("format") == "date-time" and value is not None:
datetime_value = parse(value)
if datetime_value.tzinfo is None:
return pytz.utc.localize(datetime_value)
else:
return datetime_value.astimezone(pytz.utc)
return value
elif key == "id" or key in self.__class__._resource._schema["properties"]:
# make fields appear blank if there's no _resource bound to me
return None
raise AttributeError(
"'{}' object has no attribute '{}'".format(self.__class__.__name__, key)
)
def __setattr__(self, key, value):
if key.startswith("_"): # Allow directly setting _attributes, incl. _resource
# these are any fields that have to be settable normally
super(OneCodexBase, self).__setattr__(key, value)
return
elif key == "id":
raise AttributeError("can't set attribute")
elif isinstance(value, OneCodexBase) or isinstance(value, ResourceList):
self._resource[key] = value._resource
return
elif isinstance(value, (list, tuple)):
# convert any fancy items into their underlying resources
new_value = []
for v in value:
new_value.append(v._resource if isinstance(v, OneCodexBase) else v)
# coerce back to the value passed in
self._resource[key] = type(value)(new_value)
return
elif hasattr(self, "_resource") and hasattr(self.__class__, "_resource"):
schema = self.__class__._resource._schema["properties"].get(key)
if schema is not None:
# do some type checking against the schema
if not self.__class__._has_schema_method("update"):
raise MethodNotSupported(
"{} do not support editing.".format(self.__class__.__name__)
)
if schema.get("readOnly", False):
raise MethodNotSupported("{} is a read-only field".format(key))
if schema.get("format") == "date-time":
if isinstance(value, datetime):
if value.tzinfo is None:
value = value.isoformat() + "Z"
else:
value = value.isoformat()
# changes on this model also change the potion resource
self._resource[key] = value
return
raise AttributeError(
"'{}' object has no attribute '{}'".format(self.__class__.__name__, key)
)
def __delattr__(self, key):
if not self.__class__._has_schema_method("update"):
raise MethodNotSupported("{} do not support editing.".format(self.__class__.__name__))
if hasattr(self, "_resource") and key in self._resource.keys():
# changes on this model also change the potion resource
del self._resource[key]
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
# TODO: We should potentially check that both resources are up-to-date
return self._resource._uri == other._resource._uri
def _to_json(self, include_references=True):
"""Convert model to JSON using the PotionJSONEncode.
Automatically resolves the resource as needed (`_properties` call handles this).
"""
if include_references:
return json.dumps(self._resource._properties, cls=PotionJSONEncoder)
else:
return json.dumps(
{
k: v
for k, v in self._resource._properties.items()
if not isinstance(v, Resource) and not k.startswith("$")
},
cls=PotionJSONEncoder,
)
@classmethod
def _convert_id_to_uri(cls, uuid):
base_uri = cls._resource._schema["_base_uri"]
if not uuid.startswith(base_uri):
uuid = "{}/{}".format(base_uri, uuid)
return uuid
@classmethod
def _has_schema_method(cls, method_name):
# potion-client is too stupid to check the schema before allowing certain operations
# so we manually check it before allowing some instance methods
# FIXME: this doesn't actually work though, because potion creates these routes for all
# items :/
method_links = cls._resource._schema["links"]
return any(True for link in method_links if link["rel"] == method_name)
@classmethod
def all(cls, sort=None, limit=None):
"""Return all objects of this type. Alias for where() (without filter arguments).
See `where` for documentation on the `sort` and `limit` parameters.
"""
return cls.where(sort=sort, limit=limit)
@classmethod
def where(cls, *filters, **keyword_filters):
"""Retrieve objects (Samples, Classifications, etc.) from the One Codex server.
Parameters
----------
filters : `object`
Advanced filters to use (not implemented)
sort : `str` or `list`, optional
Sort the results by this field (or list of fields). By default in descending order,
but if any of the fields start with the special character ^, sort in ascending order.
For example, sort=['size', '^filename'] will sort by size from largest to smallest and
filename from A-Z for items with the same size.
limit : `int`, optional
Number of records to return. For smaller searches, this can reduce the number of
network requests made.
keyword_filters : `str` or `object`
Filter the results by specific keywords (or filter objects, in advanced usage)
Examples
--------
You can filter objects that are returned locally using a lambda function:
# returns only samples with a filename ending in '.gz'
my_samples = Samples.where(filter=lambda s: s.filename.endswith('.gz'))
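        Keyword filters, sorting and limiting can be combined as well (a sketch;
        the `project` keyword is assumed to be a filterable field):
            # up to 25 samples in a project, sorted by filename A-Z
            samples = Samples.where(project=my_project, sort='^filename', limit=25)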
Returns
-------
`list`
A list of all objects matching these filters. If no filters are passed, this
matches all objects.
"""
check_bind(cls)
# do this here to avoid passing this on to potion
filter_func = keyword_filters.pop("filter", None)
public = False
if any(x["rel"] == "instances_public" for x in cls._resource._schema["links"]):
public = keyword_filters.pop("public", False)
instances_route = keyword_filters.pop(
"_instances", "instances" if not public else "instances_public"
)
schema = next(
link for link in cls._resource._schema["links"] if link["rel"] == instances_route
)
sort_schema = schema["schema"]["properties"]["sort"]["properties"]
where_schema = schema["schema"]["properties"]["where"]["properties"]
sort = generate_potion_sort_clause(keyword_filters.pop("sort", None), sort_schema)
limit = keyword_filters.pop("limit", None if not public else 1000)
where = {}
# we're filtering by fancy objects (like SQLAlchemy's filter)
if len(filters) > 0:
if len(filters) == 1 and isinstance(filters[0], dict):
where = filters[0]
elif all(isinstance(f, six.string_types) for f in filters):
# if it's a list of strings, treat it as an multiple "get" request
where = {"$uri": {"$in": [cls._convert_id_to_uri(f) for f in filters]}}
else:
# we're doing some more advanced filtering
raise NotImplementedError("Advanced filtering hasn't been implemented yet")
# we're filtering by keyword arguments (like SQLAlchemy's filter_by)
if len(keyword_filters) > 0:
for k, v in generate_potion_keyword_where(keyword_filters, where_schema, cls).items():
if k in where:
raise AttributeError("Multiple definitions for same field {}".format(k))
where[k] = v
# the potion-client method returns an iterator (which lazily fetchs the records
# using `per_page` instances per request) so for limiting we only want to fetch the first
# n (and not instantiate all the available which is what would happen if we just sliced)
cursor = getattr(cls._resource, instances_route)(
where=where, sort=sort, per_page=DEFAULT_PAGE_SIZE
)
if limit is not None:
cursor = itertools.islice(cursor, limit)
# finally, apply local filtering function on objects before returning
wrapped = [cls(_resource=r) for r in cursor]
if filter_func:
if callable(filter_func):
wrapped = [obj for obj in wrapped if filter_func(obj) is True]
else:
raise OneCodexException(
"Expected callable for filter, got: {}".format(type(filter_func).__name__)
)
return wrapped
@classmethod
def get(cls, uuid):
"""Retrieve one specific object from the server by its UUID (unique 16-character id).
UUIDs are found in the web browser's address bar while viewing analyses and other objects.
Parameters
----------
uuid : `str`
UUID of the object to retrieve.
Returns
-------
`OneCodexBase` or `None`
The object with that UUID or None if no object could be found.
Examples
--------
>>> api.Samples.get('xxxxxxxxxxxxxxxx')
<Sample xxxxxxxxxxxxxxxx>
"""
check_bind(cls)
# we're just retrieving one object from its uuid
try:
resource = cls._resource.fetch(uuid)
if isinstance(resource, list):
# TODO: Investigate why potion .fetch()
                # method is occasionally returning a list here...
if len(resource) == 1:
resource = resource[0]
else:
raise TypeError("Potion-Client error in fetching resource")
except HTTPError as e:
# 404 error means this doesn't exist
if e.response.status_code == 404:
return None
else:
raise e
return cls(_resource=resource)
def delete(self):
"""Delete this object from the One Codex server."""
check_bind(self)
if self.id is None:
raise ServerError("{} object does not exist yet".format(self.__class__.name))
elif not self.__class__._has_schema_method("destroy"):
raise MethodNotSupported("{} do not support deletion.".format(self.__class__.__name__))
try:
self._resource.delete()
except HTTPError as e:
if e.response.status_code == 403:
raise PermissionDenied("") # FIXME: is this right?
else:
raise e
def save(self):
"""Either create or persist changes on this object back to the One Codex server."""
check_bind(self)
creating = self.id is None
if creating and not self.__class__._has_schema_method("create"):
raise MethodNotSupported("{} do not support creating.".format(self.__class__.__name__))
if not creating and not self.__class__._has_schema_method("update"):
raise MethodNotSupported("{} do not support updating.".format(self.__class__.__name__))
try:
self._resource.save()
except HTTPError as e:
if e.response.status_code == 400:
err_json = e.response.json().get("errors", [])
msg = pretty_print_error(err_json)
raise ServerError(msg)
elif e.response.status_code == 404:
action = "creating" if creating else "updating"
raise MethodNotSupported(
"{} do not support {}.".format(self.__class__.__name__, action)
)
elif e.response.status_code == 409:
raise ServerError("This {} object already exists".format(self.__class__.__name__))
else:
raise e
from onecodex.models.analysis import ( # noqa
Analyses,
Classifications,
Alignments,
Panels,
)
from onecodex.models.collection import SampleCollection # noqa
from onecodex.models.misc import Jobs, Projects, Tags, Users, Documents # noqa
from onecodex.models.sample import Samples, Metadata # noqa
__all__ = [
"Alignments",
"Classifications",
"Documents",
"Jobs",
"Metadata",
"Panels",
"Projects",
"Samples",
"SampleCollection",
"Tags",
"Users",
]
# import and expose experimental models
from onecodex.models.experimental import ( # noqa
AnnotationSets,
Assemblies,
FunctionalProfiles,
Genomes,
Taxa,
)
__all__.extend(["AnnotationSets", "Assemblies", "FunctionalProfiles", "Genomes", "Taxa"])
def pretty_print_error(err_json):
"""Pretty print Flask-Potion error messages for the user."""
# Special case validation errors
if len(err_json) == 1 and "validationOf" in err_json[0]:
required_fields = ", ".join(err_json[0]["validationOf"]["required"])
return "Validation error. Requires properties: {}.".format(required_fields)
# General error handling
msg = "; ".join(err.get("message", "") for err in err_json)
# Fallback
if not msg:
msg = "Bad request."
return msg
# go through all the models and generate a lookup table (to use in binding in the API and elsewhere)
def is_oc_class(cls):
return inspect.isclass(cls) and issubclass(cls, OneCodexBase)
_model_lookup = {}
for name, obj in inspect.getmembers(sys.modules[__name__], is_oc_class):
if hasattr(obj, "_resource_path"):
_model_lookup[obj._resource_path] = obj
|
{
"content_hash": "ba655950bdd78376119791dc0acdee2a",
"timestamp": "",
"source": "github",
"line_count": 631,
"max_line_length": 100,
"avg_line_length": 37.729001584786054,
"alnum_prop": 0.567522157348679,
"repo_name": "onecodex/onecodex",
"id": "da7288e0837f920e0289d88260906812aeec6e06",
"size": "23807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onecodex/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5609"
},
{
"name": "Jupyter Notebook",
"bytes": "9580"
},
{
"name": "Makefile",
"bytes": "597"
},
{
"name": "Python",
"bytes": "516832"
},
{
"name": "SCSS",
"bytes": "4745"
},
{
"name": "Shell",
"bytes": "114"
},
{
"name": "Smarty",
"bytes": "587"
}
],
"symlink_target": ""
}
|
import os
import unittest
import mock
import pyfakefs.fake_filesystem_unittest as fake_filesystem_unittest
from dem.project.environment import EnvironmentBuilder
from dem.project.reader import Config
class TestEnvironmentBuilder(fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
self._config = mock.MagicMock(spec=Config)
self._config.has_http_proxy.return_value = False
@mock.patch('virtualenv.create_environment')
@mock.patch('subprocess.call')
    def test_will_create_devenv_dir(self, mock_subprocess, mock_virtualenv):
EnvironmentBuilder.build('', self._config)
self.assertTrue(os.path.exists('.devenv'))
@mock.patch('virtualenv.create_environment')
@mock.patch('subprocess.call')
    def test_will_create_project_dir(self, mock_subprocess, mock_virtualenv):
EnvironmentBuilder.build('project', self._config)
self.assertTrue(os.path.exists(os.path.join('.devenv', 'project')))
@mock.patch('virtualenv.create_environment')
@mock.patch('subprocess.call')
    def test_will_create_dependencies_dir(self, mock_subprocess, mock_virtualenv):
EnvironmentBuilder.build('project', self._config)
self.assertTrue(os.path.exists(os.path.join('.devenv', 'project', 'dependencies')))
@mock.patch('subprocess.call')
@mock.patch('virtualenv.create_environment')
    def test_will_create_virtualenv_in_devenv_dir(self, mock_virtualenv, mock_subprocess):
EnvironmentBuilder.build('project', self._config)
mock_virtualenv.assert_called_once_with(os.path.join(os.getcwd(), '.devenv', 'project'))
@mock.patch('subprocess.call')
@mock.patch('virtualenv.create_environment')
    def test_will_create_downloads_dir(self, mock_virtualenv, mock_subprocess):
EnvironmentBuilder.build('project', self._config)
self.assertTrue(os.path.exists(os.path.join('.devenv', 'project', 'downloads')))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "341554f33375a0057dd27247f7f0a443",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 96,
"avg_line_length": 38.57692307692308,
"alnum_prop": 0.7088733798604188,
"repo_name": "nitehawck/dem",
"id": "35a3b89f68f46f4f26da5b3061f071351f2d66c7",
"size": "2006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/TestEnvironmentBuilder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13701"
}
],
"symlink_target": ""
}
|
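One detail the tests above depend on: stacked mock.patch decorators inject their mocks bottom-up, so the decorator closest to the def supplies the first extra argument. A self-contained sketch of the ordering rule, using the standard library's unittest.mock and two targets that always exist; the patched functions here are only stand-ins for virtualenv.create_environment and subprocess.call:

import os
import subprocess
from unittest import mock

@mock.patch('os.getcwd')        # outer decorator -> injected second
@mock.patch('subprocess.call')  # innermost decorator -> injected first
def run(mock_call, mock_getcwd):
    subprocess.call(['echo', 'hi'])  # hits the mock, nothing is executed
    os.getcwd()
    assert mock_call.called and mock_getcwd.called

run()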
from setuptools import setup
__author__ = 'leandroloi'
__license__ = "GPL"
__version__ = "0.0.1"
__maintainer__ = "Leandro Loi"
__email__ = "leandroloi at gmail dot com"
setup(
name='bov-eod-scrapper',
version='1',
author='Leandro Loi',
    description=('An End Of Day scrapper and API for Bovespa'),
license='Open Source'
)
|
{
"content_hash": "d1454dba21ec485f91376ea727d7f129",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 62,
"avg_line_length": 21.375,
"alnum_prop": 0.6345029239766082,
"repo_name": "leandroloi/bovespa-eod",
"id": "7413b27ff33090033ff09d35d4292fc5b30e27f9",
"size": "366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "53345"
}
],
"symlink_target": ""
}
|
import tkinter
import subprocess
import signal
import shlex
import threading
from subprocess import Popen
def mouse_only_ui(cmd_arg):
cmd = (shlex.split(cmd_arg)
if isinstance(cmd_arg, (str,))
else cmd_arg)
if not isinstance(cmd, (list,)):
raise ValueError()
#
app_state = {}
def stdio_reader(file_obj_name):
if file_obj_name not in ['stdout', 'stderr']:
raise ValueError()
proc = app_state['proc']
file_obj = getattr(proc, file_obj_name)
for line in iter((file_obj.readline
if file_obj else lambda: b""), b""):
app_state[file_obj_name].append(
line.decode("utf-8"))
def on_go():
if 'proc' in app_state and app_state['proc'].poll() is None:
return
proc = Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
app_state['proc'] = proc
# workaround "pipe buffer deadlock"
# also consider
# * .communicate() with a future (?)
# * passing a file object to stdout and stderr
app_state['stdout'] = []
app_state['stderr'] = []
app_state['reader_threads'] = [
threading.Thread(
target=stdio_reader,
args=('stdout',),
),
threading.Thread(
target=stdio_reader,
args=('stderr',),
),
]
for t in app_state['reader_threads']:
t.start()
def on_stop():
if 'proc' not in app_state:
return
proc = app_state['proc']
#
        # escalate politely: interrupt, then terminate, and only then kill
        for sig in [None, signal.SIGINT,
                    signal.SIGTERM,
                    signal.SIGKILL,]:
if sig is not None:
proc.send_signal(sig)
if sig is None:
if proc.poll() is None:
continue
else:
break
try:
if sig is not None:
proc.wait(timeout=3)
except subprocess.TimeoutExpired as ex:
continue
break
else:
raise RuntimeError("process didn't exit")
#
for t in app_state['reader_threads']:
t.join()
if proc.poll() != 0:
print("error exit")
print("stdout")
print(('\n'.join(app_state['stdout'])
if app_state['stdout']
else "<None>"))
print("stderr")
print(('\n'.join(app_state['stderr'])
if app_state['stderr']
else "<None>"))
del app_state['proc']
del app_state['reader_threads']
del app_state['stdout']
del app_state['stderr']
###
# ui
root = tkinter.Tk()
left_frame = tkinter.Frame(
root,
# relief="raised",
# color="#200",
)
left_frame["bg"] = "purple"
right_frame = tkinter.Frame(
root,
)
right_frame["bg"] = "pink"
left_frame.pack(
side="left",
fill="both",
anchor="center",
expand=True,
)
right_frame.pack(
side="right",
fill="y",
)
##
# https://stackoverflow.com/questions/42579927/rounded-button-tkinter-python
# suggests using canvas.
# elsewhere suggested to use image/bitmap
class RoundButton(tkinter.Button, tkinter.Canvas):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# self.create_line(0, 0, 10, 10)
self.create_line(*self.bbox("all"))
#
def _configure(self, *args, **kwargs):
rv = super()._configure(*args, **kwargs)
breakpoint()
return rv
##
go_btn = tkinter.Button(
left_frame,
text="Go!",
command=on_go,
relief="raised",
)
go_btn["bg"] = "#082"
stop_btn = tkinter.Button(
right_frame,
text="stop",
command=on_stop,
)
stop_btn["bg"] = "#086"
go_btn.pack(
side="bottom",
# expand=True,
fill="both",
)
stop_btn.pack(
side="top",
)
root.mainloop()
|
{
"content_hash": "d6e7a2d1f187d482aa058b0f9066ac26",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 80,
"avg_line_length": 29.253333333333334,
"alnum_prop": 0.46604375569735645,
"repo_name": "ransomw/dotfiles",
"id": "74203e40029ec6ec8a0231029a3ac772ec5360e6",
"size": "4388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyutils/tv/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "449"
},
{
"name": "Emacs Lisp",
"bytes": "20303"
},
{
"name": "Go",
"bytes": "46689"
},
{
"name": "HTML",
"bytes": "962"
},
{
"name": "Haskell",
"bytes": "3579"
},
{
"name": "Lua",
"bytes": "711"
},
{
"name": "Python",
"bytes": "187437"
},
{
"name": "Shell",
"bytes": "32973"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class UnmanagedFileSystemReference(Model):
"""Details of the file system to mount on the compute cluster nodes.
All required parameters must be populated in order to send to Azure.
:param mount_command: Required. Command used to mount the unmanaged file
system.
:type mount_command: str
:param relative_mount_path: Required. Specifies the relative path on the
compute cluster node where the file system will be mounted. Note that all
cluster level unmanaged file system will be mounted under
$AZ_BATCHAI_MOUNT_ROOT location and job level unmanaged file system will
be mounted under $AZ_BATCHAI_JOB_MOUNT_ROOT.
:type relative_mount_path: str
"""
_validation = {
'mount_command': {'required': True},
'relative_mount_path': {'required': True},
}
_attribute_map = {
'mount_command': {'key': 'mountCommand', 'type': 'str'},
'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'},
}
def __init__(self, **kwargs):
super(UnmanagedFileSystemReference, self).__init__(**kwargs)
self.mount_command = kwargs.get('mount_command', None)
self.relative_mount_path = kwargs.get('relative_mount_path', None)
|
{
"content_hash": "dcf07d7565d156f69a623c3013b400a3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 78,
"avg_line_length": 38.696969696969695,
"alnum_prop": 0.6742364917776038,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "5f1e465c1d1fb10bac92d92190616bf5dc3c499a",
"size": "1751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-batchai/azure/mgmt/batchai/models/unmanaged_file_system_reference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
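Both fields of UnmanagedFileSystemReference are marked required in its _validation map, and relative_mount_path ends up under $AZ_BATCHAI_MOUNT_ROOT (or $AZ_BATCHAI_JOB_MOUNT_ROOT for jobs). A hedged usage sketch, assuming the package's usual models re-export; the NFS command and mount path are illustrative values only:

from azure.mgmt.batchai.models import UnmanagedFileSystemReference

# Example values only; substitute a real mount command and path.
ref = UnmanagedFileSystemReference(
    mount_command="mount -t nfs 10.0.0.4:/exports/data",
    relative_mount_path="nfs",  # mounted as $AZ_BATCHAI_MOUNT_ROOT/nfs
)
print(ref.mount_command, ref.relative_mount_path)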
""" Sample command-line program for listing Google Dataproc Clusters"""
import argparse
import os
from apiclient import discovery
from google.cloud import storage
from oauth2client.client import GoogleCredentials
# Currently only the "global" region is supported
REGION = 'global'
DEFAULT_FILENAME = 'pyspark_sort.py'
def get_default_pyspark_file():
"""Gets the PySpark file from this directory"""
current_dir = os.path.dirname(os.path.abspath(__file__))
f = open(os.path.join(current_dir, DEFAULT_FILENAME), 'r')
return f, DEFAULT_FILENAME
def get_pyspark_file(filename):
f = open(filename, 'r')
return f, os.path.basename(filename)
def upload_pyspark_file(project_id, bucket_name, filename, file):
"""Uploads the PySpark file in this directory to the configured
input bucket."""
print('Uploading pyspark file to GCS')
client = storage.Client(project=project_id)
bucket = client.get_bucket(bucket_name)
blob = bucket.blob(filename)
blob.upload_from_file(file)
def download_output(project_id, cluster_id, output_bucket, job_id):
"""Downloads the output file from Cloud Storage and returns it as a
string."""
print('Downloading output file')
client = storage.Client(project=project_id)
bucket = client.get_bucket(output_bucket)
output_blob = (
'google-cloud-dataproc-metainfo/{}/jobs/{}/driveroutput.000000000'
.format(cluster_id, job_id))
return bucket.blob(output_blob).download_as_string()
# [START create_cluster]
def create_cluster(dataproc, project, cluster_name, zone):
print('Creating cluster.')
zone_uri = \
'https://www.googleapis.com/compute/v1/projects/{}/zones/{}'.format(
project, zone)
cluster_data = {
'projectId': project,
'clusterName': cluster_name,
'config': {
'gceClusterConfig': {
'zoneUri': zone_uri
}
}
}
result = dataproc.projects().regions().clusters().create(
projectId=project,
region=REGION,
body=cluster_data).execute()
return result
# [END create_cluster]
def wait_for_cluster_creation(dataproc, project_id, cluster_name, zone):
print('Waiting for cluster creation')
while True:
result = dataproc.projects().regions().clusters().list(
projectId=project_id,
region=REGION).execute()
cluster_list = result['clusters']
cluster = [c
for c in cluster_list
if c['clusterName'] == cluster_name][0]
if cluster['status']['state'] == 'ERROR':
raise Exception(result['status']['details'])
if cluster['status']['state'] == 'RUNNING':
print("Cluster created.")
break
# [START list_clusters_with_detail]
def list_clusters_with_details(dataproc, project):
result = dataproc.projects().regions().clusters().list(
projectId=project,
region=REGION).execute()
cluster_list = result['clusters']
for cluster in cluster_list:
print("{} - {}"
.format(cluster['clusterName'], cluster['status']['state']))
return result
# [END list_clusters_with_detail]
def get_cluster_id_by_name(cluster_list, cluster_name):
"""Helper function to retrieve the ID and output bucket of a cluster by
name."""
cluster = [c for c in cluster_list if c['clusterName'] == cluster_name][0]
return cluster['clusterUuid'], cluster['config']['configBucket']
# [START submit_pyspark_job]
def submit_pyspark_job(dataproc, project, cluster_name, bucket_name, filename):
"""Submits the Pyspark job to the cluster, assuming `filename` has
already been uploaded to `bucket_name`"""
job_details = {
'projectId': project,
'job': {
'placement': {
'clusterName': cluster_name
},
'pysparkJob': {
'mainPythonFileUri': 'gs://{}/{}'.format(bucket_name, filename)
}
}
}
result = dataproc.projects().regions().jobs().submit(
projectId=project,
region=REGION,
body=job_details).execute()
job_id = result['reference']['jobId']
print('Submitted job ID {}'.format(job_id))
return job_id
# [END submit_pyspark_job]
# [START delete]
def delete_cluster(dataproc, project, cluster):
print('Tearing down cluster')
result = dataproc.projects().regions().clusters().delete(
projectId=project,
region=REGION,
clusterName=cluster).execute()
return result
# [END delete]
# [START wait]
def wait_for_job(dataproc, project, job_id):
print('Waiting for job to finish...')
while True:
result = dataproc.projects().regions().jobs().get(
projectId=project,
region=REGION,
jobId=job_id).execute()
# Handle exceptions
if result['status']['state'] == 'ERROR':
raise Exception(result['status']['details'])
elif result['status']['state'] == 'DONE':
print('Job finished')
return result
# [END wait]
# [START get_client]
def get_client():
"""Builds an http client authenticated with the service account
credentials."""
credentials = GoogleCredentials.get_application_default()
dataproc = discovery.build('dataproc', 'v1', credentials=credentials)
return dataproc
# [END get_client]
def main(project_id, zone, cluster_name, bucket_name, pyspark_file=None):
dataproc = get_client()
try:
if pyspark_file:
spark_file, spark_filename = get_pyspark_file(pyspark_file)
else:
spark_file, spark_filename = get_default_pyspark_file()
create_cluster(dataproc, project_id, cluster_name, zone)
wait_for_cluster_creation(dataproc, project_id, cluster_name, zone)
upload_pyspark_file(project_id, bucket_name,
spark_filename, spark_file)
cluster_list = list_clusters_with_details(
dataproc, project_id)['clusters']
(cluster_id, output_bucket) = (
get_cluster_id_by_name(cluster_list, cluster_name))
# [START call_submit_pyspark_job]
job_id = submit_pyspark_job(
dataproc, project_id, cluster_name, bucket_name, spark_filename)
# [END call_submit_pyspark_job]
wait_for_job(dataproc, project_id, job_id)
output = download_output(project_id, cluster_id, output_bucket, job_id)
print('Received job output {}'.format(output))
return output
finally:
delete_cluster(dataproc, project_id, cluster_name)
spark_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'--project_id', help='Project ID you want to access.', required=True),
parser.add_argument(
        '--zone', help='Zone to create clusters in', required=True)
parser.add_argument(
'--cluster_name', help='Name of the cluster to create', required=True)
parser.add_argument(
'--gcs_bucket', help='Bucket to upload Pyspark file to', required=True)
parser.add_argument(
'--pyspark_file', help='Pyspark filename. Defaults to pyspark_sort.py')
args = parser.parse_args()
main(
args.project_id, args.zone,
args.cluster_name, args.gcs_bucket, args.pyspark_file)
|
{
"content_hash": "7ae79b3b0b51cb6c8f907a3f0561b5fc",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 79,
"avg_line_length": 33.591928251121075,
"alnum_prop": 0.6290214924576158,
"repo_name": "clarko1/Cramd",
"id": "426a73e5c78224ee6d81e31dff258f4a3daedac0",
"size": "8054",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dataproc/create_cluster_and_submit_job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2924"
},
{
"name": "HTML",
"bytes": "23592"
},
{
"name": "JavaScript",
"bytes": "11222"
},
{
"name": "Makefile",
"bytes": "881"
},
{
"name": "Protocol Buffer",
"bytes": "8810"
},
{
"name": "Python",
"bytes": "1055640"
},
{
"name": "Shell",
"bytes": "8344"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Phoenix Pipeline'
copyright = '2014, Open Event Data Alliance'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.1'
# The full version, including alpha/beta/rc tags.
release = '.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PhoenixPipelinedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PhoenixPipeline.tex', 'Phoenix Pipeline Documentation',
'Open Event Data Alliance', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'phoenixpipeline', 'Phoenix Pipeline Documentation',
['Open Event Data Alliance'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PhoenixPipeline', 'Phoenix Pipeline Documentation',
'Open Event Data Alliance', 'PhoenixPipeline', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "c408581098fba5261bd08c897848c7a8",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 85,
"avg_line_length": 31.846774193548388,
"alnum_prop": 0.7065079767029627,
"repo_name": "openeventdata/phoenix_pipeline",
"id": "32e53bb81a1f6e0f89c4fc3aaa8633dabd1b0c99",
"size": "8327",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108362"
}
],
"symlink_target": ""
}
|
from marshmallow import Schema, fields, validate
from app.basemodels import db, CRUD
class {Resources}(db.Model, CRUD):
id = db.Column(db.Integer, primary_key=True)
{db_rows}
def __init__(self, {init_args}):
{init_self_vars}
class {Resources}Schema(Schema):
not_blank = validate.Length(min=1, error='Field cannot be blank')
{schema}
class Meta:
fields = ('id', {meta})
|
{
"content_hash": "d7d4a433f0d1ae32620c4b1b94cd64e5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 69,
"avg_line_length": 22.894736842105264,
"alnum_prop": 0.6137931034482759,
"repo_name": "Leo-G/T",
"id": "9676b3eb92fc4c72fa09d9f6a70b53381f13dde1",
"size": "435",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scaffold/app/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14088"
},
{
"name": "HTML",
"bytes": "24547"
},
{
"name": "Python",
"bytes": "56930"
},
{
"name": "Shell",
"bytes": "119"
}
],
"symlink_target": ""
}
|
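The braces in the scaffold above ({Resources}, {db_rows}, {init_args}, {init_self_vars}, {schema}, {meta}) are substitution slots filled in when a resource is generated. Rendered for a hypothetical Articles resource with a single title column, the output would look roughly like this; it only runs inside the generated app, since app.basemodels is supplied by the scaffold itself:

from marshmallow import Schema, fields, validate
from app.basemodels import db, CRUD

class Articles(db.Model, CRUD):
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(250), nullable=False)

    def __init__(self, title):
        self.title = title

class ArticlesSchema(Schema):
    not_blank = validate.Length(min=1, error='Field cannot be blank')
    title = fields.String(validate=not_blank)

    class Meta:
        fields = ('id', 'title')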
import os
from pathlib import Path
from pytorch_lightning.callbacks import ModelCheckpoint
from deepspeech_pytorch.configs.lightning_config import ModelCheckpointConf
class CheckpointHandler(ModelCheckpoint):
def __init__(self, cfg: ModelCheckpointConf):
super().__init__(
dirpath=cfg.dirpath,
filename=cfg.filename,
monitor=cfg.monitor,
verbose=cfg.verbose,
save_last=cfg.save_last,
save_top_k=cfg.save_top_k,
save_weights_only=cfg.save_weights_only,
mode=cfg.mode,
auto_insert_metric_name=cfg.auto_insert_metric_name,
every_n_train_steps=cfg.every_n_train_steps,
train_time_interval=cfg.train_time_interval,
every_n_epochs=cfg.every_n_epochs,
save_on_train_epoch_end=cfg.save_on_train_epoch_end,
)
def find_latest_checkpoint(self):
raise NotImplementedError
class FileCheckpointHandler(CheckpointHandler):
def find_latest_checkpoint(self):
"""
Finds the latest checkpoint in a folder based on the timestamp of the file.
If there are no checkpoints, returns None.
:return: The latest checkpoint path, or None if no checkpoints are found.
"""
paths = list(Path(self.dirpath).rglob('*'))
if paths:
paths.sort(key=os.path.getctime)
latest_checkpoint_path = paths[-1]
return latest_checkpoint_path
else:
return None
|
{
"content_hash": "f4a475ac4eb1e8ac00a2a4d877f098fb",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 83,
"avg_line_length": 33.17391304347826,
"alnum_prop": 0.6310615989515073,
"repo_name": "SeanNaren/deepspeech.pytorch",
"id": "279c588595e9c5d5841822fb26ea38ec9acf8776",
"size": "1526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepspeech_pytorch/checkpoint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "713"
},
{
"name": "Python",
"bytes": "131433"
}
],
"symlink_target": ""
}
|
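FileCheckpointHandler.find_latest_checkpoint boils down to "newest file under dirpath by creation time". The same pattern, stripped of the Lightning wrapper, fits in a few self-contained lines; the checkpoints directory name is a placeholder:

import os
from pathlib import Path

def latest_file(directory):
    # Newest entry (by ctime) under `directory`, or None if nothing is there.
    paths = sorted(Path(directory).rglob('*'), key=os.path.getctime)
    return paths[-1] if paths else None

if __name__ == "__main__":
    print(latest_file("checkpoints"))  # placeholder directory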
from webViews.view import normalView
from webViews.dockletrequest import dockletRequest
from flask import redirect, request, abort, render_template
class registerView(normalView):
template_path = 'register.html'
@classmethod
def post(self):
form = dict(request.form)
        if (request.form.get('username') is None
                or request.form.get('password') is None
                or request.form.get('password') != request.form.get('password2')
                or request.form.get('email') is None
                or request.form.get('description') is None):
abort(500)
result = dockletRequest.unauthorizedpost('/register/', form)
return redirect("/login/")
@classmethod
def get(self):
return render_template(self.template_path)
|
{
"content_hash": "a57162f89fb1e324fa4725402c4cee4a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 235,
"avg_line_length": 41.111111111111114,
"alnum_prop": 0.6891891891891891,
"repo_name": "FirmlyReality/docklet",
"id": "bae545723e66dfe266c8ff0af52596ecfb49053a",
"size": "740",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "web/webViews/authenticate/register.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1109"
},
{
"name": "HTML",
"bytes": "289696"
},
{
"name": "JavaScript",
"bytes": "13946"
},
{
"name": "Jupyter Notebook",
"bytes": "17841"
},
{
"name": "Python",
"bytes": "666527"
},
{
"name": "Shell",
"bytes": "41851"
}
],
"symlink_target": ""
}
|