text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import copy
import math
import time
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from oslo_utils import strutils
LOG = logging.getLogger(__name__)
@six.add_metaclass(utils.TraceWrapperMetaclass)
class Client(client_base.Client):
    """ZAPI client for NetApp Data ONTAP systems operating in 7-mode."""

    def __init__(self, volume_list=None, **kwargs):
        """Create a 7-mode client.

        :param volume_list: optional list of volume names used to scope LUN
            listing; when None, LUNs from all volumes are considered.
        """
        super(Client, self).__init__(**kwargs)
        vfiler = kwargs.get('vfiler', None)
        self.connection.set_vfiler(vfiler)
        # cached=False forces a live query so the negotiated ONTAPI version
        # reflects the backend we just connected to.
        (major, minor) = self.get_ontapi_version(cached=False)
        self.connection.set_api_version(major, minor)

        self.volume_list = volume_list
        self._init_features()

    def _init_features(self):
        """Record which optional backend features this ONTAPI supports."""
        super(Client, self)._init_features()

        ontapi_version = self.get_ontapi_version()  # major, minor

        ontapi_1_20 = ontapi_version >= (1, 20)
        self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_20)

    def send_ems_log_message(self, message_dict):
        """Sends a message to the Data ONTAP EMS log."""

        # NOTE(cknight): Cannot use deepcopy on the connection context
        node_client = copy.copy(self)
        node_client.connection = copy.copy(self.connection)
        node_client.connection.set_timeout(25)

        try:
            # EMS messages go to the physical controller, not a vfiler.
            node_client.connection.set_vfiler(None)
            node_client.send_request('ems-autosupport-log', message_dict)
            LOG.debug('EMS executed successfully.')
        except netapp_api.NaApiError as e:
            # Best-effort: EMS logging failures must not fail the operation.
            LOG.warning('Failed to invoke EMS. %s', e)

    def get_iscsi_target_details(self):
        """Gets the iSCSI target portal details."""
        iscsi_if_iter = netapp_api.NaElement('iscsi-portal-list-info')
        result = self.connection.invoke_successfully(iscsi_if_iter, True)
        tgt_list = []
        portal_list_entries = result.get_child_by_name(
            'iscsi-portal-list-entries')
        if portal_list_entries:
            portal_list = portal_list_entries.get_children()
            for iscsi_if in portal_list:
                d = dict()
                d['address'] = iscsi_if.get_child_content('ip-address')
                d['port'] = iscsi_if.get_child_content('ip-port')
                d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag')
                tgt_list.append(d)
        return tgt_list

    def check_iscsi_initiator_exists(self, iqn):
        """Returns True if initiator exists."""
        initiator_exists = True
        try:
            auth_list = netapp_api.NaElement('iscsi-initiator-auth-list-info')
            auth_list.add_new_child('initiator', iqn)
            self.connection.invoke_successfully(auth_list, True)
        except netapp_api.NaApiError:
            # Any API error is treated as "initiator not found".
            initiator_exists = False

        return initiator_exists

    def get_fc_target_wwpns(self):
        """Gets the FC target details."""
        wwpns = []
        port_name_list_api = netapp_api.NaElement('fcp-port-name-list-info')
        result = self.connection.invoke_successfully(port_name_list_api)
        port_names = result.get_child_by_name('fcp-port-names')
        if port_names:
            for port_name_info in port_names.get_children():
                # WWPNs are normalized to lower case for comparison.
                wwpn = port_name_info.get_child_content('port-name').lower()
                wwpns.append(wwpn)
        return wwpns

    def get_iscsi_service_details(self):
        """Returns iscsi iqn."""
        iscsi_service_iter = netapp_api.NaElement('iscsi-node-get-name')
        result = self.connection.invoke_successfully(iscsi_service_iter, True)
        return result.get_child_content('node-name')

    def set_iscsi_chap_authentication(self, iqn, username, password):
        """Provides NetApp host's CHAP credentials to the backend."""
        # NOTE(review): the CHAP password is embedded in the CLI command sent
        # over SSH; it may appear in backend command history.
        command = ("iscsi security add -i %(iqn)s -s CHAP "
                   "-p %(password)s -n %(username)s") % {
            'iqn': iqn,
            'password': password,
            'username': username,
        }

        LOG.debug('Updating CHAP authentication for %(iqn)s.', {'iqn': iqn})

        try:
            ssh_pool = self.ssh_client.ssh_pool
            with ssh_pool.item() as ssh:
                self.ssh_client.execute_command(ssh, command)
        except Exception as e:
            msg = _('Failed to set CHAP authentication for target IQN '
                    '%(iqn)s. Details: %(ex)s') % {
                'iqn': iqn,
                'ex': e,
            }
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    def get_lun_list(self):
        """Gets the list of LUNs on filer."""
        lun_list = []
        if self.volume_list:
            # Restrict the listing to the configured volumes.
            for vol in self.volume_list:
                try:
                    luns = self._get_vol_luns(vol)
                    if luns:
                        lun_list.extend(luns)
                except netapp_api.NaApiError:
                    LOG.warning("Error finding LUNs for volume %s."
                                " Verify volume exists.", vol)
        else:
            luns = self._get_vol_luns(None)
            lun_list.extend(luns)
        return lun_list

    def _get_vol_luns(self, vol_name):
        """Gets the LUNs for a volume."""
        api = netapp_api.NaElement('lun-list-info')
        if vol_name:
            api.add_new_child('volume-name', vol_name)
        result = self.connection.invoke_successfully(api, True)
        luns = result.get_child_by_name('luns')
        return luns.get_children()

    def get_igroup_by_initiators(self, initiator_list):
        """Get igroups exactly matching a set of initiators."""
        igroup_list = []
        if not initiator_list:
            return igroup_list

        initiator_set = set(initiator_list)

        igroup_list_info = netapp_api.NaElement('igroup-list-info')
        result = self.connection.invoke_successfully(igroup_list_info, True)

        initiator_groups = result.get_child_by_name(
            'initiator-groups') or netapp_api.NaElement('none')
        for initiator_group_info in initiator_groups.get_children():

            initiator_set_for_igroup = set()
            initiators = initiator_group_info.get_child_by_name(
                'initiators') or netapp_api.NaElement('none')
            for initiator_info in initiators.get_children():
                initiator_set_for_igroup.add(
                    initiator_info.get_child_content('initiator-name'))

            # Only igroups whose initiator set matches exactly are returned.
            if initiator_set == initiator_set_for_igroup:
                igroup = {'initiator-group-os-type':
                          initiator_group_info.get_child_content(
                              'initiator-group-os-type'),
                          'initiator-group-type':
                          initiator_group_info.get_child_content(
                              'initiator-group-type'),
                          'initiator-group-name':
                          initiator_group_info.get_child_content(
                              'initiator-group-name')}
                igroup_list.append(igroup)

        return igroup_list

    def clone_lun(self, path, clone_path, name, new_name,
                  space_reserved='true', src_block=0,
                  dest_block=0, block_count=0, source_snapshot=None):
        """Clone a LUN by issuing one or more clone-start ZAPI calls.

        Large clones are split across calls/ranges to stay within ZAPI
        limits (see the limit comments below).
        """
        # zAPI can only handle 2^24 blocks per range
        bc_limit = 2 ** 24  # 8GB
        # zAPI can only handle 32 block ranges per call
        br_limit = 32
        z_limit = br_limit * bc_limit  # 256 GB
        z_calls = int(math.ceil(block_count / float(z_limit)))
        zbc = block_count
        if z_calls == 0:
            # block_count == 0 means "clone the whole LUN" in a single call.
            z_calls = 1
        for _call in range(0, z_calls):
            if zbc > z_limit:
                block_count = z_limit
                zbc -= z_limit
            else:
                block_count = zbc

            zapi_args = {
                'source-path': path,
                'destination-path': clone_path,
                'no-snap': 'true',
            }
            if source_snapshot:
                zapi_args['snapshot-name'] = source_snapshot
            clone_start = netapp_api.NaElement.create_node_with_children(
                'clone-start', **zapi_args)
            if block_count > 0:
                block_ranges = netapp_api.NaElement("block-ranges")
                # zAPI can only handle 2^24 block ranges
                bc_limit = 2 ** 24  # 8GB
                segments = int(math.ceil(block_count / float(bc_limit)))
                bc = block_count
                for _segment in range(0, segments):
                    if bc > bc_limit:
                        block_count = bc_limit
                        bc -= bc_limit
                    else:
                        block_count = bc
                    block_range = \
                        netapp_api.NaElement.create_node_with_children(
                            'block-range',
                            **{'source-block-number':
                               six.text_type(src_block),
                               'destination-block-number':
                               six.text_type(dest_block),
                               'block-count':
                               six.text_type(block_count)})
                    block_ranges.add_child_elem(block_range)
                    src_block += int(block_count)
                    dest_block += int(block_count)
                clone_start.add_child_elem(block_ranges)
            result = self.connection.invoke_successfully(clone_start, True)
            clone_id_el = result.get_child_by_name('clone-id')
            cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
            vol_uuid = cl_id_info.get_child_content('volume-uuid')
            clone_id = cl_id_info.get_child_content('clone-op-id')
            if vol_uuid:
                self._check_clone_status(clone_id, vol_uuid, name, new_name)

    def _check_clone_status(self, clone_id, vol_uuid, name, new_name):
        """Checks for the job till completed."""
        clone_status = netapp_api.NaElement('clone-list-status')
        cl_id = netapp_api.NaElement('clone-id')
        clone_status.add_child_elem(cl_id)
        cl_id.add_node_with_children('clone-id-info',
                                     **{'clone-op-id': clone_id,
                                        'volume-uuid': vol_uuid})
        running = True
        clone_ops_info = None
        while running:
            result = self.connection.invoke_successfully(clone_status, True)
            status = result.get_child_by_name('status')
            ops_info = status.get_children()
            if ops_info:
                for info in ops_info:
                    if info.get_child_content('clone-state') == 'running':
                        # Still in progress: poll again after a short wait.
                        time.sleep(1)
                        break
                    else:
                        running = False
                        clone_ops_info = info
                        break
        else:
            # while/else: runs once the loop exits normally (clone finished).
            if clone_ops_info:
                fmt = {'name': name, 'new_name': new_name}
                if clone_ops_info.get_child_content('clone-state')\
                        == 'completed':
                    LOG.debug("Clone operation with src %(name)s"
                              " and dest %(new_name)s completed", fmt)
                else:
                    LOG.debug("Clone operation with src %(name)s"
                              " and dest %(new_name)s failed", fmt)
                    raise netapp_api.NaApiError(
                        clone_ops_info.get_child_content('error'),
                        clone_ops_info.get_child_content('reason'))

    def get_lun_by_args(self, **args):
        """Retrieves LUNs with specified args."""
        lun_info = netapp_api.NaElement.create_node_with_children(
            'lun-list-info', **args)
        result = self.connection.invoke_successfully(lun_info, True)
        luns = result.get_child_by_name('luns')
        return luns.get_children()

    def get_filer_volumes(self, volume=None):
        """Returns list of filer volumes in API format."""
        vol_request = netapp_api.NaElement('volume-list-info')
        res = self.connection.invoke_successfully(vol_request, True)
        volumes = res.get_child_by_name('volumes')
        if volumes:
            return volumes.get_children()
        return []

    def get_lun_map(self, path):
        """Returns the raw lun-map-list-info API result for the given path."""
        lun_map_list = netapp_api.NaElement.create_node_with_children(
            'lun-map-list-info',
            **{'path': path})
        return self.connection.invoke_successfully(lun_map_list, True)

    def set_space_reserve(self, path, enable):
        """Sets the space reserve info."""
        space_res = netapp_api.NaElement.create_node_with_children(
            'lun-set-space-reservation-info',
            **{'path': path, 'enable': enable})
        self.connection.invoke_successfully(space_res, True)

    def get_actual_path_for_export(self, export_path):
        """Gets the actual path on the filer for export path."""
        storage_path = netapp_api.NaElement.create_node_with_children(
            'nfs-exportfs-storage-path', **{'pathname': export_path})
        result = self.connection.invoke_successfully(storage_path,
                                                     enable_tunneling=True)
        if result.get_child_content('actual-pathname'):
            return result.get_child_content('actual-pathname')
        raise exception.NotFound(_('No storage path found for export path %s')
                                 % (export_path))

    def clone_file(self, src_path, dest_path, source_snapshot=None):
        """Clone a file, waiting for the backend clone job to finish."""
        LOG.debug("Cloning with src %(src_path)s, dest %(dest_path)s",
                  {'src_path': src_path, 'dest_path': dest_path})
        zapi_args = {
            'source-path': src_path,
            'destination-path': dest_path,
            'no-snap': 'true',
        }
        if source_snapshot:
            zapi_args['snapshot-name'] = source_snapshot
        clone_start = netapp_api.NaElement.create_node_with_children(
            'clone-start', **zapi_args)
        result = self.connection.invoke_successfully(clone_start,
                                                     enable_tunneling=True)
        clone_id_el = result.get_child_by_name('clone-id')
        cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
        vol_uuid = cl_id_info.get_child_content('volume-uuid')
        clone_id = cl_id_info.get_child_content('clone-op-id')
        if vol_uuid:
            try:
                self._wait_for_clone_finish(clone_id, vol_uuid)
            except netapp_api.NaApiError as e:
                if e.code != 'UnknownCloneId':
                    # Clean up backend clone state before propagating.
                    self._clear_clone(clone_id)
                raise

    def _wait_for_clone_finish(self, clone_op_id, vol_uuid):
        """Waits till a clone operation is complete or errored out."""
        clone_ls_st = netapp_api.NaElement('clone-list-status')
        clone_id = netapp_api.NaElement('clone-id')
        clone_ls_st.add_child_elem(clone_id)
        clone_id.add_node_with_children('clone-id-info',
                                        **{'clone-op-id': clone_op_id,
                                           'volume-uuid': vol_uuid})
        task_running = True
        while task_running:
            result = self.connection.invoke_successfully(clone_ls_st,
                                                         enable_tunneling=True)
            status = result.get_child_by_name('status')
            ops_info = status.get_children()
            if ops_info:
                state = ops_info[0].get_child_content('clone-state')
                if state == 'completed':
                    task_running = False
                elif state == 'failed':
                    code = ops_info[0].get_child_content('error')
                    reason = ops_info[0].get_child_content('reason')
                    raise netapp_api.NaApiError(code, reason)
                else:
                    time.sleep(1)
            else:
                # NOTE(review): this interpolates the `clone_id` NaElement
                # (not the clone_op_id string) into the message, so the text
                # shows an element repr — looks like a bug; confirm.
                raise netapp_api.NaApiError(
                    'UnknownCloneId',
                    'No clone operation for clone id %s found on the filer'
                    % (clone_id))

    def _clear_clone(self, clone_id):
        """Clear the clone information.

        Invoke this in case of failed clone.
        """
        clone_clear = netapp_api.NaElement.create_node_with_children(
            'clone-clear',
            **{'clone-id': clone_id})
        retry = 3
        while retry:
            try:
                self.connection.invoke_successfully(clone_clear,
                                                    enable_tunneling=True)
                break
            except netapp_api.NaApiError:
                # Filer might be rebooting
                time.sleep(5)
            retry = retry - 1

    def get_file_usage(self, path):
        """Gets the file unique bytes."""
        LOG.debug('Getting file usage for %s', path)
        file_use = netapp_api.NaElement.create_node_with_children(
            'file-usage-get', **{'path': path})
        res = self.connection.invoke_successfully(file_use)
        # NOTE(review): `bytes` shadows the builtin; harmless here but worth
        # renaming in a future change.
        bytes = res.get_child_content('unique-bytes')
        LOG.debug('file-usage for path %(path)s is %(bytes)s',
                  {'path': path, 'bytes': bytes})
        return bytes

    def get_ifconfig(self):
        """Returns the raw net-ifconfig-get API result."""
        ifconfig = netapp_api.NaElement('net-ifconfig-get')
        return self.connection.invoke_successfully(ifconfig)

    def get_flexvol_capacity(self, flexvol_path):
        """Gets total capacity and free capacity, in bytes, of the flexvol."""
        api_args = {'volume': flexvol_path, 'verbose': 'false'}

        result = self.send_request('volume-list-info', api_args)

        flexvol_info_list = result.get_child_by_name('volumes')
        flexvol_info = flexvol_info_list.get_children()[0]

        size_total = float(flexvol_info.get_child_content('size-total'))
        size_available = float(
            flexvol_info.get_child_content('size-available'))

        return {
            'size-total': size_total,
            'size-available': size_available,
        }

    def get_performance_instance_names(self, object_name):
        """Get names of performance instances for a node."""
        api_args = {'objectname': object_name}

        result = self.send_request('perf-object-instance-list-info',
                                   api_args,
                                   enable_tunneling=False)

        instance_names = []

        instances = result.get_child_by_name(
            'instances') or netapp_api.NaElement('None')

        for instance_info in instances.get_children():
            instance_names.append(instance_info.get_child_content('name'))

        return instance_names

    def get_performance_counters(self, object_name, instance_names,
                                 counter_names):
        """Gets one or more 7-mode Data ONTAP performance counters."""
        api_args = {
            'objectname': object_name,
            'instances': [
                {'instance': instance} for instance in instance_names
            ],
            'counters': [
                {'counter': counter} for counter in counter_names
            ],
        }

        result = self.send_request('perf-object-get-instances',
                                   api_args,
                                   enable_tunneling=False)

        counter_data = []

        timestamp = result.get_child_content('timestamp')

        instances = result.get_child_by_name(
            'instances') or netapp_api.NaElement('None')
        for instance in instances.get_children():

            instance_name = instance.get_child_content('name')

            counters = instance.get_child_by_name(
                'counters') or netapp_api.NaElement('None')
            for counter in counters.get_children():

                counter_name = counter.get_child_content('name')
                counter_value = counter.get_child_content('value')

                counter_data.append({
                    'instance-name': instance_name,
                    'timestamp': timestamp,
                    counter_name: counter_value,
                })

        return counter_data

    def get_system_name(self):
        """Get the name of the 7-mode Data ONTAP controller."""
        result = self.send_request('system-get-info',
                                   {},
                                   enable_tunneling=False)

        system_info = result.get_child_by_name('system-info')
        system_name = system_info.get_child_content('system-name')
        return system_name

    def get_snapshot(self, volume_name, snapshot_name):
        """Gets a single snapshot."""
        snapshot_list_info = netapp_api.NaElement('snapshot-list-info')
        snapshot_list_info.add_new_child('volume', volume_name)
        result = self.connection.invoke_successfully(snapshot_list_info,
                                                     enable_tunneling=True)

        snapshots = result.get_child_by_name('snapshots')
        if not snapshots:
            msg = _('No snapshots could be found on volume %s.')
            raise exception.VolumeBackendAPIException(data=msg % volume_name)
        snapshot_list = snapshots.get_children()
        snapshot = None
        for s in snapshot_list:
            if (snapshot_name == s.get_child_content('name')) and (snapshot
                                                                   is None):
                snapshot = {
                    'name': s.get_child_content('name'),
                    'volume': s.get_child_content('volume'),
                    'busy': strutils.bool_from_string(
                        s.get_child_content('busy')),
                }
                snapshot_owners_list = s.get_child_by_name(
                    'snapshot-owners-list') or netapp_api.NaElement('none')
                snapshot_owners = set([snapshot_owner.get_child_content(
                    'owner') for snapshot_owner in
                    snapshot_owners_list.get_children()])
                snapshot['owners'] = snapshot_owners
            elif (snapshot_name == s.get_child_content('name')) and (
                    snapshot is not None):
                # A second snapshot with the same name means the name is
                # ambiguous on this volume.
                msg = _('Could not find unique snapshot %(snap)s on '
                        'volume %(vol)s.')
                msg_args = {'snap': snapshot_name, 'vol': volume_name}
                raise exception.VolumeBackendAPIException(data=msg % msg_args)
        if not snapshot:
            raise exception.SnapshotNotFound(snapshot_id=snapshot_name)

        return snapshot

    def get_snapshots_marked_for_deletion(self, volume_list=None):
        """Get a list of snapshots marked for deletion."""
        # NOTE(review): the default volume_list=None is iterated directly and
        # would raise TypeError — callers apparently always pass a list;
        # confirm before relying on the default.
        snapshots = []

        for volume_name in volume_list:
            api_args = {
                'target-name': volume_name,
                'target-type': 'volume',
                'terse': 'true',
            }

            result = self.send_request('snapshot-list-info', api_args)
            snapshots.extend(
                self._parse_snapshot_list_info_result(result, volume_name))

        return snapshots

    def _parse_snapshot_list_info_result(self, result, volume_name):
        """Extract deletable (non-busy, deleted-prefixed) snapshots."""
        snapshots = []
        snapshots_elem = result.get_child_by_name(
            'snapshots') or netapp_api.NaElement('none')
        snapshot_info_list = snapshots_elem.get_children()
        for snapshot_info in snapshot_info_list:
            snapshot_name = snapshot_info.get_child_content('name')
            snapshot_busy = strutils.bool_from_string(
                snapshot_info.get_child_content('busy'))
            snapshot_id = snapshot_info.get_child_content(
                'snapshot-instance-uuid')
            if (not snapshot_busy and
                    snapshot_name.startswith(client_base.DELETED_PREFIX)):
                snapshots.append({
                    'name': snapshot_name,
                    'instance_id': snapshot_id,
                    'volume_name': volume_name,
                })
        return snapshots
|
{
"content_hash": "b1fe0c713dd832e53d466a5f73a1d482",
"timestamp": "",
"source": "github",
"line_count": 588,
"max_line_length": 79,
"avg_line_length": 41.40816326530612,
"alnum_prop": 0.53942828979793,
"repo_name": "eharney/cinder",
"id": "9671fa9d007ff8e9437c5d77490c43b295a88262",
"size": "25092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/netapp/dataontap/client/client_7mode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "561"
},
{
"name": "Python",
"bytes": "19839107"
},
{
"name": "Shell",
"bytes": "6453"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import functools
def toDict(replay):
    """Convert a replay object into a plain dict of metadata, players, and observers.

    Missing attributes default to None (or the supplied default), so
    partially parsed replays can still be serialized.
    """
    def __getattr(object, name, default):
        return getattr(object, name, default)

    # functools.partial is used because the builtin getattr does not accept
    # its default as a keyword argument.
    _getattr = functools.partial(__getattr, default=None)

    # Guard 'length': every other field degrades to None when absent, but
    # the original unconditionally dereferenced .seconds and crashed.
    length = _getattr(replay, 'length')

    # NOTE: the original dict literal listed 'file_time' twice; the
    # duplicate (identical value) has been removed.
    data = {
        'gateway': _getattr(replay, 'gateway'),
        'map': _getattr(replay, 'map'),
        'file_time': _getattr(replay, 'file_time'),
        'unix_timestamp': _getattr(replay, 'unix_timestamp'),
        'date': _getattr(replay, 'date'),
        'utc_date': _getattr(replay, 'utc_date'),
        'speed': _getattr(replay, 'speed'),
        'category': _getattr(replay, 'category'),
        'type': _getattr(replay, 'type'),
        'is_ladder': _getattr(replay, 'is_ladder', default=False),
        'is_private': _getattr(replay, 'is_private', default=False),
        'filename': _getattr(replay, 'filename'),
        'frames': _getattr(replay, 'frames'),
        'build': _getattr(replay, 'build'),
        'release': _getattr(replay, 'release_string'),
        'length': length.seconds if length is not None else None,
    }

    players = []
    for player in replay.players:
        p = {
            'avg_apm': _getattr(player, 'avg_apm'),
            'color': _getattr(player, 'color'),
            'name': _getattr(player, 'name'),
            'pick_race': _getattr(player, 'pick_race'),
            'pid': _getattr(player, 'pid'),
            'play_race': _getattr(player, 'play_race'),
            'result': _getattr(player, 'result'),
            'type': _getattr(player, 'type'),
            'uid': _getattr(player, 'uid'),
            'url': _getattr(player, 'url'),
            'messages': [],
        }
        for message in player.messages:
            p['messages'].append({
                'time': message.time.seconds,
                'text': message.text,
                'is_public': message.to_all
            })
        players.append(p)
    data['players'] = players

    observers = []
    for observer in replay.observers:
        observers.append({
            'name': _getattr(observer, 'name'),
            'messages': _getattr(observer, 'messages', default=[]),
            'pid': _getattr(observer, 'pid'),
        })
    data['observers'] = observers

    return data
|
{
"content_hash": "4d54d5ece1d83d69064b495458dabf3c",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 68,
"avg_line_length": 38.644067796610166,
"alnum_prop": 0.5342105263157895,
"repo_name": "dsjoerg/sc2reader",
"id": "63cd137403583faaaf5e8ac11813c8bbc7a22d67",
"size": "2280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sc2reader/processors/todict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "186471"
},
{
"name": "Shell",
"bytes": "4521"
}
],
"symlink_target": ""
}
|
"""Utility functions, constants, and classes pertaining to WAVE audio files."""
import struct
from vesper.util.bunch import Bunch
class WaveFileFormatError(Exception):
    """Raised when a file does not conform to the WAVE file format."""
    pass
# Four-character chunk identifiers defined by the RIFF/WAVE format.
RIFF_CHUNK_ID = 'RIFF'
FMT_CHUNK_ID = 'fmt '
FACT_CHUNK_ID = 'fact'
DATA_CHUNK_ID = 'data'

# RIFF form type that marks a RIFF file as a WAVE audio file.
_WAVE_FORM_TYPE = 'WAVE'

# fmt chunk sizes accepted by `parse_fmt_chunk`; sizes above 16 carry an
# extension size field (read when size > 16).
_FMT_CHUNK_SIZES = (16, 18, 40)
def parse_riff_chunk_header(f):
    """Parse and validate the RIFF chunk header at the start of a WAVE file."""
    chunk_id = read_id(f, 0)
    if chunk_id != RIFF_CHUNK_ID:
        raise WaveFileFormatError(
            'Purported WAVE audio file does not start with "RIFF".')

    chunk_size = read_u4(f, 4)

    if read_id(f, 8) != _WAVE_FORM_TYPE:
        raise WaveFileFormatError(
            f'Purported WAVE audio file does not have expected RIFF '
            f'form type "{_WAVE_FORM_TYPE}" in bytes 8-11.')

    return Bunch(id=RIFF_CHUNK_ID, offset=0, size=chunk_size)
def parse_subchunk(f, offset):
    """Parse the subchunk at `offset`, dispatching to a chunk-specific parser."""
    chunk_id = read_id(f, offset)
    chunk_size = read_u4(f, offset + 4)
    chunk = Bunch(id=chunk_id, offset=offset, size=chunk_size)

    handler = _subchunk_parsers.get(chunk_id)
    if handler is not None:
        handler(f, offset, chunk)

    return chunk
def read_id(f, offset):
    """Read the four-character chunk ID at `offset` as a string."""
    f.seek(offset)
    return f.read(4).decode('UTF-8')
def read_u2(f, offset):
    """Read a little-endian unsigned 16-bit integer at `offset`."""
    f.seek(offset)
    (value,) = struct.unpack('<H', f.read(2))
    return value
def read_u4(f, offset):
    """Read a little-endian unsigned 32-bit integer at `offset`."""
    f.seek(offset)
    (value,) = struct.unpack('<I', f.read(4))
    return value
def parse_fact_chunk(f, offset, chunk):
    """Parse a fact chunk body, recording the sample frame count on `chunk`."""
    chunk.frame_count = read_u4(f, offset + 8)
def parse_fmt_chunk(f, offset, chunk):
    """Parse a fmt chunk body, recording audio format fields on `chunk`."""
    if chunk.size not in _FMT_CHUNK_SIZES:
        # NOTE(review): the message says "Will only parse chunk header", but
        # raising aborts parsing entirely — confirm whether this was meant
        # to be a warning rather than an error.
        raise WaveFileFormatError(
            f'WAVE audio file fmt chunk size is {chunk.size} bytes '
            f'rather than one of the expected {str(_FMT_CHUNK_SIZES)}. '
            f'Will only parse chunk header.')

    else:
        # chunk is of one of the expected sizes
        chunk.format_code = read_u2(f, offset + 8)
        chunk.channel_count = read_u2(f, offset + 10)
        chunk.sample_rate = read_u4(f, offset + 12)
        chunk.data_rate = read_u4(f, offset + 16)
        chunk.block_size = read_u2(f, offset + 20)
        chunk.sample_size = read_u2(f, offset + 22)

        # Sizes 18 and 40 carry an extension-size field after the basic
        # 16-byte format fields.
        if chunk.size > 16:
            chunk.extension_size = read_u2(f, offset + 24)
# Maps a chunk ID to the parser for that chunk type; chunks without an
# entry get only their headers parsed (see `parse_subchunk`).
_subchunk_parsers = {
    FACT_CHUNK_ID: parse_fact_chunk,
    FMT_CHUNK_ID: parse_fmt_chunk,
}
def show_subchunk_info(chunk):
    """Print chunk info, using a chunk-specific formatter when one exists."""
    formatter = _subchunk_formatters.get(chunk.id, show_basic_chunk_info)
    formatter(chunk)
def show_basic_chunk_info(chunk):
    """Print a chunk's ID, offset, and size."""
    for line in (f'    {chunk.id}',
                 f'        chunk offset (bytes): {chunk.offset}',
                 f'        chunk size (bytes): {chunk.size}'):
        print(line)
def show_fact_chunk(chunk):
    """Print the fields of a parsed fact chunk."""
    show_basic_chunk_info(chunk)
    frame_count = chunk.frame_count
    print(f'        frame count: {frame_count}')
def show_fmt_chunk(chunk):
    """Print the fields of a parsed fmt chunk."""
    show_basic_chunk_info(chunk)

    # Field details are only present when the chunk had an expected size
    # (see `parse_fmt_chunk`).
    if chunk.size not in _FMT_CHUNK_SIZES:
        return

    format_name = get_audio_data_format(chunk.format_code)
    print(f'        format: {format_name}')
    print(f'        channel count: {chunk.channel_count}')
    print(f'        sample rate (frames per second): {chunk.sample_rate}')
    print(f'        data rate (bytes per second): {chunk.data_rate}')
    print(f'        block size (bytes): {chunk.block_size}')
    print(f'        sample size (bits): {chunk.sample_size}')

    if chunk.size > 16:
        print(f'            extension_size (bytes): {chunk.extension_size}')
# Maps a chunk ID to the display function for that chunk type; chunks
# without an entry fall back to `show_basic_chunk_info`.
_subchunk_formatters = {
    FACT_CHUNK_ID: show_fact_chunk,
    FMT_CHUNK_ID: show_fmt_chunk,
}
def get_audio_data_format(format_code):
    """Return a human-readable name for a WAVE format code.

    Unknown codes yield an "Unrecognized" description that includes the
    code value.
    """
    # Bug fix: the original fallback string lacked the `f` prefix, so it
    # displayed the literal text "{format_code}" instead of the value.
    return audio_data_formats.get(
        format_code, f'Unrecognized (code {format_code})')
# WAVE format codes to human-readable names (used by
# `get_audio_data_format` and `show_fmt_chunk`).
audio_data_formats = {
    0x0001: 'PCM',
    0x0003: 'IEEE Float',
    0x0006: 'A-law',
    0x0007: '\u03bc-law',  # Greek small letter mu
    0xFFFE: 'extensible',
}
|
{
"content_hash": "b715eed970a9a1d5841e53d8355aa462",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 79,
"avg_line_length": 24.536585365853657,
"alnum_prop": 0.5897117296222664,
"repo_name": "HaroldMills/Vesper",
"id": "34dc4316660cf5cd529e1595c6e3c12f61a98e0a",
"size": "4024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vesper/util/wave_file_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "92"
},
{
"name": "CSS",
"bytes": "9101"
},
{
"name": "Dockerfile",
"bytes": "1678"
},
{
"name": "HTML",
"bytes": "70614"
},
{
"name": "JavaScript",
"bytes": "410277"
},
{
"name": "Python",
"bytes": "2697554"
},
{
"name": "Shell",
"bytes": "2772"
},
{
"name": "TypeScript",
"bytes": "30001"
}
],
"symlink_target": ""
}
|
from ntulifeguardapp.settings import *
|
{
"content_hash": "096f9d9c87d647eb4ac6ff757f6c2ce5",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 38,
"avg_line_length": 39,
"alnum_prop": 0.8461538461538461,
"repo_name": "timchen86/ntulifeguardapp",
"id": "282c1b65287e22c256e881c87a6a2fce2913bf71",
"size": "39",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11990"
},
{
"name": "CSS",
"bytes": "57232"
},
{
"name": "JavaScript",
"bytes": "24064"
},
{
"name": "OpenEdge ABL",
"bytes": "125979"
},
{
"name": "Python",
"bytes": "6175905"
},
{
"name": "Shell",
"bytes": "1022"
}
],
"symlink_target": ""
}
|
import unittest
import munch
import basecrm
from basecrm.test.testutils import BaseTestCase
class UsersServiceTests(BaseTestCase):
    """Smoke tests for the users service exposed by the client."""

    def test_service_property_exists(self):
        self.assertTrue(hasattr(self.client, 'users'))

    def test_method_list_exists(self):
        list_method = getattr(self.client.users, 'list', None)
        self.assertTrue(list_method is not None and callable(list_method))

    def test_method_retrieve_exists(self):
        retrieve_method = getattr(self.client.users, 'retrieve', None)
        self.assertTrue(retrieve_method is not None
                        and callable(retrieve_method))

    def test_method_self_exists(self):
        self_method = getattr(self.client.users, 'self', None)
        self.assertTrue(self_method is not None and callable(self_method))

    def test_list(self):
        page = self.client.users.list(page=1)
        self.assertIsInstance(page, list)
        for entry in page:
            self.assertIsInstance(entry, munch.Munch)

    def test_retrieve(self):
        found = self.client.users.retrieve(self.user.id)
        self.assertIsInstance(found, munch.Munch)
        self.assertEqual(found.id, self.user.id)

    def test_self(self):
        current_user = self.client.users.self()
        self.assertIsInstance(current_user, munch.Munch)
|
{
"content_hash": "7430dce8f462151a7d0ab59826725635",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 116,
"avg_line_length": 33.916666666666664,
"alnum_prop": 0.6904176904176904,
"repo_name": "basecrm/basecrm-python",
"id": "d32d07e3732239c1c3eca029bfa4054751e0a5f6",
"size": "1221",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "basecrm/test/test_users_service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "163217"
}
],
"symlink_target": ""
}
|
import numpy
from chainer.backends import cuda
from chainer import function
from chainer.utils import type_check
class Contrastive(function.Function):

    """Contrastive loss function."""

    def __init__(self, margin, reduce='mean'):
        # The margin must be positive for the hinge term to be meaningful.
        if margin <= 0:
            raise ValueError("margin should be positive value.")
        self.margin = margin

        if reduce not in ('mean', 'no'):
            raise ValueError(
                "only 'mean' and 'no' are valid for 'reduce', but '%s' is "
                'given' % reduce)
        self.reduce = reduce

    def check_type_forward(self, in_types):
        """Validate dtypes and shapes of (x0, x1, y)."""
        type_check.expect(in_types.size() == 3)
        x0_type, x1_type, y_type = in_types
        type_check.expect(
            x0_type.dtype == numpy.float32,
            x1_type.dtype == numpy.float32,
            y_type.dtype.kind == 'i',
            x0_type.shape == x1_type.shape,
            x1_type.shape[0] == y_type.shape[0],
            x1_type.shape[0] > 0,
            x0_type.ndim == 2,
            x1_type.ndim == 2,
            y_type.ndim == 1
        )

    def forward(self, inputs):
        """Compute the contrastive loss; caches intermediates for backward."""
        xp = cuda.get_array_module(*inputs)
        x0, x1, y = inputs

        # Cached on self for reuse in backward().
        self.diff = x0 - x1
        self.dist_sq = xp.sum(self.diff ** 2, axis=1)
        self.dist = xp.sqrt(self.dist_sq)
        self.mdist = self.margin - self.dist
        dist = xp.maximum(self.mdist, 0)
        loss = (y * self.dist_sq + (1 - y) * dist * dist) * .5
        if self.reduce == 'mean':
            loss = xp.sum(loss) / x0.shape[0]
        return xp.array(loss, dtype=xp.float32),

    def backward(self, inputs, gy):
        """Compute gradients w.r.t. x0 and x1 (y gets no gradient)."""
        xp = cuda.get_array_module(*inputs)
        x0, x1, y = inputs

        x_dim = x0.shape[1]
        y = xp.repeat(y[:, None], x_dim, axis=1)
        if self.reduce == 'mean':
            alpha = gy[0] / y.shape[0]
        else:
            alpha = gy[0][:, None]
        dist = xp.repeat(self.dist[:, None], x_dim, axis=1)
        # avoid division by zero
        dist = xp.maximum(dist, 1e-8)
        # similar pair
        gx0 = alpha * y * self.diff
        # dissimilar pair
        mdist = xp.maximum(xp.repeat(self.mdist[:, None], x_dim, axis=1), 0)
        gx0 += alpha * (1 - y) * mdist * -(self.diff / dist)
        gx0 = gx0.astype(xp.float32)
        # x1's gradient is the negation of x0's by symmetry of diff = x0 - x1.
        return gx0, -gx0, None
def contrastive(x0, x1, y, margin=1, reduce='mean'):
    """Computes contrastive loss.

    It takes a pair of samples and a label as inputs.
    The label is :math:`1` when those samples are similar,
    or :math:`0` when they are dissimilar.

    Let :math:`N` and :math:`K` denote mini-batch size and the dimension
    of input variables, respectively. The shape of both input variables
    ``x0`` and ``x1`` should be ``(N, K)``.

    The loss value of the :math:`n`-th sample pair :math:`L_n` is

    .. math::
        L_n = \\frac{1}{2} \\left( y_n d_n^2
        + (1 - y_n) \\max ({\\rm margin} - d_n, 0)^2 \\right)

    where :math:`d_n = \\| {\\bf x_0}_n - {\\bf x_1}_n \\|_2`,
    :math:`{\\bf x_0}_n` and :math:`{\\bf x_1}_n` are :math:`n`-th
    K-dimensional vectors of ``x0`` and ``x1``.

    The output is a variable whose value depends on the value of
    the option ``reduce``. If it is ``'no'``, it holds the elementwise
    loss values. If it is ``'mean'``, this function takes a mean of
    loss values.

    Args:
        x0 (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): The first input variable. The shape should be
            (N, K), where N denotes the mini-batch size, and K denotes the
            dimension of ``x0``.
        x1 (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): The second input variable. The shape should be
            the same as ``x0``.
        y (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): Labels. All values should be 0 or 1. The shape
            should be ``(N,)``, where N denotes the mini-batch size.
        margin (float): A parameter for contrastive loss. It should be positive
            value.
        reduce (str): Reduction option. Its value must be either
            ``'mean'`` or ``'no'``. Otherwise, :class:`ValueError` is raised.

    Returns:
        ~chainer.Variable:
            A variable holding the loss value(s) calculated by the
            above equation.
            If ``reduce`` is ``'no'``, the output variable holds array
            whose shape is same as one of (hence both of) input variables.
            If it is ``'mean'``, the output variable holds a scalar value.

    .. note::
        This cost can be used to train siamese networks. See `Learning a
        Similarity Metric Discriminatively, with Application to Face
        Verification <http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf>`_
        for details.

    .. admonition:: Example

        >>> x0 = np.array([[-2.0, 3.0, 0.5], [5.0, 2.0, -0.5]]).\
astype(np.float32)
        >>> x1 = np.array([[-1.0, 3.0, 1.0], [3.5, 0.5, -2.0]]).\
astype(np.float32)
        >>> y = np.array([1, 0]).astype(np.int32)
        >>> F.contrastive(x0, x1, y)
        variable(0.3125)
        >>> F.contrastive(x0, x1, y, margin=3.0)  # harder penalty
        variable(0.3528857)
        >>> z = F.contrastive(x0, x1, y, reduce='no')
        >>> z.shape
        (2,)
        >>> z.data
        array([0.625, 0.   ], dtype=float32)

    """
    return Contrastive(margin, reduce)(x0, x1, y)
|
{
"content_hash": "6a50b1349727aff03a04ec6035950096",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 79,
"avg_line_length": 36.81879194630873,
"alnum_prop": 0.5534086766314255,
"repo_name": "aonotas/chainer",
"id": "511ba946860aaecfd851f0a8ada09a4f0e00bdb7",
"size": "5486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainer/functions/loss/contrastive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3357320"
}
],
"symlink_target": ""
}
|
from jousting.round.phase import Phase
from jousting.util.dice import D6, roll
class Charge(Phase):
    """Charge phase of a joust: both riders advance until they meet."""

    def do_charge(self):
        """Advance both players toward each other until the point of contact.

        Each pass of the loop rolls and applies a movement for player 1,
        then for player 2, then records failed starts.
        """
        player_one = self._controller.get_p1()
        player_two = self._controller.get_p2()

        while not self.check_point_of_contact():
            for rider in (player_one, player_two):
                rider.move(self.limit_movement(self.roll_movement()))

            player_one.determine_failed_to_start()
            player_two.determine_failed_to_start()
            # NOTE(review): with the elif, only player 1 is charged a failed
            # start when both fail in the same pass — confirm that is the
            # intended rule.
            if player_one.get_failed_to_start():
                player_one.add_fail_start()
            elif player_two.get_failed_to_start():
                player_two.add_fail_start()

    def roll_movement(self):
        """Roll 2d6 minus 1d6 for movement.

        Negative results are clamped up to 1.
        """
        # NOTE(review): a result of exactly 0 is kept as 0 (only negatives
        # become 1) — confirm that is intended.
        distance = roll(D6, 2, -1 * roll(D6, 1))
        if distance < 0:
            distance = 1
        return distance

    def limit_movement(self, movement):
        """Cap a movement roll so the riders cannot pass each other.

        The field is 24 spaces long; the cap is the gap still remaining
        between the two riders.
        """
        player_one = self._controller.get_p1()
        player_two = self._controller.get_p2()
        remaining = 24 - player_one.get_current_position() - player_two.get_current_position()
        return min(movement, remaining)

    def check_point_of_contact(self):
        """Return True when the riders have met (positions sum to 24)."""
        player_one = self._controller.get_p1()
        player_two = self._controller.get_p2()
        return player_one.get_current_position() + player_two.get_current_position() == 24
|
{
"content_hash": "3904758cfdffc481a1b2d76d55a96ffd",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 113,
"avg_line_length": 38.292682926829265,
"alnum_prop": 0.6165605095541401,
"repo_name": "Serneum/jousting-core",
"id": "06dcdece2242632dd60f56f3ee9c0a5fcd6e9cff",
"size": "1570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/jousting/round/charge.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "33635"
}
],
"symlink_target": ""
}
|
import time
import RPi.GPIO as GPIO
# Use Broadcom (BCM) pin numbering for every GPIO call in this module.
GPIO.setmode(GPIO.BCM)
class StepperMotor:
    """Driver for a 4-coil stepper motor on four GPIO output pins.

    Three drive modes are supported (selected via ``mode`` / ``setMode``):
      1 - wave drive (one coil energized at a time, 4 states per cycle)
      2 - full-step drive (two adjacent coils energized, 4 states per cycle)
      3 - half-step drive (alternating one/two coils, 8 states per cycle)
    Any other mode value makes the step methods no-ops, matching the
    original behavior.
    """

    # Coil states (p1, p2, p3, p4) for one *forward* step cycle per mode.
    # A backward cycle is exactly the forward sequence reversed.
    _SEQUENCES = {
        1: (
            (True, False, False, False),
            (False, True, False, False),
            (False, False, True, False),
            (False, False, False, True),
        ),
        2: (
            (True, True, False, False),
            (False, True, True, False),
            (False, False, True, True),
            (True, False, False, True),
        ),
        3: (
            (True, False, False, False),
            (True, True, False, False),
            (False, True, False, False),
            (False, True, True, False),
            (False, False, True, False),
            (False, False, True, True),
            (False, False, False, True),
            (True, False, False, True),
        ),
    }

    def __init__(self, pin1, pin2, pin3, pin4, mode=2, delay=0.005, stepsbyspin=512):
        """Configure the four coil pins as outputs and store drive settings.

        delay is the pause (seconds) between coil states; stepsbyspin is the
        number of step cycles per full revolution, used by clockwise() /
        anticlockwise().
        """
        self.p1 = pin1
        self.p2 = pin2
        self.p3 = pin3
        self.p4 = pin4
        self.modo = mode
        self.delay = delay
        # BUG FIX: the stepsbyspin argument was previously ignored and the
        # lap size hard-coded to 512; honor the parameter (default unchanged).
        self.lap = stepsbyspin
        for pin in (pin1, pin2, pin3, pin4):
            GPIO.setup(pin, GPIO.OUT)

    def setMode(self, mode=2):
        """Select the drive mode (1 wave, 2 full-step, 3 half-step)."""
        self.modo = mode

    def setDelay(self, delay):
        """Set the pause in seconds between successive coil states."""
        self.delay = delay

    def _apply(self, states):
        """Energize the four coils per `states`, then hold for the delay."""
        for pin, state in zip((self.p1, self.p2, self.p3, self.p4), states):
            GPIO.output(pin, state)
        time.sleep(self.delay)

    def stepForward(self):
        """Run one forward step cycle in the current mode (no-op if unknown)."""
        for states in self._SEQUENCES.get(self.modo, ()):
            self._apply(states)

    def stepBackward(self):
        """Run one backward step cycle (forward sequence reversed)."""
        for states in reversed(self._SEQUENCES.get(self.modo, ())):
            self._apply(states)

    def goForward(self, steps=1):
        """Run `steps` forward cycles, then de-energize all coils."""
        for _ in range(steps):
            self.stepForward()
        self.off()

    def goBackwards(self, steps=1):
        """Run `steps` backward cycles, then de-energize all coils."""
        for _ in range(steps):
            self.stepBackward()
        self.off()

    def clockwise(self, degrees=360):
        """Rotate clockwise by `degrees`, using `lap` cycles per revolution."""
        self.goForward(int(self.lap * degrees / 360))

    def anticlockwise(self, degrees=360):
        """Rotate anticlockwise by `degrees`."""
        self.goBackwards(int(self.lap * degrees / 360))

    def off(self):
        """De-energize all four coils so the motor stops drawing current."""
        for pin in (self.p1, self.p2, self.p3, self.p4):
            GPIO.output(pin, False)
|
{
"content_hash": "a32133ebeccf6f3901093e4de262da30",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 82,
"avg_line_length": 28.425837320574164,
"alnum_prop": 0.6780003366436627,
"repo_name": "intelidomo/rpi_snippets",
"id": "68e8ad3f93ddebde4588151b7c0d7590c90d9857",
"size": "5959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stepper_motor/stepper_motor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7818"
}
],
"symlink_target": ""
}
|
from ._tail_recursive import tail_recursive
|
{
"content_hash": "1633e1e58002f188337ef04955c5334e",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 43,
"avg_line_length": 44,
"alnum_prop": 0.8181818181818182,
"repo_name": "xj9/funk",
"id": "1a7ef825b6795217ded4746f7adb10d8604f0372",
"size": "69",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "decorators/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1134"
}
],
"symlink_target": ""
}
|
"""OpenType Layout-related functionality."""
|
{
"content_hash": "477940433b8bb95f268306b837c9df42",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 44,
"avg_line_length": 45,
"alnum_prop": 0.7555555555555555,
"repo_name": "Pal3love/otRebuilder",
"id": "12e414fc3bf00e6152f953b989914f034edfe9e1",
"size": "45",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Package/otRebuilder/Dep/fontTools/otlLib/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2756220"
}
],
"symlink_target": ""
}
|
import json
import time
import subprocess
import discord
import discord.ext.commands as commands
import psutil
def setup(bot):
    """discord.py extension entry point: attach the Core cog to the bot."""
    core_cog = Core(bot)
    bot.add_cog(core_cog)
def duration_to_str(duration):
    """Convert a duration in seconds to a human-readable string.

    Args:
        duration (int): Elapsed time in seconds.

    Returns:
        str: Comma-separated non-zero components, largest first, e.g.
        ``'1 days, 2 hours, 5 seconds'``. ``'0 seconds'`` is returned for a
        zero duration so the result is never empty.
    """
    # Note: the original docstring said "timestamp", but the value is a
    # duration (seconds elapsed), as the divmod cascade shows.
    minutes, seconds = divmod(duration, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)

    parts = []
    if days > 0:
        parts.append(f'{days} days')
    if hours > 0:
        parts.append(f'{hours} hours')
    if minutes > 0:
        parts.append(f'{minutes} minutes')
    # Always show seconds when nothing else was added.
    if seconds > 0 or not parts:
        parts.append(f'{seconds} seconds')
    return ', '.join(parts)
class Core(commands.Cog):
    """♡🐼"""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_ready(self):
        # NOTE(review): on_ready also fires on reconnects, which resets the
        # uptime reported by `info` — confirm that is intended.
        self.bot.start_time = time.time()

    @commands.command(aliases=['infos'])
    async def info(self, ctx):
        """Shows info about the bot."""
        # Last 5 commits, formatted as markdown links to the GitHub repo.
        latest_commits = subprocess.check_output(['git', 'log', '--pretty=format:[`%h`](https://github.com/PapyrusThePlant/Panda/commit/%h) %s', '-n', '5']).decode('utf-8')

        embed = discord.Embed(description='[Click here to get your own Panda!](https://github.com/PapyrusThePlant/Panda)', colour=discord.Colour.blurple())
        embed.set_thumbnail(url='https://raw.githubusercontent.com/PapyrusThePlant/Panda/master/images/panda.jpg')
        embed.set_author(name='Author : Papyrus#0095', icon_url='https://cdn.discordapp.com/avatars/145110704293281792/2775b3ee7b6a865722b3f6a27da8b14a.webp?size=1024')
        embed.add_field(name='Command prefixes', value=f'`@{ctx.guild.me.display_name} `, `{self.bot.conf["prefix"]}`', inline=False)
        embed.add_field(name='CPU', value=f'{psutil.cpu_percent()}%')
        embed.add_field(name='Memory', value=f'{psutil.Process().memory_full_info().uss / 1048576:.2f} Mb') # Expressed in bytes, turn to Mb and round to 2 decimals
        embed.add_field(name='Uptime', value=duration_to_str(int(time.time() - self.bot.start_time)))
        embed.add_field(name='Latest changes', value=latest_commits, inline=False)
        embed.add_field(name='\N{ZERO WIDTH SPACE}', value='For any question about the bot, announcements and an easy way to get in touch with the author, feel free to join the dedicated [discord server](https://discord.gg/AvAsTHW).')
        embed.set_footer(text='Powered by discord.py', icon_url='http://i.imgur.com/5BFecvA.png')

        await ctx.send(embed=embed)

    @commands.command()
    @commands.has_permissions(manage_guild=True)
    async def load(self, ctx, name):
        """Loads an extension.

        This command requires the Manage Server permission.
        """
        cog = name.lower()
        try:
            ctx.bot.load_extension(f'cogs.{cog}')
        except commands.ExtensionAlreadyLoaded:
            await ctx.send(f'Extension {name} already loaded.')
        except commands.ExtensionNotFound:
            await ctx.send(f'Extension {name} not found.')
        else:
            # Persist so the extension is reloaded on the next startup.
            self.bot.conf['extensions'].append(cog)
            with open(self.bot.conf_file, 'w') as fp:
                json.dump(self.bot.conf, fp)
            await ctx.message.add_reaction('\N{WHITE HEAVY CHECK MARK}')

    @commands.command()
    @commands.has_permissions(manage_guild=True)
    async def unload(self, ctx, name):
        """Unloads an extension.

        This command requires the Manage Server permission.
        """
        cog = name.lower()
        try:
            ctx.bot.unload_extension(f'cogs.{cog}')
        except commands.ExtensionNotLoaded:
            await ctx.send(f'Extension {name} not loaded.')
        else:
            # Persist so the extension stays unloaded on the next startup.
            self.bot.conf['extensions'].remove(cog)
            with open(self.bot.conf_file, 'w') as fp:
                json.dump(self.bot.conf, fp)
            await ctx.message.add_reaction('\N{WHITE HEAVY CHECK MARK}')

    @commands.command()
    @commands.has_permissions(manage_guild=True)
    async def reload(self, ctx, *extensions):
        """Reloads extensions.

        If none are provided, reloads all loaded extensions.
        This command requires the Manage Server permission.
        """
        # BUG FIX: `*extensions` yields an empty tuple (never None) when no
        # names are given, so the old `if extensions is None:` check could
        # never trigger and "reload all" was unreachable.
        if not extensions:
            extensions = self.bot.conf['extensions']
        for name in extensions:
            cog = name.lower()
            try:
                ctx.bot.unload_extension(f'cogs.{cog}')
                ctx.bot.load_extension(f'cogs.{cog}')
            except commands.ExtensionError as e:
                await ctx.send(f'Error reloading extension {name} : {e}')
        await ctx.message.add_reaction('\N{WHITE HEAVY CHECK MARK}')
|
{
"content_hash": "9ae1c90e5947905f688edd413c6e19d4",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 234,
"avg_line_length": 39.94871794871795,
"alnum_prop": 0.6328626444159179,
"repo_name": "PapyrusThePlant/MusicPanda",
"id": "2b01151bc0817d5c4ffee3d4faef78896aecd9ca",
"size": "4679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cogs/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2179"
},
{
"name": "Python",
"bytes": "14532"
},
{
"name": "Shell",
"bytes": "1115"
}
],
"symlink_target": ""
}
|
from sklearn2sql_heroku.tests.regression import generic as reg_gen

# Run the shared generic regression test driver: train a
# GradientBoostingRegressor on the "freidman2" synthetic dataset and
# generate/verify its SQL translation against DuckDB.
reg_gen.test_model("GradientBoostingRegressor" , "freidman2" , "duckdb")
|
{
"content_hash": "449da442728ec3c83fc177d7820286f7",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 35.5,
"alnum_prop": 0.7887323943661971,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "848ca36d4042235ee1857ce5e304e0dc84329906",
"size": "142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regression/freidman2/ws_freidman2_GradientBoostingRegressor_duckdb_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
}
|
"""Example scenarios to display in the workbench.
This code is in the Workbench layer.
"""
import logging
from collections import namedtuple
from xblock.core import XBlock
from django.conf import settings
from django.template.defaultfilters import slugify
from .runtime import WORKBENCH_KVS, WorkbenchRuntime
log = logging.getLogger(__name__)
# Build the scenarios, which are named trees of usages.
Scenario = namedtuple("Scenario", "description usage_id xml")
SCENARIOS = {}
def add_xml_scenario(scname, description, xml):
    """
    Add a scenario defined in XML.

    `scname` is the unique registry key, `description` is the human-readable
    title (slugified to seed usage ids), and `xml` is the scenario markup.
    """
    # Registering the same name twice is a programming error.
    assert scname not in SCENARIOS, "Already have a %r scenario" % scname
    runtime = WorkbenchRuntime()
    # WorkbenchRuntime has an id_generator, but most runtimes won't
    # (because the generator will be contextual), so we
    # pass it explicitly to parse_xml_string.
    runtime.id_generator.set_scenario(slugify(description))
    usage_id = runtime.parse_xml_string(xml, runtime.id_generator)
    SCENARIOS[scname] = Scenario(description, usage_id, xml)
def remove_scenario(scname):
    """
    Remove a named scenario from the global list.

    Raises KeyError if `scname` is not registered.
    """
    SCENARIOS.pop(scname)
def add_class_scenarios(class_name, cls, fail_silently=True):
    """
    Add scenarios from a class to the global collection of scenarios.

    Only XBlock classes exposing a `workbench_scenarios` method contribute.
    """
    if not hasattr(cls, "workbench_scenarios"):
        return
    for index, (description, xml) in enumerate(cls.workbench_scenarios()):
        key = "%s.%d" % (class_name, index)
        try:
            add_xml_scenario(key, description, xml)
        except Exception:  # pylint:disable=broad-except
            # A single faulty scenario must not take down the whole workbench.
            if not fail_silently:
                raise
            log.warning("Cannot load %s", description, exc_info=True)
def init_scenarios():
    """
    (Re)build the global SCENARIOS from every declared XBlock class.
    """
    # This runs repeatedly during testing, so always start from a clean slate.
    SCENARIOS.clear()

    if settings.WORKBENCH['reset_state_on_restart']:
        WORKBENCH_KVS.clear()
    else:
        WORKBENCH_KVS.prep_for_scenario_loading()

    # Collect scenarios from every loadable XBlock class, in sorted order.
    for name, block_class in sorted(XBlock.load_classes(fail_silently=False)):
        add_class_scenarios(name, block_class, fail_silently=False)
def get_scenarios():
    """
    Return the global SCENARIOS mapping, building it on first use.
    """
    if SCENARIOS or get_scenarios.initialized:
        return SCENARIOS
    init_scenarios()
    get_scenarios.initialized = True
    return SCENARIOS

# One-shot latch so an intentionally-empty scenario set isn't rebuilt forever.
get_scenarios.initialized = False
|
{
"content_hash": "073dff6134c6ab8fc676bee1d2b1650a",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 81,
"avg_line_length": 29.559139784946236,
"alnum_prop": 0.6711531465987632,
"repo_name": "stvstnfrd/xblock-sdk",
"id": "1c03355ea71c324420147fcedcc9cd197b82d9e1",
"size": "2749",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "workbench/scenarios.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14419"
},
{
"name": "Dockerfile",
"bytes": "681"
},
{
"name": "HTML",
"bytes": "8020"
},
{
"name": "JavaScript",
"bytes": "237802"
},
{
"name": "Makefile",
"bytes": "2918"
},
{
"name": "Python",
"bytes": "146395"
}
],
"symlink_target": ""
}
|
"""Adds support for generic thermostat units."""
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
ATTR_PRESET_MODE,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import DOMAIN as HA_DOMAIN, callback
from homeassistant.helpers import condition
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import (
async_track_state_change_event,
async_track_time_interval,
)
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.restore_state import RestoreEntity
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)

# Default hysteresis band (degrees) around the target temperature.
DEFAULT_TOLERANCE = 0.3
DEFAULT_NAME = "Generic Thermostat"

# YAML configuration keys accepted by this platform.
CONF_HEATER = "heater"
CONF_SENSOR = "target_sensor"
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
CONF_TARGET_TEMP = "target_temp"
CONF_AC_MODE = "ac_mode"
CONF_MIN_DUR = "min_cycle_duration"
CONF_COLD_TOLERANCE = "cold_tolerance"
CONF_HOT_TOLERANCE = "hot_tolerance"
CONF_KEEP_ALIVE = "keep_alive"
CONF_INITIAL_HVAC_MODE = "initial_hvac_mode"
CONF_AWAY_TEMP = "away_temp"
CONF_PRECISION = "precision"

# Base feature set; preset support is added per-entity when away_temp is set.
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE

# Schema validating the platform's YAML configuration.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HEATER): cv.entity_id,
        vol.Required(CONF_SENSOR): cv.entity_id,
        vol.Optional(CONF_AC_MODE): cv.boolean,
        vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
        vol.Optional(CONF_MIN_DUR): cv.positive_time_period,
        vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_COLD_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(float),
        vol.Optional(CONF_HOT_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(float),
        vol.Optional(CONF_TARGET_TEMP): vol.Coerce(float),
        vol.Optional(CONF_KEEP_ALIVE): cv.positive_time_period,
        vol.Optional(CONF_INITIAL_HVAC_MODE): vol.In(
            [HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF]
        ),
        vol.Optional(CONF_AWAY_TEMP): vol.Coerce(float),
        vol.Optional(CONF_PRECISION): vol.In(
            [PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
        ),
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the generic thermostat platform."""
    await async_setup_reload_service(hass, DOMAIN, PLATFORMS)

    # Constructor arguments in positional order; all values come straight
    # from the validated platform config, plus the hass temperature unit.
    thermostat = GenericThermostat(
        config.get(CONF_NAME),
        config.get(CONF_HEATER),
        config.get(CONF_SENSOR),
        config.get(CONF_MIN_TEMP),
        config.get(CONF_MAX_TEMP),
        config.get(CONF_TARGET_TEMP),
        config.get(CONF_AC_MODE),
        config.get(CONF_MIN_DUR),
        config.get(CONF_COLD_TOLERANCE),
        config.get(CONF_HOT_TOLERANCE),
        config.get(CONF_KEEP_ALIVE),
        config.get(CONF_INITIAL_HVAC_MODE),
        config.get(CONF_AWAY_TEMP),
        config.get(CONF_PRECISION),
        hass.config.units.temperature_unit,
    )
    async_add_entities([thermostat])
class GenericThermostat(ClimateEntity, RestoreEntity):
    """Representation of a Generic Thermostat device.

    Drives a single on/off heater (or cooler when ac_mode) switch from one
    temperature sensor, with hysteresis tolerances, optional minimum cycle
    duration, keep-alive refresh, and an optional away preset temperature.
    """

    def __init__(
        self,
        name,
        heater_entity_id,
        sensor_entity_id,
        min_temp,
        max_temp,
        target_temp,
        ac_mode,
        min_cycle_duration,
        cold_tolerance,
        hot_tolerance,
        keep_alive,
        initial_hvac_mode,
        away_temp,
        precision,
        unit,
    ):
        """Initialize the thermostat."""
        self._name = name
        self.heater_entity_id = heater_entity_id
        self.sensor_entity_id = sensor_entity_id
        self.ac_mode = ac_mode
        self.min_cycle_duration = min_cycle_duration
        self._cold_tolerance = cold_tolerance
        self._hot_tolerance = hot_tolerance
        self._keep_alive = keep_alive
        self._hvac_mode = initial_hvac_mode
        self._saved_target_temp = target_temp or away_temp
        self._temp_precision = precision
        if self.ac_mode:
            self._hvac_list = [HVAC_MODE_COOL, HVAC_MODE_OFF]
        else:
            self._hvac_list = [HVAC_MODE_HEAT, HVAC_MODE_OFF]
        self._active = False
        self._cur_temp = None
        self._temp_lock = asyncio.Lock()
        self._min_temp = min_temp
        self._max_temp = max_temp
        self._target_temp = target_temp
        self._unit = unit
        self._support_flags = SUPPORT_FLAGS
        # BUG FIX: an away temperature of 0.0 is falsy but perfectly valid
        # (e.g. 0 degrees C), so test against None instead of truthiness.
        if away_temp is not None:
            self._support_flags = SUPPORT_FLAGS | SUPPORT_PRESET_MODE
        self._away_temp = away_temp
        self._is_away = False

    async def async_added_to_hass(self):
        """Run when entity about to be added."""
        await super().async_added_to_hass()

        # Add listener
        self.async_on_remove(
            async_track_state_change_event(
                self.hass, [self.sensor_entity_id], self._async_sensor_changed
            )
        )
        self.async_on_remove(
            async_track_state_change_event(
                self.hass, [self.heater_entity_id], self._async_switch_changed
            )
        )

        if self._keep_alive:
            # Periodically re-run the control loop so the switch state is
            # refreshed even without sensor changes.
            self.async_on_remove(
                async_track_time_interval(
                    self.hass, self._async_control_heating, self._keep_alive
                )
            )

        @callback
        def _async_startup(event):
            """Init on startup."""
            sensor_state = self.hass.states.get(self.sensor_entity_id)
            if sensor_state and sensor_state.state not in (
                STATE_UNAVAILABLE,
                STATE_UNKNOWN,
            ):
                self._async_update_temp(sensor_state)

        self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _async_startup)

        # Check if we have an old state to restore target temp / preset / mode.
        old_state = await self.async_get_last_state()
        if old_state is not None:
            # If we have no initial temperature, restore
            if self._target_temp is None:
                # If we have a previously saved temperature
                if old_state.attributes.get(ATTR_TEMPERATURE) is None:
                    if self.ac_mode:
                        self._target_temp = self.max_temp
                    else:
                        self._target_temp = self.min_temp
                    _LOGGER.warning(
                        "Undefined target temperature, falling back to %s",
                        self._target_temp,
                    )
                else:
                    self._target_temp = float(old_state.attributes[ATTR_TEMPERATURE])
            if old_state.attributes.get(ATTR_PRESET_MODE) == PRESET_AWAY:
                self._is_away = True
            if not self._hvac_mode and old_state.state:
                self._hvac_mode = old_state.state
        else:
            # No previous state, try and restore defaults
            if self._target_temp is None:
                if self.ac_mode:
                    self._target_temp = self.max_temp
                else:
                    self._target_temp = self.min_temp
                _LOGGER.warning(
                    "No previously saved temperature, setting to %s", self._target_temp
                )

        # Set default state to off
        if not self._hvac_mode:
            self._hvac_mode = HVAC_MODE_OFF

    @property
    def should_poll(self):
        """Return the polling state."""
        # Everything is event-driven; no polling needed.
        return False

    @property
    def name(self):
        """Return the name of the thermostat."""
        return self._name

    @property
    def precision(self):
        """Return the precision of the system."""
        if self._temp_precision is not None:
            return self._temp_precision
        return super().precision

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return self._unit

    @property
    def current_temperature(self):
        """Return the sensor temperature."""
        return self._cur_temp

    @property
    def hvac_mode(self):
        """Return current operation."""
        return self._hvac_mode

    @property
    def hvac_action(self):
        """Return the current running hvac operation if supported.

        Need to be one of CURRENT_HVAC_*.
        """
        if self._hvac_mode == HVAC_MODE_OFF:
            return CURRENT_HVAC_OFF
        if not self._is_device_active:
            return CURRENT_HVAC_IDLE
        if self.ac_mode:
            return CURRENT_HVAC_COOL
        return CURRENT_HVAC_HEAT

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temp

    @property
    def hvac_modes(self):
        """List of available operation modes."""
        return self._hvac_list

    @property
    def preset_mode(self):
        """Return the current preset mode, e.g., home, away, temp."""
        return PRESET_AWAY if self._is_away else PRESET_NONE

    @property
    def preset_modes(self):
        """Return a list of available preset modes."""
        # BUG FIX: this must always be a list (the docstring already promised
        # one); the bare PRESET_NONE string previously returned would be
        # iterated character by character by callers.
        if self._away_temp is not None:
            return [PRESET_NONE, PRESET_AWAY]
        return [PRESET_NONE]

    async def async_set_hvac_mode(self, hvac_mode):
        """Set hvac mode."""
        if hvac_mode == HVAC_MODE_HEAT:
            self._hvac_mode = HVAC_MODE_HEAT
            await self._async_control_heating(force=True)
        elif hvac_mode == HVAC_MODE_COOL:
            self._hvac_mode = HVAC_MODE_COOL
            await self._async_control_heating(force=True)
        elif hvac_mode == HVAC_MODE_OFF:
            self._hvac_mode = HVAC_MODE_OFF
            if self._is_device_active:
                await self._async_heater_turn_off()
        else:
            _LOGGER.error("Unrecognized hvac mode: %s", hvac_mode)
            return
        # Ensure we update the current operation after changing the mode
        self.async_write_ha_state()

    async def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            return
        self._target_temp = temperature
        await self._async_control_heating(force=True)
        self.async_write_ha_state()

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        if self._min_temp is not None:
            return self._min_temp

        # get default temp from super class
        return super().min_temp

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        if self._max_temp is not None:
            return self._max_temp

        # Get default temp from super class
        return super().max_temp

    async def _async_sensor_changed(self, event):
        """Handle temperature changes."""
        new_state = event.data.get("new_state")
        if new_state is None or new_state.state in (STATE_UNAVAILABLE, STATE_UNKNOWN):
            return

        self._async_update_temp(new_state)
        await self._async_control_heating()
        self.async_write_ha_state()

    @callback
    def _async_switch_changed(self, event):
        """Handle heater switch state changes."""
        new_state = event.data.get("new_state")
        if new_state is None:
            return
        self.async_write_ha_state()

    @callback
    def _async_update_temp(self, state):
        """Update thermostat with latest state from sensor."""
        try:
            self._cur_temp = float(state.state)
        except ValueError as ex:
            _LOGGER.error("Unable to update from sensor: %s", ex)

    async def _async_control_heating(self, time=None, force=False):
        """Check if we need to turn heating on or off."""
        async with self._temp_lock:
            if not self._active and None not in (self._cur_temp, self._target_temp):
                self._active = True
                _LOGGER.info(
                    "Obtained current and target temperature. "
                    "Generic thermostat active. %s, %s",
                    self._cur_temp,
                    self._target_temp,
                )

            if not self._active or self._hvac_mode == HVAC_MODE_OFF:
                return

            if not force and time is None:
                # If the `force` argument is True, we
                # ignore `min_cycle_duration`.
                # If the `time` argument is not none, we were invoked for
                # keep-alive purposes, and `min_cycle_duration` is irrelevant.
                if self.min_cycle_duration:
                    if self._is_device_active:
                        current_state = STATE_ON
                    else:
                        current_state = HVAC_MODE_OFF
                    long_enough = condition.state(
                        self.hass,
                        self.heater_entity_id,
                        current_state,
                        self.min_cycle_duration,
                    )
                    if not long_enough:
                        return

            # Hysteresis: act only once the temperature crosses the tolerance
            # band around the target.
            too_cold = self._target_temp >= self._cur_temp + self._cold_tolerance
            too_hot = self._cur_temp >= self._target_temp + self._hot_tolerance
            if self._is_device_active:
                if (self.ac_mode and too_cold) or (not self.ac_mode and too_hot):
                    _LOGGER.info("Turning off heater %s", self.heater_entity_id)
                    await self._async_heater_turn_off()
                elif time is not None:
                    # The time argument is passed only in keep-alive case
                    _LOGGER.info(
                        "Keep-alive - Turning on heater heater %s",
                        self.heater_entity_id,
                    )
                    await self._async_heater_turn_on()
            else:
                if (self.ac_mode and too_hot) or (not self.ac_mode and too_cold):
                    _LOGGER.info("Turning on heater %s", self.heater_entity_id)
                    await self._async_heater_turn_on()
                elif time is not None:
                    # The time argument is passed only in keep-alive case
                    _LOGGER.info(
                        "Keep-alive - Turning off heater %s", self.heater_entity_id
                    )
                    await self._async_heater_turn_off()

    @property
    def _is_device_active(self):
        """If the toggleable device is currently active."""
        return self.hass.states.is_state(self.heater_entity_id, STATE_ON)

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return self._support_flags

    async def _async_heater_turn_on(self):
        """Turn heater toggleable device on."""
        data = {ATTR_ENTITY_ID: self.heater_entity_id}
        await self.hass.services.async_call(
            HA_DOMAIN, SERVICE_TURN_ON, data, context=self._context
        )

    async def _async_heater_turn_off(self):
        """Turn heater toggleable device off."""
        data = {ATTR_ENTITY_ID: self.heater_entity_id}
        await self.hass.services.async_call(
            HA_DOMAIN, SERVICE_TURN_OFF, data, context=self._context
        )

    async def async_set_preset_mode(self, preset_mode: str):
        """Set new preset mode."""
        if preset_mode == PRESET_AWAY and not self._is_away:
            self._is_away = True
            # Remember the home target so leaving away mode restores it.
            self._saved_target_temp = self._target_temp
            self._target_temp = self._away_temp
            await self._async_control_heating(force=True)
        elif preset_mode == PRESET_NONE and self._is_away:
            self._is_away = False
            self._target_temp = self._saved_target_temp
            await self._async_control_heating(force=True)

        self.async_write_ha_state()
|
{
"content_hash": "c6e069a6509276e6039ec717a185d5b1",
"timestamp": "",
"source": "github",
"line_count": 489,
"max_line_length": 96,
"avg_line_length": 34.877300613496935,
"alnum_prop": 0.5750806215186163,
"repo_name": "soldag/home-assistant",
"id": "4072c43bc27257bf1ce7a0c0dbb3ead6b9094879",
"size": "17055",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/generic_thermostat/climate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19025087"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
from . import assert_msg
from anipy import (
Browser,
Query
)
from anipy.ani.enum_ import (
Season,
MediaType,
SeriesType,
AnimeStatus,
MangaStatus,
SortBy
)
class TestBrowser(object):

    def testQuery(self):
        """Build a query via the fluent API and verify payload + series type."""
        expected = {
            'year': 2014,
            'season': 'fall',
            'type': 0,
            'status': 'finished airing',
            'sort': 'popularity-desc',
            'airing_data': False,
            'full_page': False,
            'page': 1
        }

        query = Query(SeriesType.anime)
        # Builder calls mutate the query in place (the assertions below read
        # back from `query` itself).
        query.year(2014)
        query.season(Season.fall)
        query.type(MediaType.tv)
        query.status(AnimeStatus.finishedAiring)
        query.sort(SortBy.popularity.desc)
        query.airingData(False)
        query.fullPage(False)
        query.page(1)

        assert query.query == expected, \
            assert_msg.format(actual=query.query, expected=expected)
        assert query.serieType == SeriesType.anime, \
            assert_msg.format(actual=query.serieType, expected=SeriesType.anime)

    def testBrowser(self, ):
        """Executing a default anime query must not raise."""
        browser = Browser()
        default_query = Query(SeriesType.anime)
        browser.executeQuery(default_query)
|
{
"content_hash": "bdd49e9daa62218cf4d3e0bc1be177fa",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 80,
"avg_line_length": 23.62,
"alnum_prop": 0.5529212531752752,
"repo_name": "twissell-/anipy",
"id": "01c10bf636e186f6558cd5de6609f13567f0e634",
"size": "1181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/testBrowser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51596"
}
],
"symlink_target": ""
}
|
"""A module used for downloading files."""
import hashlib
import os
import shutil
import subprocess as sp
import tempfile
from ftplib import FTP
import requests
from tqdm import tqdm
from .logger import geoparse_logger as logger
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
class Downloader(object):
    """Download a single file over HTTP(S), FTP, or Aspera Connect.

    The payload is first written to a private temporary file and only
    copied to :attr:`destination` after the transfer completes, so a
    failed download never leaves a partial file at the final location.
    """

    def __init__(self, url, outdir=None, filename=None):
        """Initialize the downloader.

        Args:
            url (:obj:`str`): Source URL.
            outdir (:obj:`str`, optional): Output directory; defaults to
                the current working directory.
            filename (:obj:`str`, optional): Output file name; defaults
                to the name parsed from the URL path.
        """
        self.url = url
        if outdir is None:
            self.outdir = os.getcwd()
        else:
            self.outdir = outdir
        if filename is None:
            self.filename = self._get_filename()
        else:
            self.filename = filename
        # Reserve a unique temporary path. The file is deleted right away
        # (delete=True); only its name is kept and reused for the download.
        with tempfile.NamedTemporaryFile(delete=True) as tmpf:
            self._temp_file_name = tmpf.name

    @property
    def destination(self):
        """Get the destination path.

        This property should be calculated every time it is used because
        a user could change the outdir and filename dynamically.
        """
        return os.path.join(os.path.abspath(self.outdir), self.filename)

    def download(self, force=False, silent=False):
        """Download from URL.

        Args:
            force (:obj:`bool`): Re-download even if the destination file
                already exists.
            silent (:obj:`bool`): Disable the progress bar.
        """
        def _download():
            # Dispatch on URL scheme, fetch into the temp file, then copy
            # into place only after the transfer finished.
            if self.url.startswith("http"):
                self._download_http(silent=silent)
            elif self.url.startswith("ftp"):
                self._download_ftp(silent=silent)
            else:
                raise ValueError("Invalid URL %s" % self.url)
            logger.debug("Moving %s to %s" % (self._temp_file_name, self.destination))
            shutil.copyfile(self._temp_file_name, self.destination)
            logger.debug("Successfully downloaded %s" % self.url)

        try:
            is_already_downloaded = os.path.isfile(self.destination)
            if is_already_downloaded:
                if force:
                    try:
                        os.remove(self.destination)
                    except Exception:
                        # Best-effort removal; the copy above may still
                        # overwrite the destination.
                        logger.error("Cannot delete %s" % self.destination)
                    logger.info("Downloading %s to %s" % (self.url, self.destination))
                    logger.debug(
                        "Downloading %s to %s" % (self.url, self._temp_file_name)
                    )
                    _download()
                else:
                    logger.info(
                        (
                            "File %s already exist. Use force=True if you"
                            " would like to overwrite it."
                        )
                        % self.destination
                    )
            else:
                _download()
        finally:
            # Always clean up the temporary file, even on failure.
            try:
                os.remove(self._temp_file_name)
            except OSError:
                pass

    def download_aspera(self, user, host, silent=False):
        """Download file with Aspera Connect.

        Requires the ASPERA_HOME environment variable to point at an
        Aspera Connect installation.

        Args:
            user (:obj:`str`): Aspera/FTP user.
            host (:obj:`str`): Aspera/FTP host, e.g.
                "ftp-trace.ncbi.nlm.nih.gov".
            silent (:obj:`bool`): Do not log ascp's stdout/stderr.

        Raises:
            ValueError: If the Aspera installation, its ascp binary, or
                its openssh key cannot be found.
        """
        aspera_home = os.environ.get("ASPERA_HOME", None)
        if not aspera_home:
            raise ValueError("environment variable $ASPERA_HOME not set")
        if not os.path.exists(aspera_home):
            raise ValueError(
                "$ASPERA_HOME directory {} does not exist".format(aspera_home)
            )
        ascp = os.path.join(aspera_home, "connect/bin/ascp")
        key = os.path.join(aspera_home, "connect/etc/asperaweb_id_dsa.openssh")
        if not os.path.exists(ascp):
            raise ValueError("could not find ascp binary")
        if not os.path.exists(key):
            raise ValueError("could not find openssh key")

        parsed_url = urlparse(self.url)
        # NOTE(review): shell=True with an interpolated command is fragile
        # for paths containing spaces or shell metacharacters; consider an
        # argument list with shell=False.
        cmd = "{} -i {} -k1 -T -l400m {}@{}:{} {}".format(
            ascp, key, user, host, parsed_url.path, self._temp_file_name
        )
        logger.debug(cmd)
        try:
            pr = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
            stdout, stderr = pr.communicate()
            if not silent:
                logger.debug("Aspera stdout: " + str(stdout))
                logger.debug("Aspera stderr: " + str(stderr))
            if pr.returncode == 0:
                logger.debug(
                    "Moving %s to %s" % (self._temp_file_name, self.destination)
                )
                shutil.move(self._temp_file_name, self.destination)
                logger.debug("Successfully downloaded %s" % self.url)
            else:
                logger.error("Failed to download %s using Aspera Connect" % self.url)
        finally:
            try:
                os.remove(self._temp_file_name)
            except OSError:
                pass

    def _get_filename(self):
        """Derive the output file name from the URL path."""
        filename = os.path.basename(urlparse(self.url).path).strip(" \n\t.")
        if len(filename) == 0:
            raise Exception("Cannot parse filename from %s" % self.url)
        return filename

    def _download_ftp(self, silent=False):
        """Fetch the URL payload over FTP into the temporary file.

        Raises:
            ValueError: If the reported remote size is known and does not
                match the number of bytes written.
        """
        parsed_url = urlparse(self.url)
        # Initialize before the try block so the size validation at the
        # bottom is well-defined even when the connection fails early
        # (previously a failed connect raised NameError on
        # total_size/wrote instead of logging the real error).
        total_size = 0
        wrote = list()  # cannot add in the callback, has to be a list
        try:
            ftp = FTP(parsed_url.netloc)
            ftp.login()
            reported_size = ftp.size(parsed_url.path)
            if reported_size is not None:
                total_size = reported_size
            with open(self._temp_file_name, "wb") as f:
                if silent:
                    def _write(data):
                        f.write(data)
                        wrote.append(len(data))

                    ftp.retrbinary("RETR %s" % parsed_url.path, _write)
                else:
                    with tqdm(
                        total=total_size,
                        unit="B",
                        unit_scale=True,
                        unit_divisor=1024,
                        leave=True,
                    ) as pbar:
                        def _write(data):
                            data_length = len(data)
                            pbar.update(data_length)
                            f.write(data)
                            wrote.append(data_length)

                        ftp.retrbinary("RETR %s" % parsed_url.path, _write)
            ftp.quit()
        except Exception:
            # Log the retrieval failure first: quitting the control
            # connection may itself fail and must not mask this error
            # (previously the retrieval error was only logged when
            # ftp.quit() succeeded).
            logger.error(
                "Error when trying to retreive %s." % self.url, exc_info=True
            )
            try:
                ftp.quit()
            except Exception:
                logger.error("Error when quiting FTP server.", exc_info=True)
        if total_size != 0:
            if sum(wrote) != total_size:
                raise ValueError(
                    "Downloaded size do not match the expected size for %s" % (self.url)
                )
            else:
                logger.debug("Size validation passed")

    def _download_http(self, silent=False):
        """Fetch the URL payload over HTTP(S) into the temporary file.

        Raises:
            ValueError: If the Content-Length or Content-MD5 headers are
                present and do not match the downloaded data.
        """
        r = requests.get(self.url, stream=True)
        r.raise_for_status()
        # Total size in bytes.
        total_size = int(r.headers.get("content-length", 0))
        logger.debug("Total size: %s" % total_size)
        md5_header = r.headers.get("Content-MD5")
        logger.debug("md5: %s" % str(md5_header))
        chunk_size = 1024
        wrote = 0
        with open(self._temp_file_name, "wb") as f:
            if silent:
                for chunk in r.iter_content(chunk_size):
                    if chunk:
                        f.write(chunk)
                        wrote += len(chunk)
            else:
                with tqdm(
                    total=total_size,
                    unit="B",
                    unit_scale=True,
                    unit_divisor=1024,
                    leave=True,
                ) as pbar:
                    for chunk in r.iter_content(chunk_size):
                        if chunk:
                            f.write(chunk)
                            pbar.update(len(chunk))
                            wrote += len(chunk)
        if total_size != 0:
            if wrote != total_size:
                raise ValueError(
                    "Downloaded size do not match the expected size for %s" % (self.url)
                )
            else:
                logger.debug("Size validation passed")
        if md5_header:
            logger.debug("Validating MD5 checksum...")
            # NOTE(review): an RFC-conformant Content-MD5 header carries the
            # *base64* of the binary digest, while md5sum() returns a hex
            # digest; this comparison only works for servers that send hex.
            # Confirm against the servers actually used before "fixing".
            if md5_header == Downloader.md5sum(self._temp_file_name):
                logger.debug("MD5 checksum passed")
            else:
                raise ValueError("MD5 checksum do NOT passed")

    @staticmethod
    def md5sum(filename, blocksize=8192):
        """Get the MD5 checksum of a file.

        Args:
            filename (:obj:`str`): Path of the file to hash.
            blocksize (:obj:`int`): Read size per iteration.

        Returns:
            str: Hex digest of the file contents.
        """
        with open(filename, "rb") as fh:
            m = hashlib.md5()
            while True:
                data = fh.read(blocksize)
                if not data:
                    break
                m.update(data)
            return m.hexdigest()
|
{
"content_hash": "6c26b4ace12c52eba8d533284f31a42b",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 88,
"avg_line_length": 36.11693548387097,
"alnum_prop": 0.49246399464106283,
"repo_name": "guma44/GEOparse",
"id": "c1c74d47992c10317fc4cdc47b2195ec1196620d",
"size": "8957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/GEOparse/downloader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "162654"
},
{
"name": "Makefile",
"bytes": "1961"
},
{
"name": "Python",
"bytes": "121671"
}
],
"symlink_target": ""
}
|
import math

import pandas as pd

from bokeh.layouts import row, widgetbox
from bokeh.models import Select
from bokeh.palettes import Spectral5
from bokeh.plotting import curdoc, figure
from bokeh.sampledata.autompg import autompg
# Work on a copy so the bokeh sample dataset itself is left untouched.
df = autompg.copy()
# Discrete marker sizes and palette used when a size/color column is chosen.
SIZES = list(range(6, 22, 3))
COLORS = Spectral5
ORIGINS = ['North America', 'Europe', 'Asia']
# data cleanup
df.cyl = [str(x) for x in df.cyl]
# origin is 1-based in the raw data; map it to a readable region name.
df.origin = [ORIGINS[x-1] for x in df.origin]
df['year'] = [str(x) for x in df.yr]
del df['yr']
# First word of the car name is the manufacturer.
df['mfr'] = [x.split()[0] for x in df.name]
# Normalize misspelled manufacturer names present in the raw dataset.
df.loc[df.mfr=='chevy', 'mfr'] = 'chevrolet'
df.loc[df.mfr=='chevroelt', 'mfr'] = 'chevrolet'
df.loc[df.mfr=='maxda', 'mfr'] = 'mazda'
df.loc[df.mfr=='mercedes-benz', 'mfr'] = 'mercedes'
df.loc[df.mfr=='toyouta', 'mfr'] = 'toyota'
df.loc[df.mfr=='vokswagen', 'mfr'] = 'volkswagen'
df.loc[df.mfr=='vw', 'mfr'] = 'volkswagen'
del df['name']
# Partition columns: object dtype -> discrete axes; numeric columns with
# more than 20 distinct values are usable for quantile-based size/color.
columns = sorted(df.columns)
discrete = [x for x in columns if df[x].dtype == object]
continuous = [x for x in columns if x not in discrete]
quantileable = [x for x in continuous if len(df[x].unique()) > 20]
def create_figure():
    """Build a scatter plot of the currently selected x/y columns.

    Reads the module-level Select widgets (x, y, size, color) and the
    cleaned dataframe; returns a new bokeh figure.
    """
    xs = df[x.value].values
    ys = df[y.value].values
    x_title = x.value.title()
    y_title = y.value.title()

    kw = dict()
    # Discrete axes need an explicit categorical range.
    if x.value in discrete:
        kw['x_range'] = sorted(set(xs))
    if y.value in discrete:
        kw['y_range'] = sorted(set(ys))
    kw['title'] = "%s vs %s" % (x_title, y_title)

    p = figure(plot_height=600, plot_width=800, tools='pan,box_zoom,reset', **kw)
    p.xaxis.axis_label = x_title
    p.yaxis.axis_label = y_title
    if x.value in discrete:
        # Tilt category labels 45 degrees so long names do not overlap.
        # math.pi replaces pd.np.pi: the pandas.np alias is deprecated and
        # removed in pandas >= 1.0 (raises AttributeError there).
        p.xaxis.major_label_orientation = math.pi / 4

    # Marker size: fixed, or binned into quantiles of the chosen column.
    sz = 9
    if size.value != 'None':
        groups = pd.qcut(df[size.value].values, len(SIZES))
        sz = [SIZES[xx] for xx in groups.codes]

    # Marker color: fixed, or mapped through the palette by quantile.
    c = "#31AADE"
    if color.value != 'None':
        groups = pd.qcut(df[color.value].values, len(COLORS))
        c = [COLORS[xx] for xx in groups.codes]

    p.circle(x=xs, y=ys, color=c, size=sz, line_color="white", alpha=0.6, hover_color='white', hover_alpha=0.5)

    return p
def update(attr, old, new):
    """Select-widget callback: rebuild the figure and swap it into the layout."""
    new_figure = create_figure()
    layout.children[1] = new_figure
# Axis selectors: any cleaned column can be plotted on either axis.
x = Select(title='X-Axis', value='mpg', options=columns)
x.on_change('value', update)
y = Select(title='Y-Axis', value='hp', options=columns)
y.on_change('value', update)
# Size/color selectors: only quantile-binnable numeric columns, plus 'None'.
size = Select(title='Size', value='None', options=['None'] + quantileable)
size.on_change('value', update)
color = Select(title='Color', value='None', options=['None'] + quantileable)
color.on_change('value', update)
# Layout: controls on the left, plot on the right; update() replaces
# children[1] (the plot) in place.
controls = widgetbox([x, y, color, size], width=200)
layout = row(controls, create_figure())
curdoc().add_root(layout)
curdoc().title = "Crossfilter"
|
{
"content_hash": "eb7c70e251846ec2733c636a802a0477",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 111,
"avg_line_length": 29.347826086956523,
"alnum_prop": 0.6388888888888888,
"repo_name": "schoolie/bokeh",
"id": "39ded619a1f766ad09bc09e26244c3e413709275",
"size": "2700",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "examples/app/crossfilter/main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "92841"
},
{
"name": "CoffeeScript",
"bytes": "1132562"
},
{
"name": "HTML",
"bytes": "47972"
},
{
"name": "JavaScript",
"bytes": "25865"
},
{
"name": "Jupyter Notebook",
"bytes": "3981"
},
{
"name": "Makefile",
"bytes": "1164"
},
{
"name": "Python",
"bytes": "2426955"
},
{
"name": "Shell",
"bytes": "3718"
},
{
"name": "TypeScript",
"bytes": "130084"
}
],
"symlink_target": ""
}
|
import sys
from FileHelper import FileHelper
# This chunck factory creates chunks from textfile
class ChuncksFactory:
    """Splits the lines of a set of text files into size-bounded chunk files."""

    # Flush the in-memory buffer once its list object exceeds this many
    # bytes (512 MiB). NOTE(review): the original comment claimed "64Mo",
    # which does not match this constant, and sys.getsizeof on a list only
    # measures the pointer array, not the line strings themselves, so real
    # memory use per chunk is larger — confirm the intended threshold.
    MAX_CHUNK_SIZE = 536870912

    # Files is a list of files
    def __init__(self, files):
        self.chunck_completed = 0
        self.files = files
        # Number of chunks produced; -1 until divideIntoChunks() has run.
        self.nChunks = -1

    def divideIntoChunks(self, filenameGenerator):
        """Split the input files into chunk files.

        input : files with one input/line
        output : chunks with one word/lines (chunks == given size)

        Args:
            filenameGenerator: callable mapping a chunk index to the
                output file name for that chunk.
        """
        stackOfValues = []
        counter = 0
        for inputFileName in self.files:
            # 'with' closes the handle even on error (the original
            # close() was skipped when an exception was raised).
            with open(inputFileName, "r") as filePointer:
                for line in filePointer:
                    stackOfValues.append(line)
                    if sys.getsizeof(stackOfValues) > self.MAX_CHUNK_SIZE:
                        FileHelper.writeListInFile(
                            filenameGenerator(counter), stackOfValues)
                        counter += 1
                        # Reset the buffer. Previously it was never
                        # cleared, so every later chunk duplicated all
                        # earlier lines and the buffer grew without bound.
                        stackOfValues = []
        if len(stackOfValues) > 0:  # Just in case ...
            FileHelper.writeListInFile(filenameGenerator(counter), stackOfValues)
            counter += 1
        self.nChunks = counter
|
{
"content_hash": "1e58b8e31f1470bdcd35a3ac17e539c0",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 88,
"avg_line_length": 37.54838709677419,
"alnum_prop": 0.584192439862543,
"repo_name": "neosky2142/PyMR",
"id": "bc0cfda356d319a97c1ac8c4a17efdc781caf950",
"size": "1164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ChuncksFactory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37599"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import InstantiationContext
from caffe2.python.layers.tags import Tags
def _filter_layers(layers, include_tags):
if include_tags is None:
return layers
include_tags = set(include_tags)
return [l for l in layers if not include_tags.isdisjoint(l.tags)]
def shrink_output_schema(net, out_schema):
    """Drop schema columns whose blobs are not defined in the given net."""
    if len(out_schema.field_names()) <= 1:
        return out_schema

    # One BlobIsDefined probe per column, reused for all four field lists.
    exists = [net.BlobIsDefined(blob) for blob in out_schema.field_blobs()]

    def _keep(values):
        # Select the entries whose column survived the existence check.
        return [value for ok, value in zip(exists, values) if ok]

    return schema.from_column_list(
        _keep(out_schema.field_names()),
        _keep(out_schema.field_types()),
        _keep(out_schema.field_blobs()),
        _keep(out_schema.field_metadata()),
    )
def generate_predict_net(model, include_tags=None):
    """Instantiate a prediction net from the model's (filtered) layers."""
    predict_net = core.Net('predict_net')

    for layer in _filter_layers(model.layers, include_tags):
        # Guard clause instead of the original nested if.
        if Tags.EXCLUDE_FROM_PREDICTION in layer.tags:
            continue
        layer.add_operators(
            predict_net, context=InstantiationContext.PREDICTION)

    predict_net.set_input_record(model.input_feature_schema.clone())
    output_schema = shrink_output_schema(
        predict_net, model.output_schema.clone()
    )
    predict_net.set_output_record(output_schema)
    return predict_net
def generate_eval_net(model, include_tags=None):
    """Instantiate an evaluation net from the model's (filtered) layers."""
    eval_net = core.Net('eval_net')

    for layer in _filter_layers(model.layers, include_tags):
        if Tags.EXCLUDE_FROM_EVAL in layer.tags:
            continue
        layer.add_operators(eval_net, context=InstantiationContext.EVAL)

    # Eval consumes both the features and the trainer-only extras.
    eval_net.set_input_record(
        model.input_feature_schema + model.trainer_extra_schema)
    output_schema = shrink_output_schema(
        eval_net, model.output_schema + model.metrics_schema
    )
    eval_net.set_output_record(output_schema)
    return eval_net
def _generate_training_net_only(model, include_tags=None):
    """Build (train_init_net, train_net) without gradients or optimizers."""
    train_net = core.Net('train_net')
    train_init_net = model.create_init_net('train_init_net')

    for layer in _filter_layers(model.layers, include_tags):
        if Tags.EXCLUDE_FROM_TRAIN in layer.tags:
            continue
        layer.add_operators(train_net, train_init_net)

    # Training consumes both the features and the trainer-only extras.
    train_net.set_input_record(
        model.input_feature_schema + model.trainer_extra_schema)
    train_net.set_output_record(shrink_output_schema(
        train_net, model.output_schema + model.metrics_schema
    ))
    return train_init_net, train_net
def generate_training_nets_forward_only(model, include_tags=None):
    """Forward-pass-only training nets: no gradients, no optimizers."""
    return _generate_training_net_only(model, include_tags)
def generate_training_nets(model, include_tags=None):
    """Full training nets: forward pass plus gradient and optimizer ops."""
    train_init_net, train_net = _generate_training_net_only(model, include_tags)
    grad_map = train_net.AddGradientOperators(model.loss.field_blobs())
    model.apply_optimizers(train_net, train_init_net, grad_map)
    return train_init_net, train_net
|
{
"content_hash": "b45ab9d75cff7d30e855f58cf4fa8fb4",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 80,
"avg_line_length": 34.294117647058826,
"alnum_prop": 0.6732418524871355,
"repo_name": "pietern/caffe2",
"id": "f14597effd0f20690f70ac659900aef1ca072ca7",
"size": "4254",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "caffe2/python/layer_model_instantiator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5415"
},
{
"name": "C",
"bytes": "316608"
},
{
"name": "C++",
"bytes": "4743501"
},
{
"name": "CMake",
"bytes": "139649"
},
{
"name": "CSS",
"bytes": "2196"
},
{
"name": "Cuda",
"bytes": "671183"
},
{
"name": "HTML",
"bytes": "5203"
},
{
"name": "Makefile",
"bytes": "1225"
},
{
"name": "Metal",
"bytes": "36752"
},
{
"name": "Objective-C",
"bytes": "6505"
},
{
"name": "Objective-C++",
"bytes": "239139"
},
{
"name": "Python",
"bytes": "2902249"
},
{
"name": "Shell",
"bytes": "31734"
}
],
"symlink_target": ""
}
|
import json
from django.http import HttpRequest
from testcases import TestCaseWithFixture
class ViewsTestCase(TestCaseWithFixture):
    """End-to-end checks of the API views with alphanumeric primary keys.

    Exercises detail, list, and "set" endpoints for primary keys that
    contain letters, dashes, slashes, and dots, plus POSTing new objects.
    The copy-pasted request/assert sequences of the original are factored
    into private helpers; every original assertion is preserved.  The
    unused HttpRequest/request._body scaffolding was removed — it was
    never passed to the Django test client.
    """

    def _get_json(self, uri):
        # GET uri as JSON, assert a 200 response, return the parsed body.
        resp = self.client.get(uri, data={'format': 'json'})
        self.assertEqual(resp.status_code, 200)
        return json.loads(resp.content.decode('utf-8'))

    def _check_detail(self, uri, name):
        # A detail response carries 5 fields; verify the product name.
        deserialized = self._get_json(uri)
        self.assertEqual(len(deserialized), 5)
        self.assertEqual(deserialized['name'], name)

    def _check_set(self, uri, names):
        # A "set" response wraps its objects list in a single key.
        deserialized = self._get_json(uri)
        self.assertEqual(len(deserialized), 1)
        self.assertEqual(len(deserialized['objects']), len(names))
        self.assertEqual([obj['name'] for obj in deserialized['objects']], names)

    def test_gets(self):
        deserialized = self._get_json('/api/v1/')
        self.assertEqual(len(deserialized), 1)
        self.assertEqual(deserialized['products'], {'list_endpoint': '/api/v1/products/', 'schema': '/api/v1/products/schema/'})

        deserialized = self._get_json('/api/v1/products/')
        self.assertEqual(len(deserialized), 2)
        self.assertEqual(deserialized['meta']['limit'], 20)
        self.assertEqual(len(deserialized['objects']), 7)
        self.assertEqual([obj['name'] for obj in deserialized['objects']],
                         [u'Skateboardrampe', u'Bigwheel', u'Trampolin', u'Laufrad', u'Bigwheel', u'Human Hamsterball', u'Ant Farm'])

        self._check_detail('/api/v1/products/11111/', u'Skateboardrampe')
        self._check_set('/api/v1/products/set/11111;76123/',
                        [u'Skateboardrampe', u'Bigwheel'])

        # Same tests with \w+ instead of \d+ for primary key regexp:
        self._check_detail('/api/v1/products/WS65150-01/', u'Trampolin')
        self._check_set('/api/v1/products/set/WS65150-01;65100A-01/',
                        [u'Trampolin', u'Laufrad'])

        # And now Slashes
        self._check_detail('/api/v1/products/76123/01/', u'Bigwheel')
        self._check_set('/api/v1/products/set/76123/01;65100A-01/',
                        [u'Bigwheel', u'Laufrad'])
        self._check_detail('/api/v1/products/WS65150/01-01/', u'Human Hamsterball')
        self._check_set('/api/v1/products/set/76123/01;WS65150/01-01/',
                        [u'Bigwheel', u'Human Hamsterball'])

        # And now dots
        self._check_detail('/api/v1/products/WS77.86/', u'Ant Farm')

        # slashes, and more dots
        self._check_set('/api/v1/products/set/76123/01;WS77.86/',
                        [u'Bigwheel', u'Ant Farm'])

    def _post_product(self, name, artnr):
        # POST a product, then verify it is retrievable under its artnr key.
        post_data = json.dumps({'name': name, 'artnr': artnr})
        resp = self.client.post('/api/v1/products/', data=post_data, content_type='application/json')
        self.assertEqual(resp.status_code, 201)
        self.assertTrue(resp['location'].endswith('/api/v1/products/%s/' % artnr))

        # make sure posted object exists
        obj = self._get_json('/api/v1/products/%s/' % artnr)
        self.assertEqual(obj['name'], name)
        self.assertEqual(obj['artnr'], artnr)

    def test_posts(self):
        self._post_product('Ball', '12345')
        # With appended characters
        self._post_product('Ball 2', '12345ABC')
        # With prepended characters
        self._post_product('Ball 3', 'WK12345')
        # Now Primary Keys with Slashes
        self._post_product('Bigwheel', '76123/03')
        self._post_product('Trampolin', 'WS65150/02')
|
{
"content_hash": "0dd1f8e43b5d4e2e49cbc3b75c1cbfe7",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 128,
"avg_line_length": 49.567251461988306,
"alnum_prop": 0.6327277017461067,
"repo_name": "ipsosante/django-tastypie",
"id": "e45365dc41cded87e7ac5525a1291bd2e09647a8",
"size": "8476",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/alphanumeric/tests/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "988"
},
{
"name": "Python",
"bytes": "803225"
},
{
"name": "Shell",
"bytes": "1162"
}
],
"symlink_target": ""
}
|
import logging
import os
import socket
from oslo_concurrency import processutils
from oslo_config import cfg
import oslo_i18n
from oslo_service import service
from oslo_service import systemd
import pbr.version
# NOTE(dstanek): i18n.enable_lazy() must be called before
# keystone.i18n._() is called to ensure it has the desired lazy lookup
# behavior. This includes cases, like keystone.exceptions, where
# keystone.i18n._() is called at import time.
oslo_i18n.enable_lazy()
from keystone.common import environment
from keystone.common import utils
from keystone import config
from keystone.i18n import _
from keystone.server import common
from keystone.version import service as keystone_service
CONF = cfg.CONF
class ServerWrapper(object):
    """Wraps a Server with some launching info & capabilities."""

    def __init__(self, server, workers):
        self.server = server
        self.workers = workers

    def launch_with(self, launcher):
        """Bind the wrapped server's socket and hand it to the launcher."""
        self.server.listen()
        multi_process = self.workers > 1
        if multi_process:
            # Fan the service out across worker processes.
            launcher.launch_service(self.server, self.workers)
        else:
            # A single worker runs in-process.
            launcher.launch_service(self.server)
def create_server(conf, name, host, port, workers):
    """Build one eventlet endpoint as a (name, ServerWrapper) pair."""
    app = keystone_service.loadapp('config:%s' % conf, name)
    server = environment.Server(app, host=host, port=port,
                                keepalive=CONF.eventlet_server.tcp_keepalive,
                                keepidle=CONF.eventlet_server.tcp_keepidle)
    ssl_conf = CONF.eventlet_server_ssl
    if ssl_conf.enable:
        server.set_ssl(ssl_conf.certfile,
                       ssl_conf.keyfile,
                       ssl_conf.ca_certs,
                       ssl_conf.cert_required)
    return name, ServerWrapper(server, workers)
def serve(*servers):
    """Launch every (name, ServerWrapper) pair and block until done."""
    logging.warning(_('Running keystone via eventlet is deprecated as of Kilo '
                      'in favor of running in a WSGI server (e.g. mod_wsgi). '
                      'Support for keystone under eventlet will be removed in '
                      'the "M"-Release.'))

    # A multi-worker endpoint anywhere forces the process launcher.
    worker_counts = [wrapper.workers for _name, wrapper in servers]
    if max(worker_counts) > 1:
        launcher = service.ProcessLauncher(CONF)
    else:
        launcher = service.ServiceLauncher(CONF)

    for name, wrapper in servers:
        try:
            wrapper.launch_with(launcher)
        except socket.error:
            logging.exception(_('Failed to start the %(name)s server') % {
                'name': name})
            raise

    # notify calling process we are ready to serve
    systemd.notify_once()

    for name, wrapper in servers:
        launcher.wait()
def _get_workers(worker_type_config_opt):
    """Resolve a worker-count option, defaulting to max(2, cpu count)."""
    configured = CONF.eventlet_server.get(worker_type_config_opt)
    if configured:
        return configured
    # Option unset (or zero): fall back to the CPU count, minimum of 2.
    return max(2, processutils.get_worker_count())
def configure_threading():
    """Enable eventlet, skipping thread monkey-patching under pydev."""
    debugging_with_pydev = bool(utils.setup_remote_pydev_debug())
    # Monkey-patching threads breaks the pydev debugger; see
    # http://lists.openstack.org/pipermail/openstack-dev/2012-August/000794.html
    monkeypatch_thread = (not CONF.standard_threads) and not debugging_with_pydev
    environment.use_eventlet(monkeypatch_thread)
def run(possible_topdir):
    """Entry point: configure keystone and serve the admin + public endpoints.

    Args:
        possible_topdir: Candidate source-tree root; if it contains
            etc/keystone.conf, that development config file is preferred.
    """
    dev_conf = os.path.join(possible_topdir,
                            'etc',
                            'keystone.conf')
    config_files = None
    # Prefer the in-tree development config when it exists.
    if os.path.exists(dev_conf):
        config_files = [dev_conf]
    common.configure(
        version=pbr.version.VersionInfo('keystone').version_string(),
        config_files=config_files,
        pre_setup_logging_fn=configure_threading)
    paste_config = config.find_paste_config()
    def create_servers():
        # Deferred: invoked by common.setup_backends() below, after the
        # backends are initialized.
        admin_worker_count = _get_workers('admin_workers')
        public_worker_count = _get_workers('public_workers')
        servers = []
        servers.append(create_server(paste_config,
                                     'admin',
                                     CONF.eventlet_server.admin_bind_host,
                                     CONF.eventlet_server.admin_port,
                                     admin_worker_count))
        servers.append(create_server(paste_config,
                                     'main',
                                     CONF.eventlet_server.public_bind_host,
                                     CONF.eventlet_server.public_port,
                                     public_worker_count))
        return servers
    _unused, servers = common.setup_backends(
        startup_application_fn=create_servers)
    serve(*servers)
|
{
"content_hash": "c2531209ff1ad8a2303df6a69046ef8e",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 79,
"avg_line_length": 34.95035460992908,
"alnum_prop": 0.6152597402597403,
"repo_name": "maestro-hybrid-cloud/keystone",
"id": "f016b7f9f45bbcb3f72a29197674a1c841be594f",
"size": "5542",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/server/eventlet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "4000934"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
# Absolute path of the directory containing this setup.py.
root = os.path.abspath(os.path.dirname(__file__))


def path(*parts):
    """Join *parts* onto the project root (PEP 8: a def, not a named lambda)."""
    return os.path.join(root, *parts)


def _read_long_description():
    """Read README.rst, closing the handle promptly (the original leaked it)."""
    with open(path('README.rst')) as fh:
        return fh.read()


setup(
    name='bztools',
    version=__import__('bugzilla').__version__,
    description='Models and scripts to access the Bugzilla REST API.',
    long_description=_read_long_description(),
    author='Jeff Balogh',
    author_email='me@jeffbalogh.org',
    url='http://github.com/jbalogh/bztools',
    license='BSD',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    # install_requires=['remoteobjects>=1.1'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    entry_points={
        'console_scripts': [
            'bzattach = scripts.attach:main',
        ],
    },
)
|
{
"content_hash": "43da91efced10b8cb18fa8c3aabbb6e8",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 71,
"avg_line_length": 28.88888888888889,
"alnum_prop": 0.6153846153846154,
"repo_name": "jbalogh/bztools",
"id": "bc817b7aea2b1b3113eacd3156e4503d5f0f7eb8",
"size": "1040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
@package mi.dataset.parser.test
@file mi-dataset/mi/dataset/parser/test/test_cspp_eng_dcl.py
@author Jeff Roy
@brief Test code for a cspp_eng_dcl data parser
"""
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import RecoverableSampleException
from mi.core.log import get_logger
from mi.dataset.driver.cspp_eng.dcl.resource import RESOURCE_PATH
from mi.dataset.parser.cspp_eng_dcl import CsppEngDclParser
from mi.dataset.parser.utilities import particle_to_yml
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
MODULE_NAME = 'mi.dataset.parser.cspp_eng_dcl'
# The list of generated tests are the suggested tests, but there may
# be other tests needed to fully test your parser
@attr('UNIT', group='mi')
class CsppEngDclParserUnitTestCase(ParserUnitTestCase):
"""
cspp_eng_dcl Parser unit test suite
"""
    def create_yml(self, particles, filename):
        """Write the given particles to a YAML results file under RESOURCE_PATH."""
        particle_to_yml(particles, os.path.join(RESOURCE_PATH, filename))
def test_simple(self):
"""
Read data from a file and pull out data particles
one at a time. Verify that the results are those we expected.
The file all_responses contains at least one of all expected
NMEA responses copied from various sample logs.
"""
log.info('START TEST SIMPLE')
# test along the telemetered path, current config
with open(os.path.join(RESOURCE_PATH, 'all_responses.ucspp.log'), 'rU') as file_handle:
parser = CsppEngDclParser({},
file_handle,
self.exception_callback)
particles = parser.get_records(10)
self.assert_particles(particles, 'all_responses.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_bad_data(self):
"""
Verify RecoverableSampleException is raised when a malformed line is encountered
and processing continues parsing valid lines.
"""
log.info('START TEST SIMPLE')
# test along the telemetered path, current config
with open(os.path.join(RESOURCE_PATH, 'bad_data.ucspp.log'), 'rU') as file_handle:
parser = CsppEngDclParser({},
file_handle,
self.exception_callback)
particles = parser.get_records(10)
self.assert_particles(particles, 'all_responses.yml', RESOURCE_PATH)
self.assertEqual(len(self.exception_callback_value), 1)
self.assertIsInstance(self.exception_callback_value[0], RecoverableSampleException)
def test_bad_checksum(self):
"""
Verify RecoverableSampleException is raised when a bad checksum is encountered
and processing continues parsing valid lines.
"""
log.info('START TEST SIMPLE')
# test along the telemetered path, current config
with open(os.path.join(RESOURCE_PATH, 'bad_checksum.ucspp.log'), 'rU') as file_handle:
parser = CsppEngDclParser({},
file_handle,
self.exception_callback)
particles = parser.get_records(10)
self.assertEqual(len(particles), 3)
self.assertEqual(len(self.exception_callback_value), 1)
self.assertIsInstance(self.exception_callback_value[0], RecoverableSampleException)
def test_bad_nmea_count(self):
"""
Verify RecoverableSampleException is raised when a nmea record is encountered
that does not contain the expected number of fields
and processing continues parsing valid lines.
"""
log.info('START TEST SIMPLE')
# test along the telemetered path, current config
with open(os.path.join(RESOURCE_PATH, 'bad_count.ucspp.log'), 'rU') as file_handle:
parser = CsppEngDclParser({},
file_handle,
self.exception_callback)
particles = parser.get_records(10)
self.assertEqual(len(particles), 3)
self.assertEqual(len(self.exception_callback_value), 1)
self.assertIsInstance(self.exception_callback_value[0], RecoverableSampleException)
def test_just_header(self):
"""
Verify a file containing just the header is parsed correctly
"""
log.info('START TEST SIMPLE')
# test along the telemetered path, current config
with open(os.path.join(RESOURCE_PATH, 'just_header.ucspp.log'), 'rU') as file_handle:
parser = CsppEngDclParser({},
file_handle,
self.exception_callback)
particles = parser.get_records(10)
self.assertEqual(len(particles), 1)
self.assertEqual(self.exception_callback_value, [])
def test_partial_header(self):
"""
Verify a file containing just the header is parsed correctly
"""
log.info('START TEST SIMPLE')
# test along the telemetered path, current config
with open(os.path.join(RESOURCE_PATH, 'partial_header.ucspp.log'), 'rU') as file_handle:
parser = CsppEngDclParser({},
file_handle,
self.exception_callback)
particles = parser.get_records(10)
self.assertEqual(len(particles), 1)
self.assertEqual(self.exception_callback_value, [])
self.assertEqual(particles[0]._values[1].get('value'), None)
def test_no_date(self):
"""
Verify that a file that does not contain a DATE record
correctly creates the partially populated Data particle
with alternate timestamp.
"""
log.info('START TEST SIMPLE')
# test along the telemetered path, current config
with open(os.path.join(RESOURCE_PATH, 'no_date.ucspp.log'), 'rU') as file_handle:
parser = CsppEngDclParser({},
file_handle,
self.exception_callback)
particles = parser.get_records(10)
self.assert_particles(particles, 'no_date.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_no_response(self):
"""
Verify a file containing no responses is parsed correctly
"""
log.info('START TEST SIMPLE')
# test along the telemetered path, current config
with open(os.path.join(RESOURCE_PATH, 'no_response.ucspp.log'), 'rU') as file_handle:
parser = CsppEngDclParser({},
file_handle,
self.exception_callback)
particles = parser.get_records(10)
self.assertEqual(len(particles), 0)
self.assertEqual(self.exception_callback_value, [])
|
{
"content_hash": "d307a3bbde319fbf29de767bd8ed900c",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 96,
"avg_line_length": 42.05357142857143,
"alnum_prop": 0.6048124557678698,
"repo_name": "vipullakhani/mi-instrument",
"id": "fb48abfd9a0769a2bcb94d3b8818f716ac98949a",
"size": "7088",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "mi/dataset/parser/test/test_cspp_eng_dcl.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4746"
},
{
"name": "Python",
"bytes": "9968191"
}
],
"symlink_target": ""
}
|
"""Migrate an undercloud's stack data to use ephemeral Heat. Queries for
existing stacks and exports necessary data from the stack to the default
consistent working directory before backing up and dropping the heat database.
"""
import argparse
import logging
import os
import subprocess
import tarfile
import tempfile
import time
import yaml
from heatclient.client import Client
import keystoneauth1
from mistralclient.api import base as mistralclient_exc
from mistralclient.api import client as mistral_client
import openstack
from tripleo_common.utils import plan as plan_utils
LOG = logging.getLogger('undercloud')
ROLE_DATA_MAP_FILE = ('/var/lib/tripleo-config/'
'overcloud-stack-role-data-file-map.yaml')
def parse_args():
    """Build and run the command line parser for the upgrade script.

    :return: Parsed arguments (cloud, stack, working_dir)
    :rtype: argparse.Namespace
    """
    arg_parser = argparse.ArgumentParser(
        description="Upgrade an undercloud for ephemeral Heat.")
    arg_parser.add_argument(
        '--cloud', '-c',
        default='undercloud',
        help='The name of the cloud used for the OpenStack connection.')
    # 'append' lets the flag be repeated; absence leaves the value as None,
    # which the caller treats as "all stacks".
    arg_parser.add_argument(
        '--stack', '-s',
        action='append',
        help='The stack(s) to migrate to using ephemeral Heat. Can be '
             'specified multiple times. If not specified, all stacks '
             'will be migrated')
    arg_parser.add_argument(
        '--working-dir', '-w',
        help='Directory to use for saving stack state. '
             'Defaults to ~/overcloud-deploy/<stack>')
    return arg_parser.parse_args()
def database_exists():
    """Check whether the heat database exists in the mysql container.

    :return: True if the heat database exists, otherwise False
    :rtype: bool
    """
    query_cmd = [
        'sudo', 'podman', 'exec', '-u', 'root', 'mysql',
        'mysql', '-e', 'show databases like "heat"',
    ]
    # check_output returns bytes; the substring test on str(...) matches the
    # original behavior.
    return 'heat' in str(subprocess.check_output(query_cmd))
def backup_db(backup_dir):
    """Backup the heat database to the specified directory.

    Dumps the heat database via mysqldump into
    <backup_dir>/heat-launcher/heat-db.sql and archives the dump into a
    timestamped bzip2 tarfile.

    :param backup_dir: The directory to store the backup
    :type backup_dir: str
    :return: Database tarfile backup path
    :rtype: str
    """
    heat_dir = os.path.join(backup_dir, 'heat-launcher')
    # exist_ok avoids a race between the existence check and creation
    os.makedirs(heat_dir, exist_ok=True)
    db_path = os.path.join(heat_dir, 'heat-db.sql')
    LOG.info("Backing up heat database to {}".format(db_path))
    with open(db_path, 'w') as out:
        subprocess.run([
            'sudo', 'podman', 'exec', '-u', 'root',
            'mysql', 'mysqldump', 'heat'], stdout=out,
            check=True)
    # Restrict permissions on the raw dump.
    os.chmod(db_path, 0o600)
    tf_name = '{}-{}.tar.bzip2'.format(db_path, time.time())
    # Context manager guarantees the archive is closed even if add() fails
    # (the original leaked the open tarfile on error).
    with tarfile.open(tf_name, 'w:bz2') as tf:
        tf.add(db_path, os.path.basename(db_path))
    LOG.info("Created tarfile {}".format(tf_name))
    return tf_name
def _decode(encoded):
"""Decode a string into utf-8
:param encoded: Encoded string
:type encoded: string
:return: Decoded string
:rtype: string
"""
if not encoded:
return ""
decoded = encoded.decode('utf-8')
if decoded.endswith('\n'):
decoded = decoded[:-1]
return decoded
def _get_ctlplane_vip():
    """Get the configured ctlplane VIP from hiera.

    :return: ctlplane VIP
    :rtype: str
    """
    raw = subprocess.check_output(
        ['sudo', 'hiera', 'controller_virtual_ip'])
    return _decode(raw)
def _get_ctlplane_ip():
    """Get the configured ctlplane IP from hiera.

    :return: ctlplane IP
    :rtype: str
    """
    raw = subprocess.check_output(['sudo', 'hiera', 'ctlplane'])
    return _decode(raw)
def _make_stack_dirs(stacks, working_dir):
"""Create stack directory if it does not already exist
:stacks: List of overcloud stack names
:type stacks: list
:working_dir: Path to working directly
:type working_dir: str
:return: None
"""
for stack in stacks:
stack_dir = os.path.join(working_dir, stack)
if not os.path.exists(stack_dir):
os.makedirs(stack_dir)
def _log_and_raise(msg):
    """Log an error message, then raise it as a generic Exception.

    :param msg: Message string that will be logged and used as the
        Exception message
    :type msg: str
    :return: None (never returns normally)
    """
    LOG.error(msg)
    raise Exception(msg)
def _get_role_data_file(heat, stack, fd, temp_file_path):
    """Get the role data file for a stack.

    First tries to read the roles data from the stack outputs and write it
    to the supplied temp file. If the stack does not expose that output,
    falls back to the path recorded in ROLE_DATA_MAP_FILE.

    :param heat: Heat client
    :type heat: heatclient.client.Client
    :param stack: Stack name to query for roles data
    :type stack: str
    :param fd: Open file descriptor for the temp file; consumed (closed) by
        this function on both the success and the fallback path
    :type fd: int
    :param temp_file_path: Path to role data temp file
    :type temp_file_path: str
    :return: Path to the role data file
    :rtype: str
    """
    try:
        _stack = heat.get_stack(stack)
        stack_outputs = {i['output_key']: i['output_value']
                         for i in _stack.outputs}
        roles_data = stack_outputs[
            'TripleoHeatTemplatesJinja2RenderingDataSources']['roles_data']
        with os.fdopen(fd, 'w') as tmp:
            tmp.write(yaml.safe_dump(roles_data))
        roles_data_file = temp_file_path
    except KeyError:
        # The stack output was missing, so os.fdopen() never consumed the
        # descriptor; close it explicitly to avoid leaking it (the caller
        # only removes the temp file path, not the fd).
        os.close(fd)
        if not os.path.isfile(ROLE_DATA_MAP_FILE):
            _log_and_raise("Overcloud stack role data mapping file: {} was "
                           "not found.".format(ROLE_DATA_MAP_FILE))
        with open(ROLE_DATA_MAP_FILE, 'r') as f:
            data = yaml.safe_load(f.read())
        roles_data_file = data.get(stack)
        if not roles_data_file or not os.path.isfile(roles_data_file):
            _log_and_raise("Roles data file: {} for stack {} not found."
                           .format(roles_data_file, stack))
    return roles_data_file
def drop_db():
    """Drop the heat database and the heat database users.

    :return: None
    :rtype: None
    """
    mysql_exec = ['sudo', 'podman', 'exec', '-u', 'root', 'mysql', 'mysql']
    LOG.info("Dropping Heat database")
    subprocess.check_call(
        mysql_exec + ['heat', '-e', 'drop database if exists heat'])
    LOG.info("Dropping Heat users")
    # Drop the user entry for each host it may have been created for:
    # ctlplane IP, ctlplane VIP, and the wildcard host.
    for host in (_get_ctlplane_ip(), _get_ctlplane_vip(), '%'):
        subprocess.check_call(
            mysql_exec + [
                '-e',
                "drop user if exists 'heat'@'{}'".format(host)])
def export_passwords(heat, stack, stack_dir):
    """Export passwords from an existing stack into a Heat environment file.

    :param heat: Heat client
    :type heat: heatclient.client.Client
    :param stack: Stack name to query for passwords
    :type stack: str
    :param stack_dir: Directory to save the generated Heat environment
        containing the password values.
    :type stack_dir: str
    :return: None
    :rtype: None
    """
    out_path = os.path.join(
        stack_dir, "tripleo-{}-passwords.yaml".format(stack))
    LOG.info("Exporting passwords for stack %s to %s"
             % (stack, out_path))
    generated = plan_utils.generate_passwords(heat=heat, container=stack)
    with open(out_path, 'w') as f:
        f.write(yaml.safe_dump(dict(parameter_defaults=generated)))
    # Passwords are sensitive; keep the file owner-only.
    os.chmod(out_path, 0o600)
def export_networks(stack, stack_dir, cloud):
    """Export networks from an existing stack and write the network data file.

    :param stack: Stack name to query for networks
    :type stack: str
    :param stack_dir: Directory to save the generated network data file
        containing the stack network definitions.
    :type stack_dir: str
    :param cloud: Cloud name used to send OpenStack commands
    :type cloud: str
    :return: None
    :rtype: None
    """
    out_path = os.path.join(
        stack_dir, "tripleo-{}-network-data.yaml".format(stack))
    LOG.info("Exporting network from stack %s to %s"
             % (stack, out_path))
    extract_cmd = ['openstack', 'overcloud', 'network', 'extract',
                   '--stack', stack, '--output', out_path, '--yes']
    subprocess.check_call(extract_cmd, env={'OS_CLOUD': cloud})
    os.chmod(out_path, 0o600)
def export_network_virtual_ips(stack, stack_dir, cloud):
    """Export network virtual IPs from an existing stack into a vip data file.

    :param stack: Stack name to query for networks
    :type stack: str
    :param stack_dir: Directory to save the generated data file
        containing the stack virtual IP definitions.
    :type stack_dir: str
    :param cloud: Cloud name used to send OpenStack commands
    :type cloud: str
    :return: None
    :rtype: None
    """
    out_path = os.path.join(
        stack_dir, "tripleo-{}-virtual-ips.yaml".format(stack))
    LOG.info("Exporting network virtual IPs from stack %s to %s"
             % (stack, out_path))
    extract_cmd = ['openstack', 'overcloud', 'network', 'vip', 'extract',
                   '--stack', stack, '--output', out_path, '--yes']
    subprocess.check_call(extract_cmd, env={'OS_CLOUD': cloud})
    os.chmod(out_path, 0o600)
def export_provisioned_nodes(heat, stack, stack_dir, cloud):
    """Export provisioned nodes from an existing stack and write baremetal
    deployment definition file.

    :param heat: Heat client
    :type heat: heatclient.client.Client
    :param stack: Stack name to query for provisioned nodes
    :type stack: str
    :param stack_dir: Directory to save the generated data file
        containing the stack baremetal deployment definitions.
    :type stack_dir: str
    :param cloud: Cloud name used to send OpenStack commands
    :type cloud: str
    :return: None
    :rtype: None
    """
    # The fd is handed to _get_role_data_file, which is responsible for
    # consuming it; only the path is cleaned up here.
    fd, temp_file_path = tempfile.mkstemp()
    try:
        roles_data_file = _get_role_data_file(heat, stack, fd, temp_file_path)
        bm_deployment_path = os.path.join(
            stack_dir, "tripleo-{}-baremetal-deployment.yaml".format(stack))
        LOG.info("Exporting provisioned nodes from stack %s to %s"
                 % (stack, bm_deployment_path))
        subprocess.check_call(['openstack', 'overcloud', 'node', 'extract',
                               'provisioned', '--stack', stack, '--roles-file',
                               roles_data_file, '--output',
                               bm_deployment_path, '--yes'], env={'OS_CLOUD': cloud})
        os.chmod(bm_deployment_path, 0o600)
    finally:
        os.remove(temp_file_path)
def main():
    """Entry point: migrate undercloud stack data for ephemeral Heat.

    For each selected stack, exports networks, virtual IPs, provisioned
    nodes, passwords, and mistral ssh keys into the working directory;
    backs up the heat database; and finally drops it.
    """
    logging.basicConfig()
    LOG.setLevel(logging.INFO)
    args = parse_args()
    sudo_user = os.environ.get('SUDO_USER')
    # Default the working dir to the invoking user's home, not root's,
    # when the script runs under sudo.
    if not args.working_dir:
        if sudo_user:
            user_home = '~{}'.format(sudo_user)
        else:
            user_home = '~'
        working_dir = os.path.join(
            os.path.expanduser(user_home),
            'overcloud-deploy')
    else:
        working_dir = args.working_dir
    if not os.path.isdir(working_dir):
        os.makedirs(working_dir)
    conn = openstack.connection.from_config(cloud=args.cloud)
    try:
        heat = conn.orchestration
        _heatclient = Client('1', endpoint=conn.endpoint_for('orchestration'),
                             token=conn.auth_token)
    except keystoneauth1.exceptions.catalog.EndpointNotFound:
        LOG.error("No Heat endpoint found, won't migrate any "
                  "existing stack data.")
        raise
    try:
        stacks = args.stack or [s.name for s in heat.stacks()]
    except openstack.exceptions.HttpException:
        LOG.warning("No connection to Heat available, won't migrate any "
                    "existing stack data.")
        stacks = []
    # Make stack directories in the working directory if they do not exist
    _make_stack_dirs(stacks, working_dir)
    for stack in stacks:
        stack_dir = os.path.join(working_dir, stack)
        export_networks(stack, stack_dir, args.cloud)
        export_network_virtual_ips(stack, stack_dir, args.cloud)
        export_provisioned_nodes(heat, stack, stack_dir, args.cloud)
    if database_exists():
        backup_dir = os.path.join(
            working_dir,
            'undercloud-upgrade-ephemeral-heat')
        db_tar_path = backup_db(backup_dir)
    else:
        LOG.warning("No database found to backup.")
        db_tar_path = None
    # Get and store ssh keys from mistral environment
    env_ssh_keys = None
    try:
        _workflowclient = mistral_client.client(
            mistral_url=conn.endpoint_for('workflow'),
            session=conn.session)
        env_ssh_keys = _workflowclient.environments.get('ssh_keys')
    except (keystoneauth1.exceptions.catalog.EndpointNotFound,
            mistralclient_exc.APIException):
        # Fixed: the original implicit string concatenation produced
        # "environmentused" (missing space between the fragments).
        LOG.warning("Can not get ssh_keys from mistral environment "
                    "used for tripleo-admin user. This may cause "
                    "issues after upgrade.")
    for stack in stacks:
        stack_dir = os.path.join(working_dir, stack)
        if db_tar_path:
            # Symlink to the existing db backup
            os.symlink(db_tar_path,
                       os.path.join(stack_dir, os.path.basename(db_tar_path)))
        export_passwords(_heatclient, stack, stack_dir)
        # Write the keys to stack_dir
        if env_ssh_keys:
            private_key = env_ssh_keys.variables['private_key']
            public_key = env_ssh_keys.variables['public_key']
            ssh_key_file = os.path.join(stack_dir, 'ssh_private_key')
            # os.open with mode=0o600 ensures the key files are created
            # owner-only from the start.
            with os.fdopen(
                    os.open(ssh_key_file,
                            flags=(os.O_WRONLY | os.O_CREAT | os.O_TRUNC),
                            mode=0o600), 'w') as fp:
                fp.write(private_key)
            with os.fdopen(
                    os.open('{}.pub'.format(ssh_key_file),
                            flags=(os.O_WRONLY | os.O_CREAT | os.O_TRUNC),
                            mode=0o600), 'w') as fp:
                fp.write(public_key)
    if database_exists():
        drop_db()
    # Chown all files to original user if running under sudo
    if sudo_user:
        subprocess.run([
            'chown', '-R', '{}:{}'.format(sudo_user, sudo_user),
            working_dir],
            check=True)


if __name__ == '__main__':
    main()
|
{
"content_hash": "810da1fe4c8171c985bfc24791d04b31",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 85,
"avg_line_length": 33.4,
"alnum_prop": 0.6049993037181451,
"repo_name": "openstack/tripleo-heat-templates",
"id": "c6ed9bfcd0e6e1aabee6e05f3128d14fb700f619",
"size": "14939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/undercloud-upgrade-ephemeral-heat.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "80675"
},
{
"name": "Python",
"bytes": "391465"
},
{
"name": "Shell",
"bytes": "52827"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the shared liquid survey tool tangible template object."""
    obj = Tangible()
    obj.template = "object/tangible/survey_tool/shared_survey_tool_liquid.iff"
    obj.attribute_template_id = 1
    obj.stfName("item_n","survey_tool_liquid")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return obj
|
{
"content_hash": "5f4034fb26e0e4f2decee34f916c6e12",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 24.076923076923077,
"alnum_prop": 0.6996805111821086,
"repo_name": "obi-two/Rebelion",
"id": "3b6ea54effcfa57a3d56e13b468d5c642ea4ca8a",
"size": "458",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/survey_tool/shared_survey_tool_liquid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""
Our hero Teemo is attacking an enemy Ashe with poison attacks! When Teemo attacks Ashe, Ashe gets poisoned for exactly duration seconds. More formally, an attack at second t will mean Ashe is poisoned during the inclusive time interval [t, t + duration - 1]. If Teemo attacks again before the poison effect ends, the timer for it is reset, and the poison effect will end duration seconds after the new attack.
You are given a non-decreasing integer array timeSeries, where timeSeries[i] denotes that Teemo attacks Ashe at second timeSeries[i], and an integer duration.
Return the total number of seconds that Ashe is poisoned.
Example 1:
Input: timeSeries = [1,4], duration = 2
Output: 4
Explanation: Teemo's attacks on Ashe go as follows:
- At second 1, Teemo attacks, and Ashe is poisoned for seconds 1 and 2.
- At second 4, Teemo attacks, and Ashe is poisoned for seconds 4 and 5.
Ashe is poisoned for seconds 1, 2, 4, and 5, which is 4 seconds in total.
Example 2:
Input: timeSeries = [1,2], duration = 2
Output: 3
Explanation: Teemo's attacks on Ashe go as follows:
- At second 1, Teemo attacks, and Ashe is poisoned for seconds 1 and 2.
- At second 2 however, Teemo attacks again and resets the poison timer. Ashe is poisoned for seconds 2 and 3.
Ashe is poisoned for seconds 1, 2, and 3, which is 3 seconds in total.
Constraints:
1 <= timeSeries.length <= 10^4
0 <= timeSeries[i], duration <= 10^7
timeSeries is sorted in non-decreasing order.
"""
class Solution:
    def findPoisonedDuration(self, timeSeries: List[int], duration: int) -> int:
        """Total poisoned seconds: each attack contributes a full duration,
        clipped to the time remaining since the previous poison window."""
        poisoned = 0
        prev_end = 0
        for attack in timeSeries:
            # Overlapping windows contribute only the non-overlapped part.
            poisoned += min(duration, attack + duration - prev_end)
            prev_end = attack + duration
        return poisoned
|
{
"content_hash": "d9138636df7b40b1d2d30e9658a0633a",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 411,
"avg_line_length": 40.81818181818182,
"alnum_prop": 0.705456570155902,
"repo_name": "franklingu/leetcode-solutions",
"id": "d02e103e1562e9efdb3e19270e568b2c3d5cab64",
"size": "1798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questions/teemo-attacking/Solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8919"
},
{
"name": "Java",
"bytes": "173033"
},
{
"name": "Python",
"bytes": "996874"
},
{
"name": "Shell",
"bytes": "2559"
}
],
"symlink_target": ""
}
|
import pytest
from landlab import RasterModelGrid
from landlab.components import LandslideProbability
@pytest.fixture
def ls_prob():
    """Return a LandslideProbability component on a 20x20 raster grid with
    every required input field initialized to zeros at the nodes."""
    grid = RasterModelGrid((20, 20), xy_spacing=10e0)
    # The slope field is created with an explicit float dtype.
    grid.add_zeros("topographic__slope", at="node", dtype=float)
    zero_fields = (
        "topographic__specific_contributing_area",
        "soil__transmissivity",
        "soil__saturated_hydraulic_conductivity",
        "soil__mode_total_cohesion",
        "soil__minimum_total_cohesion",
        "soil__maximum_total_cohesion",
        "soil__internal_friction_angle",
        "soil__density",
        "soil__thickness",
    )
    for field_name in zero_fields:
        grid.add_zeros(field_name, at="node")
    return LandslideProbability(grid)
|
{
"content_hash": "9d9a8b34b89fefa07bf29e60367ee9c9",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 72,
"avg_line_length": 39.76190476190476,
"alnum_prop": 0.7065868263473054,
"repo_name": "landlab/landlab",
"id": "e46af0d129910c40087c1a069715ed89cc38680d",
"size": "835",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/components/landslides/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "762"
},
{
"name": "Cython",
"bytes": "265735"
},
{
"name": "Gherkin",
"bytes": "1601"
},
{
"name": "Jupyter Notebook",
"bytes": "1373117"
},
{
"name": "Makefile",
"bytes": "2250"
},
{
"name": "Python",
"bytes": "4497175"
},
{
"name": "Roff",
"bytes": "445"
},
{
"name": "Shell",
"bytes": "1073"
},
{
"name": "TeX",
"bytes": "42252"
}
],
"symlink_target": ""
}
|
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class FwaasDriverBase(object):
    """Firewall as a Service Driver base class.

    Using FwaasDriver Class, an instance of L3 perimeter Firewall
    can be created. The firewall co-exists with the L3 agent.

    One instance is created for each tenant. One firewall policy
    is associated with each tenant (in the Havana release).

    The Firewall can be visualized as having two zones (in the Havana
    release), trusted and untrusted.

    All the 'internal' interfaces of a Neutron Router are treated as trusted.
    The interface connected to 'external network' is treated as untrusted.

    The policy is applied on traffic ingressing/egressing interfaces on
    the trusted zone. This implies that policy will be applied for traffic
    passing from
        - trusted to untrusted zones
        - untrusted to trusted zones
        - trusted to trusted zones
    Policy WILL NOT be applied for traffic from untrusted to untrusted zones.
    This is not a problem in the Havana release as there is only one interface
    connected to the external network.

    Since the policy is applied on the internal interfaces, the traffic
    will not be NATed to floating IP. For incoming traffic, the
    traffic will get NATed to internal IP address before it hits
    the firewall rules. So, while writing the rules, care should be
    taken if using rules based on floating IP.

    The firewall rule addition/deletion/insertion/update are done by the
    management console. When the policy is sent to the driver, the complete
    policy is sent and the whole policy has to be applied atomically. The
    firewall rules will not get updated individually. This is to avoid problems
    related to out-of-order notifications or inconsistent behaviour by partial
    application of rules.
    """
    @abc.abstractmethod
    def create_firewall(self, apply_list, firewall):
        """Create the Firewall with default (drop all) policy.

        The default policy will be applied on all the interfaces of the
        trusted zone.
        """
        pass
    @abc.abstractmethod
    def delete_firewall(self, apply_list, firewall):
        """Delete firewall.

        Removes all policies created by this instance and frees up
        all the resources.
        """
        pass
    @abc.abstractmethod
    def update_firewall(self, apply_list, firewall):
        """Apply the policy on all trusted interfaces.

        Remove the previous policy and apply the new policy on all trusted
        interfaces.
        """
        pass
    @abc.abstractmethod
    def apply_default_policy(self, apply_list, firewall):
        """Apply the default policy on all trusted interfaces.

        Remove the current policy and apply the default policy on all trusted
        interfaces.
        """
        pass
class NoopFwaasDriver(FwaasDriverBase):
    """Noop Fwaas Driver.

    Firewall driver which does nothing.
    This driver is for disabling Fwaas functionality.
    """

    def create_firewall(self, apply_list, firewall):
        """Do nothing; FWaaS is disabled."""

    def delete_firewall(self, apply_list, firewall):
        """Do nothing; FWaaS is disabled."""

    def update_firewall(self, apply_list, firewall):
        """Do nothing; FWaaS is disabled."""

    def apply_default_policy(self, apply_list, firewall):
        """Do nothing; FWaaS is disabled."""
|
{
"content_hash": "693c7d54b34452d22e8f0a449fa5bb9c",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 79,
"avg_line_length": 32.56435643564357,
"alnum_prop": 0.6983885679537853,
"repo_name": "CiscoSystems/vespa",
"id": "7128e251a2af17b1263013d9ae3cb7608124db50",
"size": "4021",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/services/firewall/drivers/fwaas_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Python",
"bytes": "7243854"
},
{
"name": "Shell",
"bytes": "8983"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
"""
Script that trains DTNN models on qm7 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
# Seed numpy before loading/featurizing the dataset for reproducibility.
np.random.seed(123)
import tensorflow as tf
# Seed tensorflow as well so model weight initialization is deterministic.
tf.random.set_seed(123)
import deepchem as dc
# Load QM7 dataset
tasks, datasets, transformers = dc.molnet.load_qm7_from_mat(move_mean=False)
train_dataset, valid_dataset, test_dataset = datasets
# Fit models
metric = [
    dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"),
    dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")
]
# Batch size of models
batch_size = 50
# Hyperparameters passed to DTNNModel below.
n_embedding = 30
n_distance = 51
distance_min = -1.
distance_max = 9.2
n_hidden = 15
rate = 0.001
model = dc.models.DTNNModel(
    len(tasks),
    n_embedding=n_embedding,
    n_hidden=n_hidden,
    n_distance=n_distance,
    distance_min=distance_min,
    distance_max=distance_max,
    output_activation=False,
    batch_size=batch_size,
    learning_rate=rate,
    use_queue=False,
    mode="regression")
# Fit trained model
model.fit(train_dataset, nb_epoch=50)
# Evaluate on the train and validation splits (test_dataset is unused here).
train_scores = model.evaluate(train_dataset, metric, transformers)
valid_scores = model.evaluate(valid_dataset, metric, transformers)
|
{
"content_hash": "6627901a93d6d4735ead216bc4c347e9",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 76,
"avg_line_length": 24.8,
"alnum_prop": 0.7258064516129032,
"repo_name": "miaecle/deepchem",
"id": "7d3cf3ff0668303029c749c790fc927661183caa",
"size": "1240",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/qm7/qm7_tensorgraph_DTNN.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16453"
},
{
"name": "Dockerfile",
"bytes": "794"
},
{
"name": "HTML",
"bytes": "20618"
},
{
"name": "Jupyter Notebook",
"bytes": "59756"
},
{
"name": "Python",
"bytes": "2442260"
},
{
"name": "Shell",
"bytes": "11183"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
import contextlib
import pytest
from pyleus.storm import StormTuple, Bolt, SimpleBolt
from pyleus.testing import ComponentTestCase, mock
class TestBolt(ComponentTestCase):
    """Unit tests for the low-level Bolt component's ack/fail/emit protocol."""
    INSTANCE_CLS = Bolt
    def test_ack(self):
        # ack() must send an 'ack' command carrying the tuple's id.
        tup = mock.Mock(id=1234)
        with mock.patch.object(self.instance, 'send_command', autospec=True) as \
                mock_send_command:
            self.instance.ack(tup)
        mock_send_command.assert_called_once_with('ack', {'id': tup.id})
    def test_heartbeat(self):
        # A heartbeat tuple is answered with 'sync' only -- it must not be
        # processed or acked like a data tuple.
        heartbeat = StormTuple(None, None, '__heartbeat', -1, [])
        with mock.patch.multiple(self.instance,
                                 process_tuple=mock.DEFAULT,
                                 ack=mock.DEFAULT,
                                 send_command=mock.DEFAULT) as values:
            self.instance._process_tuple(heartbeat)
        values['send_command'].assert_called_once_with('sync')
        assert not values['process_tuple'].called
        assert not values['ack'].called
    def test_fail(self):
        # fail() must send a 'fail' command carrying the tuple's id.
        tup = mock.Mock(id=1234)
        with mock.patch.object(self.instance, 'send_command', autospec=True) as \
                mock_send_command:
            self.instance.fail(tup)
        mock_send_command.assert_called_once_with('fail', {'id': tup.id})
    @contextlib.contextmanager
    def _test_emit_helper(self, expected_command_dict):
        """Patch read_taskid/send_command around an emit and assert the
        'emit' command was sent with expected_command_dict."""
        with mock.patch.object(self.instance, 'read_taskid', autospec=True) as mock_read_taskid:
            with mock.patch.object(self.instance, 'send_command', autospec=True) as mock_send_command:
                yield mock_send_command
        mock_read_taskid.assert_called_once_with()
        mock_send_command.assert_called_once_with('emit', expected_command_dict)
    @contextlib.contextmanager
    def _test_emit_helper_no_taskid(self, expected_command_dict):
        """Like _test_emit_helper, but assert read_taskid is never called
        (the need_task_ids=False path)."""
        with mock.patch.object(self.instance, 'read_taskid', autospec=True) as mock_read_taskid:
            with mock.patch.object(self.instance, 'send_command', autospec=True) as mock_send_command:
                yield mock_send_command
        assert mock_read_taskid.call_count == 0
        mock_send_command.assert_called_once_with('emit', expected_command_dict)
    def test_emit_simple(self):
        expected_command_dict = {
            'anchors': [],
            'tuple': (1, 2, 3),
        }
        with self._test_emit_helper(expected_command_dict):
            self.instance.emit((1, 2, 3))
    def test_emit_simple_no_taskid(self):
        expected_command_dict = {
            'anchors': [],
            'tuple': (1, 2, 3),
            'need_task_ids': False,
        }
        with self._test_emit_helper_no_taskid(expected_command_dict):
            self.instance.emit((1, 2, 3), need_task_ids=False)
    def test_emit_with_list(self):
        # A list payload is emitted as a tuple.
        expected_command_dict = {
            'anchors': [],
            'tuple': tuple([1, 2, 3]),
        }
        with self._test_emit_helper(expected_command_dict):
            self.instance.emit([1, 2, 3])
    def test_emit_with_namedtuple(self):
        """Regression test for PYLEUS-60
        Some versions of simplejson serialize namedtuples differently, so
        Pyleus casts all outgoing tuple values to actual Python tuples before
        emitting.
        """
        MyTuple = namedtuple('MyTuple', "a b c")
        values = MyTuple(1, 2, 3)
        expected_command_dict = {
            'anchors': [],
            'tuple': tuple(values),
        }
        with self._test_emit_helper(expected_command_dict) as mock_send_command:
            self.instance.emit(values)
            # The emitted value must be a plain tuple, not the namedtuple.
            _, command_dict = mock_send_command.call_args[0]
            assert command_dict['tuple'].__class__ == tuple
    def test_emit_with_stream(self):
        expected_command_dict = {
            'anchors': [],
            'stream': mock.sentinel.stream,
            'tuple': (1, 2, 3),
        }
        with self._test_emit_helper(expected_command_dict):
            self.instance.emit((1, 2, 3), stream=mock.sentinel.stream)
    def test_emit_with_anchors(self):
        # Anchors are sent as the ids of the anchor tuples.
        expected_command_dict = {
            'anchors': [4, 5, 6],
            'tuple': (1, 2, 3),
        }
        anchors = [mock.Mock(id=i) for i in (4, 5, 6)]
        with self._test_emit_helper(expected_command_dict):
            self.instance.emit((1, 2, 3), anchors=anchors)
    def test_emit_with_direct_task(self):
        expected_command_dict = {
            'anchors': [],
            'task': mock.sentinel.direct_task,
            'tuple': (1, 2, 3),
        }
        with self._test_emit_helper(expected_command_dict):
            self.instance.emit((1, 2, 3), direct_task=mock.sentinel.direct_task)
    def test_emit_with_bad_values(self):
        # Non-sequence payloads are rejected with an AssertionError.
        with pytest.raises(AssertionError):
            self.instance.emit("not-a-list-or-tuple")
class TestSimpleBolt(ComponentTestCase):
    """Unit tests for SimpleBolt's tuple dispatch (tick/heartbeat/data)."""
    INSTANCE_CLS = SimpleBolt
    # Canned tuples for each of the three dispatch paths.
    TICK = StormTuple(None, '__system', '__tick', None, None)
    HEARTBEAT = StormTuple(None, None, '__heartbeat', -1, [])
    TUPLE = StormTuple(None, None, None, None, None)
    @pytest.fixture(autouse=True)
    def setup_mocks(self, request):
        # Patch the bolt's hooks for every test; the finalizer undoes the
        # patching after each test runs.
        patches = mock.patch.multiple(self.instance, process_tick=mock.DEFAULT,
                                      process_tuple=mock.DEFAULT, ack=mock.DEFAULT,
                                      sync=mock.DEFAULT)
        request.addfinalizer(lambda: patches.__exit__(None, None, None))
        values = patches.__enter__()
        self.mock_process_tick = values['process_tick']
        self.mock_process_tuple = values['process_tuple']
        self.mock_ack = values['ack']
        self.mock_sync = values['sync']
    def test_ack(self):
        # Both tick and data tuples are acked after processing.
        self.instance._process_tuple(self.TICK)
        self.instance._process_tuple(self.TUPLE)
        self.mock_ack.assert_has_calls([
            mock.call(self.TICK),
            mock.call(self.TUPLE),
        ])
    def test_tick(self):
        # Tick tuples go to process_tick, never to process_tuple.
        self.instance._process_tuple(self.TICK)
        self.mock_process_tick.assert_called_once_with()
        assert not self.mock_process_tuple.called
    def test_heartbeat(self):
        # Heartbeats only trigger sync; no processing, no ack.
        self.instance._process_tuple(self.HEARTBEAT)
        self.mock_sync.assert_called_once_with()
        assert not self.mock_process_tuple.called
        assert not self.mock_ack.called
    def test_tuple(self):
        # Data tuples go to process_tuple, never to process_tick.
        self.instance._process_tuple(self.TUPLE)
        assert not self.mock_process_tick.called
        self.mock_process_tuple.assert_called_once_with(self.TUPLE)
    def test_exception(self):
        # Exceptions raised by the hooks must propagate out of dispatch.
        class MyException(Exception): pass
        self.mock_process_tick.side_effect = MyException()
        self.mock_process_tuple.side_effect = MyException()
        with pytest.raises(MyException):
            self.instance._process_tuple(self.TICK)
        with pytest.raises(MyException):
            self.instance._process_tuple(self.TUPLE)
|
{
"content_hash": "1c424b1dac96ec0d3e4e4330e1a87089",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 102,
"avg_line_length": 33.43478260869565,
"alnum_prop": 0.600346770697876,
"repo_name": "jirafe/pyleus",
"id": "cfed823164f24a4cebd9106b579ecc7492235fe5",
"size": "6921",
"binary": false,
"copies": "8",
"ref": "refs/heads/develop",
"path": "tests/storm/bolt_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "29072"
},
{
"name": "Makefile",
"bytes": "602"
},
{
"name": "Python",
"bytes": "126992"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from collections import namedtuple
from django.conf.urls import include, url
from django.test import TestCase
from rest_framework.compat import Resolver404, make_url_resolver
from rest_framework.test import APIRequestFactory
from rest_framework.urlpatterns import format_suffix_patterns
# A container class for test paths for the test case
URLTestPath = namedtuple('URLTestPath', ['path', 'args', 'kwargs'])
def dummy_view(request, *args, **kwargs):
    """No-op view callable; the URL tests only need a resolution target."""
    return None
class FormatSuffixTests(TestCase):
    """
    Tests `format_suffix_patterns` against different URLPatterns to ensure the
    URLs still resolve properly, including any captured parameters.
    """
    def _resolve_urlpatterns(self, urlpatterns, test_paths):
        # Apply the suffix transformation, then assert that every test path
        # resolves with the expected positional args and kwargs.
        factory = APIRequestFactory()
        try:
            urlpatterns = format_suffix_patterns(urlpatterns)
        except Exception:
            self.fail("Failed to apply `format_suffix_patterns` on the supplied urlpatterns")
        resolver = make_url_resolver(r'^/', urlpatterns)
        for test_path in test_paths:
            request = factory.get(test_path.path)
            try:
                callback, callback_args, callback_kwargs = resolver.resolve(request.path_info)
            except Exception:
                self.fail("Failed to resolve URL: %s" % request.path_info)
            assert callback_args == test_path.args
            assert callback_kwargs == test_path.kwargs
    def test_trailing_slash(self):
        # Patterns ending in a slash accept the suffix either before or after
        # the slash, but not with a bare './' separator; the boolean in each
        # entry below is whether the path is expected to resolve at all.
        factory = APIRequestFactory()
        urlpatterns = format_suffix_patterns([
            url(r'^test/$', dummy_view),
        ])
        resolver = make_url_resolver(r'^/', urlpatterns)
        test_paths = [
            (URLTestPath('/test.api', (), {'format': 'api'}), True),
            (URLTestPath('/test/.api', (), {'format': 'api'}), False),
            (URLTestPath('/test.api/', (), {'format': 'api'}), True),
        ]
        for test_path, expected_resolved in test_paths:
            request = factory.get(test_path.path)
            try:
                callback, callback_args, callback_kwargs = resolver.resolve(request.path_info)
            except Resolver404:
                callback, callback_args, callback_kwargs = (None, None, None)
            if not expected_resolved:
                assert callback is None
                continue
            assert callback_args == test_path.args
            assert callback_kwargs == test_path.kwargs
    def test_format_suffix(self):
        # The suffix is captured as a 'format' kwarg; its absence is allowed.
        urlpatterns = [
            url(r'^test$', dummy_view),
        ]
        test_paths = [
            URLTestPath('/test', (), {}),
            URLTestPath('/test.api', (), {'format': 'api'}),
            URLTestPath('/test.asdf', (), {'format': 'asdf'}),
        ]
        self._resolve_urlpatterns(urlpatterns, test_paths)
    def test_default_args(self):
        # Extra default kwargs on the pattern are merged with the captured
        # 'format' kwarg.
        urlpatterns = [
            url(r'^test$', dummy_view, {'foo': 'bar'}),
        ]
        test_paths = [
            URLTestPath('/test', (), {'foo': 'bar', }),
            URLTestPath('/test.api', (), {'foo': 'bar', 'format': 'api'}),
            URLTestPath('/test.asdf', (), {'foo': 'bar', 'format': 'asdf'}),
        ]
        self._resolve_urlpatterns(urlpatterns, test_paths)
    def test_included_urls(self):
        # Suffixing must recurse into include()d pattern lists and still
        # merge the parent's default kwargs.
        nested_patterns = [
            url(r'^path$', dummy_view)
        ]
        urlpatterns = [
            url(r'^test/', include(nested_patterns), {'foo': 'bar'}),
        ]
        test_paths = [
            URLTestPath('/test/path', (), {'foo': 'bar', }),
            URLTestPath('/test/path.api', (), {'foo': 'bar', 'format': 'api'}),
            URLTestPath('/test/path.asdf', (), {'foo': 'bar', 'format': 'asdf'}),
        ]
        self._resolve_urlpatterns(urlpatterns, test_paths)
|
{
"content_hash": "fbbe87342f342310d2212e8952661ec4",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 94,
"avg_line_length": 37.64356435643565,
"alnum_prop": 0.5725933719095213,
"repo_name": "dmwyatt/django-rest-framework",
"id": "7320de4793efd414c45f78574064fa9f4a6a23a4",
"size": "3802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_urlpatterns.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "39327"
},
{
"name": "HTML",
"bytes": "81387"
},
{
"name": "JavaScript",
"bytes": "18201"
},
{
"name": "Python",
"bytes": "1182250"
}
],
"symlink_target": ""
}
|
from .main import Lightning
from .session import Session
from .visualization import Visualization, VisualizationLocal
from .types.plots import *
from .types.images import *
from .types.streaming import *
from .types.three import *
__version__ = "1.1.1"
|
{
"content_hash": "f7045ad9dc34458dde90e8fe63c3a7d0",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 60,
"avg_line_length": 28.22222222222222,
"alnum_prop": 0.7716535433070866,
"repo_name": "garretstuber/lightning-python",
"id": "e9616ef22c86f95575c9eee903f5271e2ae9c3ba",
"size": "254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lightning/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1202"
},
{
"name": "JavaScript",
"bytes": "3029809"
},
{
"name": "Python",
"bytes": "70844"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
def highway(input_, size, layer_size=1, bias=-2, f=tf.nn.relu):
    """Highway Network (cf. http://arxiv.org/abs/1505.00387).

    t = sigmoid(Wy + b)
    z = t * g(Wy + b) + (1 - t) * y

    where g is nonlinearity, t is transform gate, and (1 - t) is carry gate.
    The negative default ``bias`` shifts the sigmoid towards 0, i.e. the
    layers initially favour the carry path.
    """
    output = input_
    for idx in range(layer_size):
        # NOTE(review): tf.nn.rnn_cell._linear is a private TF helper that was
        # removed in later TensorFlow releases -- confirm the pinned TF
        # version still provides it before upgrading.
        output = f(tf.nn.rnn_cell._linear(output, size, 0, scope='output_lin_%d' % idx))
        transform_gate = tf.sigmoid(tf.nn.rnn_cell._linear(input_, size, 0, scope='transform_lin_%d' % idx) + bias)
        carry_gate = 1. - transform_gate
        # NOTE(review): for layer_size > 1 both the transform gate and the
        # carry term are computed from the original input_, not the previous
        # layer's output -- this matches the code as written but differs from
        # stacked highway layers in the paper; confirm intended.
        output = transform_gate * output + carry_gate * input_
    return output
def last_relevant(output, length):
    """Pick, for each sequence in the batch, the output of its last valid step.

    ``output`` is a (batch, time, features) tensor and ``length`` holds the
    per-sequence valid lengths; the row-major flatten plus the
    ``b * max_length + (length - 1)`` index selects element ``length-1`` of
    every sequence.
    """
    num_seqs = tf.shape(output)[0]
    num_steps = tf.shape(output)[1]
    feature_size = int(output.get_shape()[2])
    flattened = tf.reshape(output, [-1, feature_size])
    last_index = tf.range(0, num_seqs) * num_steps + (length - 1)
    return tf.nn.embedding_lookup(flattened, last_index)
|
{
"content_hash": "e4b7124290887d5795385e5c81b5f47c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 115,
"avg_line_length": 33.93333333333333,
"alnum_prop": 0.6110019646365422,
"repo_name": "windowsyuli/cross_domain",
"id": "815f8b5a0c36fbaf540d65a7368977f9c9972add",
"size": "1018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neuralnet/ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "50032"
},
{
"name": "Makefile",
"bytes": "718"
},
{
"name": "Python",
"bytes": "120906"
},
{
"name": "Shell",
"bytes": "10170"
}
],
"symlink_target": ""
}
|
"""Unit tests for qrscp.py QR get service."""
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import time
import pytest
from pydicom import dcmread, Dataset
from pydicom.uid import (
ExplicitVRLittleEndian, ImplicitVRLittleEndian,
DeflatedExplicitVRLittleEndian, ExplicitVRBigEndian
)
from pynetdicom import (
AE, evt, debug_logger, DEFAULT_TRANSFER_SYNTAXES, build_role
)
from pynetdicom.sop_class import (
CTImageStorage, PatientRootQueryRetrieveInformationModelGet
)
#debug_logger()
APP_DIR = os.path.join(os.path.dirname(__file__), '../')
APP_FILE = os.path.join(APP_DIR, 'qrscp', 'qrscp.py')
DATA_DIR = os.path.join(APP_DIR, '../', 'tests', 'dicom_files')
def which(program):
    """Return the path to an executable *program*, or ``None`` if not found.

    Mirrors the ``which`` shell utility: a name containing a directory
    component is checked directly, otherwise every entry of ``PATH`` is
    searched for an executable regular file.
    """
    # shutil.which implements exactly this search (plus PATHEXT resolution on
    # Windows) and, unlike the previous hand-rolled walk, makes the
    # not-found None return explicit rather than an implicit fall-through.
    return shutil.which(program)
def start_qrscp(args):
    """Launch qrscp.py directly with *args* and return the Popen handle."""
    command = [which('python'), APP_FILE]
    command.extend(args)
    return subprocess.Popen(command)
def start_qrscp_cli(args):
    """Launch qrscp via ``python -m pynetdicom qrscp`` and return the Popen."""
    command = [which('python'), '-m', 'pynetdicom', 'qrscp']
    command.extend(args)
    return subprocess.Popen(command)
def _send_datasets():
    """Push the test DICOM files to the running SCP via the storescu app.

    Fire-and-forget: the caller is responsible for sleeping long enough for
    the transfer to finish.
    """
    command = [which('python'), '-m', 'pynetdicom', 'storescu',
               'localhost', '11112', DATA_DIR, '-cx']
    subprocess.Popen(command)
class GetSCPBase(object):
    """Base test case for the qrscp.py QR get service.

    Subclasses assign ``self.func`` to the launcher used to start the SCP
    (direct script vs ``python -m pynetdicom``).
    """
    def setup(self):
        """Run prior to each test"""
        # ae/p are populated by the test and cleaned up in teardown(); the
        # temp file backs the SCP database, the temp dir stores instances.
        self.ae = None
        self.p = None
        self.func = None
        self.tfile = tempfile.NamedTemporaryFile()
        self.db_location = self.tfile.name
        self.instance_location = tempfile.TemporaryDirectory()
    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()
        if self.p:
            self.p.kill()
            self.p.wait(timeout=5)
    def test_basic(self):
        """Test basic operation of the QR get service."""
        # Start the SCP subprocess, then give it -- and the subsequent
        # dataset upload -- a moment before associating.
        self.p = p = self.func([
            '--database-location', self.db_location,
            '--instance-location', self.instance_location.name,
            '-d'
        ])
        time.sleep(1)
        _send_datasets()
        time.sleep(1)
        query = Dataset()
        query.QueryRetrieveLevel = 'PATIENT'
        query.PatientID = '1CT1'
        datasets = []
        def handle_store(event):
            # Collect every dataset the SCP sends back over this association.
            datasets.append(event.dataset)
            return 0x0000
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        model = PatientRootQueryRetrieveInformationModelGet
        ae.add_requested_context(model)
        ae.add_requested_context(CTImageStorage)
        # C-GET delivers instances over the same association, so the
        # requestor must negotiate the Storage SCP role for CTImageStorage.
        role = build_role(CTImageStorage, scp_role=True)
        assoc = ae.associate(
            'localhost', 11112, ext_neg=[role],
            evt_handlers=[(evt.EVT_C_STORE, handle_store)]
        )
        assert assoc.is_established
        responses = assoc.send_c_get(query, model)
        # Expect one pending (0xFF00) status while the sub-operation runs,
        # then the final success (0x0000), then exhaustion.
        status, ds = next(responses)
        assert status.Status == 0xFF00
        assert ds is None
        status, ds = next(responses)
        assert status.Status == 0x0000
        assert ds is None
        pytest.raises(StopIteration, next, responses)
        assoc.release()
        p.terminate()
        p.wait()
        assert 1 == len(datasets)
        assert "CompressedSamples^CT1" == datasets[0].PatientName
class TestGetSCP(GetSCPBase):
    """Tests for qrscp.py launched directly as a script."""
    def setup(self):
        """Run prior to each test."""
        # GetSCPBase.setup() already resets ae/p and the temp locations;
        # the redundant re-assignments of self.ae/self.p were removed.
        # Only the launcher differs between subclasses.
        super().setup()
        self.func = start_qrscp
class TestGetSCPCLI(GetSCPBase):
    """Tests for qrscp launched through the ``python -m pynetdicom`` CLI."""
    def setup(self):
        """Run prior to each test."""
        # GetSCPBase.setup() already resets ae/p and the temp locations;
        # the redundant re-assignments of self.ae/self.p were removed.
        super().setup()
        self.func = start_qrscp_cli
|
{
"content_hash": "d67b67df3361c5ab2f6cdb545fd2b5d4",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 78,
"avg_line_length": 26.385093167701864,
"alnum_prop": 0.5972222222222222,
"repo_name": "scaramallion/pynetdicom3",
"id": "a5d61758c6c29f9f57b99c5f8bcb689beb7d577e",
"size": "4248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pynetdicom/apps/tests/test_qrscp_get.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2135993"
},
{
"name": "Shell",
"bytes": "7504"
}
],
"symlink_target": ""
}
|
'''
Convert back and forth between the molecule (open boundary) and the 0D PBC
system.
'''

import numpy
from pyscf import gto, scf
from pyscf.pbc import gto as pbcgto
from pyscf.pbc import scf as pbcscf
from pyscf.pbc import df

# Reference calculation: an N2 dimer in a 0-dimensional PBC cell, solved
# with RHF using analytic Fourier-transform density fitting.
cell = pbcgto.Cell()
cell.atom = 'N 0 0 0; N 0 0 1.2'
cell.basis = 'gth-dzvp'
cell.pseudo = 'gth-pade'
cell.a = numpy.eye(3)
cell.dimension = 0
cell.symmetry = True
cell.build()

mf = pbcscf.RHF(cell)
mf.with_df = df.AFTDF(cell)
mf.run()
print('E(HF) with 0D PBC RHF calculation %s' % mf.e_tot)

#
# Convert cell to mol.
#
# Except lattice vectors, the mole object inherits all parameters from the
# cell object, like geometry, basis sets, and pseudopotential. Using the
# generated mol object with molecular code, it should produce the same results
# as the 0D PBC calculation
#
mol = cell.to_mol()
mf = scf.RHF(mol).run()
print('E(HF) with molecular RHF calculation %s' % mf.e_tot)

# Cell and Mole have almost the same structure. If cell was fed to the
# molecular functions, the code is able to handle the cell without any
# errors. However, due to the different treatments of nuclear repulsion
# energy, a small discrepancy will be found in the total energy.
mf = scf.RHF(cell).run()
print('E(HF) of molecular RHF with cell %s' % mf.e_tot)

#
# Convert mol back to cell.
#
# The mol object above contains all information of the pbc system which was
# initialized at the beginning. Using the "view" method to convert mol back to
# the cell object, all information can be transfer to the resultant cell
# object. Lattice vectors "a" are not available in the mole object. It needs
# to be specified in the cell.
#
cell_0D = mol.view(pbcgto.Cell)
cell_0D.a = numpy.eye(3)
cell_0D.dimension = 0
# BUG FIX: run the SCF on the round-tripped cell_0D.  The original code reran
# pbcscf.RHF(cell), which left cell_0D entirely unused and never exercised the
# mol -> cell conversion this section documents.
mf = pbcscf.RHF(cell_0D).density_fit().run()
print('E(HF) with 0D PBC RHF calculation %s' % mf.e_tot)
|
{
"content_hash": "7340b69d19e9b1748408be10e39ea110",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 78,
"avg_line_length": 31.46551724137931,
"alnum_prop": 0.7276712328767123,
"repo_name": "sunqm/pyscf",
"id": "d5fd3b1d3ec8c2913073cdc77b5a6d28c61d4845",
"size": "1848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/pbc/31-pbc_0D_as_mol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2805171"
},
{
"name": "CMake",
"bytes": "19597"
},
{
"name": "Common Lisp",
"bytes": "40515"
},
{
"name": "Dockerfile",
"bytes": "447"
},
{
"name": "Makefile",
"bytes": "6797"
},
{
"name": "Python",
"bytes": "19630497"
},
{
"name": "Roff",
"bytes": "429"
},
{
"name": "Shell",
"bytes": "6564"
}
],
"symlink_target": ""
}
|
class create_gist:
    """Create GitHub gists through the v3 REST API.

    NOTE(review): the class name is lower_snake_case rather than PascalCase;
    left unchanged for backward compatibility with existing callers.
    """
    def create_anonymous(self, fields):
        """Create a gist without authentication.

        BUG FIX: this was previously a second ``def create`` whose definition
        was silently shadowed by the authenticated ``create`` below (Python
        has no method overloading), making the anonymous path dead code.
        Renamed so it is reachable again; the live ``create(key, fields)``
        interface is unchanged.

        fields: JSON-serialisable gist payload per the GitHub gists API.
        Returns the decoded JSON response.
        """
        r = requests.post("https://api.github.com/gists",
                          data=json.dumps(fields),
                          headers={'user-agent': client.name})
        return r.json()

    def create(self, key, fields):
        """Create a gist authenticated with OAuth token *key*.

        fields: JSON-serialisable gist payload per the GitHub gists API.
        Returns the decoded JSON response.
        """
        r = requests.post("https://api.github.com/gists",
                          data=json.dumps(fields),
                          headers={'user-agent': client.name},
                          auth=(key, 'x-oauth-basic'))
        return r.json()
|
{
"content_hash": "d2dabcb8101a322e5c7d24cb03412dcb",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 40.92307692307692,
"alnum_prop": 0.48120300751879697,
"repo_name": "eightnoteight/x-gist",
"id": "ff303b02e7d93bf7d01287fc91065d8a36dab788",
"size": "579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gist/create_gist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4154"
}
],
"symlink_target": ""
}
|
"""Tests for the IPython tab-completion machinery."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import sys
import textwrap
import unittest
from contextlib import contextmanager
import nose.tools as nt
from traitlets.config.loader import Config
from IPython import get_ipython
from IPython.core import completer
from IPython.external import decorators
from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
from IPython.utils.generics import complete_object
from IPython.testing import decorators as dec
from IPython.core.completer import (
Completion,
provisionalcompleter,
match_dict_keys,
_deduplicate_completions,
)
from nose.tools import assert_in, assert_not_in
# -----------------------------------------------------------------------------
# Test functions
# -----------------------------------------------------------------------------
@contextmanager
def greedy_completion():
    """Temporarily switch the active IPython completer into greedy mode."""
    shell = get_ipython()
    saved = shell.Completer.greedy
    shell.Completer.greedy = True
    try:
        yield
    finally:
        # Always restore the user's original setting, even on error.
        shell.Completer.greedy = saved
def test_protect_filename():
    """protect_filename() escapes shell-special characters per platform:
    whole-string double-quoting on Windows, backslash escapes on posix.
    """
    if sys.platform == "win32":
        pairs = [
            ("abc", "abc"),
            (" abc", '" abc"'),
            ("a bc", '"a bc"'),
            ("a  bc", '"a  bc"'),
            ("  bc", '"  bc"'),
        ]
    else:
        pairs = [
            ("abc", "abc"),
            (" abc", r"\ abc"),
            ("a bc", r"a\ bc"),
            ("a  bc", r"a\ \ bc"),
            ("  bc", r"\ \ bc"),
            # On posix, we also protect parens and other special characters.
            ("a(bc", r"a\(bc"),
            ("a)bc", r"a\)bc"),
            ("a( )bc", r"a\(\ \)bc"),
            ("a[1]bc", r"a\[1\]bc"),
            ("a{1}bc", r"a\{1\}bc"),
            ("a#bc", r"a\#bc"),
            ("a?bc", r"a\?bc"),
            ("a=bc", r"a\=bc"),
            ("a\\bc", r"a\\bc"),
            ("a|bc", r"a\|bc"),
            ("a;bc", r"a\;bc"),
            ("a:bc", r"a\:bc"),
            ("a'bc", r"a\'bc"),
            ("a*bc", r"a\*bc"),
            ('a"bc', r"a\"bc"),
            ("a^bc", r"a\^bc"),
            ("a&bc", r"a\&bc"),
        ]
    # run the actual tests
    for s1, s2 in pairs:
        s1p = completer.protect_filename(s1)
        nt.assert_equal(s1p, s2)
def check_line_split(splitter, test_specs):
    """Assert *splitter* splits each (part1, part2, expected) spec correctly.

    The cursor is placed at the end of ``part1`` within the joined line, as
    if the user hit tab there.
    """
    for head, tail, expected in test_specs:
        result = splitter.split_line(head + tail, len(head))
        nt.assert_equal(result, expected)
def test_line_split():
    """Basic line splitter test with default specs."""
    sp = completer.CompletionSplitter()
    # The format of the test specs is: part1, part2, expected answer.  Parts 1
    # and 2 are joined into the 'line' sent to the splitter, as if the cursor
    # was at the end of part1.  So an empty part2 represents someone hitting
    # tab at the end of the line, the most common case.
    t = [
        ("run some/scrip", "", "some/scrip"),
        ("run scripts/er", "ror.py foo", "scripts/er"),
        ("echo $HOM", "", "HOM"),
        ("print sys.pa", "", "sys.pa"),
        ("print(sys.pa", "", "sys.pa"),
        ("execfile('scripts/er", "", "scripts/er"),
        ("a[x.", "", "x."),
        ("a[x.", "y", "x."),
        ('cd "some_file/', "", "some_file/"),
    ]
    check_line_split(sp, t)
    # Ensure splitting works OK with unicode by re-running the tests with
    # all inputs turned into unicode
    check_line_split(sp, [map(str, p) for p in t])
class NamedInstanceMetaclass(type):
    """Metaclass adding ``Cls[name]`` lookup, delegated to ``Cls.get_instance``."""

    def __getitem__(cls, item):
        # Indexing the class retrieves a previously registered named instance.
        return cls.get_instance(item)
class NamedInstanceClass(metaclass=NamedInstanceMetaclass):
    """Registry class: every instance is recorded under its *name*.

    Exposes ``_ipython_key_completions_`` so dict-key completion can list
    the registered names.
    """

    def __init__(self, name):
        # Create the shared registry lazily on first instantiation.
        if not hasattr(self.__class__, "instances"):
            self.__class__.instances = {}
        self.__class__.instances[name] = self

    @classmethod
    def _ipython_key_completions_(cls):
        return cls.instances.keys()

    @classmethod
    def get_instance(cls, name):
        return cls.instances[name]
class KeyCompletable:
    """Minimal object whose key completions are the *things* it was given."""

    def __init__(self, things=()):
        self.things = things

    def _ipython_key_completions_(self):
        return list(self.things)
class TestCompleter(unittest.TestCase):
    """General behaviour of the IPython completer: custom completers,
    unicode handling, and forward/backward latex symbol completion.
    """
    def setUp(self):
        """
        We want to silence all PendingDeprecationWarning when testing the completer
        """
        self._assertwarns = self.assertWarns(PendingDeprecationWarning)
        self._assertwarns.__enter__()
    def tearDown(self):
        # assertWarns raises on exit when no warning was actually emitted;
        # tests that trigger none are fine, so swallow that specific failure.
        try:
            self._assertwarns.__exit__(None, None, None)
        except AssertionError:
            pass
    def test_custom_completion_error(self):
        """Test that errors from custom attribute completers are silenced."""
        ip = get_ipython()
        class A:
            pass
        ip.user_ns["x"] = A()
        @complete_object.register(A)
        def complete_A(a, existing_completions):
            raise TypeError("this should be silenced")
        ip.complete("x.")
    def test_custom_completion_ordering(self):
        """Test that the ordering of results from custom completers is kept."""
        ip = get_ipython()
        _, matches = ip.complete('in')
        assert matches.index('input') < matches.index('int')
        def complete_example(a):
            return ['example2', 'example1']
        ip.Completer.custom_completers.add_re('ex*', complete_example)
        _, matches = ip.complete('ex')
        assert matches.index('example2') < matches.index('example1')
    def test_unicode_completions(self):
        """Completing str and unicode inputs must not raise."""
        ip = get_ipython()
        # Some strings that trigger different types of completion. Check them both
        # in str and unicode forms
        s = ["ru", "%ru", "cd /", "floa", "float(x)/"]
        for t in s + list(map(str, s)):
            # We don't need to check exact completion values (they may change
            # depending on the state of the namespace, but at least no exceptions
            # should be thrown and the return value should be a pair of text, list
            # values.
            text, matches = ip.complete(t)
            nt.assert_true(isinstance(text, str))
            nt.assert_true(isinstance(matches, list))
    def test_latex_completions(self):
        """Forward latex completion: \\name expands to its unicode symbol."""
        from IPython.core.latex_symbols import latex_symbols
        import random
        ip = get_ipython()
        # Test some random unicode symbols
        # NOTE(review): random.sample() on a dict view raises TypeError on
        # Python 3.11+; wrap in list() when upgrading the supported versions.
        keys = random.sample(latex_symbols.keys(), 10)
        for k in keys:
            text, matches = ip.complete(k)
            nt.assert_equal(len(matches), 1)
            nt.assert_equal(text, k)
            nt.assert_equal(matches[0], latex_symbols[k])
        # Test a more complex line
        text, matches = ip.complete("print(\\alpha")
        nt.assert_equal(text, "\\alpha")
        nt.assert_equal(matches[0], latex_symbols["\\alpha"])
        # Test multiple matching latex symbols
        text, matches = ip.complete("\\al")
        nt.assert_in("\\alpha", matches)
        nt.assert_in("\\aleph", matches)
    def test_latex_no_results(self):
        """
        forward latex should really return nothing in either field if nothing is found.
        """
        ip = get_ipython()
        text, matches = ip.Completer.latex_matches("\\really_i_should_match_nothing")
        nt.assert_equal(text, "")
        nt.assert_equal(matches, [])
    def test_back_latex_completion(self):
        """Reverse completion of a unicode symbol to its latex name."""
        ip = get_ipython()
        # do not return more than 1 matches for \beta, only the latex one.
        name, matches = ip.complete("\\β")
        nt.assert_equal(matches, ['\\beta'])
    def test_back_unicode_completion(self):
        """Reverse completion of a unicode symbol to its unicode name."""
        ip = get_ipython()
        name, matches = ip.complete("\\Ⅴ")
        nt.assert_equal(matches, ["\\ROMAN NUMERAL FIVE"])
    def test_forward_unicode_completion(self):
        """Forward completion of a unicode name to the symbol itself."""
        ip = get_ipython()
        name, matches = ip.complete("\\ROMAN NUMERAL FIVE")
        nt.assert_equal(len(matches), 1)
        nt.assert_equal(matches[0], "Ⅴ")
    @nt.nottest  # now we have a completion for \jmath
    @decorators.knownfailureif(
        sys.platform == "win32", "Fails if there is a C:\\j... path"
    )
    def test_no_ascii_back_completion(self):
        """Backslash + plain ascii letter must not produce completions."""
        ip = get_ipython()
        with TemporaryWorkingDirectory():  # Avoid any filename completions
            # single ascii letter that don't have yet completions
            for letter in "jJ":
                name, matches = ip.complete("\\" + letter)
                nt.assert_equal(matches, [])
class CompletionSplitterTestCase(unittest.TestCase):
    def setUp(self):
        # Each test gets a fresh, default-configured CompletionSplitter.
        self.sp = completer.CompletionSplitter()
    def test_delim_setting(self):
        """Assigning delims updates both the stored value and the regex.

        ``_delim_expr`` is the splitter's internal character-class pattern
        derived from the delimiter string.
        """
        self.sp.delims = " "
        nt.assert_equal(self.sp.delims, " ")
        nt.assert_equal(self.sp._delim_expr, r"[\ ]")
def test_spaces(self):
"""Test with only spaces as split chars."""
self.sp.delims = " "
t = [("foo", "", "foo"), ("run foo", "", "foo"), ("run foo", "bar", "foo")]
check_line_split(self.sp, t)
def test_has_open_quotes1(self):
for s in ["'", "'''", "'hi' '"]:
nt.assert_equal(completer.has_open_quotes(s), "'")
def test_has_open_quotes2(self):
for s in ['"', '"""', '"hi" "']:
nt.assert_equal(completer.has_open_quotes(s), '"')
def test_has_open_quotes3(self):
for s in ["''", "''' '''", "'hi' 'ipython'"]:
nt.assert_false(completer.has_open_quotes(s))
def test_has_open_quotes4(self):
for s in ['""', '""" """', '"hi" "ipython"']:
nt.assert_false(completer.has_open_quotes(s))
    @decorators.knownfailureif(
        sys.platform == "win32", "abspath completions fail on Windows"
    )
    def test_abspath_file_completions(self):
        """Completing an absolute path prefix lists the matching files,
        both bare and inside a function-call context.
        """
        ip = get_ipython()
        with TemporaryDirectory() as tmpdir:
            prefix = os.path.join(tmpdir, "foo")
            suffixes = ["1", "2"]
            names = [prefix + s for s in suffixes]
            for n in names:
                open(n, "w").close()
            # Check simple completion
            c = ip.complete(prefix)[1]
            nt.assert_equal(c, names)
            # Now check with a function call
            cmd = 'a = f("%s' % prefix
            c = ip.complete(prefix, cmd)[1]
            comp = [prefix + s for s in suffixes]
            nt.assert_equal(c, comp)
    def test_local_file_completions(self):
        """Completing a relative ./ prefix lists the matching local files.

        The function-call case only checks subset membership since other
        matches may legitimately appear alongside the files.
        """
        ip = get_ipython()
        with TemporaryWorkingDirectory():
            prefix = "./foo"
            suffixes = ["1", "2"]
            names = [prefix + s for s in suffixes]
            for n in names:
                open(n, "w").close()
            # Check simple completion
            c = ip.complete(prefix)[1]
            nt.assert_equal(c, names)
            # Now check with a function call
            cmd = 'a = f("%s' % prefix
            c = ip.complete(prefix, cmd)[1]
            comp = {prefix + s for s in suffixes}
            nt.assert_true(comp.issubset(set(c)))
    def test_quoted_file_completions(self):
        """Filenames containing a quote are escaped depending on how the
        completion context is quoted (single, double, or unquoted).
        """
        ip = get_ipython()
        with TemporaryWorkingDirectory():
            name = "foo'bar"
            open(name, "w").close()
            # Don't escape Windows
            escaped = name if sys.platform == "win32" else "foo\\'bar"
            # Single quote matches embedded single quote
            text = "open('foo"
            c = ip.Completer._complete(
                cursor_line=0, cursor_pos=len(text), full_text=text
            )[1]
            nt.assert_equal(c, [escaped])
            # Double quote requires no escape
            text = 'open("foo'
            c = ip.Completer._complete(
                cursor_line=0, cursor_pos=len(text), full_text=text
            )[1]
            nt.assert_equal(c, [name])
            # No quote requires an escape
            text = "%ls foo"
            c = ip.Completer._complete(
                cursor_line=0, cursor_pos=len(text), full_text=text
            )[1]
            nt.assert_equal(c, [escaped])
    def test_all_completions_dups(self):
        """
        Make sure the output of `IPCompleter.all_completions` does not have
        duplicated prefixes.
        """
        ip = get_ipython()
        c = ip.Completer
        ip.ex("class TestClass():\n\ta=1\n\ta1=2")
        # Exercise both the jedi and the non-jedi code paths.
        for jedi_status in [True, False]:
            with provisionalcompleter():
                ip.Completer.use_jedi = jedi_status
                matches = c.all_completions("TestCl")
                assert matches == ['TestClass'], jedi_status
                matches = c.all_completions("TestClass.")
                assert len(matches) > 2, jedi_status
                matches = c.all_completions("TestClass.a")
                assert matches == ['TestClass.a', 'TestClass.a1'], jedi_status
    def test_jedi(self):
        """
        A couple of issue we had with Jedi
        """
        # NOTE(review): this is a nose-style yield-based generator test;
        # pytest >= 4 no longer collects these -- confirm the test runner in
        # use before relying on it.
        ip = get_ipython()
        def _test_complete(reason, s, comp, start=None, end=None):
            # Default the expected completion range to the cursor position.
            l = len(s)
            start = start if start is not None else l
            end = end if end is not None else l
            with provisionalcompleter():
                ip.Completer.use_jedi = True
                completions = set(ip.Completer.completions(s, l))
                ip.Completer.use_jedi = False
                assert_in(Completion(start, end, comp), completions, reason)
        def _test_not_complete(reason, s, comp):
            l = len(s)
            with provisionalcompleter():
                ip.Completer.use_jedi = True
                completions = set(ip.Completer.completions(s, l))
                ip.Completer.use_jedi = False
                assert_not_in(Completion(l, l, comp), completions, reason)
        import jedi
        jedi_version = tuple(int(i) for i in jedi.__version__.split(".")[:3])
        # These inference cases only work on jedi newer than 0.10.
        if jedi_version > (0, 10):
            yield _test_complete, "jedi >0.9 should complete and not crash", "a=1;a.", "real"
            yield _test_complete, "can infer first argument", 'a=(1,"foo");a[0].', "real"
            yield _test_complete, "can infer second argument", 'a=(1,"foo");a[1].', "capitalize"
            yield _test_complete, "cover duplicate completions", "im", "import", 0, 2
        yield _test_not_complete, "does not mix types", 'a=(1,"foo");a[0].', "capitalize"
    def test_completion_have_signature(self):
        """
        Lets make sure jedi is capable of pulling out the signature of the function we are completing.
        """
        ip = get_ipython()
        with provisionalcompleter():
            ip.Completer.use_jedi = True
            completions = ip.Completer.completions("ope", 3)
            c = next(completions)  # should be `open`
            ip.Completer.use_jedi = False
        # open()'s signature mentions both 'file' and 'encoding'.
        assert "file" in c.signature, "Signature of function was not found by completer"
        assert (
            "encoding" in c.signature
        ), "Signature of function was not found by completer"
    def test_deduplicate_completions(self):
        """
        Test that completions are correctly deduplicated (even if ranges are not the same)
        """
        ip = get_ipython()
        ip.ex(
            textwrap.dedent(
                """
        class Z:
            zoo = 1
        """
            )
        )
        with provisionalcompleter():
            ip.Completer.use_jedi = True
            l = list(
                _deduplicate_completions("Z.z", ip.Completer.completions("Z.z", 3))
            )
            ip.Completer.use_jedi = False
        # Only the single 'zoo' attribute should survive deduplication.
        assert len(l) == 1, "Completions (Z.z<tab>) correctly deduplicate: %s " % l
        assert l[0].text == "zoo"  # and not `it.accumulate`
    def test_greedy_completions(self):
        """
        Test the capability of the Greedy completer.
        Most of the test here does not really show off the greedy completer, for proof
        each of the text below now pass with Jedi. The greedy completer is capable of more.
        See the :any:`test_dict_key_completion_contexts`
        """
        # NOTE(review): yield-based generator test; pytest >= 4 does not
        # collect these -- confirm the runner in use.
        ip = get_ipython()
        ip.ex("a=list(range(5))")
        # Without greedy mode, subscripted access must not complete.
        _, c = ip.complete(".", line="a[0].")
        nt.assert_false(".real" in c, "Shouldn't have completed on a[0]: %s" % c)
        def _(line, cursor_pos, expect, message, completion):
            # Check both the legacy complete() API and the new completions()
            # API under greedy mode.
            with greedy_completion(), provisionalcompleter():
                ip.Completer.use_jedi = False
                _, c = ip.complete(".", line=line, cursor_pos=cursor_pos)
                nt.assert_in(expect, c, message % c)
                ip.Completer.use_jedi = True
                with provisionalcompleter():
                    completions = ip.Completer.completions(line, cursor_pos)
                    nt.assert_in(completion, completions)
        with provisionalcompleter():
            yield _, "a[0].", 5, "a[0].real", "Should have completed on a[0].: %s", Completion(
                5, 5, "real"
            )
            yield _, "a[0].r", 6, "a[0].real", "Should have completed on a[0].r: %s", Completion(
                5, 6, "real"
            )
            yield _, "a[0].from_", 10, "a[0].from_bytes", "Should have completed on a[0].from_: %s", Completion(
                5, 10, "from_bytes"
            )
    def test_omit__names(self):
        """omit__names levels: 0 shows everything, 1 hides dunders,
        2 hides every leading-underscore name -- except when the user
        explicitly types the underscore prefix (ip._x. case).
        """
        # also happens to test IPCompleter as a configurable
        ip = get_ipython()
        ip._hidden_attr = 1
        ip._x = {}
        c = ip.Completer
        ip.ex("ip=get_ipython()")
        cfg = Config()
        cfg.IPCompleter.omit__names = 0
        c.update_config(cfg)
        with provisionalcompleter():
            c.use_jedi = False
            s, matches = c.complete("ip.")
            nt.assert_in("ip.__str__", matches)
            nt.assert_in("ip._hidden_attr", matches)
            # c.use_jedi = True
            # completions = set(c.completions('ip.', 3))
            # nt.assert_in(Completion(3, 3, '__str__'), completions)
            # nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
        cfg = Config()
        cfg.IPCompleter.omit__names = 1
        c.update_config(cfg)
        with provisionalcompleter():
            c.use_jedi = False
            s, matches = c.complete("ip.")
            nt.assert_not_in("ip.__str__", matches)
            # nt.assert_in('ip._hidden_attr', matches)
            # c.use_jedi = True
            # completions = set(c.completions('ip.', 3))
            # nt.assert_not_in(Completion(3,3,'__str__'), completions)
            # nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
        cfg = Config()
        cfg.IPCompleter.omit__names = 2
        c.update_config(cfg)
        with provisionalcompleter():
            c.use_jedi = False
            s, matches = c.complete("ip.")
            nt.assert_not_in("ip.__str__", matches)
            nt.assert_not_in("ip._hidden_attr", matches)
            # c.use_jedi = True
            # completions = set(c.completions('ip.', 3))
            # nt.assert_not_in(Completion(3,3,'__str__'), completions)
            # nt.assert_not_in(Completion(3,3, "_hidden_attr"), completions)
        with provisionalcompleter():
            c.use_jedi = False
            s, matches = c.complete("ip._x.")
            nt.assert_in("ip._x.keys", matches)
            # c.use_jedi = True
            # completions = set(c.completions('ip._x.', 6))
            # nt.assert_in(Completion(6,6, "keys"), completions)
        # Clean up the attributes injected onto the shell at the top.
        del ip._hidden_attr
        del ip._x
def test_limit_to__all__False_ok(self):
"""
Limit to all is deprecated, once we remove it this test can go away.
"""
ip = get_ipython()
c = ip.Completer
c.use_jedi = False
ip.ex("class D: x=24")
ip.ex("d=D()")
cfg = Config()
cfg.IPCompleter.limit_to__all__ = False
c.update_config(cfg)
s, matches = c.complete("d.")
nt.assert_in("d.x", matches)
def test_get__all__entries_ok(self):
class A:
__all__ = ["x", 1]
words = completer.get__all__entries(A())
nt.assert_equal(words, ["x"])
def test_get__all__entries_no__all__ok(self):
class A:
pass
words = completer.get__all__entries(A())
nt.assert_equal(words, [])
    def test_func_kw_completions(self):
        """Keyword-argument names complete inside call parentheses, for both
        user-defined and builtin functions, including tricky quoting.
        """
        ip = get_ipython()
        c = ip.Completer
        c.use_jedi = False
        ip.ex("def myfunc(a=1,b=2): return a+b")
        s, matches = c.complete(None, "myfunc(1,b")
        nt.assert_in("b=", matches)
        # Simulate completing with cursor right after b (pos==10):
        s, matches = c.complete(None, "myfunc(1,b)", 10)
        nt.assert_in("b=", matches)
        s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
        nt.assert_in("b=", matches)
        # builtin function
        s, matches = c.complete(None, "min(k, k")
        nt.assert_in("key=", matches)
    def test_default_arguments_from_docstring(self):
        """Keyword names can be scraped from docstring signatures, including
        cython-style typed parameters and leading whitespace.
        """
        ip = get_ipython()
        c = ip.Completer
        kwd = c._default_arguments_from_docstring("min(iterable[, key=func]) -> value")
        nt.assert_equal(kwd, ["key"])
        # with cython type etc
        kwd = c._default_arguments_from_docstring(
            "Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
        )
        nt.assert_equal(kwd, ["ncall", "resume", "nsplit"])
        # white spaces
        kwd = c._default_arguments_from_docstring(
            "\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
        )
        nt.assert_equal(kwd, ["ncall", "resume", "nsplit"])
def test_line_magics(self):
ip = get_ipython()
c = ip.Completer
s, matches = c.complete(None, "lsmag")
nt.assert_in("%lsmagic", matches)
s, matches = c.complete(None, "%lsmag")
nt.assert_in("%lsmagic", matches)
    def test_cell_magics(self):
        """Cell magics complete both with and without the leading %%."""
        from IPython.core.magic import register_cell_magic
        @register_cell_magic
        def _foo_cellm(line, cell):
            pass
        ip = get_ipython()
        c = ip.Completer
        s, matches = c.complete(None, "_foo_ce")
        nt.assert_in("%%_foo_cellm", matches)
        s, matches = c.complete(None, "%%_foo_ce")
        nt.assert_in("%%_foo_cellm", matches)
    def test_line_cell_magics(self):
        """A combined line+cell magic offers both forms unless %% is explicit."""
        from IPython.core.magic import register_line_cell_magic
        @register_line_cell_magic
        def _bar_cellm(line, cell):
            pass
        ip = get_ipython()
        c = ip.Completer
        # The policy here is trickier, see comments in completion code. The
        # returned values depend on whether the user passes %% or not explicitly,
        # and this will show a difference if the same name is both a line and cell
        # magic.
        s, matches = c.complete(None, "_bar_ce")
        nt.assert_in("%_bar_cellm", matches)
        nt.assert_in("%%_bar_cellm", matches)
        s, matches = c.complete(None, "%_bar_ce")
        nt.assert_in("%_bar_cellm", matches)
        nt.assert_in("%%_bar_cellm", matches)
        # An explicit %% prefix restricts results to the cell-magic form.
        s, matches = c.complete(None, "%%_bar_ce")
        nt.assert_not_in("%_bar_cellm", matches)
        nt.assert_in("%%_bar_cellm", matches)
    def test_magic_completion_order(self):
        """Line magics must be listed before cell magics of the same name."""
        ip = get_ipython()
        c = ip.Completer
        # Test ordering of line and cell magics.
        text, matches = c.complete("timeit")
        nt.assert_equal(matches, ["%timeit", "%%timeit"])
    def test_magic_completion_shadowing(self):
        """A user-namespace name shadows the same-named magic in completions."""
        ip = get_ipython()
        c = ip.Completer
        c.use_jedi = False
        # Before importing matplotlib, %matplotlib magic should be the only option.
        text, matches = c.complete("mat")
        nt.assert_equal(matches, ["%matplotlib"])
        # The newly introduced name should shadow the magic.
        ip.run_cell("matplotlib = 1")
        text, matches = c.complete("mat")
        nt.assert_equal(matches, ["matplotlib"])
        # After removing matplotlib from namespace, the magic should again be
        # the only option.
        del ip.user_ns["matplotlib"]
        text, matches = c.complete("mat")
        nt.assert_equal(matches, ["%matplotlib"])
    def test_magic_completion_shadowing_explicit(self):
        """
        If the user try to complete a shadowed magic, and explicit % start should
        still return the completions.
        """
        ip = get_ipython()
        c = ip.Completer
        # Before importing matplotlib, %matplotlib magic should be the only option.
        text, matches = c.complete("%mat")
        nt.assert_equal(matches, ["%matplotlib"])
        ip.run_cell("matplotlib = 1")
        # After removing matplotlib from namespace, the magic should still be
        # the only option.
        text, matches = c.complete("%mat")
        nt.assert_equal(matches, ["%matplotlib"])
    def test_magic_config(self):
        """%config completes the magic name, configurable classes and traits."""
        ip = get_ipython()
        c = ip.Completer
        s, matches = c.complete(None, "conf")
        nt.assert_in("%config", matches)
        # Configurable class names only appear after the magic name is complete.
        s, matches = c.complete(None, "conf")
        nt.assert_not_in("AliasManager", matches)
        s, matches = c.complete(None, "config ")
        nt.assert_in("AliasManager", matches)
        s, matches = c.complete(None, "%config ")
        nt.assert_in("AliasManager", matches)
        s, matches = c.complete(None, "config Ali")
        nt.assert_list_equal(["AliasManager"], matches)
        s, matches = c.complete(None, "%config Ali")
        nt.assert_list_equal(["AliasManager"], matches)
        s, matches = c.complete(None, "config AliasManager")
        nt.assert_list_equal(["AliasManager"], matches)
        s, matches = c.complete(None, "%config AliasManager")
        nt.assert_list_equal(["AliasManager"], matches)
        # After the dot, the class's configurable traits are offered.
        s, matches = c.complete(None, "config AliasManager.")
        nt.assert_in("AliasManager.default_aliases", matches)
        s, matches = c.complete(None, "%config AliasManager.")
        nt.assert_in("AliasManager.default_aliases", matches)
        s, matches = c.complete(None, "config AliasManager.de")
        nt.assert_list_equal(["AliasManager.default_aliases"], matches)
        s, matches = c.complete(None, "config AliasManager.de")
        nt.assert_list_equal(["AliasManager.default_aliases"], matches)
    def test_magic_color(self):
        """%colors completes the magic name and then the color scheme names."""
        ip = get_ipython()
        c = ip.Completer
        s, matches = c.complete(None, "colo")
        nt.assert_in("%colors", matches)
        s, matches = c.complete(None, "colo")
        nt.assert_not_in("NoColor", matches)
        s, matches = c.complete(None, "%colors")  # No trailing space
        nt.assert_not_in("NoColor", matches)
        s, matches = c.complete(None, "colors ")
        nt.assert_in("NoColor", matches)
        s, matches = c.complete(None, "%colors ")
        nt.assert_in("NoColor", matches)
        s, matches = c.complete(None, "colors NoCo")
        nt.assert_list_equal(["NoColor"], matches)
        s, matches = c.complete(None, "%colors NoCo")
        nt.assert_list_equal(["NoColor"], matches)
def test_match_dict_keys(self):
"""
Test that match_dict_keys works on a couple of use case does return what
expected, and does not crash
"""
delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
keys = ["foo", b"far"]
assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2, ["far"])
assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2, ["far"])
assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2, ["far"])
assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2, ["far"])
assert match_dict_keys(keys, "'", delims=delims) == ("'", 1, ["foo"])
assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1, ["foo"])
assert match_dict_keys(keys, '"', delims=delims) == ('"', 1, ["foo"])
assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1, ["foo"])
match_dict_keys
    def test_dict_key_completion_string(self):
        """Test dictionary key completion for string keys"""
        ip = get_ipython()
        complete = ip.Completer.complete
        ip.user_ns["d"] = {"abc": None}
        # Keys are suggested (quoted as needed) but never the closing bracket.
        # check completion at different stages
        _, matches = complete(line_buffer="d[")
        nt.assert_in("'abc'", matches)
        nt.assert_not_in("'abc']", matches)
        _, matches = complete(line_buffer="d['")
        nt.assert_in("abc", matches)
        nt.assert_not_in("abc']", matches)
        _, matches = complete(line_buffer="d['a")
        nt.assert_in("abc", matches)
        nt.assert_not_in("abc']", matches)
        # check use of different quoting
        _, matches = complete(line_buffer='d["')
        nt.assert_in("abc", matches)
        nt.assert_not_in('abc"]', matches)
        _, matches = complete(line_buffer='d["a')
        nt.assert_in("abc", matches)
        nt.assert_not_in('abc"]', matches)
        # check sensitivity to following context
        _, matches = complete(line_buffer="d[]", cursor_pos=2)
        nt.assert_in("'abc'", matches)
        _, matches = complete(line_buffer="d['']", cursor_pos=3)
        nt.assert_in("abc", matches)
        nt.assert_not_in("abc'", matches)
        nt.assert_not_in("abc']", matches)
        # check multiple solutions are correctly returned and that noise is not
        ip.user_ns["d"] = {
            "abc": None,
            "abd": None,
            "bad": None,
            object(): None,
            5: None,
        }
        _, matches = complete(line_buffer="d['a")
        nt.assert_in("abc", matches)
        nt.assert_in("abd", matches)
        nt.assert_not_in("bad", matches)
        assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
        # check escaping and whitespace
        ip.user_ns["d"] = {"a\nb": None, "a'b": None, 'a"b': None, "a word": None}
        _, matches = complete(line_buffer="d['a")
        nt.assert_in("a\\nb", matches)
        nt.assert_in("a\\'b", matches)
        nt.assert_in('a"b', matches)
        nt.assert_in("a word", matches)
        assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
        # - can complete on non-initial word of the string
        _, matches = complete(line_buffer="d['a w")
        nt.assert_in("word", matches)
        # - understands quote escaping
        _, matches = complete(line_buffer="d['a\\'")
        nt.assert_in("b", matches)
        # - default quoting should work like repr
        _, matches = complete(line_buffer="d[")
        nt.assert_in('"a\'b"', matches)
        # - when opening quote with ", possible to match with unescaped apostrophe
        _, matches = complete(line_buffer="d[\"a'")
        nt.assert_in("b", matches)
        # need to not split at delims that readline won't split at
        if "-" not in ip.Completer.splitter.delims:
            ip.user_ns["d"] = {"before-after": None}
            _, matches = complete(line_buffer="d['before-af")
            nt.assert_in("before-after", matches)
    def test_dict_key_completion_contexts(self):
        """Test expression contexts in which dict key completion occurs"""
        ip = get_ipython()
        complete = ip.Completer.complete
        d = {"abc": None}
        ip.user_ns["d"] = d
        class C:
            data = d
        ip.user_ns["C"] = C
        ip.user_ns["get"] = lambda: d
        def assert_no_completion(**kwargs):
            _, matches = complete(**kwargs)
            nt.assert_not_in("abc", matches)
            nt.assert_not_in("abc'", matches)
            nt.assert_not_in("abc']", matches)
            nt.assert_not_in("'abc'", matches)
            nt.assert_not_in("'abc']", matches)
        def assert_completion(**kwargs):
            _, matches = complete(**kwargs)
            nt.assert_in("'abc'", matches)
            nt.assert_not_in("'abc']", matches)
        # no completion after string closed, even if reopened
        assert_no_completion(line_buffer="d['a'")
        assert_no_completion(line_buffer='d["a"')
        assert_no_completion(line_buffer="d['a' + ")
        assert_no_completion(line_buffer="d['a' + '")
        # completion in non-trivial expressions
        assert_completion(line_buffer="+ d[")
        assert_completion(line_buffer="(d[")
        assert_completion(line_buffer="C.data[")
        # greedy flag
        # NOTE: assert_completion is deliberately redefined here — in greedy
        # mode the whole call expression is part of the match.
        def assert_completion(**kwargs):
            _, matches = complete(**kwargs)
            nt.assert_in("get()['abc']", matches)
        assert_no_completion(line_buffer="get()[")
        with greedy_completion():
            assert_completion(line_buffer="get()[")
            assert_completion(line_buffer="get()['")
            assert_completion(line_buffer="get()['a")
            assert_completion(line_buffer="get()['ab")
            assert_completion(line_buffer="get()['abc")
    def test_dict_key_completion_bytes(self):
        """Test handling of bytes in dict key completion"""
        ip = get_ipython()
        complete = ip.Completer.complete
        ip.user_ns["d"] = {"abc": None, b"abd": None}
        _, matches = complete(line_buffer="d[")
        nt.assert_in("'abc'", matches)
        nt.assert_in("b'abd'", matches)
        # Completion on an explicit bytes prefix is a desired behavior that is
        # not implemented; the block below documents the intended spec.
        if False:  # not currently implemented
            _, matches = complete(line_buffer="d[b")
            nt.assert_in("b'abd'", matches)
            nt.assert_not_in("b'abc'", matches)
            _, matches = complete(line_buffer="d[b'")
            nt.assert_in("abd", matches)
            nt.assert_not_in("abc", matches)
            _, matches = complete(line_buffer="d[B'")
            nt.assert_in("abd", matches)
            nt.assert_not_in("abc", matches)
            _, matches = complete(line_buffer="d['")
            nt.assert_in("abc", matches)
            nt.assert_not_in("abd", matches)
    def test_dict_key_completion_unicode_py3(self):
        """Test handling of unicode in dict key completion"""
        ip = get_ipython()
        complete = ip.Completer.complete
        ip.user_ns["d"] = {"a\u05d0": None}
        # query using escape
        if sys.platform != "win32":
            # Known failure on Windows
            _, matches = complete(line_buffer="d['a\\u05d0")
            nt.assert_in("u05d0", matches)  # tokenized after \\
        # query using character
        _, matches = complete(line_buffer="d['a\u05d0")
        nt.assert_in("a\u05d0", matches)
        with greedy_completion():
            # In greedy mode the full subscript expression is returned.
            # query using escape
            _, matches = complete(line_buffer="d['a\\u05d0")
            nt.assert_in("d['a\\u05d0']", matches)  # tokenized after \\
            # query using character
            _, matches = complete(line_buffer="d['a\u05d0")
            nt.assert_in("d['a\u05d0']", matches)
    @dec.skip_without("numpy")
    def test_struct_array_key_completion(self):
        """Test dict key completion applies to numpy struct arrays"""
        import numpy
        ip = get_ipython()
        complete = ip.Completer.complete
        ip.user_ns["d"] = numpy.array([], dtype=[("hello", "f"), ("world", "f")])
        _, matches = complete(line_buffer="d['")
        nt.assert_in("hello", matches)
        nt.assert_in("world", matches)
        # complete on the numpy struct itself
        dt = numpy.dtype(
            [("my_head", [("my_dt", ">u4"), ("my_df", ">u4")]), ("my_data", ">f4", 5)]
        )
        x = numpy.zeros(2, dtype=dt)
        ip.user_ns["d"] = x[1]
        _, matches = complete(line_buffer="d['")
        nt.assert_in("my_head", matches)
        nt.assert_in("my_data", matches)
        # complete on a nested level
        with greedy_completion():
            ip.user_ns["d"] = numpy.zeros(2, dtype=dt)
            _, matches = complete(line_buffer="d[1]['my_head']['")
            nt.assert_true(any(["my_dt" in m for m in matches]))
            nt.assert_true(any(["my_df" in m for m in matches]))
    @dec.skip_without("pandas")
    def test_dataframe_key_completion(self):
        """Test dict key completion applies to pandas DataFrames"""
        import pandas
        ip = get_ipython()
        complete = ip.Completer.complete
        # DataFrame column labels should be offered like dict keys.
        ip.user_ns["d"] = pandas.DataFrame({"hello": [1], "world": [2]})
        _, matches = complete(line_buffer="d['")
        nt.assert_in("hello", matches)
        nt.assert_in("world", matches)
    def test_dict_key_completion_invalids(self):
        """Smoke test cases dict key completion can't handle"""
        ip = get_ipython()
        complete = ip.Completer.complete
        ip.user_ns["no_getitem"] = None
        ip.user_ns["no_keys"] = []
        ip.user_ns["cant_call_keys"] = dict
        ip.user_ns["empty"] = {}
        ip.user_ns["d"] = {"abc": 5}
        # No assertions: each call only has to return without raising.
        _, matches = complete(line_buffer="no_getitem['")
        _, matches = complete(line_buffer="no_keys['")
        _, matches = complete(line_buffer="cant_call_keys['")
        _, matches = complete(line_buffer="empty['")
        _, matches = complete(line_buffer="name_error['")
        _, matches = complete(line_buffer="d['\\")  # incomplete escape
    def test_object_key_completion(self):
        """Objects exposing _ipython_key_completions_ drive key completion."""
        ip = get_ipython()
        ip.user_ns["key_completable"] = KeyCompletable(["qwerty", "qwick"])
        _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
        nt.assert_in("qwerty", matches)
        nt.assert_in("qwick", matches)
    def test_class_key_completion(self):
        """Key completion also works when the subscripted object is a class."""
        ip = get_ipython()
        NamedInstanceClass("qwerty")
        NamedInstanceClass("qwick")
        ip.user_ns["named_instance_class"] = NamedInstanceClass
        _, matches = ip.Completer.complete(line_buffer="named_instance_class['qw")
        nt.assert_in("qwerty", matches)
        nt.assert_in("qwick", matches)
    def test_tryimport(self):
        """
        Test that try-import don't crash on trailing dot, and import modules before
        """
        from IPython.core.completerlib import try_import
        assert try_import("IPython.")
    def test_aimport_module_completer(self):
        """%aimport completes module names, not builtins like `int`."""
        ip = get_ipython()
        _, matches = ip.complete("i", "%aimport i")
        nt.assert_in("io", matches)
        nt.assert_not_in("int", matches)
    def test_nested_import_module_completer(self):
        """Dotted submodule names complete without repeating the `import`."""
        ip = get_ipython()
        _, matches = ip.complete(None, "import IPython.co", 17)
        nt.assert_in("IPython.core", matches)
        nt.assert_not_in("import IPython.core", matches)
        nt.assert_not_in("IPython.display", matches)
    def test_import_module_completer(self):
        """`import <prefix>` completes module names, not builtins."""
        ip = get_ipython()
        _, matches = ip.complete("i", "import i")
        nt.assert_in("io", matches)
        nt.assert_not_in("int", matches)
    def test_from_module_completer(self):
        """`from mod import <prefix>` completes names defined in that module."""
        ip = get_ipython()
        _, matches = ip.complete("B", "from io import B", 16)
        nt.assert_in("BytesIO", matches)
        nt.assert_not_in("BaseException", matches)
    def test_snake_case_completion(self):
        """Fuzzy snake_case matching: `s_f` should match both snake names."""
        ip = get_ipython()
        ip.Completer.use_jedi = False
        ip.user_ns["some_three"] = 3
        ip.user_ns["some_four"] = 4
        _, matches = ip.complete("s_", "print(s_f")
        nt.assert_in("some_three", matches)
        nt.assert_in("some_four", matches)
    def test_mix_terms(self):
        """Keyword completion only offers arguments of the method being called."""
        ip = get_ipython()
        from textwrap import dedent
        ip.Completer.use_jedi = False
        ip.ex(
            dedent(
                """
                class Test:
                    def meth(self, meth_arg1):
                        print("meth")
                    def meth_1(self, meth1_arg1, meth1_arg2):
                        print("meth1")
                    def meth_2(self, meth2_arg1, meth2_arg2):
                        print("meth2")
                test = Test()
                """
            )
        )
        _, matches = ip.complete(None, "test.meth(")
        nt.assert_in("meth_arg1=", matches)
        # Arguments from the sibling methods must not leak in.
        nt.assert_not_in("meth2_arg1=", matches)
|
{
"content_hash": "0b3a29fbb6bd0c3ac69c8c08f25291d1",
"timestamp": "",
"source": "github",
"line_count": 1110,
"max_line_length": 112,
"avg_line_length": 36.1027027027027,
"alnum_prop": 0.5479113639766432,
"repo_name": "sserrot/champion_relationships",
"id": "2c19e2e01876ef8d976774681c8b73ab89266367",
"size": "40097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/IPython/core/tests/test_completer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
}
|
from django import template
from django.contrib.auth.models import User, AnonymousUser, Group
from django.core.urlresolvers import reverse
from django.test import TestCase
from follow import signals, utils
from .models import Follow
from .utils import register
# Make User and Group followable so the tests below can create Follow objects
# for both model types.
register(User)
register(Group)
class FollowTest(TestCase):
    """Exercise the follow app: manager API, HTTP views, template tags and
    signals. Uses the app's own URLconf for view tests."""
    urls = 'follow.urls'
    def setUp(self):
        # Two users and a group; lennon gets a password so he can log in for
        # the view tests.
        self.lennon = User.objects.create(username='lennon')
        self.lennon.set_password('test')
        self.lennon.save()
        self.hendrix = User.objects.create(username='hendrix')
        self.musicians = Group.objects.create()
        self.lennon.groups.add(self.musicians)
    def test_follow(self):
        """Manager API: create, get_or_create, is_following, get_follows and
        toggling."""
        follow = Follow.objects.create(self.lennon, self.hendrix)
        _, result = Follow.objects.get_or_create(self.lennon, self.hendrix)
        self.assertEqual(False, result)
        result = Follow.objects.is_following(self.lennon, self.hendrix)
        self.assertEqual(True, result)
        result = Follow.objects.is_following(self.hendrix, self.lennon)
        self.assertEqual(False, result)
        result = Follow.objects.get_follows(User)
        self.assertEqual(1, len(result))
        self.assertEqual(self.lennon, result[0].user)
        result = Follow.objects.get_follows(self.hendrix)
        self.assertEqual(1, len(result))
        self.assertEqual(self.lennon, result[0].user)
        result = self.hendrix.get_follows()
        self.assertEqual(1, len(result))
        self.assertEqual(self.lennon, result[0].user)
        result = self.lennon.get_follows()
        self.assertEqual(0, len(result), result)
        # Toggle removes the existing follow, a second toggle re-adds it.
        utils.toggle(self.lennon, self.hendrix)
        self.assertEqual(0, len(self.hendrix.get_follows()))
        utils.toggle(self.lennon, self.hendrix)
        self.assertEqual(1, len(self.hendrix.get_follows()))
    def test_get_follows_for_queryset(self):
        """get_follows accepts a queryset of targets."""
        utils.follow(self.hendrix, self.lennon)
        utils.follow(self.lennon, self.hendrix)
        result = Follow.objects.get_follows(User.objects.all())
        self.assertEqual(2, result.count())
    def test_follow_http(self):
        """Each of the follow/unfollow/toggle views redirects on POST."""
        self.client.login(username='lennon', password='test')
        follow_url = reverse('follow', args=['auth', 'user', self.hendrix.id])
        # BUG FIX: previously reversed 'follow' again, so the unfollow view
        # was never exercised (cf. test_template_tags, which uses 'unfollow').
        unfollow_url = reverse('unfollow', args=['auth', 'user', self.hendrix.id])
        toggle_url = reverse('toggle', args=['auth', 'user', self.hendrix.id])
        response = self.client.post(follow_url)
        self.assertEqual(302, response.status_code)
        # Following twice is idempotent and still redirects.
        response = self.client.post(follow_url)
        self.assertEqual(302, response.status_code)
        response = self.client.post(unfollow_url)
        self.assertEqual(302, response.status_code)
        response = self.client.post(toggle_url)
        self.assertEqual(302, response.status_code)
    def test_get_fail(self):
        """The views only accept POST; GET returns 400."""
        self.client.login(username='lennon', password='test')
        follow_url = reverse('follow', args=['auth', 'user', self.hendrix.id])
        # BUG FIX: same 'follow' vs 'unfollow' copy-paste as test_follow_http.
        unfollow_url = reverse('unfollow', args=['auth', 'user', self.hendrix.id])
        response = self.client.get(follow_url)
        self.assertEqual(400, response.status_code)
        response = self.client.get(unfollow_url)
        self.assertEqual(400, response.status_code)
    def test_no_absolute_url(self):
        """A 500 is returned when the target model has no get_absolute_url."""
        self.client.login(username='lennon', password='test')
        get_absolute_url = User.get_absolute_url
        # BUG FIX: restore the patched attribute afterwards so later tests see
        # an unmodified User model (it was previously left set to None).
        try:
            User.get_absolute_url = None
            follow_url = utils.follow_link(self.hendrix)
            response = self.client.post(follow_url)
            self.assertEqual(500, response.status_code)
        finally:
            User.get_absolute_url = get_absolute_url
    def test_template_tags(self):
        """follow_url / is_following / follow_form tags track follow state."""
        follow_url = reverse('follow', args=['auth', 'user', self.hendrix.id])
        unfollow_url = reverse('unfollow', args=['auth', 'user', self.hendrix.id])
        request = type('Request', (object,), {'user': self.lennon})()
        self.assertEqual(follow_url, utils.follow_link(self.hendrix))
        self.assertEqual(unfollow_url, utils.unfollow_link(self.hendrix))
        tpl = template.Template("""{% load follow_tags %}{% follow_url obj %}""")
        ctx = template.Context({
            'obj':self.hendrix,
            'request': request
        })
        # The rendered URL flips between follow and unfollow with state.
        self.assertEqual(follow_url, tpl.render(ctx))
        utils.follow(self.lennon, self.hendrix)
        self.assertEqual(unfollow_url, tpl.render(ctx))
        utils.unfollow(self.lennon, self.hendrix)
        self.assertEqual(follow_url, tpl.render(ctx))
        tpl = template.Template("""{% load follow_tags %}{% follow_url obj user %}""")
        ctx2 = template.Context({
            'obj': self.lennon,
            'user': self.hendrix,
            'request': request
        })
        self.assertEqual(utils.follow_url(self.hendrix, self.lennon), tpl.render(ctx2))
        tpl = template.Template("""{% load follow_tags %}{% if request.user|is_following:obj %}True{% else %}False{% endif %}""")
        self.assertEqual("False", tpl.render(ctx))
        utils.follow(self.lennon, self.hendrix)
        self.assertEqual("True", tpl.render(ctx))
        tpl = template.Template("""{% load follow_tags %}{% follow_form obj %}""")
        self.assertEqual(True, isinstance(tpl.render(ctx), unicode))
        tpl = template.Template("""{% load follow_tags %}{% follow_form obj "follow/form.html" %}""")
        self.assertEqual(True, isinstance(tpl.render(ctx), unicode))
    def test_signals(self):
        """followed/unfollowed signals fire once per call, per sender model."""
        Handler = type('Handler', (object,), {
            'inc': lambda self: setattr(self, 'i', getattr(self, 'i') + 1),
            'i': 0
        })
        user_handler = Handler()
        group_handler = Handler()
        def follow_handler(sender, user, target, instance, **kwargs):
            self.assertEqual(sender, User)
            self.assertEqual(self.lennon, user)
            self.assertEqual(self.hendrix, target)
            self.assertEqual(True, isinstance(instance, Follow))
            user_handler.inc()
        def unfollow_handler(sender, user, target, instance, **kwargs):
            self.assertEqual(sender, User)
            self.assertEqual(self.lennon, user)
            self.assertEqual(self.hendrix, target)
            self.assertEqual(True, isinstance(instance, Follow))
            user_handler.inc()
        def group_follow_handler(sender, **kwargs):
            self.assertEqual(sender, Group)
            group_handler.inc()
        def group_unfollow_handler(sender, **kwargs):
            self.assertEqual(sender, Group)
            group_handler.inc()
        signals.followed.connect(follow_handler, sender=User, dispatch_uid='userfollow')
        signals.unfollowed.connect(unfollow_handler, sender=User, dispatch_uid='userunfollow')
        signals.followed.connect(group_follow_handler, sender=Group, dispatch_uid='groupfollow')
        signals.unfollowed.connect(group_unfollow_handler, sender=Group, dispatch_uid='groupunfollow')
        utils.follow(self.lennon, self.hendrix)
        utils.unfollow(self.lennon, self.hendrix)
        self.assertEqual(2, user_handler.i)
        # Group follows must only hit the Group handlers.
        utils.follow(self.lennon, self.musicians)
        utils.unfollow(self.lennon, self.musicians)
        self.assertEqual(2, user_handler.i)
        self.assertEqual(2, group_handler.i)
    def test_anonymous_is_following(self):
        """Anonymous users never count as following anything."""
        self.assertEqual(False, Follow.objects.is_following(AnonymousUser(), self.lennon))
|
{
"content_hash": "2a777b33561743b36272891946b581e4",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 129,
"avg_line_length": 38.11219512195122,
"alnum_prop": 0.6070651478305389,
"repo_name": "affan2/django-follow",
"id": "514f66a67a41e8720f7b47fc125353c6875591dd",
"size": "7813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "follow/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6054"
},
{
"name": "Python",
"bytes": "387587"
}
],
"symlink_target": ""
}
|
"""Stupid tests that ensure logging works as expected"""
from __future__ import division, absolute_import, print_function
import sys
import threading
import logging as log
from six import StringIO
import unittest
import beets.logging as blog
from beets import plugins, ui
import beetsplug
from test import _common
from test._common import TestCase
from test import helper
import six
class LoggingTest(TestCase):
    """Check that beets' logging wrappers interoperate with stdlib logging."""
    def test_logging_management(self):
        """getLogger must not replace pre-existing stdlib loggers, while
        beets-created loggers (and their children) use BeetsLogger."""
        # A logger first created via the stdlib stays a plain Logger even
        # when fetched again through beets.
        l1 = log.getLogger("foo123")
        l2 = blog.getLogger("foo123")
        self.assertEqual(l1, l2)
        self.assertEqual(l1.__class__, log.Logger)
        # A logger first created via beets uses the BeetsLogger subclass...
        l3 = blog.getLogger("bar123")
        l4 = log.getLogger("bar123")
        self.assertEqual(l3, l4)
        self.assertEqual(l3.__class__, blog.BeetsLogger)
        self.assertIsInstance(l3, (blog.StrFormatLogger,
                                   blog.ThreadLocalLevelLogger))
        # ...and so do its children.
        l5 = l3.getChild("shalala")
        self.assertEqual(l5.__class__, blog.BeetsLogger)
        l6 = blog.getLogger()
        self.assertNotEqual(l1, l6)
    def test_str_format_logging(self):
        """Beets loggers accept str.format-style placeholders."""
        l = blog.getLogger("baz123")
        stream = StringIO()
        handler = log.StreamHandler(stream)
        l.addHandler(handler)
        l.propagate = False
        l.warning(u"foo {0} {bar}", "oof", bar=u"baz")
        handler.flush()
        # BUG FIX: assertTrue(x, msg) treats the second argument as a failure
        # message and passes for any non-empty output; the intent was to
        # compare the formatted record (StreamHandler appends "\n").
        self.assertEqual(stream.getvalue(), u"foo oof baz\n")
class LoggingLevelTest(unittest.TestCase, helper.TestHelper):
    """Verbosity config must map to the right log level in each plugin
    context (command, event listener, import stage)."""
    class DummyModule(object):
        class DummyPlugin(plugins.BeetsPlugin):
            def __init__(self):
                plugins.BeetsPlugin.__init__(self, 'dummy')
                self.import_stages = [self.import_stage]
                self.register_listener('dummy_event', self.listener)
            def log_all(self, name):
                # Emit one record per level so tests can check filtering.
                self._log.debug(u'debug ' + name)
                self._log.info(u'info ' + name)
                self._log.warning(u'warning ' + name)
            def commands(self):
                cmd = ui.Subcommand('dummy')
                cmd.func = lambda _, __, ___: self.log_all('cmd')
                return (cmd,)
            def import_stage(self, session, task):
                self.log_all('import_stage')
            def listener(self):
                self.log_all('listener')
    def setUp(self):
        # Install the dummy plugin as an importable module before loading it.
        sys.modules['beetsplug.dummy'] = self.DummyModule
        beetsplug.dummy = self.DummyModule
        self.setup_beets()
        self.load_plugins('dummy')
    def tearDown(self):
        self.unload_plugins()
        self.teardown_beets()
        del beetsplug.dummy
        sys.modules.pop('beetsplug.dummy')
        # Reset class-level listener registries so other tests start clean.
        self.DummyModule.DummyPlugin.listeners = None
        self.DummyModule.DummyPlugin._raw_listeners = None
    def test_command_level0(self):
        # Commands default to INFO: warning+info visible, debug hidden.
        self.config['verbose'] = 0
        with helper.capture_log() as logs:
            self.run_command('dummy')
        self.assertIn(u'dummy: warning cmd', logs)
        self.assertIn(u'dummy: info cmd', logs)
        self.assertNotIn(u'dummy: debug cmd', logs)
    def test_command_level1(self):
        self.config['verbose'] = 1
        with helper.capture_log() as logs:
            self.run_command('dummy')
        self.assertIn(u'dummy: warning cmd', logs)
        self.assertIn(u'dummy: info cmd', logs)
        self.assertIn(u'dummy: debug cmd', logs)
    def test_command_level2(self):
        self.config['verbose'] = 2
        with helper.capture_log() as logs:
            self.run_command('dummy')
        self.assertIn(u'dummy: warning cmd', logs)
        self.assertIn(u'dummy: info cmd', logs)
        self.assertIn(u'dummy: debug cmd', logs)
    def test_listener_level0(self):
        # Listeners default to WARNING: only the warning record is visible.
        self.config['verbose'] = 0
        with helper.capture_log() as logs:
            plugins.send('dummy_event')
        self.assertIn(u'dummy: warning listener', logs)
        self.assertNotIn(u'dummy: info listener', logs)
        self.assertNotIn(u'dummy: debug listener', logs)
    def test_listener_level1(self):
        self.config['verbose'] = 1
        with helper.capture_log() as logs:
            plugins.send('dummy_event')
        self.assertIn(u'dummy: warning listener', logs)
        self.assertIn(u'dummy: info listener', logs)
        self.assertNotIn(u'dummy: debug listener', logs)
    def test_listener_level2(self):
        self.config['verbose'] = 2
        with helper.capture_log() as logs:
            plugins.send('dummy_event')
        self.assertIn(u'dummy: warning listener', logs)
        self.assertIn(u'dummy: info listener', logs)
        self.assertIn(u'dummy: debug listener', logs)
    def test_import_stage_level0(self):
        # Import stages behave like listeners: WARNING by default.
        self.config['verbose'] = 0
        with helper.capture_log() as logs:
            importer = self.create_importer()
            importer.run()
        self.assertIn(u'dummy: warning import_stage', logs)
        self.assertNotIn(u'dummy: info import_stage', logs)
        self.assertNotIn(u'dummy: debug import_stage', logs)
    def test_import_stage_level1(self):
        self.config['verbose'] = 1
        with helper.capture_log() as logs:
            importer = self.create_importer()
            importer.run()
        self.assertIn(u'dummy: warning import_stage', logs)
        self.assertIn(u'dummy: info import_stage', logs)
        self.assertNotIn(u'dummy: debug import_stage', logs)
    def test_import_stage_level2(self):
        self.config['verbose'] = 2
        with helper.capture_log() as logs:
            importer = self.create_importer()
            importer.run()
        self.assertIn(u'dummy: warning import_stage', logs)
        self.assertIn(u'dummy: info import_stage', logs)
        self.assertIn(u'dummy: debug import_stage', logs)
@_common.slow_test()
class ConcurrentEventsTest(TestCase, helper.TestHelper):
    """Similar to LoggingLevelTest but lower-level and focused on multiple
    events interaction. Since this is a bit heavy we don't do it in
    LoggingLevelTest.
    """
    class DummyPlugin(plugins.BeetsPlugin):
        def __init__(self, test_case):
            plugins.BeetsPlugin.__init__(self, 'dummy')
            self.register_listener('dummy_event1', self.listener1)
            self.register_listener('dummy_event2', self.listener2)
            # Each listener blocks on its own lock so the test can hold both
            # events in flight at once.
            self.lock1 = threading.Lock()
            self.lock2 = threading.Lock()
            self.test_case = test_case
            self.exc_info = None
            self.t1_step = self.t2_step = 0
        def log_all(self, name):
            self._log.debug(u'debug ' + name)
            self._log.info(u'info ' + name)
            self._log.warning(u'warning ' + name)
        def listener1(self):
            # Runs in a worker thread; assertion failures are smuggled back
            # to the main thread via self.exc_info.
            try:
                self.test_case.assertEqual(self._log.level, log.INFO)
                self.t1_step = 1
                self.lock1.acquire()
                self.test_case.assertEqual(self._log.level, log.INFO)
                self.t1_step = 2
            except Exception:
                import sys
                self.exc_info = sys.exc_info()
        def listener2(self):
            try:
                self.test_case.assertEqual(self._log.level, log.DEBUG)
                self.t2_step = 1
                self.lock2.acquire()
                self.test_case.assertEqual(self._log.level, log.DEBUG)
                self.t2_step = 2
            except Exception:
                import sys
                self.exc_info = sys.exc_info()
    def setUp(self):
        self.setup_beets(disk=True)
    def tearDown(self):
        self.teardown_beets()
    def test_concurrent_events(self):
        """Two in-flight events must each see their own thread-local level."""
        dp = self.DummyPlugin(self)
        def check_dp_exc():
            if dp.exc_info:
                # BUG FIX: re-raise with the real (type, value, traceback);
                # the old call `six.reraise(exc_info[1], None, exc_info[2])`
                # passed the exception *instance* where the type belongs.
                six.reraise(*dp.exc_info)
        try:
            dp.lock1.acquire()
            dp.lock2.acquire()
            self.assertEqual(dp._log.level, log.NOTSET)
            self.config['verbose'] = 1
            t1 = threading.Thread(target=dp.listeners['dummy_event1'][0])
            t1.start()  # blocked. t1 tested its log level
            while dp.t1_step != 1:
                check_dp_exc()
            self.assertTrue(t1.is_alive())
            self.assertEqual(dp._log.level, log.NOTSET)
            self.config['verbose'] = 2
            t2 = threading.Thread(target=dp.listeners['dummy_event2'][0])
            t2.start()  # blocked. t2 tested its log level
            while dp.t2_step != 1:
                check_dp_exc()
            self.assertTrue(t2.is_alive())
            self.assertEqual(dp._log.level, log.NOTSET)
            dp.lock1.release()  # dummy_event1 tests its log level + finishes
            while dp.t1_step != 2:
                check_dp_exc()
            t1.join(.1)
            self.assertFalse(t1.is_alive())
            self.assertTrue(t2.is_alive())
            self.assertEqual(dp._log.level, log.NOTSET)
            dp.lock2.release()  # dummy_event2 tests its log level + finishes
            while dp.t2_step != 2:
                check_dp_exc()
            t2.join(.1)
            self.assertFalse(t2.is_alive())
        except Exception:
            print(u"Alive threads:", threading.enumerate())
            if dp.lock1.locked():
                print(u"Releasing lock1 after exception in test")
                dp.lock1.release()
            if dp.lock2.locked():
                print(u"Releasing lock2 after exception in test")
                dp.lock2.release()
            print(u"Alive threads:", threading.enumerate())
            raise
    def test_root_logger_levels(self):
        """Root logger level should be shared between threads.
        """
        self.config['threaded'] = True
        blog.getLogger('beets').set_global_level(blog.WARNING)
        with helper.capture_log() as logs:
            importer = self.create_importer()
            importer.run()
        self.assertEqual(logs, [])
        blog.getLogger('beets').set_global_level(blog.INFO)
        with helper.capture_log() as logs:
            importer = self.create_importer()
            importer.run()
        for l in logs:
            self.assertIn(u"import", l)
            self.assertIn(u"album", l)
        blog.getLogger('beets').set_global_level(blog.DEBUG)
        with helper.capture_log() as logs:
            importer = self.create_importer()
            importer.run()
        self.assertIn(u"Sending event: database_change", logs)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
# Allow running this test module directly, e.g. `python test_logging.py`.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
{
"content_hash": "0b341747822325e09e041646b10e1e92",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 77,
"avg_line_length": 35.126666666666665,
"alnum_prop": 0.5776238375403302,
"repo_name": "shamangeorge/beets",
"id": "826b2447b51f31b752beb2142ed1838510c377c2",
"size": "10563",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "test/test_logging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3539"
},
{
"name": "HTML",
"bytes": "7094"
},
{
"name": "JavaScript",
"bytes": "86092"
},
{
"name": "Python",
"bytes": "2027754"
},
{
"name": "Shell",
"bytes": "7448"
}
],
"symlink_target": ""
}
|
"""Document formatting specifics for parsing and working with documents."""
import logging
from grow.common import utils
from grow.documents import document_front_matter as doc_front_matter
# Set markdown logging level to info.
logging.getLogger('MARKDOWN').setLevel(logging.INFO)
# Line that delimits YAML front matter from document content.
BOUNDARY_SEPARATOR = '---'
class Error(Exception):
    """Base class for all document formatting errors."""
    pass
class BadFormatError(Error, ValueError):
    """Raised when a document's format or arguments are invalid."""
    pass
class BadLocalesError(BadFormatError):
    """Raised when a document's locale configuration is invalid."""
    pass
class DocumentFormat(object):
"""
Document formatting specifics for parsing and working with documents.
Defines how to handle documents formatted in various syntax formats.
"""
    def __init__(self, doc):
        """Bind the format helper to `doc` and reset the lazy caches."""
        self._doc = doc
        # Parsed body and raw file contents are loaded lazily; see the
        # `content` and `raw_content` properties.
        self._content = None
        self._raw_content = None
@staticmethod
def from_doc(*args, **kwargs):
doc = kwargs.get('doc', None)
if not doc:
raise BadFormatError(
'Missing `doc` keyword argument for creating format')
if doc.ext in ('.html', '.htm'):
return HtmlDocumentFormat(*args, **kwargs)
if doc.ext in ('.markdown', '.mdown', '.mkdn', '.mkd', '.md'):
return MarkdownDocumentFormat(*args, **kwargs)
if doc.ext in ('.yaml', '.yml'):
return YamlDocumentFormat(*args, **kwargs)
return TextDocumentFormat(*args, **kwargs)
@staticmethod
def format_doc(front_matter, content):
if front_matter and content:
return '{0}\n{1}\n{0}\n{2}\n'.format(
BOUNDARY_SEPARATOR, front_matter.strip(), content.strip())
elif front_matter:
return '{}\n'.format(front_matter.strip())
return '{}\n'.format(content.strip())
def _parse_content(self):
"""Parse the content from the raw content."""
_, parsed_content = doc_front_matter.DocumentFrontMatter\
.split_front_matter(self.raw_content)
return parsed_content
def _parse_front_matter(self):
"""Parse the front matter from the raw content."""
return doc_front_matter.DocumentFrontMatter(
self._doc)
@property
def content(self):
"""Lazy load the content after checking the content cache."""
if self._content:
return self._content
self._content = self._parse_content()
return self._content
@utils.cached_property
def front_matter(self):
cached_front_matter = self._doc.pod.podcache.document_cache\
.get_property(self._doc, 'front_matter')
if cached_front_matter:
return doc_front_matter.DocumentFrontMatter(
self._doc, raw_front_matter=cached_front_matter)
front_matter = self._parse_front_matter()
self._doc.pod.podcache.document_cache.add_property(
self._doc, 'front_matter', front_matter.export())
return front_matter
@property
def raw_content(self):
if self._raw_content:
return self._raw_content
if self._doc.exists:
self._raw_content = self._doc.pod.read_file(self._doc.pod_path)
return self._raw_content
@utils.cached_property
def formatted(self):
return self.content
def to_raw_content(self):
"""Formats the front matter and content into a raw_content string."""
raw_front_matter = self.front_matter.export()
return self.format_doc(raw_front_matter, self.content)
def update(self, fields=utils.SENTINEL, content=utils.SENTINEL):
"""Updates content and frontmatter."""
if fields is not utils.SENTINEL:
raw_front_matter = utils.dump_yaml(fields)
self.front_matter.update_raw_front_matter(raw_front_matter)
self._doc.pod.podcache.document_cache.add_property(
self._doc, 'front_matter', self.front_matter.export())
if content is not utils.SENTINEL:
self._content = content
self._raw_content = self.to_raw_content()
class HtmlDocumentFormat(DocumentFormat):
    """HTML documents: the formatted output is the decoded content."""

    @utils.cached_property
    def formatted(self):
        content = self.content
        if content is None:
            return None
        return content.decode('utf-8')
class MarkdownDocumentFormat(DocumentFormat):
    """Markdown documents: content is converted to HTML when formatted."""

    @utils.cached_property
    def markdown(self):
        """Instance of pod flavored markdown."""
        return self._doc.pod.markdown

    @property
    def toc(self):
        """Markdown TOC extension."""
        # Force the markdown conversion so the TOC attribute is populated.
        _ = self.formatted
        # pylint: disable=no-member
        return self.markdown.toc

    @utils.cached_property
    def formatted(self):
        """Markdown formatted content."""
        if not self.content:
            return None
        return self.markdown.convert(self.content.decode('utf-8'))
class TextDocumentFormat(DocumentFormat):
    """Plain-text documents; uses the base class behavior unchanged."""
    pass
class YamlDocumentFormat(DocumentFormat):
    """YAML documents: the whole file is front matter with no body content."""

    def _parse_content(self):
        # YAML documents have no separate content section.
        return None

    def _parse_front_matter(self):
        raw = self.raw_content
        return doc_front_matter.DocumentFrontMatter(
            self._doc, raw_front_matter=raw)
|
{
"content_hash": "4751fe8c9c046f56cc3084d59868efbb",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 77,
"avg_line_length": 30.63855421686747,
"alnum_prop": 0.6305544632324027,
"repo_name": "grow/pygrow",
"id": "5b5ddc153ee4c637b0ae11f11be185639119b0e8",
"size": "5086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grow/documents/document_format.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "527"
},
{
"name": "HTML",
"bytes": "8714"
},
{
"name": "Python",
"bytes": "309004"
},
{
"name": "Shell",
"bytes": "4219"
}
],
"symlink_target": ""
}
|
from flask import render_template
# from flask import render_template, redirect, url_for, abort
# from flask_login import current_user
from . import main
@main.route('/')
def index():
    """Render the site landing page."""
    # return '<h1>Hello World!</h1>'
    return render_template('main/index.html')
@main.route('/download')
def download():
    """Render the download page.

    NOTE(review): currently renders pricing.html (as do /product and
    /purchase) — presumably a placeholder; confirm intended template.
    """
    return render_template('main/pricing.html')
@main.route('/product')
def product():
    """Render the product page (currently the pricing template)."""
    return render_template('main/pricing.html')
@main.route('/purchase')
def purchase():
    """Render the purchase page (currently the pricing template)."""
    return render_template('main/pricing.html')
@main.route('/about-us')
def aboutus():
    """Render the about-us page."""
    return render_template('main/about-us.html')
@main.route('/contact-us')
def contactus():
    """Render the contact-us page."""
    return render_template('main/contact-us.html')
# @main.route('/user/<username>')
# def user(username):
# user = User.query.filter_by(username=username).first_or_404()
# page = request.args.get('page', 1, type=int)
# pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
# page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
# error_out=False)
# posts = pagination.items
# return render_template('user.html', user=user, posts=posts,
# pagination=pagination)
# @main.route('/edit-profile', methods = ['GET', 'POST'])
# @login_required
# def edit_profile():
# form = EditProfileForm()
# if form.validate_on_submit():
# current_user.name = form.name.data
# current_user.location = form.location.data
# current_user.about_me = form.about_me.data
# db.session.add(current_user)
# flash('Your profile has been updated.')
# return redirect(url_for('.user', username=current_user.username))
# form.name.data = current_user.name
# form.location.data = current_user.location
# form.about_me.data = current_user.about_me
# return render_template('edit_profile.html', form = form)
# @main.route('/edit-profile/<int:id>', methods = ['GET', 'POST'])
# @login_required
# @admin_required
# def edit_profile_admin(id):
# user = User.query.get_or_404(id)
# form = EditProfileAdminForm(user=user)
# if form.validate_on_submit():
# user.email = form.email.data
# user.username = form.username.data
# user.confirmed = form.confirmed.data
# user.role = Role.query.get(form.role.data)
# user.name = form.name.data
# user.location = form.location.data
# user.about_me = form.about_me.data
# db.session.add(user)
# flash('The profile has been updated.')
# return redirect(url_for('.user', username=user.username))
# form.email.data = user.email
# form.username.data = user.username
# form.confirmed.data = user.confirmed
# form.role.data = user.role_id
# form.name.data = user.name
# form.location.data = user.location
# form.about_me.data = user.about_me
# return render_template('edit_profile.html', form=form, user=user)
|
{
"content_hash": "4adfd10a6a0296dfe28078a7592598aa",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 75,
"avg_line_length": 34.02298850574713,
"alnum_prop": 0.6483108108108108,
"repo_name": "cmacro/mogufsite",
"id": "05744f49e3de8e92545c5887b797755c37b73f18",
"size": "2960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/main/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "2638"
},
{
"name": "ApacheConf",
"bytes": "741"
},
{
"name": "C",
"bytes": "31019"
},
{
"name": "CSS",
"bytes": "2142070"
},
{
"name": "HTML",
"bytes": "238553"
},
{
"name": "Java",
"bytes": "10645"
},
{
"name": "JavaScript",
"bytes": "2052855"
},
{
"name": "Lasso",
"bytes": "2280"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "PHP",
"bytes": "173775"
},
{
"name": "Perl",
"bytes": "2800"
},
{
"name": "Python",
"bytes": "90699"
},
{
"name": "Ruby",
"bytes": "1276"
},
{
"name": "Shell",
"bytes": "1092"
}
],
"symlink_target": ""
}
|
"""Test Home Assistant remote methods and classes."""
# pylint: disable=protected-access
import asyncio
import threading
import unittest
from unittest.mock import patch
import homeassistant.core as ha
import homeassistant.bootstrap as bootstrap
import homeassistant.remote as remote
import homeassistant.components.http as http
from homeassistant.const import HTTP_HEADER_HA_AUTH, EVENT_STATE_CHANGED
import homeassistant.util.dt as dt_util
from tests.common import (
get_test_instance_port, get_test_home_assistant, get_test_config_dir)
# Test fixture constants; ports are allocated dynamically to avoid clashes
# between concurrently running test modules.
API_PASSWORD = 'test1234'
MASTER_PORT = get_test_instance_port()
SLAVE_PORT = get_test_instance_port()
BROKEN_PORT = get_test_instance_port()
HTTP_BASE_URL = 'http://127.0.0.1:{}'.format(MASTER_PORT)
HA_HEADERS = {HTTP_HEADER_HA_AUTH: API_PASSWORD}
# API with a bogus password; used to exercise error paths.
broken_api = remote.API('127.0.0.1', "bladiebla")
# Populated by setUpModule().
hass, slave, master_api = None, None, None
def _url(path=''):
    """Build a full URL on the master instance for the given path."""
    return '{}{}'.format(HTTP_BASE_URL, path)
# pylint: disable=invalid-name
def setUpModule():
    """Initialization of a Home Assistant server and slave instance."""
    global hass, slave, master_api
    # Master instance with HTTP API enabled.
    hass = get_test_home_assistant()
    hass.bus.listen('test_event', lambda _: _)
    hass.states.set('test.test', 'a_state')
    bootstrap.setup_component(
        hass, http.DOMAIN,
        {http.DOMAIN: {http.CONF_API_PASSWORD: API_PASSWORD,
                       http.CONF_SERVER_PORT: MASTER_PORT}})
    bootstrap.setup_component(hass, 'api')
    hass.start()
    master_api = remote.API('127.0.0.1', API_PASSWORD, MASTER_PORT)
    # Start slave
    # The slave runs its own event loop on a background thread.
    loop = asyncio.new_event_loop()
    # FIXME: should not be a daemon
    threading.Thread(name='SlaveThread', daemon=True,
                     target=loop.run_forever).start()
    slave = remote.HomeAssistant(master_api, loop=loop)
    slave.config.config_dir = get_test_config_dir()
    slave.config.skip_pip = True
    bootstrap.setup_component(
        slave, http.DOMAIN,
        {http.DOMAIN: {http.CONF_API_PASSWORD: API_PASSWORD,
                       http.CONF_SERVER_PORT: SLAVE_PORT}})
    # Patch out the timer so the slave does not tick on its own.
    with patch.object(ha, '_async_create_timer', return_value=None):
        slave.start()
# pylint: disable=invalid-name
def tearDownModule():
    """Stop the Home Assistant server and slave."""
    # Stop the slave before the master it is connected to.
    slave.stop()
    hass.stop()
class TestRemoteMethods(unittest.TestCase):
    """Test the homeassistant.remote module."""
    # Relies on the module-level fixtures (hass, slave, master_api,
    # broken_api) created in setUpModule().
    def tearDown(self):
        """Stop everything that was started."""
        slave.block_till_done()
        hass.block_till_done()
    def test_validate_api(self):
        """Test Python API validate_api."""
        self.assertEqual(remote.APIStatus.OK, remote.validate_api(master_api))
        self.assertEqual(
            remote.APIStatus.INVALID_PASSWORD,
            remote.validate_api(
                remote.API('127.0.0.1', API_PASSWORD + 'A', MASTER_PORT)))
        self.assertEqual(
            remote.APIStatus.CANNOT_CONNECT, remote.validate_api(broken_api))
    def test_get_event_listeners(self):
        """Test Python API get_event_listeners."""
        local_data = hass.bus.listeners
        remote_data = remote.get_event_listeners(master_api)
        # Every remote listener entry must match and consume a local one.
        for event in remote_data:
            self.assertEqual(local_data.pop(event["event"]),
                             event["listener_count"])
        self.assertEqual(len(local_data), 0)
        self.assertEqual({}, remote.get_event_listeners(broken_api))
    def test_fire_event(self):
        """Test Python API fire_event."""
        test_value = []
        @ha.callback
        def listener(event):
            """Helper method that will verify our event got called."""
            test_value.append(1)
        hass.bus.listen("test.event_no_data", listener)
        remote.fire_event(master_api, "test.event_no_data")
        hass.block_till_done()
        self.assertEqual(1, len(test_value))
        # Should not trigger any exception
        remote.fire_event(broken_api, "test.event_no_data")
    def test_get_state(self):
        """Test Python API get_state."""
        self.assertEqual(
            hass.states.get('test.test'),
            remote.get_state(master_api, 'test.test'))
        self.assertEqual(None, remote.get_state(broken_api, 'test.test'))
    def test_get_states(self):
        """Test Python API get_states."""
        self.assertEqual(hass.states.all(), remote.get_states(master_api))
        self.assertEqual([], remote.get_states(broken_api))
    def test_remove_state(self):
        """Test Python API remove_state."""
        hass.states.set('test.remove_state', 'set_test')
        self.assertIn('test.remove_state', hass.states.entity_ids())
        remote.remove_state(master_api, 'test.remove_state')
        self.assertNotIn('test.remove_state', hass.states.entity_ids())
    def test_set_state(self):
        """Test Python API set_state."""
        remote.set_state(master_api, 'test.test', 'set_test')
        state = hass.states.get('test.test')
        self.assertIsNotNone(state)
        self.assertEqual('set_test', state.state)
        self.assertFalse(remote.set_state(broken_api, 'test.test', 'set_test'))
    def test_set_state_with_push(self):
        """Test Python API set_state with push option."""
        events = []
        hass.bus.listen(EVENT_STATE_CHANGED, lambda ev: events.append(ev))
        # Setting the same state twice only fires one change event...
        remote.set_state(master_api, 'test.test', 'set_test_2')
        remote.set_state(master_api, 'test.test', 'set_test_2')
        hass.block_till_done()
        self.assertEqual(1, len(events))
        # ...unless force_update pushes it regardless.
        remote.set_state(
            master_api, 'test.test', 'set_test_2', force_update=True)
        hass.block_till_done()
        self.assertEqual(2, len(events))
    def test_is_state(self):
        """Test Python API is_state."""
        self.assertTrue(
            remote.is_state(master_api, 'test.test',
                            hass.states.get('test.test').state))
        self.assertFalse(
            remote.is_state(broken_api, 'test.test',
                            hass.states.get('test.test').state))
    def test_get_services(self):
        """Test Python API get_services."""
        local_services = hass.services.services
        for serv_domain in remote.get_services(master_api):
            local = local_services.pop(serv_domain["domain"])
            self.assertEqual(local, serv_domain["services"])
        self.assertEqual({}, remote.get_services(broken_api))
    def test_call_service(self):
        """Test Python API services.call."""
        test_value = []
        @ha.callback
        def listener(service_call):
            """Helper method that will verify that our service got called."""
            test_value.append(1)
        hass.services.register("test_domain", "test_service", listener)
        remote.call_service(master_api, "test_domain", "test_service")
        hass.block_till_done()
        self.assertEqual(1, len(test_value))
        # Should not raise an exception
        remote.call_service(broken_api, "test_domain", "test_service")
    def test_json_encoder(self):
        """Test the JSON Encoder."""
        ha_json_enc = remote.JSONEncoder()
        state = hass.states.get('test.test')
        self.assertEqual(state.as_dict(), ha_json_enc.default(state))
        # Default method raises TypeError if non HA object
        self.assertRaises(TypeError, ha_json_enc.default, 1)
        now = dt_util.utcnow()
        self.assertEqual(now.isoformat(), ha_json_enc.default(now))
class TestRemoteClasses(unittest.TestCase):
    """Test the homeassistant.remote module."""
    # Exercises master/slave state and event synchronization using the
    # module-level fixtures from setUpModule().
    def tearDown(self):
        """Stop everything that was started."""
        slave.block_till_done()
        hass.block_till_done()
    def test_home_assistant_init(self):
        """Test HomeAssistant init."""
        # Wrong password
        self.assertRaises(
            ha.HomeAssistantError, remote.HomeAssistant,
            remote.API('127.0.0.1', API_PASSWORD + 'A', 8124))
        # Wrong port
        self.assertRaises(
            ha.HomeAssistantError, remote.HomeAssistant,
            remote.API('127.0.0.1', API_PASSWORD, BROKEN_PORT))
    def test_statemachine_init(self):
        """Test if remote.StateMachine copies all states on init."""
        self.assertEqual(sorted(hass.states.all()),
                         sorted(slave.states.all()))
    def test_statemachine_set(self):
        """Test if setting the state on a slave is recorded."""
        slave.states.set("remote.test", "remote.statemachine test")
        # Wait till slave tells master
        slave.block_till_done()
        # Wait till master gives updated state
        hass.block_till_done()
        self.assertEqual("remote.statemachine test",
                         slave.states.get("remote.test").state)
    def test_statemachine_remove_from_master(self):
        """Remove statemachine from master."""
        hass.states.set("remote.master_remove", "remove me!")
        hass.block_till_done()
        slave.block_till_done()
        self.assertIn('remote.master_remove', slave.states.entity_ids())
        hass.states.remove("remote.master_remove")
        hass.block_till_done()
        slave.block_till_done()
        self.assertNotIn('remote.master_remove', slave.states.entity_ids())
    def test_statemachine_remove_from_slave(self):
        """Remove statemachine from slave."""
        hass.states.set("remote.slave_remove", "remove me!")
        hass.block_till_done()
        self.assertIn('remote.slave_remove', slave.states.entity_ids())
        self.assertTrue(slave.states.remove("remote.slave_remove"))
        slave.block_till_done()
        hass.block_till_done()
        self.assertNotIn('remote.slave_remove', slave.states.entity_ids())
    def test_eventbus_fire(self):
        """Test if events fired from the eventbus get fired."""
        hass_call = []
        slave_call = []
        hass.bus.listen("test.event_no_data", lambda _: hass_call.append(1))
        slave.bus.listen("test.event_no_data", lambda _: slave_call.append(1))
        slave.bus.fire("test.event_no_data")
        # Wait till slave tells master
        slave.block_till_done()
        # Wait till master gives updated event
        hass.block_till_done()
        self.assertEqual(1, len(hass_call))
        self.assertEqual(1, len(slave_call))
    def test_get_config(self):
        """Test the return of the configuration."""
        self.assertEqual(hass.config.as_dict(), remote.get_config(master_api))
|
{
"content_hash": "9a56195e89f577594469ac4d4f1951df",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 79,
"avg_line_length": 33.321766561514195,
"alnum_prop": 0.6253905140585061,
"repo_name": "philipbl/home-assistant",
"id": "55d8ca18b5fc800bbea0a052d5574cd2ac9886e9",
"size": "10563",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/test_remote.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1446622"
},
{
"name": "Python",
"bytes": "3985732"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
}
|
import django
from django.conf import settings
from django.core.management import call_command, get_commands, load_command_class
from django.core.management.base import BaseCommand, CommandError
from django.db import connection
# Python 2/3 compatibility: use the six-provided `input`; on import failure
# fall back to Python 2's raw_input.
try:
    from django.utils.six.moves import input
except ImportError:
    input = raw_input
from tenant_schemas.utils import get_tenant_model, get_public_schema_name
class BaseTenantCommand(BaseCommand):
    """
    Generic command class useful for iterating any existing command
    over all schemata. The actual command name is expected in the
    class variable COMMAND_NAME of the subclass.
    """
    def __new__(cls, *args, **kwargs):
        """
        Sets option_list and help dynamically.
        """
        obj = super(BaseTenantCommand, cls).__new__(cls, *args, **kwargs)
        # get_commands() maps a command name either to its app name or, if
        # already loaded, to the command instance itself.
        app_name = get_commands()[obj.COMMAND_NAME]
        if isinstance(app_name, BaseCommand):
            # If the command is already loaded, use it directly.
            obj._original_command = app_name
        else:
            obj._original_command = load_command_class(app_name, obj.COMMAND_NAME)
        # prepend the command's original help with the info about schemata
        # iteration
        obj.help = (
            "Calls {cmd} for all registered schemata. You can use regular "
            "{cmd} options.\n\nOriginal help for {cmd}:\n\n{help}".format(
                cmd=obj.COMMAND_NAME,
                help=getattr(obj._original_command, 'help', 'none'),
            )
        )
        return obj
    def add_arguments(self, parser):
        """Add schema-iteration options plus the wrapped command's options."""
        super(BaseTenantCommand, self).add_arguments(parser)
        parser.add_argument("-s", "--schema", dest="schema_name")
        parser.add_argument("-p", "--skip-public", dest="skip_public",
                            action="store_true", default=False)
        # use the privately held reference to the underlying command to invoke
        # the add_arguments path on this parser instance
        self._original_command.add_arguments(parser)
    def execute_command(self, tenant, command_name, *args, **options):
        """Switch the connection to *tenant*'s schema and run the command."""
        verbosity = int(options.get('verbosity'))
        if verbosity >= 1:
            print()
            print(self.style.NOTICE("=== Switching to schema '")
                  + self.style.SQL_TABLE(tenant.schema_name)
                  + self.style.NOTICE("' then calling %s:" % command_name))
        connection.set_tenant(tenant)
        # call the original command with the args it knows
        call_command(command_name, *args, **options)
    def handle(self, *args, **options):
        """
        Iterates a command over all registered schemata.
        """
        if options['schema_name']:
            # only run on a particular schema
            connection.set_schema_to_public()
            self.execute_command(get_tenant_model().objects.get(schema_name=options['schema_name']), self.COMMAND_NAME,
                                 *args, **options)
        else:
            for tenant in get_tenant_model().objects.all():
                if not (options['skip_public'] and tenant.schema_name == get_public_schema_name()):
                    self.execute_command(tenant, self.COMMAND_NAME, *args, **options)
class InteractiveTenantOption(object):
    """Mixin adding a --schema option with an interactive fallback prompt."""
    def add_arguments(self, parser):
        parser.add_argument("-s", "--schema", dest="schema_name", help="specify tenant schema")
    def get_tenant_from_options_or_interactive(self, **options):
        """Resolve a tenant from --schema, or prompt the user for one.

        Raises CommandError if no tenants exist or the schema is unknown.
        """
        TenantModel = get_tenant_model()
        all_tenants = TenantModel.objects.all()
        if not all_tenants:
            raise CommandError("""There are no tenants in the system.
To learn how create a tenant, see:
https://django-tenant-schemas.readthedocs.io/en/latest/use.html#creating-a-tenant""")
        if options.get('schema_name'):
            tenant_schema = options['schema_name']
        else:
            # Prompt until the user enters something other than '?'.
            while True:
                tenant_schema = input("Enter Tenant Schema ('?' to list schemas): ")
                if tenant_schema == '?':
                    print('\n'.join(["%s - %s" % (t.schema_name, t.domain_url,) for t in all_tenants]))
                else:
                    break
        if tenant_schema not in [t.schema_name for t in all_tenants]:
            raise CommandError("Invalid tenant schema, '%s'" % (tenant_schema,))
        return TenantModel.objects.get(schema_name=tenant_schema)
class TenantWrappedCommand(InteractiveTenantOption, BaseCommand):
    """
    Generic command class useful for running any existing command
    on a particular tenant. The actual command name is expected in the
    class variable COMMAND_NAME of the subclass.
    """
    def __new__(cls, *args, **kwargs):
        obj = super(TenantWrappedCommand, cls).__new__(cls, *args, **kwargs)
        # COMMAND is the wrapped command class, supplied by the subclass.
        obj.command_instance = obj.COMMAND()
        # Older Django versions still expose options via option_list.
        if django.VERSION <= (1,10,0):
            obj.option_list = obj.command_instance.option_list
        return obj
    def add_arguments(self, parser):
        """Add the schema option plus the wrapped command's arguments."""
        super(TenantWrappedCommand, self).add_arguments(parser)
        self.command_instance.add_arguments(parser)
    def handle(self, *args, **options):
        """Resolve the tenant, switch to its schema, and run the command."""
        tenant = self.get_tenant_from_options_or_interactive(**options)
        connection.set_tenant(tenant)
        self.command_instance.execute(*args, **options)
class SyncCommon(BaseCommand):
    """Shared argument handling for schema synchronization commands."""
    def add_arguments(self, parser):
        parser.add_argument('--tenant', action='store_true', dest='tenant', default=False,
                            help='Tells Django to populate only tenant applications.')
        parser.add_argument('--shared', action='store_true', dest='shared', default=False,
                            help='Tells Django to populate only shared applications.')
        parser.add_argument('--app_label', action='store', dest='app_label', nargs='?',
                            help='App label of an application to synchronize the state.')
        parser.add_argument('--migration_name', action='store', dest='migration_name', nargs='?',
                            help=('Database state will be brought to the state after that '
                                  'migration. Use the name "zero" to unapply all migrations.'))
        parser.add_argument("-s", "--schema", dest="schema_name")
        parser.add_argument('--executor', action='store', dest='executor', default=None,
                            help='Executor for running migrations [standard (default)|parallel]')
    def handle(self, *args, **options):
        """Derive which schemata to sync from the provided options."""
        self.sync_tenant = options.get('tenant')
        self.sync_public = options.get('shared')
        self.schema_name = options.get('schema_name')
        self.executor = options.get('executor')
        self.installed_apps = settings.INSTALLED_APPS
        self.args = args
        self.options = options
        if self.schema_name:
            if self.sync_public:
                raise CommandError("schema should only be used with the --tenant switch.")
            elif self.schema_name == get_public_schema_name():
                self.sync_public = True
            else:
                self.sync_tenant = True
        elif not self.sync_public and not self.sync_tenant:
            # no options set, sync both
            self.sync_tenant = True
            self.sync_public = True
        if hasattr(settings, 'TENANT_APPS'):
            self.tenant_apps = settings.TENANT_APPS
        if hasattr(settings, 'SHARED_APPS'):
            self.shared_apps = settings.SHARED_APPS
    def _notice(self, output):
        """Write a styled notice to stdout unless verbosity is silenced."""
        if int(self.options.get('verbosity', 1)) >= 1:
            self.stdout.write(self.style.NOTICE(output))
|
{
"content_hash": "72b29739e9d1ecaf4017473a01196acf",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 119,
"avg_line_length": 41.5792349726776,
"alnum_prop": 0.6132211854382967,
"repo_name": "ArtProcessors/django-tenant-schemas",
"id": "c3c28edd8b50144e55d630e0ac207915a1ae776a",
"size": "7609",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tenant_schemas/management/commands/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "26"
},
{
"name": "Python",
"bytes": "111505"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
import pyqtgraph as pg
from PyQt4 import QtCore, QtGui, uic
import numpy as np
# The QApplication must exist before any widgets are constructed.
app = QtGui.QApplication(sys.argv)
# Load the Qt Designer .ui file; Windows builds keep it in a subfolder.
ui_path = os.path.join(os.path.curdir, 'ui', 'QBGraph.ui')
if sys.platform == 'win32':
    ui_path = os.path.join(os.path.curdir, 'ui', 'Windows', 'QBGraph.ui')
form_class, base_class = uic.loadUiType(ui_path)
# NOTE(review): __all__ omits QBPlot and RDPlot — confirm whether they are
# meant to be importable via `from ... import *`.
__all__ = ['BarGraphItem']
class BarGraphItem(pg.GraphicsObject):
    """Bar graph item rendered into a cached QPicture.

    NOTE(review): appears to be a local copy of pyqtgraph's BarGraphItem —
    confirm against the upstream version before modifying.
    """
    def __init__(self, **opts):
        pg.GraphicsObject.__init__(self)
        # Bar geometry may be given as x/x0/x1+width and y/y0/y1+height;
        # pen/brush apply to all bars, pens/brushes per-bar.
        self.opts = dict(
            x=None,
            y=None,
            x0=None,
            y0=None,
            x1=None,
            y1=None,
            height=None,
            width=None,
            pen=None,
            brush=None,
            pens=None,
            brushes=None,
        )
        self.setOpts(**opts)
    def setOpts(self, **opts):
        """Merge new options and invalidate the cached picture."""
        self.opts.update(opts)
        self.picture = None
        self.update()
        self.informViewBoundsChanged()
    def drawPicture(self):
        """Render all bars into self.picture from the current options."""
        self.picture = QtGui.QPicture()
        p = QtGui.QPainter(self.picture)
        pen = self.opts['pen']
        pens = self.opts['pens']
        if pen is None and pens is None:
            pen = pg.getConfigOption('foreground')
        brush = self.opts['brush']
        brushes = self.opts['brushes']
        if brush is None and brushes is None:
            brush = (128, 128, 128)
        def asarray(x):
            # Pass through None/scalars/ndarrays; convert sequences.
            if x is None or np.isscalar(x) or isinstance(x, np.ndarray):
                return x
            return np.array(x)
        x = asarray(self.opts.get('x'))
        x0 = asarray(self.opts.get('x0'))
        x1 = asarray(self.opts.get('x1'))
        width = asarray(self.opts.get('width'))
        # Derive x0 (left edges) and width from whichever combination of
        # x/x0/x1/width the caller supplied.
        if x0 is None:
            if width is None:
                raise Exception('must specify either x0 or width')
            if x1 is not None:
                x0 = x1 - width
            elif x is not None:
                x0 = x - width/2.
            else:
                raise Exception('must specify at least one of x, x0, or x1')
        if width is None:
            if x1 is None:
                raise Exception('must specify either x1 or width')
            width = x1 - x0
        y = asarray(self.opts.get('y'))
        y0 = asarray(self.opts.get('y0'))
        y1 = asarray(self.opts.get('y1'))
        height = asarray(self.opts.get('height'))
        # Same derivation for the vertical direction; y0 defaults to 0.
        if y0 is None:
            if height is None:
                y0 = 0
            elif y1 is not None:
                y0 = y1 - height
            elif y is not None:
                y0 = y - height/2.
            else:
                y0 = 0
        if height is None:
            if y1 is None:
                raise Exception('must specify either y1 or height')
            height = y1 - y0
        p.setPen(pg.mkPen(pen))
        p.setBrush(pg.mkBrush(brush))
        for i in range(len(x0)):
            if pens is not None:
                p.setPen(pg.mkPen(pens[i]))
            if brushes is not None:
                p.setBrush(pg.mkBrush(brushes[i]))
            if np.isscalar(y0):
                y = y0
            else:
                y = y0[i]
            if np.isscalar(width):
                w = width
            else:
                w = width[i]
            p.drawRect(QtCore.QRectF(x0[i], y, w, height[i]))
        p.end()
        self.prepareGeometryChange()
    def paint(self, p, *args):
        # Lazily (re)build the picture, then replay it onto the painter.
        if self.picture is None:
            self.drawPicture()
        self.picture.play(p)
    def boundingRect(self):
        if self.picture is None:
            self.drawPicture()
        return QtCore.QRectF(self.picture.boundingRect())
class QBPlot(QtGui.QDialog, form_class):
    """Dialog-based bar plot colored by reading frame / ORF category."""
    def __init__(self, xtitle, ytitle,*args):
        super(QBPlot, self).__init__(*args)
        self.setupUi(self)
        self.x_axis_title = xtitle
        self.y_axis_title = ytitle
        # Global pyqtgraph colors: light grey background, black foreground.
        pg.setConfigOption('background', (226, 226, 226))
        pg.setConfigOption('foreground', 'k')
    def initialize_plot(self):
        """Create and embed the PlotWidget; must be called before plot()."""
        self.plot_widget = pg.PlotWidget()
        self.plot_widget.plotItem.showGrid(True, True, alpha=0.2)
        self.plot_widget.plotItem.setLabel('bottom', text=self.x_axis_title)
        self.plot_widget.plotItem.setLabel('left', text=self.y_axis_title)
        self.plot_widget.plotItem.setMouseEnabled(x=False, y=False)
        self.plot_widget_viewbox = self.plot_widget.plotItem.getViewBox()
        self.plot_widget.plotItem.enableAutoRange(axis=self.plot_widget_viewbox.XYAxes)
        self.plot_layout.addWidget(self.plot_widget)
        self.plot_widget.plotItem.addLegend()
        self.legend_added = 0
    def add_legends(self):
        """Add one legend entry per bar color category."""
        style = pg.PlotDataItem(pen=pg.mkPen({'color':'1A64B0','width': 4}))
        self.plot_widget.plotItem.legend.addItem(style, "In ORF")
        style = pg.PlotDataItem(pen=pg.mkPen({'color':'1CC5FF','width': 4}))
        self.plot_widget.plotItem.legend.addItem(style, "Upstream")
        style = pg.PlotDataItem(pen=pg.mkPen({'color':'777777','width': 4}))
        self.plot_widget.plotItem.legend.addItem(style, "Downstream / Out of Frame")
        style = pg.PlotDataItem(pen=pg.mkPen({'color':'D13A26','width': 4}))
        self.plot_widget.plotItem.legend.addItem(style, "Start/Stop of CDS")
    def plot(self, data, start, stop):
        """Plot rows of (position, count, ..., region, frame) as bars.

        NOTE(review): row 0 is skipped — presumably a header row; confirm.
        """
        self.plot_widget.clear()
        x_grey = []
        y_grey = []
        x_blue = []
        y_blue = []
        x_lightblue = []
        y_lightblue = []
        max_count = 0
        # Bucket rows by category; track the max count for marker scaling.
        for row, row_item in enumerate(data):
            if row != 0:
                if row_item[4] == "In Frame" and row_item[3] == "In ORF":
                    x_blue.append(int(row_item[0]))
                    y_blue.append(float(row_item[1]))
                    if max_count < float(row_item[1]):
                        max_count = float(row_item[1])
                elif row_item[4] == "In Frame" and row_item[3] == "Upstream":
                    x_lightblue.append(int(row_item[0]))
                    y_lightblue.append(float(row_item[1]))
                    if max_count < float(row_item[1]):
                        max_count = float(row_item[1])
                else:
                    x_grey.append(int(row_item[0]))
                    y_grey.append(float(row_item[1]))
                    if max_count < float(row_item[1]):
                        max_count = float(row_item[1])
        # Red markers below the axis flag the CDS start/stop positions.
        x_neg = [start, stop]
        if max_count > 1:
            y_neg = [max_count * 0.05 * -1, max_count * 0.05 * -1]
        else:
            y_neg = [-1, -1]
        self.plot_widget.addItem(BarGraphItem(x=x_blue, height=y_blue, width=5,
                                              pen=pg.mkPen({'color':'1A64B0','width': 1}),
                                              brush='1A64B0'))
        self.plot_widget.addItem(BarGraphItem(x=x_lightblue, height=y_lightblue, width=5,
                                              pen=pg.mkPen({'color':'1CC5FF','width': 1}),
                                              brush='1CC5FF'))
        self.plot_widget.addItem(BarGraphItem(x=x_grey, height=y_grey, width=5,
                                              pen=pg.mkPen({'color':'777777','width': 1}),
                                              brush='777777'))
        self.plot_widget.addItem(BarGraphItem(x=x_neg, height=y_neg, width=20,
                                              pen=pg.mkPen({'color':'D13A26','width': 1}),
                                              brush='D13A26'))
        if self.legend_added == 0:
            self.add_legends()
            self.legend_added = 1
    def clear_plot(self):
        """Remove all items from the plot."""
        self.plot_widget.clear()
class RDPlot(pg.PlotWidget):
    """Read-depth bar plot colored by whether a position is inside the ORF."""
    def __init__(self, xtitle, ytitle, *args):
        super(RDPlot, self).__init__(*args)
        self.x_axis_title = xtitle
        self.y_axis_title = ytitle
        self.plotItem.showGrid(True, True, alpha=0.2)
        self.plotItem.setLabel('bottom', text=self.x_axis_title)
        self.plotItem.setLabel('left', text=self.y_axis_title)
        self.plotItem.setMouseEnabled(x=False, y=False)
        self.plot_widget_viewbox = self.plotItem.getViewBox()
        self.plotItem.enableAutoRange(axis=self.plot_widget_viewbox.XYAxes)
        self.plotItem.addLegend()
        self.legend_added = 0
    def add_legends(self):
        """Add one legend entry per bar color category."""
        style = pg.PlotDataItem(pen=pg.mkPen({'color':'1A64B0','width': 8}))
        self.plotItem.legend.addItem(style, "In ORF")
        style = pg.PlotDataItem(pen=pg.mkPen({'color':'777777','width': 8}))
        self.plotItem.legend.addItem(style, "Not in CDS")
        style = pg.PlotDataItem(pen=pg.mkPen({'color':'D13A26','width': 8}))
        self.plotItem.legend.addItem(style, "Start/Stop of CDS")
    def plot(self, data, start, stop):
        """Plot (bp, count, orf) tuples as bars; mark CDS start/stop in red."""
        self.clear()
        x_grey = []
        y_grey = []
        x_blue = []
        y_blue = []
        max_count = 0
        # Bucket positions by ORF membership; track max for marker scaling.
        for bp, count, orf in data:
            if orf == "In ORF":
                x_blue.append(int(bp))
                y_blue.append(int(count))
                if max_count < count:
                    max_count = count
            else:
                x_grey.append(int(bp))
                y_grey.append(int(count))
                if max_count < count:
                    max_count = count
        # Red markers below the axis flag the CDS start/stop positions.
        x_neg = [start, stop]
        if max_count > 1:
            y_neg = [max_count * 0.05 * -1, max_count * 0.05 * -1]
        else:
            y_neg = [-1, -1]
        self.addItem(BarGraphItem(x=x_blue, height=y_blue, width=15,
                                  pen=pg.mkPen({'color':'1A64B0','width': 1}),
                                  brush='1A64B0'))
        self.addItem(BarGraphItem(x=x_grey, height=y_grey, width=15,
                                  pen=pg.mkPen({'color':'777777','width': 1}),
                                  brush='777777'))
        self.addItem(BarGraphItem(x=x_neg, height=y_neg, width=20,
                                  pen=pg.mkPen({'color':'D13A26','width': 1}),
                                  brush='D13A26'))
        if self.legend_added == 0:
            self.add_legends()
            self.legend_added = 1
|
{
"content_hash": "803a2b29170a1f2b2f8bcf8e27cba69d",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 90,
"avg_line_length": 35.423611111111114,
"alnum_prop": 0.5081356596745736,
"repo_name": "emptyewer/DEEPN",
"id": "8f087b5edeb34102bec011ad48f5246e8ccd7441",
"size": "10202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "functions/plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "833"
},
{
"name": "Inno Setup",
"bytes": "1623"
},
{
"name": "OpenEdge ABL",
"bytes": "41306261"
},
{
"name": "Python",
"bytes": "1108317"
},
{
"name": "Shell",
"bytes": "1944"
},
{
"name": "TeX",
"bytes": "95537"
}
],
"symlink_target": ""
}
|
__author__ = 'ntrepid8'
import agilebot.cmd_util
import logging
from logging import NullHandler
import json
import sys
import argparse
from agilebot import util
from functools import partial
import os
# Module logger; NullHandler keeps library use from emitting warnings when
# the application has not configured logging.
logger = logging.getLogger('agilebot.slack')
logger.addHandler(NullHandler())
def create_bot(args, conf):
    """Fold the slack CLI arguments into the config, then build the bot."""
    merged_conf = util.update_config_group('slack', args, conf)
    return agilebot.cmd_util.create_bot(merged_conf, logger)
def cmd_slack_post(args, conf):
    """Post a message to Slack and print the JSON response; exit 1 on failure."""
    bot = create_bot(args, conf)
    try:
        response = bot.slack.post_msg(
            text=args.text,
            webhook_url=args.webhook_url,
            channel=args.channel,
            icon_emoji=args.icon_emoji,
            username=args.username
        )
    except Exception as e:
        # surface the error and signal failure to the shell
        logger.error('{}'.format(e))
        sys.exit(1)
    else:
        print(json.dumps(response))
def cmd_slack_help(parser, text=None):
    """Print the help screen for the given (sub-)parser."""
    label = text or 'slack'
    logger.debug('show {} help'.format(label))
    parser.print_help()
def sub_command(main_subparsers):
    """Register the `slack` sub-command and its `post` sub-sub-command.

    main_subparsers -- the subparsers object of the top-level CLI parser.
    Returns the created slack parser so the caller can extend it.
    """
    # slack sub-command
    slack_parser = main_subparsers.add_parser('slack', help='slack interaction')
    subparsers = slack_parser.add_subparsers(help='sub-commands', dest='subparser_1')
    slack_parser.set_defaults(func_help=partial(cmd_slack_help, slack_parser, 'slack'))
    # SUB-COMMAND: post (p)
    p_desc = 'Post a message to a slack channel.'
    p_parser = subparsers.add_parser(
        'post',
        aliases=['p'],
        description=p_desc,
        formatter_class=argparse.MetavarTypeHelpFormatter,
        help=p_desc)
    # p required arguments
    p_req_group = p_parser.add_argument_group(
        'required arguments',
    )
    p_req_group.add_argument('--text', '-t', required=True, type=str, help='text content of the message')
    # p additional required arguments: CLI flag, environment variable, or config file
    p_add_group = p_parser.add_argument_group(
        'additional required arguments',
        'Required and may be specified here or in the configuration file.'
    )
    p_add_group.add_argument('--channel', type=str, help='Slack channel name')
    p_add_group.add_argument('--username', type=str, help='username of the bot')
    p_add_group.add_argument('--webhook-url', type=str, help='Slack url to POST the message to')
    p_add_group.set_defaults(
        channel=os.environ.get('SLACK_CHANNEL'),
        username=os.environ.get('SLACK_USERNAME'),
        webhook_url=os.environ.get('SLACK_WEBHOOK_URL'),
    )
    # p optional arguments
    p_opt_group = p_parser.add_argument_group(
        'additional optional arguments',
        'Optional and may be specified here or in the configuration file.'
    )
    p_opt_group.add_argument('--icon-emoji', default=':ghost:', type=str, help='emoji to use for the bot icon')
    # BUG FIX: set_defaults() overrides the default given to add_argument(), so
    # without an explicit fallback an unset SLACK_ICON_EMOJI silently replaced
    # the documented ':ghost:' default with None.
    p_opt_group.set_defaults(
        icon_emoji=os.environ.get('SLACK_ICON_EMOJI', ':ghost:'),
    )
    # p defaults
    p_parser.set_defaults(func=cmd_slack_post)
    return slack_parser
|
{
"content_hash": "3c7b558887880738656fd2f745fc8f45",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 111,
"avg_line_length": 32.01075268817204,
"alnum_prop": 0.6540141081625798,
"repo_name": "ntrepid8/agilebot",
"id": "75646676d5bc0342ef47f8a75478843d28a4e232",
"size": "2977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agilebot/cmd_slack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44599"
}
],
"symlink_target": ""
}
|
'''Unit tests for grit.gather.policy_json'''
import os
import re
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
import unittest
from grit.gather import muppet_strings
from grit.gather import policy_json
class PolicyJsonUnittest(unittest.TestCase):
  """Tests for the policy_json gatherer (Python 2 codebase).

  Each test feeds a policy-template source string (a Python-literal dict,
  consumed via eval) to PolicyJson, parses it, and checks the number of
  extracted cliques plus the round-tripped Translate('en') output.
  """

  def GetExpectedOutput(self, original):
    """Evaluate `original` and strip the translator-only 'desc' keys from
    messages, yielding what Translate() is expected to emit."""
    expected = eval(original)
    for key, message in expected['messages'].iteritems():
      del message['desc']
    return expected

  def testEmpty(self):
    """No policies and no messages -> no cliques, identity round-trip."""
    original = "{'policy_definitions': [], 'messages': {}}"
    gatherer = policy_json.PolicyJson(original)
    gatherer.Parse()
    self.failUnless(len(gatherer.GetCliques()) == 0)
    self.failUnless(eval(original) == eval(gatherer.Translate('en')))

  def testGeneralPolicy(self):
    """A policy with caption/desc/label plus one message -> 4 cliques."""
    original = (
        "{"
        "  'policy_definitions': ["
        "    {"
        "      'name': 'HomepageLocation',"
        "      'type': 'string',"
        "      'supported_on': ['chrome.*:8-'],"
        "      'features': {'dynamic_refresh': 1},"
        "      'example_value': 'http://chromium.org',"
        "      'caption': 'nothing special 1',"
        "      'desc': 'nothing special 2',"
        "      'label': 'nothing special 3',"
        "    },"
        "  ],"
        "  'messages': {"
        "    'msg_identifier': {"
        "      'text': 'nothing special 3',"
        "      'desc': 'nothing special descr 3',"
        "    }"
        "  }"
        "}")
    gatherer = policy_json.PolicyJson(original)
    gatherer.Parse()
    self.failUnless(len(gatherer.GetCliques()) == 4)
    expected = self.GetExpectedOutput(original)
    self.failUnless(expected == eval(gatherer.Translate('en')))

  def testEnum(self):
    """An enum item caption is translatable -> 1 clique."""
    original = (
        "{"
        "  'policy_definitions': ["
        "    {"
        "      'name': 'Policy1',"
        "      'items': ["
        "        {"
        "          'name': 'Item1',"
        "          'caption': 'nothing special',"
        "        }"
        "      ]"
        "    },"
        "  ],"
        "  'messages': {}"
        "}")
    gatherer = policy_json.PolicyJson(original)
    gatherer.Parse()
    self.failUnless(len(gatherer.GetCliques()) == 1)
    expected = self.GetExpectedOutput(original)
    self.failUnless(expected == eval(gatherer.Translate('en')))

  def testSubPolicy(self):
    """A caption inside a nested 'policies' group -> 1 clique."""
    original = (
        "{"
        "  'policy_definitions': ["
        "    {"
        "      'policies': ["
        "        {"
        "          'name': 'Policy1',"
        "          'caption': 'nothing special',"
        "        }"
        "      ]"
        "    },"
        "  ],"
        "  'messages': {}"
        "}")
    gatherer = policy_json.PolicyJson(original)
    gatherer.Parse()
    self.failUnless(len(gatherer.GetCliques()) == 1)
    expected = self.GetExpectedOutput(original)
    self.failUnless(expected == eval(gatherer.Translate('en')))

  def testEscapingAndLineBreaks(self):
    """Backslashes, both quote styles, and embedded newlines survive the
    parse/translate round-trip -> 6 cliques."""
    original = """{
      'policy_definitions': [],
      'messages': {
        'msg1': {
          # The following line will contain two backslash characters when it
          # ends up in eval().
          'text': '''backslashes, Sir? \\\\''',
          'desc': '',
        },
        'msg2': {
          'text': '''quotes, Madam? "''',
          'desc': '',
        },
        'msg3': {
          # The following line will contain two backslash characters when it
          # ends up in eval().
          'text': 'backslashes, Sir? \\\\',
          'desc': '',
        },
        'msg4': {
          'text': "quotes, Madam? '",
          'desc': '',
        },
        'msg5': {
          'text': '''what happens
with a newline?''',
          'desc': ''
        },
        'msg6': {
          # The following line will contain a backslash+n when it ends up in
          # eval().
          'text': 'what happens\\nwith a newline? (Episode 1)',
          'desc': ''
        }
      }
    }"""
    gatherer = policy_json.PolicyJson(original)
    gatherer.Parse()
    self.failUnless(len(gatherer.GetCliques()) == 6)
    expected = self.GetExpectedOutput(original)
    self.failUnless(expected == eval(gatherer.Translate('en')))

  def testPlaceholders(self):
    """<ph> placeholders are extracted with presentation and example text."""
    original = """{
      'policy_definitions': [
        {
          'name': 'Policy1',
          'caption': '''Please install
<ph name="PRODUCT_NAME">$1<ex>Google Chrome</ex></ph>.''',
        },
      ],
      'messages': {}
    }"""
    gatherer = policy_json.PolicyJson(original)
    gatherer.Parse()
    self.failUnless(len(gatherer.GetCliques()) == 1)
    # Translate() should render the placeholder back as its original '$1'.
    expected = eval(re.sub('<ph.*ph>', '$1', original))
    self.failUnless(expected == eval(gatherer.Translate('en')))
    self.failUnless(gatherer.GetCliques()[0].translateable)
    msg = gatherer.GetCliques()[0].GetMessage()
    self.failUnless(len(msg.GetPlaceholders()) == 1)
    ph = msg.GetPlaceholders()[0]
    self.failUnless(ph.GetOriginal() == '$1')
    self.failUnless(ph.GetPresentation() == 'PRODUCT_NAME')
    self.failUnless(ph.GetExample() == 'Google Chrome')

  def testGetDescription(self):
    """_GetDescription builds translator notes from item name and kind."""
    gatherer = policy_json.PolicyJson({})
    self.assertEquals(
        gatherer._GetDescription({'name': 'Policy1'}, 'policy', None, 'desc'),
        'Description of the policy named Policy1')
    self.assertEquals(
        gatherer._GetDescription({'name': 'Plcy2'}, 'policy', None, 'caption'),
        'Caption of the policy named Plcy2')
    self.assertEquals(
        gatherer._GetDescription({'name': 'Plcy3'}, 'policy', None, 'label'),
        'Label of the policy named Plcy3')
    self.assertEquals(
        gatherer._GetDescription({'name': 'Item'}, 'enum_item',
                                 {'name': 'Policy'}, 'caption'),
        'Caption of the option named Item in policy Policy')
# Allow running this test file directly.
if __name__ == '__main__':
  unittest.main()
|
{
"content_hash": "931837f1b606449b03ff4a5209b74929",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 79,
"avg_line_length": 32.05945945945946,
"alnum_prop": 0.5191367391670881,
"repo_name": "Crystalnix/house-of-life-chromium",
"id": "9ebddb8d5aef5ec2d4b8e3c5f3efbd22012f9ab4",
"size": "6120",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/grit/grit/gather/policy_json_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "3418"
},
{
"name": "C",
"bytes": "88445923"
},
{
"name": "C#",
"bytes": "73756"
},
{
"name": "C++",
"bytes": "77228136"
},
{
"name": "Emacs Lisp",
"bytes": "6648"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Go",
"bytes": "3744"
},
{
"name": "Java",
"bytes": "11354"
},
{
"name": "JavaScript",
"bytes": "6191433"
},
{
"name": "Objective-C",
"bytes": "4023654"
},
{
"name": "PHP",
"bytes": "97796"
},
{
"name": "Perl",
"bytes": "92217"
},
{
"name": "Python",
"bytes": "5604932"
},
{
"name": "Ruby",
"bytes": "937"
},
{
"name": "Shell",
"bytes": "1234672"
},
{
"name": "Tcl",
"bytes": "200213"
}
],
"symlink_target": ""
}
|
'''
Sample config JSON stanza:
{
"plugins": [
{"id": "http://bibfra.me/tool/pybibframe#labelizer",
"lookup": {
"http://bibfra.me/vocab/lite/Foobar": {
"separator": " ",
"marcOrder": True,
"properties": ["http://bibfra.me/vocab/lite/title","http://bibfra.me/vocab/lite/name"]
},
        "http://bibfra.me/vocab/lite/Grobnitz": [ {
            "separator": "lambda ctx: '-' if ctx['nextProperty'] == 'http://bibfra.me/vocab/lite/name' else ' '",
            "wrapper": "lambda ctx: '[]' if 'medium' in ctx['currentProperty'] else None",
"multivalSeparator": " | ",
"marcOrder": True,
"properties": ["http://bibfra.me/vocab/lite/title","http://bibfra.me/vocab/lite/name"]
},
{
"separator": ' ',
"marcOrder": True,
"properties": ["http://bibfra.me/vocab/lite/p1", "http://bibfra.me/vocab/lite/p2"]
}
}
"default-label": "UNKNOWN LABEL"
},
]
}
The configuration is specified using a dictionary with type URIs as keys, and one or more
rule dictionaries as values (a single rule dict requires no list enclosure). If the
first rule dictionary fails to produce a label, the next rule dictionary is used. If at
the end of this process no label has been produced, the label specified in "default-label"
will be returned. Each rule dictionary can contain these keys; "separator" which
specifies the string used to separate property values, "multivalSeparator" which specifies the
separator used when the property value is multi-valued, "wrapper" which specifies
a string of length two whose respective characters will be used to wrap the property value,
"marcOrder" a boolean that indicates whether the properties values should be ordered as
they were encountered in the MARC if True (otherwise the order in the "properties" key
will be used), and "properties" containing the list of property URIs.
"separator" and "wrapper" can be callables that return strings when provided a context
dictionary describing the state of the labelizing process. The four keys in the context
are currentProperty, currentValue, nextProperty, and nextValue.
Note that as the configuration needs to be represented as JSON, the callables are
encapsulated as strings. As non-callables are also strings, there's ambiguity there
that we resolve by asserting that any string longer than 5 characters will be
treated as a callable. Of course, if this configuration is consumed as a Python
dictionary, then the values can be actual callables.
Already built into demo config:
marc2bf -c test/resource/democonfig.json --mod=bibframe.plugin test/resource/gunslinger.marc.xml
'''
import os
import json
import itertools
import asyncio
from itertools import tee, zip_longest
from versa import I, VERSA_BASEIRI, ORIGIN, RELATIONSHIP, TARGET
from versa.util import simple_lookup
from amara3 import iri
from bibframe import BFZ, BFLC, g_services, BF_INIT_TASK, BF_MARCREC_TASK, BF_MATRES_TASK, BF_FINAL_TASK
RDF_NAMESPACE = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
RDFS_NAMESPACE = 'http://www.w3.org/2000/01/rdf-schema#'
# Versa relationship that marks a resource's type (used to find labelizable resources)
VTYPE_REL = I(iri.absolutize('type', VERSA_BASEIRI))
# Property under which generated labels are stored on each resource
RDFS_LABEL = RDFS_NAMESPACE + 'label'
def pairwise(iterable):
    """Return an iterator of overlapping pairs (s0, s1), (s1, s2), ...

    The final pair is (s_last, None), so every element appears once in the
    first position.
    """
    current, ahead = tee(iterable)
    next(ahead, None)
    return zip_longest(current, ahead, fillvalue=None)
#A plug-in is a series of callables, each of which handles a phase of
#Process
#The only phase predefined for all plug-ins is BF_INIT_TASK
#One convenient way to organize the Plug-in is as a class
#In this case we want to create a separate instance for each full processing pass (bfconvert)
class labelizer(object):
    """pybibframe plug-in that attaches rdfs:label statements to typed resources.

    One instance is created per full conversion pass (bfconvert); __init__
    registers the other phase handlers on the plug-in info dict.
    """
    PLUGIN_ID = 'http://bibfra.me/tool/pybibframe#labelizer'
    def __init__(self, pinfo, config=None):
        #print ('BF_INIT_TASK', linkreport.PLUGIN_ID)
        # config: {'lookup': {type-IRI: rule or [rules]}, 'default-label': str}
        self._config = config or {}
        #If you need state maintained throughout a full processing pass, you can use instance attributes
        #Now set up the other plug-in phases
        pinfo[BF_MARCREC_TASK] = self.handle_record_links
        pinfo[BF_MATRES_TASK] = self.handle_materialized_resource
        pinfo[BF_FINAL_TASK] = self.finalize
        return
    #asyncio.Task is used to manage the tasks, so it's a good idea to use the standard decorator
    #if you don't know what that means you should still be OK just using the sample syntax below as is, and just writing a regular function
    #But you can squeeze out a lot of power by getting to know the wonders of asyncio.Task
    @asyncio.coroutine
    def handle_record_links(self, model, params):
        '''
        Task coroutine of the main event loop for MARC conversion, called with
        each converted record; builds rdfs:label links for every typed resource
        that matches a configured lookup rule.
        model -- raw Versa model with converted resource information from each MARC/XML record processed
        params -- parameters passed in from processing:
            params['vocabbase']: configured default vocabulary base IRI
            params['workid']: ID of the work constructed from the MARC record
            params['instanceid']: list of IDs of instances constructed from the MARC record
        '''
        #print ('BF_MARCREC_TASK', linkreport.PLUGIN_ID)
        #Get the configured default vocabulary base IRI
        vocabbase = params['vocabbase']
        for obj,_r,typ,_a in model.match(None, VTYPE_REL, None):
            # build labels based on model order, iterating over every property of
            # every resource, and building the label if that property is consulted
            rule = self._config['lookup'].get(typ)
            if rule is None: continue
            # a single rule dict is treated as a one-element rule list
            rules = rule if isinstance(rule, list) else [rule]
            label = ''
            for rule in rules:
                def chunk_eval(s):
                    # used when configuration is stored in JSON and one of these labelizer instructions is an eval-able string
                    # a known Python injection attack vector, so mentioned in README
                    # heuristic: any string longer than 5 chars is assumed to be code
                    if isinstance(s, str) and len(s) > 5:
                        s = eval(s, {'I': I}, locals())
                    return s
                marc_order = rule.get('marcOrder', False)
                separator = chunk_eval(rule.get('separator', ' '))
                wrapper = chunk_eval(rule.get('wrapper', None))
                multivalsep = chunk_eval(rule.get('multivalSeparator', ' | '))
                props = rule.get('properties', [])
                # choose property ordering: MARC encounter order vs configured order
                if marc_order:
                    link_stream = pairwise((l for l in model.match(obj, None, None) if l[1] in props))
                else:
                    link_stream = pairwise((l for p in props for l in model.match(obj, p, None)))
                #print("LABELIZING {} of type {}".format(obj, typ))
                # pairwise() lets us peek at the next link to pick the right separator
                for (link1, link2) in link_stream:
                    _o1,rel1,target1,_a1 = link1
                    _o2,rel2,target2,_a2 = link2 if link2 is not None else (None, None, None, None)
                    # context handed to callable separator/wrapper settings
                    ctx = {
                        'currentProperty': rel1,
                        'currentValue': target1,
                        'nextProperty': rel2,
                        'nextValue': target2,
                    }
                    _wrapper = wrapper(ctx) if callable(wrapper) else wrapper
                    if _wrapper:
                        # wrapper is a 2-char string: prefix char + suffix char
                        target1 = _wrapper[0]+target1+_wrapper[1]
                    label += target1
                    if rel2 == rel1:
                        # next value is another value of the same property
                        _multivalsep = multivalsep(ctx) if callable(multivalsep) else multivalsep
                        label += _multivalsep
                    elif rel2 is not None:
                        _separator = separator(ctx) if callable(separator) else separator
                        label += _separator
                    #print("current label", label)
                if label:
                    model.add(obj, I(RDFS_LABEL), label)
                    break # we've found a rule that produces a label, so skip other rules
                label = ''
            if not label and 'default-label' in self._config:
                # if we've gone through all rules and not produced a label, yield specified default
                model.add(obj, I(RDFS_LABEL), self._config['default-label'])
        return
    @asyncio.coroutine
    def handle_materialized_resource(self, model, params):
        '''
        Task coroutine of the main event loop for MARC conversion, called whenever a new resource is materialized
        In this case just log the materialization for debugging
        You can set the value of params['renamed_materialized_id'] to rename the resource
        '''
        eid = params['materialized_id']
        first_seen = params['first_seen']
        logger = params['logger']
        logger.debug('Materialized resource with ID {0}{1}'.format(eid, ' (first encounter)' if first_seen else ''))
        return
    @asyncio.coroutine
    def finalize(self):
        '''
        Task coroutine of the main event loop for MARC conversion, called to finalize processing
        No cleanup is needed for this plug-in
        '''
        return
# Registry consumed by pybibframe: maps the plug-in ID to its init-phase entry point.
PLUGIN_INFO = {labelizer.PLUGIN_ID: {BF_INIT_TASK: labelizer}}
|
{
"content_hash": "f3516b3a823c81393183baaf7ddd715a",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 139,
"avg_line_length": 44.60952380952381,
"alnum_prop": 0.6203031596925704,
"repo_name": "zepheira/pybibframe",
"id": "05199b88a3e7e0b4c41d6e6d9c943101d36cdaf2",
"size": "9368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/plugin/labelizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "504071"
}
],
"symlink_target": ""
}
|
from test_support import verbose, findfile
import tokenize, os, sys

# Smoke test (Python 2): run the tokenizer over a known-good source file.
# Any tokenization error raises and fails the test; there is no expected output.
if verbose:
    print 'starting...'
file = open(findfile('tokenize_tests'+os.extsep+'py'))
tokenize.tokenize(file.readline)
if verbose:
    print 'finished'
|
{
"content_hash": "19747c60537cdbae7e4413ebd738e6b5",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 54,
"avg_line_length": 25.11111111111111,
"alnum_prop": 0.7345132743362832,
"repo_name": "mariaantoanelam/Licenta",
"id": "7ac5624e9452101661ca38551ebdecdd3d6cb4e7",
"size": "226",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "Lib/test/test_tokenize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31035"
},
{
"name": "HTML",
"bytes": "134311"
},
{
"name": "Java",
"bytes": "161404"
},
{
"name": "JavaScript",
"bytes": "11470"
},
{
"name": "Python",
"bytes": "4053763"
}
],
"symlink_target": ""
}
|
"""functions to get decorative/informative text out of strings..."""
import re
import unicodedata
from translate.lang import data
def spacestart(str1):
    """returns all the whitespace from the start of the string"""
    prefix = u""
    for ch in str1:
        if not ch.isspace():
            break
        prefix += ch
    return prefix
def spaceend(str1):
    """returns all the whitespace from the end of the string"""
    suffix = u""
    for ch in reversed(str1):
        if not ch.isspace():
            break
        suffix = ch + suffix
    return suffix
def puncstart(str1, punctuation):
    """returns all the punctuation from the start of the string"""
    # whitespace embedded in the leading punctuation run is included too
    prefix = u""
    for ch in str1:
        if not (ch in punctuation or ch.isspace()):
            break
        prefix += ch
    return prefix
def puncend(str1, punctuation):
    """returns all the punctuation from the end of the string"""
    # An implementation with regular expressions was slightly slower.
    suffix = u""
    for ch in reversed(str1):
        if not (ch in punctuation or ch.isspace()):
            break
        suffix = ch + suffix
    # non-breaking spaces in the trailing run are normalized to plain spaces
    return suffix.replace(u"\u00a0", u" ")
def ispurepunctuation(str1):
    """checks whether the string is entirely punctuation

    Returns False if any alphanumeric character is present; otherwise returns
    the string's length (an int, so the empty string yields the falsy 0).
    """
    if any(c.isalnum() for c in str1):
        return False
    return len(str1)
def isvalidaccelerator(accelerator, acceptlist=None):
    """returns whether the given accelerator character is valid

    @type accelerator: character
    @param accelerator: A character to be checked for accelerator validity
    @type acceptlist: String
    @param acceptlist: A list of characters that are permissible as accelerators
    @rtype: Boolean
    @return: True if the supplied character is an acceptable accelerator
    """
    assert isinstance(accelerator, unicode)
    assert isinstance(acceptlist, unicode) or acceptlist is None
    if not accelerator:
        return False
    if acceptlist is not None:
        acceptlist = data.normalize(acceptlist)
        return accelerator in acceptlist
    # Old code path - ensures that we don't get a large number of regressions
    accelerator = accelerator.replace("_", "")
    if accelerator in u"-?":
        return True
    if not accelerator.isalnum():
        return False
    # We don't want accelerators on characters with diacritics: if the
    # character decomposes, it carries combining marks.
    decomposition = unicodedata.decomposition(accelerator)
    # Next we strip out any extra information like <this>
    decomposition = re.sub("<[^>]+>", "", decomposition).strip()
    return decomposition.count(" ") == 0
def findaccelerators(str1, accelmarker, acceptlist=None):
    """returns all the accelerators and locations in str1 marked with a given marker

    Returns (valid, invalid) lists of (position, accelerator-char) tuples.
    """
    good = []
    bad = []
    pos = str1.find(accelmarker)
    while pos >= 0:
        marker_start = pos
        pos += len(accelmarker)
        # we assume accelerators are single characters
        if pos + 1 > len(str1):
            break
        candidate = str1[pos:pos + 1]
        pos += 1
        if isvalidaccelerator(candidate, acceptlist):
            good.append((marker_start, candidate))
        else:
            bad.append((marker_start, candidate))
        pos = str1.find(accelmarker, pos)
    return good, bad
def findmarkedvariables(str1, startmarker, endmarker, ignorelist=()):
    """returns all the variables and locations in str1 marked with a given marker

    endmarker may be:
      - None: the variable ends at the first non-alphanumeric/underscore char
      - an int: the variable has that fixed length (usually 1)
      - a string: the literal end marker to search for
    Returns a list of (startmarker-position, variable-name) tuples; names in
    ignorelist are skipped.
    Note: ignorelist's default was a mutable list literal; changed to an
    immutable tuple (membership behavior is identical, shared-state risk gone).
    """
    variables = []
    currentpos = 0
    while currentpos >= 0:
        variable = None
        currentpos = str1.find(startmarker, currentpos)
        if currentpos >= 0:
            startmatch = currentpos
            currentpos += len(startmarker)
            if endmarker is None:
                # handle case without an end marker - use any non-alphanumeric
                # character as the end marker, var must be len > 1
                endmatch = currentpos
                for n in range(currentpos, len(str1)):
                    if not (str1[n].isalnum() or str1[n] == '_'):
                        endmatch = n
                        break
                if currentpos == endmatch: endmatch = len(str1)
                if currentpos < endmatch:
                    variable = str1[currentpos:endmatch]
                currentpos = endmatch
            elif type(endmarker) == int:
                # setting endmarker to an int means it is a fixed-length variable string (usually endmarker==1)
                endmatch = currentpos + endmarker
                if endmatch > len(str1): break
                variable = str1[currentpos:endmatch]
                currentpos = endmatch
            else:
                endmatch = str1.find(endmarker, currentpos)
                if endmatch == -1: break
                # search backwards in case there's an intervening startmarker (if not it's OK)...
                start2 = str1.rfind(startmarker, currentpos, endmatch)
                if start2 != -1:
                    startmatch2 = start2
                    start2 += len(startmarker)
                    if start2 != currentpos:
                        currentpos = start2
                        startmatch = startmatch2
                variable = str1[currentpos:endmatch]
                currentpos = endmatch + len(endmarker)
            if variable is not None and variable not in ignorelist:
                # only accept names that look like identifiers/dotted names
                if not variable or variable.replace("_","").replace(".","").isalnum():
                    variables.append((startmatch, variable))
    return variables
def getaccelerators(accelmarker, acceptlist=None):
    """returns a function that gets a list of accelerators marked using accelmarker"""
    def getmarkedaccelerators(str1):
        """returns all the accelerators in str1 marked with a given marker"""
        good_locs, bad_locs = findaccelerators(str1, accelmarker, acceptlist)
        # drop the positions, keep just the accelerator characters
        return ([accel for _pos, accel in good_locs],
                [accel for _pos, accel in bad_locs])
    return getmarkedaccelerators
def getvariables(startmarker, endmarker):
    """returns a function that gets a list of variables marked using startmarker and endmarker"""
    def getmarkedvariables(str1):
        """returns all the variables in str1 marked with a given marker"""
        # drop the positions, keep just the variable names
        return [name for _pos, name in
                findmarkedvariables(str1, startmarker, endmarker)]
    return getmarkedvariables
def getnumbers(str1):
    """returns any numbers that are in the string

    A "number" is a run of digits, optionally containing interior periods and
    the degree sign; trailing periods are dropped (held back via carryperiod
    until another digit follows).
    """
    # TODO: handle locale-based periods e.g. 2,5 for Afrikaans
    # NOTE: this module is Python 2 — `unicode` is the py2 builtin
    assert isinstance(str1, unicode)
    numbers = []
    innumber = False        # currently inside a number run
    degreesign = u'\xb0'
    lastnumber = ""         # number accumulated so far
    carryperiod = ""        # periods seen but not yet confirmed interior
    for chr1 in str1:
        if chr1.isdigit():
            innumber = True
        elif innumber:
            if not (chr1 == '.' or chr1 == degreesign):
                # run ended: flush the accumulated number
                innumber = False
                if lastnumber:
                    numbers.append(lastnumber)
                lastnumber = ""
        if innumber:
            if chr1 == degreesign:
                lastnumber += chr1
            elif chr1 == '.':
                # hold the period back; only emit it if a digit follows
                carryperiod += chr1
            else:
                lastnumber += carryperiod + chr1
                carryperiod = ""
        else:
            carryperiod = ""
    # flush a number that ran to the end of the string
    if innumber:
        if lastnumber:
            numbers.append(lastnumber)
    return numbers
def getfunctions(str1, punctuation):
    """returns the functions() that are in a string, while ignoring the trailing
    punctuation in the given parameter"""
    # parentheses must survive stripping, or "foo()" would lose its suffix
    trimmable = punctuation.replace("(", "").replace(")", "")
    found = []
    for word in str1.split():
        stripped = word.rstrip(trimmable)
        if stripped.endswith("()"):
            found.append(stripped)
    return found
def getemails(str1):
    """returns the email addresses that are in a string"""
    email_pattern = re.compile(r'[\w\.\-]+@[\w\.\-]+')
    return email_pattern.findall(str1)
def geturls(str1):
    """returns the URIs in a string"""
    # three alternatives: http(s) URLs, bare www. hosts, and ftp URLs
    url_pattern = re.compile(
        r'https?:[\w/\.:;+\-~\%#\$?=&,()]+'
        r'|www\.[\w/\.:;+\-~\%#\$?=&,()]+'
        r'|ftp:[\w/\.:;+\-~\%#?=&,]+')
    return url_pattern.findall(str1)
def countaccelerators(accelmarker, acceptlist=None):
    """returns a function that counts the number of accelerators marked with the given marker"""
    def countmarkedaccelerators(str1):
        """returns (valid-count, invalid-count) of accelerators in str1"""
        good_locs, bad_locs = findaccelerators(str1, accelmarker, acceptlist)
        return len(good_locs), len(bad_locs)
    return countmarkedaccelerators
|
{
"content_hash": "40b0dcbb8e801d8ed272707f890a3d52",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 127,
"avg_line_length": 38.743589743589745,
"alnum_prop": 0.6061107434370174,
"repo_name": "dbbhattacharya/kitsune",
"id": "ecee9ec3fc073640859f5ae1a8b76a9c426a29f9",
"size": "9894",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "vendor/packages/translate-toolkit/translate/filters/decoration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "276585"
},
{
"name": "HTML",
"bytes": "600145"
},
{
"name": "JavaScript",
"bytes": "800276"
},
{
"name": "Python",
"bytes": "2762831"
},
{
"name": "Shell",
"bytes": "6720"
},
{
"name": "Smarty",
"bytes": "1752"
}
],
"symlink_target": ""
}
|
"""Measure the memory growth during a function call.
"""
from __future__ import print_function

# BUG FIX: sys was used below (sys.version_info) but never imported,
# which made importing this module raise NameError.
import sys

from guppy import hpy #1

# Python 2 compatibility: use the lazy range.
if sys.version_info.major < 3:
    range = xrange
def check_memory_growth(function, *args, **kwargs): #2
    """Measure the memory usage of `function`.

    Returns the heap-size delta (bytes) caused by calling
    function(*args, **kwargs).
    """
    heap_watcher = hpy() #3
    heap_watcher.setref() #4
    baseline = heap_watcher.heap().size #5
    function(*args, **kwargs) #6
    return heap_watcher.heap().size - baseline #7
# Demo: compare a side-effect-free function against one that grows global state.
# The numbered #N markers are cross-references to the handout text.
if __name__ == '__main__':
    def test():
        """Do some tests with different memory usage patterns.
        """
        def make_big(number): #8
            """Function without side effects.
            It cleans up all used memory after it returns.
            """
            return range(number)
        data = [] #9
        def grow(number):
            """Function with side effects on global list.
            """
            for x in range(number):
                data.append(x) #10
        size = int(1e6)
        print('memory make_big:', check_memory_growth(make_big,
                                                      size)) #11
        print('memory grow:', check_memory_growth(grow, size)) #12
    test()
|
{
"content_hash": "db6dde34fca075e9ac92548b2249101b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 67,
"avg_line_length": 33.888888888888886,
"alnum_prop": 0.42360655737704916,
"repo_name": "rawrgulmuffins/presentation_notes",
"id": "98381db827349089ae314a9589ede761d398fbeb",
"size": "1555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycon2016/tutorials/measure_dont_guess/handout/measuring/memory_growth_hpy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1200602"
},
{
"name": "Python",
"bytes": "152253"
}
],
"symlink_target": ""
}
|
import requests
from requests import Request, Session
from requests.packages.urllib3.util import Retry
from requests.adapters import HTTPAdapter
from datetime import datetime, timezone
from time import sleep
import lxml
import lxml.html as lh
from urllib.parse import quote, urlsplit
import re
from operator import itemgetter
from copy import deepcopy
#tmp import
# from lxml import etree
from lxml.etree import strip_elements
# import logging
# logging.basicConfig(level=logging.DEBUG)
# import time
# def timing(f):
# def wrap(*args):
# time1 = time.time()
# ret = f(*args)
# time2 = time.time()
# print("{} function took {:0.3f} ms".format(f.__name__, (time2-time1)*1000.0))
# return ret
# return wrap
class TwitterClient():
FIDDLER_DEBUG = False
@staticmethod
def init_default_session(retrys=5,backoff_factor=0.1):
session = Session()
session.headers.update(
{'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36',
'Accept-Encoding' : 'gzip, deflate, sdch, br',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Language': 'en-GB,en-US;q=0.8,en;q=0.6',
'X-Requested-With': 'XMLHttpRequest'})
retries = Retry(total=retrys,
backoff_factor=backoff_factor,
status_forcelist=[ 500, 502, 503, 504 ])
session.mount('https://', HTTPAdapter(max_retries=retries))
if TwitterClient.FIDDLER_DEBUG:
proxies = {'http': 'http://127.0.0.1:8888', 'https': 'https://127.0.0.1:8888'}
session.proxies.update(proxies)
return session
def __init__(self,
session=None,
timeout=12,
continue_on_empty_result=True):
if session is None:
session = self.init_default_session()
self.session = session
self.timeout = timeout
self.continue_on_empty_result = continue_on_empty_result
self.search_url = 'https://twitter.com/i/search/timeline'
self.user_url = 'https://twitter.com/i/profiles/show/{username}/timeline/tweets'
def search_query(self, queryBuilder, raw_query_str=None):
if raw_query_str is None:
raw_query_str = queryBuilder.build()
request = self._prepare_request(self.search_url, raw_query_str)
resp = self._execute_request(request)
resp_json = resp.json()
# Extract Results
tweets = []
if resp_json is not None and resp_json['items_html'] is not None:
tweets = self.parse_tweets(resp_json['items_html'])
next_query = deepcopy(queryBuilder)
next_query.max_position = resp_json.get('min_position') #switch the labels because twitter mislabels them
next_query.min_position = resp_json.get('max_position')
next_query.reset_error_state = False
min_id = max_id = None
if len(tweets) > 0:
min_id = tweets[0]['id_str']
max_id = tweets[1]['id_str']
retval = {
'_request': request,
'_response_raw': resp,
'_response_json': resp_json,
'refresh_query': queryBuilder,
'next_query': next_query,
'tweets': tweets,
'min_id': min_id,
'max_id': max_id
}
return retval
    def user_query(self, user):
        """Fetch a user's timeline (see self.user_url). Not implemented yet."""
        raise NotImplementedError
    def get_search_iterator(self, search_query):
        """Generator yielding successive search result pages for search_query.

        Each yielded value is the dict returned by search_query(); iteration
        stops when a page comes back with no tweets.
        """
        # determine if this is the first query or a continuation.
        search_query.autoset_reset_error_state()
        result = self.search_query(search_query)
        next_query = result['next_query']
        yield result
        while True:
            # NOTE(review): both branches of this check break, so
            # continue_on_empty_result currently has no effect here —
            # presumably the else branch was meant to retry/continue; confirm
            # intended behavior against the commented-out variant below.
            if len(result['tweets']) == 0:
                if not self.continue_on_empty_result:
                    print('No tweets returned terminating program')
                    break
                else:
                    break
            # TODO remimplement
            result = self.search_query(next_query)
            next_query = result['next_query']
            yield result
# def binary_search_
# def get_search_iterator(self, queryBuilder):
# qb = qb_prev = deepcopy(queryBuilder)
# result = self.search_query(qb)
# prev_min_tweetId = None
# yield result
# while True:
# if len(result['tweets']) == 0:
# if not self.continue_on_empty_result:
# print('No tweets returned terminating program')
# break
# else:
# # Sometimes the API stops returning tweets even when there are more
# # we can try to find these tweets by modifying the max_position parameter.
# int_minId = int(qb.min_tweetId)
# for x in range(8, len(qb.min_tweetId)): #TODO impl something more sophisticated
# qb.min_tweetId = int_minId - 10**x
# result = self.search(qb)
# if len(result['tweets']) > 0:
# break
# else:
# print('No tweets returned terminating program')
# # if we didnt find any point to continue from, break.
# break
# if qb.max_tweetId is None:
# qb.max_tweetId = result['tweets'][0]['id_str']
# # In a high volume search query like 'a' must use the max_tweet_id provided by the result,
# # otherwise the same results will be returned many times. (only happens during the first ~10 pages of results)
# res_min_pos = result['response_json'].get('min_position')
# if res_min_pos is not None:
# split = res_min_pos.split('-')
# qb.max_tweetId = split[2]
# prev_min_tweetId = qb.min_tweetId
# qb.min_tweetId = result['tweets'][-1]['id_str']
# # If the current request returns the same tweets as the last
# # the query is configured wrong
# # TODO create more accurate metric
# if prev_min_tweetId is qb.min_tweetId:
# break
# result = self.search_query(qb)
# yield result
def _execute_request(self, prepared_request):
try:
if TwitterClient.FIDDLER_DEBUG:
result = self.session.send(prepared_request, timeout=self.timeout, verify=False)
else:
result = self.session.send(prepared_request, timeout=self.timeout)
return result
except requests.exceptions.Timeout as e:
raise
def _prepare_request(self, url, payload_str):
req = Request('GET', url, params=payload_str, cookies={})
return self.session.prepare_request(req)
@staticmethod
def _encode_max_postion_param(min, max):
return "TWEET-{0}-{1}".format(min, max)
def parse_tweets(self, items_html):
try:
html = lh.fromstring(items_html)
except lxml.etree.ParserError as e:
return []
tweets = []
for li in html.cssselect('li.js-stream-item'):
# Check if is a tweet type element
if 'data-item-id' not in li.attrib:
continue
tweet = self._parse_tweet(li)
if tweet is not None:
tweets.append(tweet)
return tweets
    def _parse_tweet(self, tweetElement):
        '''
        Parse one stream <li> element into a tweet dict shaped like the
        official Twitter API payload (subset of fields).
        Returns None when the element has no tweet text.
        '''
        li = tweetElement
        # Skeleton with API-like defaults; fields are filled in below as the
        # corresponding HTML fragments are found.
        tweet = {
            'created_at' : None,
            'id_str' : li.get('data-item-id'),
            'text' : None,
            'lang' : None,
            'entities': {
                'hashtags': [],
                'symbols':[],
                'user_mentions':[],
                'urls':[],
            },
            'user' : {
                'id_str' : None,
                'name' : None,
                'screen_name': None,
                'profile_image_url': None,
                'verified': False
            },
            'retweet_count' : 0,
            'favorite_count' : 0,
            'is_quote_status' : False,
            'in_reply_to_user_id': None,
            'in_reply_to_screen_name' : None,
            'contains_photo': False,
            'contains_video': False,
            'contains_card': False
        }
        content_div = li.cssselect('div.tweet')
        if len(content_div) > 0:
            content_div = content_div[0]
            # Author attributes live on the tweet container div.
            tweet['user']['id_str'] = content_div.get('data-user-id')
            tweet['user']['name'] = content_div.get('data-name')
            tweet['user']['screen_name'] = content_div.get('data-screen-name')
            reply_a = content_div.cssselect('div.tweet-context a.js-user-profile-link') # tweet-context can be used by many functions, incl follow, reply, retweet only extract reply atm
            if len(reply_a) > 0:
                if len(content_div.cssselect('div.tweet-context span.Icon--reply')) > 0: # check if actually a reply
                    tweet['in_reply_to_user_id'] = reply_a[0].get('data-user-id')
                    # The profile link href is "/<screen_name>".
                    tweet['in_reply_to_screen_name'] = reply_a[0].get('href').strip('/')
            user_img = content_div.cssselect('img.avatar')
            if len(user_img) > 0:
                tweet['user']['profile_image_url'] = user_img[0].get('src')
            text_p = content_div.cssselect('p.tweet-text, p.js-tweet-text')
            if len(text_p) > 0:
                text_p = text_p[0]
                self._parse_tweet_text(text_p, tweet)
                tweet['lang'] = text_p.get('lang')
                self._parse_tweet_entites(text_p, tweet['entities'])
            else:
                # there is no tweet text, unknown if this occurs
                return None
            verified_span = content_div.cssselect('span.Icon--verified')
            if len(verified_span) > 0:
                tweet['user']['verified'] = True
            date_span = content_div.cssselect('span._timestamp')
            if len(date_span) > 0:
                # data-time-ms is epoch milliseconds; format matches the
                # API's created_at string (UTC).
                timestamp = int(date_span[0].get('data-time-ms'))/1000
                tweet['created_at'] = datetime.fromtimestamp(timestamp, tz=timezone.utc).strftime('%a %b %d %H:%M:%S %z %Y')
        #Retweet and Favoritte counts
        counts = li.cssselect('span.ProfileTweet-action--retweet, span.ProfileTweet-action--favorite')
        if len(counts) > 0:
            for c in counts:
                classes = c.get('class').split(' ')
                # NOTE(review): c[0] assumes the count lives on the span's
                # first child element — confirm against current markup.
                if 'ProfileTweet-action--retweet' in classes:
                    tweet['retweet_count'] = int(c[0].get('data-tweet-stat-count'))
                elif 'ProfileTweet-action--favorite' in classes:
                    tweet['favorite_count'] = int(c[0].get('data-tweet-stat-count'))
        #Extract Quoted Status
        quoted_tweet_context = content_div.cssselect('div.QuoteTweet-innerContainer')
        if len(quoted_tweet_context) > 0:
            quoted_tweet_context = quoted_tweet_context[0]
            tweet['is_quote_status'] = True
            tweet['quoted_status_id_str'] = quoted_tweet_context.get('data-item-id')
            # Quoted tweets get a reduced payload (no counts/media).
            tweet['quoted_status'] = {
                'id_str': None,
                'text': None,
                'user': {
                    'id_str' : None,
                    'name' : None,
                    'screen_name' : None,
                },
                'entities' : {
                    'hashtags' : [],
                    'symbols' :[],
                    'user_mentions':[],
                    'urls':[]
                }
            }
            qtweet = tweet['quoted_status']
            qtweet['id_str'] = quoted_tweet_context.get('data-item-id')
            qtweet['user']['id_str'] = quoted_tweet_context.get('data-user-id')
            qtweet['user']['screen_name'] = quoted_tweet_context.get('data-screen-name')
            qt_user_name = quoted_tweet_context.cssselect('b.QuoteTweet-fullname')
            if len(qt_user_name) > 0:
                qtweet['user']['name'] = qt_user_name[0].text_content()
            qt_text = quoted_tweet_context.cssselect('div.QuoteTweet-text.tweet-text')
            if len(qt_text) > 0:
                qt_text = qt_text[0]
                self._parse_tweet_text(qt_text, qtweet)
                self._parse_tweet_entites(qt_text, qtweet['entities'])
        # Extract Media entities
        tweet_media_context = content_div.cssselect('div.AdaptiveMedia-container')
        if len(tweet_media_context) > 0:
            tweet_media_context = tweet_media_context[0]
            tweet['entities']['media'] = []
            photo_found = False
            tweet_media_photos = tweet_media_context.cssselect('div.AdaptiveMedia-photoContainer')
            for elm in tweet_media_photos:
                tweet['contains_photo'] = photo_found = True
                photo = {
                    'media_url' : elm.get('data-image-url'),
                    'type' : 'photo'
                }
                tweet['entities']['media'].append(photo)
            # A tweet carries either photos or a video, never both here.
            if not photo_found:
                tweet_media_video = tweet_media_context.cssselect('div.AdaptiveMedia-videoContainer')
                if len(tweet_media_video) > 0:
                    tweet['contains_video'] = True
                    video = {
                        'type' : 'video',
                        'video_type' : re.search(re.compile(r"PlayableMedia--([a-zA-Z]*)"), tweet_media_video[0].cssselect('div[class*="PlayableMedia--"]')[0].get('class')).group(1),
                        'media_url' : 'https://twitter.com/i/videos/tweet/' + tweet['id_str'],
                        'video_thumbnail' : re.search(re.compile(r"background-image:url\(\'(.*)\'"),tweet_media_video[0].cssselect('div.PlayableMedia-player')[0].get('style')).group(1)
                    }
                    tweet['entities']['media'].append(video)
        return tweet
def _parse_tweet_text(self, text_element, tweet):
#hacky way to include Emojis
for emoj in text_element.cssselect('img.Emoji'):
emoj.tail = emoj.get('alt') + emoj.tail if emoj.tail else emoj.get('alt')
#Modify Urls so they are correct
for url in text_element.cssselect('a.twitter-timeline-link'):
is_truncated = u'\u2026' in url.text_content()
url_disp = url.cssselect('span.js-display-url')
if len(url_disp) > 0:
url_disp_text = url_disp[0].text_content()
if is_truncated:
url_disp_text = url_disp_text + u'\u2026'
url.attrib['xtract-display-url'] = url_disp_text # store for later extraction
elif 'pic.twitter.com' in url.text:
url.attrib['xtract-display-url'] = url.text
strip_elements(url, ['*'])
url.text = url.attrib['href']
tmp = str(text_element.text_content())
for m in re.finditer(r'(?<!\s)(?<!\\n)(http|https)://', tmp): #add a space before urls where required
tmp = tmp[:m.start()] + ' ' + tmp[m.start():]
tweet['text'] = tmp
def _parse_tweet_entites(self, element, entities):
tags = element.cssselect('a.twitter-hashtag, a.twitter-cashtag, a.twitter-atreply, a.twitter-timeline-link')
if len(tags) > 0:
for tag in tags:
classes = tag.get('class').split(' ')
if 'twitter-hashtag' in classes:
entities['hashtags'].append(tag.text_content().strip(' \n#'))
elif 'twitter-cashtag' in classes:
entities['symbols'].append(tag.text_content().strip(' \n$'))
elif 'twitter-atreply' in classes:
mentioned_user = {
'id_str' : tag.get('data-mentioned-user-id'),
'screen_name' : tag.get('href').strip('/') if tag.get('href') is not None else None
}
entities['user_mentions'].append(mentioned_user)
elif 'twitter-timeline-link' in classes:
url = {
'url': tag.get('href'),
'expanded_url' : tag.get('data-expanded-url'),
'display_url' : tag.get('xtract-display-url')
}
entities['urls'].append(url)
# Ad-hoc smoke test: stream search results for a query and print the number
# of tweets per page until the iterator is exhausted or a timeout occurs.
if __name__ == "__main__":
    import TwitterQuery
    # TwitterClient.FIDDLER_DEBUG = True
    x = TwitterClient(timeout=None)
    try:
        gen = x.get_search_iterator(TwitterQuery.SearchQuery('apple filter:replies'))
        for res in gen:
            print(len(res['tweets']))
    except requests.exceptions.Timeout as e:
        print(e)
def get_ids(tweets):
    """Return the id_str field of each tweet dict, preserving order."""
    return [tweet['id_str'] for tweet in tweets]
|
{
"content_hash": "9e755275761ce0015f6975f0907ccaa4",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 184,
"avg_line_length": 40.17169373549884,
"alnum_prop": 0.5276654730276077,
"repo_name": "dtuit/TwitterWebsiteSearch",
"id": "76c76b3318aabc52c57cd61148edf9a8a15fc12b",
"size": "17314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TwitterWebsiteSearch/TwitterClient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "39151"
},
{
"name": "Python",
"bytes": "29527"
}
],
"symlink_target": ""
}
|
import os
import shutil
import mock
from catkin_tools import config
from ..utils import assert_raises_regex
from ..utils import in_temporary_directory
from ..utils import redirected_stdio
@mock.patch('catkin_tools.config.initialize_verb_aliases')
@in_temporary_directory
def test_config_initialization(patched_func):
    """initialize_config creates the directory, is idempotent, and fails
    cleanly when the target path exists as a file.

    initialize_verb_aliases is patched out, so no verb_aliases folder is
    expected; patched_func is unused beyond that.
    """
    cwd = os.getcwd()
    test_folder = os.path.join(cwd, 'test')
    # Test normal case
    config.initialize_config(test_folder)
    assert os.path.isdir(test_folder)
    assert not os.path.exists(os.path.join(test_folder, 'verb_aliases'))
    # Assert a second invocation is fine
    config.initialize_config(test_folder)
    shutil.rmtree(test_folder)
    # Test failure with file for target config path
    with open(test_folder, 'w') as f:
        f.write('this will cause a RuntimeError')
    with assert_raises_regex(RuntimeError, "The catkin config directory"):
        config.initialize_config(test_folder)
@in_temporary_directory
def test_verb_alias_config_initialization():
    """initialize_verb_aliases requires an existing config dir, creates the
    default aliases file, overwrites a tampered default with a warning, and
    fails when verb_aliases exists as a file.
    """
    cwd = os.getcwd()
    test_folder = os.path.join(cwd, 'test')
    # Test target directory does not exist failure
    with assert_raises_regex(RuntimeError, "Cannot initialize verb aliases because catkin configuration path"):
        config.initialize_verb_aliases(test_folder)
    # Test normal case
    os.makedirs(test_folder)
    config.initialize_verb_aliases(test_folder)
    assert os.path.isdir(test_folder)
    assert os.path.isdir(os.path.join(test_folder, 'verb_aliases'))
    defaults_path = os.path.join(test_folder, 'verb_aliases', '00-default-aliases.yaml')
    assert os.path.isfile(defaults_path)
    # Assert a second invocation is fine
    config.initialize_verb_aliases(test_folder)
    # Check that replacement of defaults works
    with open(defaults_path, 'w') as f:
        f.write("This should be overwritten (simulation of update needed)")
    with redirected_stdio() as (out, err):
        config.initialize_verb_aliases(test_folder)
    assert "Warning, builtin verb aliases at" in out.getvalue(), out.getvalue()
    shutil.rmtree(test_folder)
    # Check failure from verb aliases folder existing as a file
    os.makedirs(test_folder)
    with open(os.path.join(test_folder, 'verb_aliases'), 'w') as f:
        f.write("this will cause a RuntimeError")
    with assert_raises_regex(RuntimeError, "The catkin verb aliases config directory"):
        config.initialize_verb_aliases(test_folder)
    shutil.rmtree(test_folder)
@in_temporary_directory
def test_get_verb_aliases():
    """get_verb_aliases: failure modes, defaults, custom overrides/removals,
    rejection of malformed alias files, and tolerance of empty files.
    """
    cwd = os.getcwd()
    test_folder = os.path.join(cwd, 'test')
    # Test failure case where config folder does not exist
    with assert_raises_regex(RuntimeError, "Cannot get verb aliases because the catkin config path"):
        config.get_verb_aliases(test_folder)
    # Test failure case where aliases folder does not exist
    with mock.patch('catkin_tools.config.initialize_verb_aliases'):
        config.initialize_config(test_folder)
    with assert_raises_regex(RuntimeError, "Cannot get verb aliases because the verb aliases config path"):
        config.get_verb_aliases(test_folder)
    shutil.rmtree(test_folder)
    # Test the normal case
    config.initialize_config(test_folder)
    aliases = config.get_verb_aliases(test_folder)
    assert 'b' in aliases
    assert aliases['b'] == 'build'
    # Test a custom file: later files override earlier ones, and a null
    # value removes an alias entirely.
    base_path = os.path.join(test_folder, 'verb_aliases')
    with open(os.path.join(base_path, '01-my-custom-aliases.yaml'), 'w') as f:
        f.write("""\
b: build --isolate-devel
ls: null
""")
    aliases = config.get_verb_aliases(test_folder)
    assert 'b' in aliases
    assert aliases['b'] == 'build --isolate-devel', aliases['b']
    assert 'ls' not in aliases
    # Test a bad alias files
    bad_path = os.path.join(base_path, '02-bad.yaml')
    with open(bad_path, 'w') as f:
        f.write("""\
- foo
- bar
""")
    with assert_raises_regex(RuntimeError, "Invalid alias file"):
        config.get_verb_aliases(test_folder)
    os.remove(bad_path)
    with open(bad_path, 'w') as f:
        f.write("""\
null: foo
""")
    with assert_raises_regex(RuntimeError, "Invalid alias in file"):
        config.get_verb_aliases(test_folder)
    os.remove(bad_path)
    with open(bad_path, 'w') as f:
        f.write("""\
foo: 13.4
""")
    with assert_raises_regex(RuntimeError, "Invalid alias expansion in file"):
        config.get_verb_aliases(test_folder)
    os.remove(bad_path)
    # Test with an empty custom file (touch idiom: open for append + utime).
    # NOTE(review): no assertion follows this final call — presumably it only
    # checks that an empty file does not raise; confirm intent.
    empty_path = os.path.join(base_path, '02-my-empty.yaml')
    with open(empty_path, 'a') as f:
        os.utime(empty_path, None)
    aliases = config.get_verb_aliases(test_folder)
|
{
"content_hash": "8867b59b599eb0ca6be25451cd2d394e",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 111,
"avg_line_length": 38.891666666666666,
"alnum_prop": 0.6905935290336405,
"repo_name": "xqms/catkin_tools",
"id": "edd98038cf6bb19f41bc5fb246b6cee3e980fbe4",
"size": "4667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Objective-C",
"bytes": "3354"
},
{
"name": "Python",
"bytes": "227706"
},
{
"name": "Shell",
"bytes": "6713"
}
],
"symlink_target": ""
}
|
from .. import Provider as BaseProvider
class Provider(BaseProvider):
    """Faker provider for Portuguese (pt_PT) VAT IDs."""

    # Country prefix "PT" followed by nine digits; each '#' is replaced by a
    # random digit by bothify().
    vat_id_formats = (
        'PT#########',
    )

    def vat_id(self):
        """
        http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
        :return: A random Portuguese VAT ID
        """
        pattern = self.random_element(self.vat_id_formats)
        return self.bothify(pattern)
|
{
"content_hash": "a973b33bbdf7fb66ba03f9b6c9abdd4b",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 69,
"avg_line_length": 22,
"alnum_prop": 0.5813397129186603,
"repo_name": "danhuss/faker",
"id": "a5f85eb61f8e2115da65eb39c0d4cbb2f73ea22c",
"size": "418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "faker/providers/ssn/pt_PT/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1411894"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.core.management.base import NoArgsCommand
from channel.actions import post_process_channel
from optparse import make_option
from docutil.commands_util import recocommand
from docutil.str_util import smart_decode
class Command(NoArgsCommand):
    """Management command that post-processes a parsed channel model."""

    option_list = NoArgsCommand.option_list + (
        make_option('--pname', action='store', dest='pname',
            default='-1', help='Project unix name'),
        make_option('--cname', action='store', dest='cname',
            default='-1', help='Channel name'),
        make_option('--skip_refs', action='store_true', dest='skip_refs',
            default=False, help='Skip code reference identification'),
    )
    help = "Post Parse channel model"

    @recocommand
    def handle_noargs(self, **options):
        # Decode CLI options before handing off to the channel action.
        project_name = smart_decode(options.get('pname'))
        channel_name = smart_decode(options.get('cname'))
        post_process_channel(project_name, channel_name)
|
{
"content_hash": "f4b751de3ff43ab482bae0f1727a1443",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 73,
"avg_line_length": 38.4,
"alnum_prop": 0.671875,
"repo_name": "bartdag/recodoc2",
"id": "a32f7110ed36fdaeb81660abea54e32525584436",
"size": "960",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "recodoc2/apps/channel/management/commands/postparsechannel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5581"
},
{
"name": "HTML",
"bytes": "32211467"
},
{
"name": "Java",
"bytes": "13646"
},
{
"name": "Perl",
"bytes": "503"
},
{
"name": "Python",
"bytes": "717834"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
    # Initial migration: creates the Contact model used by the landing app.
    # NOTE(review): the regex character class "[aA-zZ]" in the name
    # validators is unusual — the range "A-z" also matches the punctuation
    # between 'Z' and 'a' in ASCII, and "\Z$" is redundant. Migrations are
    # historical records, so the patterns are left untouched here; confirm
    # against the current model definition before relying on them.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('first_name', models.CharField(max_length=20, validators=[django.core.validators.MinLengthValidator(2), django.core.validators.RegexValidator('^[aA-zZ]+\\Z$', code='invalid', message='Enter a valid first name.')], verbose_name='First Name')),
                ('last_name', models.CharField(max_length=20, validators=[django.core.validators.MinLengthValidator(2), django.core.validators.RegexValidator('^[aA-zZ]+$', code='invalid', message='Enter a valid last name.')], verbose_name='Last Name')),
                ('name', models.CharField(max_length=100, validators=[django.core.validators.MinLengthValidator(5), django.core.validators.RegexValidator('[aA-zZ]+(?:[aA-zZ]+) [aA-zZ]+\\Z', code='invalid', message='Enter a valid name (first & last, letters only).')], verbose_name='Full Name')),
                ('phone', models.CharField(max_length=10, validators=[django.core.validators.MinLengthValidator(10), django.core.validators.RegexValidator(re.compile('^\\d+(?:\\d+)*\\Z', 32), code='invalid', message='Enter a valid phone number (Only Integers).')], verbose_name='Phone Number')),
                ('email', models.CharField(max_length=100, validators=[django.core.validators.EmailValidator(code='invalid', message='Enter a valid email.')], verbose_name='Email Address')),
                ('comment', models.TextField(validators=[django.core.validators.MinLengthValidator(4, message='Type at least 1 word (4 characters)')], verbose_name='Comment')),
                ('status', models.CharField(choices=[('r', 'Responded'), ('c', 'Closed'), ('n', 'New')], default='n', max_length=1)),
                ('notes', models.CharField(blank=True, max_length=200, null=True, verbose_name='Admin Notes')),
                ('remote_address', models.GenericIPAddressField(blank=True, null=True, verbose_name='Remote IP')),
            ],
            options={
                'get_latest_by': 'date',
            },
        ),
    ]
|
{
"content_hash": "c004762fb45ca8e82b60b48fae61f3b3",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 295,
"avg_line_length": 69.68571428571428,
"alnum_prop": 0.6412464124641246,
"repo_name": "alphageek-xyz/site",
"id": "9a82ad03157301944df906076e354841ba2d4a15",
"size": "2511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "landing/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "14835"
},
{
"name": "Makefile",
"bytes": "1236"
},
{
"name": "Python",
"bytes": "145143"
}
],
"symlink_target": ""
}
|
from base import Task
from common import phases
import volume
class Create(Task):
    """Task that creates the loopback volume image file in the workspace."""
    description = 'Creating a loopback volume'
    phase = phases.volume_creation
    successors = [volume.Attach]

    @classmethod
    def run(cls, info):
        import os.path
        filename = 'volume.{ext}'.format(ext=info.volume.extension)
        info.volume.create(os.path.join(info.workspace, filename))
class MoveImage(Task):
    """Task that moves the finished volume image into the bootstrapper workspace."""
    description = 'Moving volume image'
    phase = phases.image_registration

    @classmethod
    def run(cls, info):
        import os.path
        import shutil
        import logging
        # Destination name: "<manifest image name>.<volume extension>".
        image_name = info.manifest.image['name'].format(**info.manifest_vars)
        filename = '{image_name}.{ext}'.format(image_name=image_name, ext=info.volume.extension)
        destination = os.path.join(info.manifest.bootstrapper['workspace'], filename)
        shutil.move(info.volume.image_path, destination)
        log = logging.getLogger(__name__)
        log.info('The volume image has been moved to {image_path}'.format(image_path=destination))
|
{
"content_hash": "597f38e5a8eef7b73379f45a74b26c3a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 93,
"avg_line_length": 29.606060606060606,
"alnum_prop": 0.7430910951893551,
"repo_name": "brianspeir/Vanilla",
"id": "94204294c21335481be9f200d95d5a9b7a8dcbaf",
"size": "977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor/bootstrap-vz/common/tasks/loopback.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
__author__ = 'Sean WANG'
import os
import sys
import time
import copy
MAX_TIME = 1000000000
MAX_PRO = 10000
# convert the input
def strtolist(str):
    """Parse a 'start end' string into a [start_time, end_time] int pair."""
    # NOTE(review): the parameter shadows the builtin `str`; kept to stay
    # backward compatible with existing callers.
    # Fix: split once instead of re-splitting the string for each field.
    parts = str.split()
    return [int(parts[0]), int(parts[1])]
# check input valid
def inputisvalid(in_num, max_limit):
    """Return True when in_num does not exceed max_limit."""
    # Idiom: return the comparison directly instead of an if/else that
    # returns literal booleans.
    return in_num <= max_limit
# filter process as time range
def filter_process_list(process_list, time_range):
    """Return the processes whose [start, end] interval overlaps time_range.

    A process is kept when it neither ends before the range starts nor
    starts after the range ends. A new list is returned; the input list is
    left untouched.
    """
    # Idiom: build the result directly instead of shallow-copying the list
    # and removing non-overlapping items from the copy.
    return [item for item in process_list
            if item[0] <= time_range[1] and item[1] >= time_range[0]]
# get time point when there are process open or close
def filter_time_point(process_list, time_range):
    """Return sorted [time, delta] events inside time_range.

    Emits +1 at each process start and -1 at each process end, keeping only
    events that fall within the given range.
    """
    events = []
    lo, hi = time_range[0], time_range[1]
    for item in process_list:
        if item[0] >= lo:
            events.append([item[0], 1])
        if item[1] <= hi:
            events.append([item[1], -1])
    events.sort()
    return events
# read input
# NOTE: Python 2 script (print statements, raw_input). Reads a time range,
# a process count, and per-process [start, end] intervals from stdin, then
# prints the minimum and maximum number of concurrently running processes
# within the time range.
valid_flg = 0
time_range = []
process_nb = 0
g_process_tlist = []
# read valid input time range
while 0 == valid_flg:
    str = raw_input()
    tmp_str = str.split()
    if len(tmp_str) != 2:
        print 'Invalid input, please input time range again'
        continue
    time_range = strtolist(str)
    if 0 <= time_range[0] <= time_range[1] and inputisvalid(time_range[1], MAX_TIME):
        valid_flg = 1
    else:
        print 'Invalid input, please input time range again'
# read valid input process nb
valid_flg = 0
while 0 == valid_flg:
    str = raw_input()
    tmp_str = str.split()
    if len(tmp_str) != 1:
        print 'Invalid input, please input process number again'
        continue
    process_nb = int(str)
    if inputisvalid(process_nb, MAX_PRO):
        valid_flg = 1
    else:
        print 'Invalid input, please input process number again'
# read valid input process time range
for i in range(0, process_nb):
    valid_flg = 0
    while 0 == valid_flg:
        str = raw_input()
        tmp_str = str.split()
        if len(tmp_str) != 2:
            print 'Invalid input, please input %s th process time range again' % (i + 1)
            continue
        l_time_range = strtolist(str)
        if 0 <= l_time_range[0] <= l_time_range[1] and inputisvalid(l_time_range[1], MAX_TIME):
            valid_flg = 1
        else:
            print 'Invalid input, please input %s th process time range again' % (i + 1)
    g_process_tlist.append(l_time_range)
# begin to compute max and min process number
# Drop processes outside the range, then build the sorted +1/-1 event list.
g_process_tlist = filter_process_list(g_process_tlist, time_range)
time_point_list = filter_time_point(g_process_tlist, time_range)
# process nb == 0 ==> max = min = 0
if 0 == len(g_process_tlist):
    print 0
    print 0
    exit()
# to record process nb of every time point when process nb change
count_list = []
count = 0
begin_value = 0
# make sure the start value of the point that time range begin
# (processes that started before the range but are still running count
# toward the initial concurrency level)
for pro_item in g_process_tlist:
    if pro_item[1] >= time_range[0] > pro_item[0]:
        count += 1
begin_value = count
tmp_value = time_point_list[0][0]
if time_range[0] != tmp_value:
    count_list.append(begin_value)
# algorithm : record process num as time line - add when open or minus when close
# count_all accumulates the net delta at the current time point, count_add
# only the openings; both candidate levels are recorded so intervals whose
# endpoints coincide are counted correctly.
count_all = 0
count_add = 0
for item in time_point_list:
    if tmp_value != item[0]:
        tmp_value = item[0]
        count_list.append(begin_value + count_add)
        count_list.append(begin_value + count_all)
        begin_value += count_all
        count_all = 0
        count_add = 0
        count_all += item[1]
        if 1 == item[1]:
            count_add += 1
    else:
        count_all += item[1]
        if 1 == item[1]:
            count_add += 1
count_list.append(begin_value + count_add)
if time_point_list[-1][0] != time_range[1]:
    count_list.append(begin_value + count_all)
# output
print min(count_list)
print max(count_list)
|
{
"content_hash": "88919dc59ef5fb6a601ae11a0645500e",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 95,
"avg_line_length": 28.28472222222222,
"alnum_prop": 0.5924380063835011,
"repo_name": "IT-SeanWANG/CodeJam",
"id": "c493067f8516a2e0d9821f1a3ba0f2b5a11bf3f1",
"size": "4139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2016_1st/Q1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8435"
},
{
"name": "C++",
"bytes": "20597"
},
{
"name": "Java",
"bytes": "9673"
},
{
"name": "Lua",
"bytes": "6758"
},
{
"name": "Python",
"bytes": "253998"
}
],
"symlink_target": ""
}
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def ComplianceResult(vim, *args, **kwargs):
    '''DataObject representing the result from a ComplianceCheck.

    Positional args fill the fields in required+optional order; keyword args
    set fields by name. Raises IndexError when no field is supplied and
    InvalidArgumentError on an unknown keyword.
    '''

    obj = vim.client.factory.create('ns0:ComplianceResult')

    # do some validation checking...
    if (len(args) + len(kwargs)) < 1:
        # BUG FIX: the old message claimed "at least 2 arguments" while the
        # check requires 1, and it reported len(args) only, ignoring kwargs.
        raise IndexError('Expected at least 1 arguments got: %d' % (len(args) + len(kwargs)))

    required = [ 'complianceStatus' ]
    optional = [ 'checkTime', 'entity', 'failure', 'profile', 'dynamicProperty', 'dynamicType' ]

    # Positional values map onto field names in declaration order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
|
{
"content_hash": "04b37d334167297eb5a60d8304c0626e",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 124,
"avg_line_length": 32.9375,
"alnum_prop": 0.6015180265654649,
"repo_name": "xuru/pyvisdk",
"id": "e99cc118efcef3b0451f6b9428f7d8efa8f28d7b",
"size": "1055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/do/compliance_result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
}
|
from torch.utils.data.sampler import Sampler
import math
def trainable_param(model):
    """Return the parameters of *model* that require gradients."""
    # Idiom: comprehension instead of a manual append loop.
    return [p for p in model.parameters() if p.requires_grad]
class DistributedSequentialSampler(Sampler):
    """
    A sequential sampler used in FeatureSet when get (train=false) iterator .

    Each replica receives a contiguous slice of the dataset; the first
    (len(dataset) % num_replicas) replicas take one extra sample each so the
    whole dataset is covered exactly once.
    """

    def __init__(self, dataset, num_replicas, rank):
        self.dataset = dataset
        base, extra_samples = divmod(len(self.dataset), num_replicas)
        self.epoch = 0
        if rank < extra_samples:
            # Low-ranked replicas absorb one leftover sample apiece.
            self.num_samples = base + 1
            self.offset = self.num_samples * rank
        else:
            self.num_samples = base
            self.offset = base * rank + extra_samples
        self.total_size = len(dataset)

    def __iter__(self):
        return iter(range(self.offset, self.offset + self.num_samples))

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
|
{
"content_hash": "911ff1352151ddd195e2032c2f180696",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 82,
"avg_line_length": 28.4,
"alnum_prop": 0.6100352112676056,
"repo_name": "intel-analytics/analytics-zoo",
"id": "37ce7595d999dc7545806922ce0bf4a1d2cb943a",
"size": "1726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyzoo/zoo/pipeline/api/torch/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "73165"
},
{
"name": "Groovy",
"bytes": "1613"
},
{
"name": "Java",
"bytes": "209136"
},
{
"name": "Jupyter Notebook",
"bytes": "24437284"
},
{
"name": "Makefile",
"bytes": "11724"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "4085490"
},
{
"name": "RobotFramework",
"bytes": "17467"
},
{
"name": "Scala",
"bytes": "3562801"
},
{
"name": "Shell",
"bytes": "413512"
}
],
"symlink_target": ""
}
|
"""Tests for resource tracker claims."""
import re
import uuid
import mock
import six
from nova.compute import claims
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.openstack.common import jsonutils
from nova.pci import pci_manager
from nova import test
from nova.tests.pci import pci_fakes
from nova.virt import hardware
class FakeResourceHandler(object):
    """Test double recording how the resource-extension hook was invoked."""
    test_called = False
    usage_is_instance = False
    # Set by test_resources(); True when the usage dict looked like the fake
    # instance type. Declared here so it has a default before any call.
    usage_is_itype = False

    def test_resources(self, usage, limits):
        self.test_called = True
        # BUG FIX: compare strings with '==' rather than 'is'; identity of
        # equal string literals is a CPython interning accident, not a
        # language guarantee.
        self.usage_is_itype = usage.get('name') == 'fakeitype'
        return []
class DummyTracker(object):
    """Minimal resource-tracker stand-in that records which hooks fired."""
    icalled = False
    rcalled = False
    ext_resources_handler = FakeResourceHandler()

    def __init__(self):
        self.new_pci_tracker()

    def abort_instance_claim(self, *args, **kwargs):
        self.icalled = True

    def drop_resize_claim(self, *args, **kwargs):
        self.rcalled = True

    def new_pci_tracker(self):
        # Fresh tracker bound to a throwaway test request context.
        request_context = context.RequestContext('testuser', 'testproject')
        self.pci_tracker = pci_manager.PciDevTracker(request_context)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
class ClaimTestCase(test.NoDBTestCase):
    def setUp(self):
        # Fresh fake host resources and a dummy tracker for every test.
        super(ClaimTestCase, self).setUp()
        self.resources = self._fake_resources()
        self.tracker = DummyTracker()
    def _claim(self, limits=None, overhead=None, **kwargs):
        """Build a claims.Claim against the fake tracker/resources.

        kwargs override the fake instance fields; numa_topology (popped from
        kwargs) is stored as a fake instance_extra DB row so the claim's DB
        lookup is satisfied via mock.
        """
        numa_topology = kwargs.pop('numa_topology', None)
        instance = self._fake_instance(**kwargs)
        if numa_topology:
            db_numa_topology = {
                'id': 1, 'created_at': None, 'updated_at': None,
                'deleted_at': None, 'deleted': None,
                'instance_uuid': instance['uuid'],
                'numa_topology': numa_topology.to_json()
            }
        else:
            db_numa_topology = None
        if overhead is None:
            overhead = {'memory_mb': 0}
        # Patch the DB call the Claim constructor makes to fetch topology.
        with mock.patch.object(
                db, 'instance_extra_get_by_instance_uuid',
                return_value=db_numa_topology):
            return claims.Claim('context', instance, self.tracker,
                                self.resources, overhead=overhead,
                                limits=limits)
def _fake_instance(self, **kwargs):
instance = {
'uuid': str(uuid.uuid1()),
'memory_mb': 1024,
'root_gb': 10,
'ephemeral_gb': 5,
'vcpus': 1,
'system_metadata': {},
'numa_topology': None
}
instance.update(**kwargs)
return instance
def _fake_instance_type(self, **kwargs):
instance_type = {
'id': 1,
'name': 'fakeitype',
'memory_mb': 1,
'vcpus': 1,
'root_gb': 1,
'ephemeral_gb': 2
}
instance_type.update(**kwargs)
return instance_type
    def _fake_resources(self, values=None):
        """Return a fake host resource dict; *values* overrides defaults.

        Includes a two-cell NUMA topology (cells 1 and 2, two CPUs and
        512 MB each) serialized to JSON, matching what the tracker stores.
        """
        resources = {
            'memory_mb': 2048,
            'memory_mb_used': 0,
            'free_ram_mb': 2048,
            'local_gb': 20,
            'local_gb_used': 0,
            'free_disk_gb': 20,
            'vcpus': 2,
            'vcpus_used': 0,
            'numa_topology': hardware.VirtNUMAHostTopology(
                cells=[hardware.VirtNUMATopologyCellUsage(1, [1, 2], 512),
                       hardware.VirtNUMATopologyCellUsage(2, [3, 4], 512)]
                ).to_json()
        }
        if values:
            resources.update(values)
        return resources
    # TODO(lxsli): Remove once Py2.6 is deprecated
    def assertRaisesRegexp(self, re_obj, e, fn, *a, **kw):
        """Assert fn(*a, **kw) raises *e* with a message matching *re_obj*.

        Local re-implementation because Py2.6's unittest lacks it.
        """
        try:
            fn(*a, **kw)
            self.fail("Expected exception not raised")
        except e as ee:
            self.assertTrue(re.search(re_obj, six.text_type(ee)))
    # Without limits, claims always succeed regardless of requested size;
    # with limits, exceeding them raises ComputeResourcesUnavailable.
    def test_memory_unlimited(self, mock_get):
        self._claim(memory_mb=99999999)
    def test_disk_unlimited_root(self, mock_get):
        self._claim(root_gb=999999)
    def test_disk_unlimited_ephemeral(self, mock_get):
        self._claim(ephemeral_gb=999999)
    def test_memory_with_overhead(self, mock_get):
        # 2040 + 8 overhead == 2048 limit: exactly fits.
        overhead = {'memory_mb': 8}
        limits = {'memory_mb': 2048}
        self._claim(memory_mb=2040, limits=limits,
                    overhead=overhead)
    def test_memory_with_overhead_insufficient(self, mock_get):
        # 2040 + 9 overhead > 2048 limit: one MB over.
        overhead = {'memory_mb': 9}
        limits = {'memory_mb': 2048}
        self.assertRaises(exception.ComputeResourcesUnavailable,
                          self._claim, limits=limits, overhead=overhead,
                          memory_mb=2040)
    def test_memory_oversubscription(self, mock_get):
        self._claim(memory_mb=4096)
    def test_memory_insufficient(self, mock_get):
        limits = {'memory_mb': 8192}
        self.assertRaises(exception.ComputeResourcesUnavailable,
                          self._claim, limits=limits, memory_mb=16384)
    def test_disk_oversubscription(self, mock_get):
        limits = {'disk_gb': 60}
        self._claim(root_gb=10, ephemeral_gb=40,
                    limits=limits)
    def test_disk_insufficient(self, mock_get):
        limits = {'disk_gb': 45}
        # Failure message should mention the exhausted resource.
        self.assertRaisesRegexp(re.compile("disk", re.IGNORECASE),
                exception.ComputeResourcesUnavailable,
                self._claim, limits=limits, root_gb=10, ephemeral_gb=40)
    def test_disk_and_memory_insufficient(self, mock_get):
        limits = {'disk_gb': 45, 'memory_mb': 8192}
        self.assertRaisesRegexp(re.compile("memory.*disk", re.IGNORECASE),
                exception.ComputeResourcesUnavailable,
                self._claim, limits=limits, root_gb=10, ephemeral_gb=40,
                memory_mb=16384)
    @pci_fakes.patch_pci_whitelist
    def test_pci_pass(self, mock_get):
        """A PCI request matched by an available device lets the claim pass."""
        dev_dict = {
            'compute_node_id': 1,
            'address': 'a',
            'product_id': 'p',
            'vendor_id': 'v',
            'status': 'available'}
        self.tracker.new_pci_tracker()
        self.tracker.pci_tracker.set_hvdevs([dev_dict])
        claim = self._claim()
        request = objects.InstancePCIRequest(count=1,
            spec=[{'vendor_id': 'v', 'product_id': 'p'}])
        # NOTE(review): the mock's return value is set after _claim() but
        # before _test_pci() — this presumably relies on the claim fetching
        # its PCI requests lazily; confirm before reordering.
        mock_get.return_value = objects.InstancePCIRequests(
            requests=[request])
        # None means the PCI test found no problem.
        self.assertIsNone(claim._test_pci())
    @pci_fakes.patch_pci_whitelist
    def test_pci_fail(self, mock_get):
        """Run the PCI test with a request no available device satisfies.

        NOTE(review): unlike test_pci_pass this never asserts on the
        result of _test_pci(); presumably it should assert a non-None
        failure reason — confirm against claims._test_pci's contract.
        """
        # Device vendor 'v1' deliberately mismatches the requested 'v'.
        dev_dict = {
            'compute_node_id': 1,
            'address': 'a',
            'product_id': 'p',
            'vendor_id': 'v1',
            'status': 'available'}
        self.tracker.new_pci_tracker()
        self.tracker.pci_tracker.set_hvdevs([dev_dict])
        claim = self._claim()
        request = objects.InstancePCIRequest(count=1,
            spec=[{'vendor_id': 'v', 'product_id': 'p'}])
        mock_get.return_value = objects.InstancePCIRequests(
            requests=[request])
        claim._test_pci()
    @pci_fakes.patch_pci_whitelist
    def test_pci_pass_no_requests(self, mock_get):
        """With no PCI requests attached, the PCI test trivially passes."""
        dev_dict = {
            'compute_node_id': 1,
            'address': 'a',
            'product_id': 'p',
            'vendor_id': 'v',
            'status': 'available'}
        self.tracker.new_pci_tracker()
        self.tracker.pci_tracker.set_hvdevs([dev_dict])
        claim = self._claim()
        # None means the PCI test found no problem.
        self.assertIsNone(claim._test_pci())
def test_ext_resources(self, mock_get):
self._claim()
self.assertTrue(self.tracker.ext_resources_handler.test_called)
self.assertFalse(self.tracker.ext_resources_handler.usage_is_itype)
def test_numa_topology_no_limit(self, mock_get):
huge_instance = hardware.VirtNUMAInstanceTopology(
cells=[hardware.VirtNUMATopologyCell(
1, set([1, 2]), 512)])
self._claim(numa_topology=huge_instance)
    def test_numa_topology_fails(self, mock_get):
        """An instance topology exceeding the per-cell limits is rejected."""
        huge_instance = hardware.VirtNUMAInstanceTopology(
            cells=[hardware.VirtNUMATopologyCell(
                1, set([1, 2, 3, 4, 5]), 2048)])
        # NOTE(review): both limit cells are declared with cell id 1; the
        # second presumably should be id 2 to mirror the host fixture's
        # cells 1 and 2 — confirm before changing, as it may not affect
        # the claim outcome.
        limit_topo = hardware.VirtNUMALimitTopology(
            cells=[hardware.VirtNUMATopologyCellLimit(
                1, [1, 2], 512, cpu_limit=2, memory_limit=512),
                hardware.VirtNUMATopologyCellLimit(
                1, [3, 4], 512, cpu_limit=2, memory_limit=512)])
        self.assertRaises(exception.ComputeResourcesUnavailable,
                          self._claim,
                          limits={'numa_topology': limit_topo.to_json()},
                          numa_topology=huge_instance)
    def test_numa_topology_passes(self, mock_get):
        """A small instance topology fits under generous NUMA limits."""
        huge_instance = hardware.VirtNUMAInstanceTopology(
            cells=[hardware.VirtNUMATopologyCell(
                1, set([1, 2]), 512)])
        # NOTE(review): both limit cells are declared with cell id 1; the
        # second presumably should be id 2 — confirm (see
        # test_numa_topology_fails for the same pattern).
        limit_topo = hardware.VirtNUMALimitTopology(
            cells=[hardware.VirtNUMATopologyCellLimit(
                1, [1, 2], 512, cpu_limit=5, memory_limit=4096),
                hardware.VirtNUMATopologyCellLimit(
                1, [3, 4], 512, cpu_limit=5, memory_limit=4096)])
        self._claim(limits={'numa_topology': limit_topo.to_json()},
                    numa_topology=huge_instance)
def test_abort(self, mock_get):
claim = self._abort()
self.assertTrue(claim.tracker.icalled)
def _abort(self):
claim = None
try:
with self._claim(memory_mb=4096) as claim:
raise test.TestingException("abort")
except test.TestingException:
pass
return claim
class ResizeClaimTestCase(ClaimTestCase):
    """Re-runs the claim tests against claims.ResizeClaim.

    Inherits every test from ClaimTestCase; only the claim factory and
    the expectations that differ for resizes are overridden.
    """

    def setUp(self):
        super(ResizeClaimTestCase, self).setUp()
        self.instance = self._fake_instance()
        # NOTE(review): this attribute appears unused in the visible code;
        # presumably a leftover — confirm against the base class.
        self.get_numa_constraint_patch = None

    def _claim(self, limits=None, overhead=None, **kwargs):
        """Build a ResizeClaim with a mocked NUMA constraint.

        NOTE(review): kwargs is passed to _fake_instance_type *before*
        'numa_topology' is popped, so the fake instance type also sees
        it — confirm this ordering is intentional.
        """
        instance_type = self._fake_instance_type(**kwargs)
        numa_constraint = kwargs.pop('numa_topology', None)
        if overhead is None:
            overhead = {'memory_mb': 0}
        with mock.patch.object(
                hardware.VirtNUMAInstanceTopology, 'get_constraints',
                return_value=numa_constraint):
            return claims.ResizeClaim('context', self.instance, instance_type,
                                      {}, self.tracker, self.resources,
                                      overhead=overhead, limits=limits)

    def _set_pci_request(self, claim):
        """Attach a new-style PCI request to the claim's instance metadata."""
        request = [{'count': 1,
                    'spec': [{'vendor_id': 'v', 'product_id': 'p'}],
                    }]
        claim.instance.update(
            system_metadata={'new_pci_requests': jsonutils.dumps(request)})

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_ext_resources(self, mock_get):
        """Resize claims drive the extension handler in instance-type mode."""
        self._claim()
        self.assertTrue(self.tracker.ext_resources_handler.test_called)
        self.assertTrue(self.tracker.ext_resources_handler.usage_is_itype)

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_abort(self, mock_get):
        """Aborting a resize claim notifies the tracker via rcalled."""
        claim = self._abort()
        self.assertTrue(claim.tracker.rcalled)
|
{
"content_hash": "10af99a8edd83b9cb9b22d7e486c6a0e",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 78,
"avg_line_length": 36.41955835962145,
"alnum_prop": 0.5692507579038545,
"repo_name": "tianweizhang/nova",
"id": "8a4e30b1bc6aff1b214445077c4a3106cb52c3ab",
"size": "12185",
"binary": false,
"copies": "2",
"ref": "refs/heads/v0",
"path": "nova/tests/compute/test_claims.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16708379"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "259645"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.contrib.auth.models import User
class ChangePasswordTest(TestCase):
    """Exercise the password-change views of the pinax account app."""

    urls = "pinax.apps.account.tests.account_urls"

    def setUp(self):
        # BUGFIX: take a *copy* of INSTALLED_APPS.  The original aliased
        # the live list, so the remove() below also mutated the saved
        # value and tearDown could never restore "mailer".
        self.old_installed_apps = list(settings.INSTALLED_APPS)
        # remove django-mailer to properly test for outbound email
        if "mailer" in settings.INSTALLED_APPS:
            settings.INSTALLED_APPS.remove("mailer")
        self.EMAIL_AUTHENTICATION = getattr(
            settings, "ACCOUNT_EMAIL_AUTHENTICATION", False)
        User.objects.create_user("bob", "bob@example.com", "abc123")

    def tearDown(self):
        settings.INSTALLED_APPS = self.old_installed_apps

    def test_password_change_view(self):
        """
        Test GET on /password_change/: anonymous users are redirected.
        """
        response = self.client.get(reverse("acct_passwd"))
        self.assertEquals(response.status_code, 302)

    def test_authenticated_password_change_view(self):
        """
        Error if user can not login and get to the password change view
        """
        data = {
            "password": "abc123",
        }
        if self.EMAIL_AUTHENTICATION:
            data["email"] = "bob@example.com"
        else:
            data["username"] = "bob"
        response = self.client.post(reverse("acct_login"), data)
        self.assertEquals(response.status_code, 302)
        response = self.client.get(reverse("acct_passwd"))
        self.assertEquals(response.status_code, 200)

    def test_change_password(self):
        """
        Error if password can not be changed
        """
        data = {
            "oldpassword": "abc123",
            "password1": "def456",
            "password2": "def456",
        }
        response = self.client.post(reverse("acct_passwd"), data)
        self.assertEquals(response.status_code, 302)

    def test_signal_password_change(self):
        """
        The password_changed signal hands the changed user to receivers.
        """
        from pinax.apps.account.signals import password_changed

        bob = User.objects.get(email="bob@example.com")

        # BUGFIX: accept **kwargs — Django passes extra keyword arguments
        # (e.g. 'signal') to receivers, which would crash the original
        # two-argument receiver whenever the signal actually fired.
        def receiver(sender, user, **kwargs):
            self.assertEquals(user, bob)
            self.assert_(user.check_password("ghi789"))

        password_changed.connect(receiver)
        try:
            data = {
                "oldpassword": "def456",
                "password1": "ghi789",
                "password2": "ghi789",
            }
            self.client.post(reverse("acct_passwd"), data)
        finally:
            # BUGFIX: disconnect so the receiver does not leak into other
            # tests executed in the same process.
            password_changed.disconnect(receiver)
|
{
"content_hash": "6a608a25dbf0a88b52212b8ebf8ee1dd",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 92,
"avg_line_length": 33.46835443037975,
"alnum_prop": 0.5922844175491679,
"repo_name": "espenak/pinax-oldversion-backup",
"id": "ceeb0b5ee023e9e09acb99a42c7755920f41d0ba",
"size": "2644",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pinax/apps/account/tests/test_change_password.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""Trash a trashable card."""
from csrv.model.actions import action
from csrv.model import cost
from csrv.model import errors
class Trash(action.Action):
  """Action that moves a trashable card to the trash."""

  DESCRIPTION = 'Trash a card'

  def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
    """Pay the action's costs, then trash the card."""
    action.Action.resolve(
        self,
        ignore_clicks=ignore_clicks,
        ignore_all_costs=ignore_all_costs)
    # A runner-initiated trash exposes the card first.
    if self.game.runner == self.player:
      self.card.is_faceup = True
    self.card.trash()

  @property
  def description(self):
    """Human-readable description naming the specific card."""
    return 'Trash %s' % self.card.NAME
|
{
"content_hash": "20456f18c313f60dd855afc1095fa82a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 80,
"avg_line_length": 24.52173913043478,
"alnum_prop": 0.6879432624113475,
"repo_name": "mrroach/CentralServer",
"id": "597e9752499a9e94fa5f985d897327e2bd6c9ffe",
"size": "564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csrv/model/actions/trash.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23340"
},
{
"name": "JavaScript",
"bytes": "133607"
},
{
"name": "Python",
"bytes": "433045"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.shortcuts import get_object_or_404
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.status import HTTP_201_CREATED
from rest_framework.viewsets import ModelViewSet
from ..conf import settings as zeus_settings
from ..models import Buildset
from ..models import Build
from ..models import Command
from ..models import Project
from ..tasks import do_build
from .exceptions import ConflictError
from .serializers import BuildsetSerializer
from .serializers import BuildDetailSerializer
from .serializers import ProjectDetailSerializer
from .serializers import ProjectSerializer
from ..tasks import do_buildset
import datetime
import time
class BaseApiMixin(object):
    """Pagination settings and common behaviour for all API views here."""

    pagination_serializer_class = zeus_settings.API_PAGINATION_SERIALIZER_CLASS
    paginate_by = zeus_settings.API_PAGINATE_BY

    def dispatch(self, *args, **kwargs):
        """Dispatch the request, optionally delayed in DEBUG mode.

        When DEBUG is on and API_DELAY is configured, sleep that many
        seconds first to simulate a slow network during development.
        """
        if settings.DEBUG:
            delay_seconds = zeus_settings.API_DELAY
            if delay_seconds:
                # TODO: note user (log) that API calls are artificially delayed
                time.sleep(delay_seconds)
        return super(BaseApiMixin, self).dispatch(*args, **kwargs)
class BaseViewSet(BaseApiMixin, ModelViewSet):
    """Concrete base class for this module's ModelViewSets."""
# ==============================================================================
# Project
# ==============================================================================
class ProjectViewSet(BaseViewSet):
    """List existing projects and create new ones."""

    queryset = Project.objects.all()
    serializer_class = ProjectSerializer


# URL-routable view: GET lists projects, POST creates one.
project_list = ProjectViewSet.as_view({'get': 'list', 'post': 'create'})
class ProjectDetail(BaseApiMixin, generics.RetrieveAPIView):
    """Retrieve a single project, looked up by its unique name."""

    serializer_class = ProjectDetailSerializer
    model = Project
    lookup_field = 'name'


project_detail = ProjectDetail.as_view()
# ==============================================================================
# Buildset
# ==============================================================================
class BuildsetViewSet(BaseViewSet):
    """CRUD endpoint for buildsets scoped to one project.

    The project is identified by the ``name`` URL kwarg and the
    buildset by its per-project ``buildset_no`` number.
    """

    serializer_class = BuildsetSerializer
    model = Buildset

    def get_queryset(self):
        """Restrict buildsets to the project named in the URL."""
        return Buildset.objects.filter(project__name=self.kwargs['name'])

    def get_object_filters(self):
        """Lookup filters identifying one buildset within the queryset."""
        return {'number': self.kwargs['buildset_no']}

    def get_object(self):
        """Fetch the single buildset or raise Http404."""
        queryset = self.get_queryset()
        filters = self.get_object_filters()
        return get_object_or_404(queryset, **filters)

    def pre_save(self, obj):
        # Attach the owning project before the serializer saves the buildset.
        obj.project = Project.objects.get(name=self.kwargs['name'])

    def post_save(self, obj, created):
        if created:
            # BUGFIX: dispatch the object actually passed to this hook
            # instead of the view attribute self.object, matching how
            # pre_save uses ``obj``.
            do_buildset.delay(obj)


buildset_list = BuildsetViewSet.as_view({
    'get': 'list',
    'post': 'create',
})

buildset_detail = BuildsetViewSet.as_view({
    'get': 'retrieve',
})
# ==============================================================================
# Build
# ==============================================================================
class BuildViewSet(BaseViewSet):
    """Retrieve or restart one build within a buildset."""

    serializer_class = BuildDetailSerializer
    model = Build

    def get_queryset(self):
        """Restrict builds to the project named in the URL."""
        return Build.objects.filter(buildset__project__name=self.kwargs['name'])

    def get_object_filters(self):
        """Lookup filters identifying one build inside the queryset."""
        return {
            'buildset__number': self.kwargs['buildset_no'],
            'number': self.kwargs['build_no'],
        }

    def get_object(self):
        """Fetch (and cache) the build; update() reuses the same object."""
        if not hasattr(self, '_build'):
            queryset = self.get_queryset()
            filters = self.get_object_filters()
            self._build = get_object_or_404(queryset, **filters)
        return self._build

    def update(self, request, *args, **kwargs):
        """
        Actually **restarts** a build *in place* - it doesn't create new build
        and the information for current build is lost.
        """
        build = self.get_object()
        # BUGFIX: refuse the restart *before* clearing the build's output;
        # the original cleared output even when it then raised ConflictError.
        if not build.is_finished():
            raise ConflictError('Build is still running and cannot be restarted')
        build.clear_output()
        Command.objects.filter(build=build).delete()
        # NOTE(review): naive local datetime; consider
        # django.utils.timezone.now() if USE_TZ is enabled — confirm.
        build.created_at = datetime.datetime.now()
        build.finished_at = None
        build.save(force_update=True)
        do_build.delay(build, build.buildset.project.get_builder())
        return super(BuildViewSet, self).retrieve(request, *args, **kwargs)


build_detail = BuildViewSet.as_view({'get': 'retrieve', 'put': 'update'})
|
{
"content_hash": "5c0993bd01e3e7daf303313a8efb45d1",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 81,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6086956521739131,
"repo_name": "lukaszb/zeusci",
"id": "936fc1f8aa0a1ace941c7a355556acff06650926",
"size": "4370",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "zeusci/zeus/api/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11819"
},
{
"name": "JavaScript",
"bytes": "13360"
},
{
"name": "Python",
"bytes": "112872"
},
{
"name": "Ruby",
"bytes": "1262"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
}
|
from django import template
from django.contrib.staticfiles import finders
#from django.utils.html import escape
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag
def includestatic(path, encoding='UTF-8'):
    """Render the raw contents of a static file inline in a template.

    Args:
        path: static-file path, resolved via the staticfiles finders.
        encoding: text encoding used to read the file (default UTF-8).

    Returns:
        The file contents marked safe for direct HTML inclusion.

    Raises:
        FileNotFoundError: if no static file matches ``path``.
    """
    file_path = finders.find(path)
    if file_path is None:
        # BUGFIX: finders.find returns None for unknown paths; fail with a
        # clear error instead of letting open(None) raise a TypeError.
        raise FileNotFoundError('Static file not found: %s' % path)
    with open(file_path, "r", encoding=encoding) as f:
        content = f.read()
    # NOTE: the content is intentionally NOT escaped — only include
    # trusted static files.
    return mark_safe(content)
|
{
"content_hash": "0ba76e899e35715cbcb1c62b56cf2a6f",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 51,
"avg_line_length": 29.285714285714285,
"alnum_prop": 0.7682926829268293,
"repo_name": "amitdhiman000/MyOffers",
"id": "4fd78e3b51eec645c78b798c3784c7e3f041f0fb",
"size": "410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MyOffers/templatetags/includestatic.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "26343"
},
{
"name": "HTML",
"bytes": "177970"
},
{
"name": "JavaScript",
"bytes": "943727"
},
{
"name": "Python",
"bytes": "196685"
},
{
"name": "Shell",
"bytes": "553"
},
{
"name": "TypeScript",
"bytes": "50608"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import inspect
def arginfo(callable):
    """Get information about the arguments of a callable.

    Returns a :class:`inspect.ArgSpec` object as for
    :func:`inspect.getargspec`.

    :func:`inspect.getargspec` returns information about the arguments
    of a function. arginfo also works for classes and instances with a
    __call__ defined. Unlike getargspec, arginfo treats bound methods
    like functions, so that the self argument is not reported.

    arginfo returns ``None`` if given something that is not callable.

    arginfo caches previous calls (except for instances with a
    __call__), making calling it repeatedly cheap.

    This was originally inspired by the pytest.core varnames() function,
    but has been completely rewritten to handle class constructors,
    also show other getarginfo() information, and for readability.
    """
    try:
        # Fast path: this exact callable was inspected before.
        return arginfo._cache[callable]
    except KeyError:
        # Try to get __call__ function from the cache.
        try:
            return arginfo._cache[callable.__call__]
        except (AttributeError, KeyError):
            pass
    func, cache_key, remove_self = get_callable_info(callable)
    if func is None:
        return None
    result = inspect.getargspec(func)
    if remove_self:
        # Drop the implicit self/cls argument from the reported args.
        args = result.args[1:]
        result = inspect.ArgSpec(args, result.varargs, result.keywords,
                                 result.defaults)
    arginfo._cache[cache_key] = result
    return result
def is_cached(callable):
    """Return whether *callable* already has a cached arginfo result.

    Checks both the callable itself and (when present) its ``__call__``
    attribute, mirroring the two cache keys arginfo() may use.
    """
    if callable in arginfo._cache:
        return True
    # BUGFIX: objects without a __call__ attribute (e.g. plain values)
    # previously raised AttributeError here; they can never be cached
    # under a __call__ key, so report False instead.
    call = getattr(callable, '__call__', None)
    return call is not None and call in arginfo._cache


# Cache of inspected callables, exposed as attributes of arginfo itself.
arginfo._cache = {}
arginfo.is_cached = is_cached
def get_callable_info(callable):
    """Get information about a callable.

    Returns a tuple of:

    * actual function/method that can be inspected with inspect.getargspec.
    * cache key to use to cache results.
    * whether to remove self or not.

    Note that in Python 3, __init__ is not a method, but we still
    want to remove self from it.

    If not inspectable (None, None, False) is returned.
    """
    if inspect.isfunction(callable):
        # Plain functions carry no implicit self.
        return callable, callable, False
    if inspect.ismethod(callable):
        # Bound methods: inspect directly, but strip self from the report.
        return callable, callable, True
    if inspect.isclass(callable):
        # Inspect the constructor while caching on the class itself.
        return get_class_init(callable), callable, True
    # Fall back to an instance's __call__; anything without one is
    # not inspectable.
    call = getattr(callable, '__call__', None)
    if call is None:
        return None, None, False
    return call, call, True
def fake_empty_init():
    """Stand-in inspected for classes that define no real __init__."""
    pass  # pragma: nocoverage
class Dummy(object):
    # Minimal new-style class used only to capture the default __init__.
    pass

# The slot wrapper CPython installs as the inherited default __init__ of a
# new-style class; used to detect classes that define no __init__ themselves.
WRAPPER_DESCRIPTOR = Dummy.__init__
def get_class_init(class_):
    """Return the function to inspect as *class_*'s constructor.

    Falls back to ``fake_empty_init`` whenever the class has no usable
    __init__ of its own (Python 2 classic classes, CPython's default
    slot wrapper, PyPy's default __init__).
    """
    try:
        func = class_.__init__
    except AttributeError:  # pragma: no cover
        # Python 2 classic class without __init__.
        return fake_empty_init
    # If this is a new-style class and there is no __init__
    # defined, in CPython (but not PyPy) this is a WRAPPER_DESCRIPTOR.
    if func is WRAPPER_DESCRIPTOR:
        return fake_empty_init
    # A PyPy class without __init__ needs to be handled specially,
    # as the default __init__ in this case falsely reports varargs
    # and keywords.
    if is_pypy_default_init(func):
        return fake_empty_init  # pragma: nocoverage
    return func
def is_pypy_default_init(func):
    """Detect PyPy's implicit default __init__ implementation.

    PyPy's default __init__ falsely reports varargs/keywords, so callers
    substitute an empty stub for it.  Returns False for anything without
    an inspectable code object (builtins, wrappers).
    """
    # Generalized: check the Python 3 spelling (__code__) first, then the
    # Python 2 spelling (func_code); py2 functions expose both, so the
    # original behaviour is preserved.
    code = getattr(func, '__code__', None)
    if code is None:
        code = getattr(func, 'func_code', None)
    if code is None:
        return False
    return code.co_name == 'descr__init__'
|
{
"content_hash": "5493c27c395e24a25709bfdc59f6ab0d",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 75,
"avg_line_length": 29.752136752136753,
"alnum_prop": 0.6627405917839702,
"repo_name": "taschini/reg",
"id": "16ec82afd6e448a5939fb687515f8b891a00858a",
"size": "3481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reg/arginfo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "114763"
}
],
"symlink_target": ""
}
|
"""
Validation of SDFkt using a collection of unittests
author: Rinse Wester
"""
import unittest
from tests.testhsdfgraph import HSDFGraphTestCase
from tests.testsdfgraph import SDFGraphTestCase
from tests.testcsdfgraph import CSDFGraphTestCase
def suite():
    """Build the aggregate test suite covering all graph test cases."""
    # Use TestLoader.loadTestsFromTestCase: unittest.makeSuite is
    # deprecated (removed in Python 3.13).  Also avoid naming the local
    # 'suite', which shadowed this function inside its own body.
    loader = unittest.TestLoader()
    all_tests = unittest.TestSuite()
    for case in (HSDFGraphTestCase, SDFGraphTestCase, CSDFGraphTestCase):
        all_tests.addTest(loader.loadTestsFromTestCase(case))
    return all_tests
if __name__ == '__main__':
    # Verbose runner so individual test names are printed as they execute.
    unittest.TextTestRunner(verbosity=2).run(suite())
|
{
"content_hash": "18cdb88012b84ddf3a178cf58c79fe0d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 56,
"avg_line_length": 25.391304347826086,
"alnum_prop": 0.7671232876712328,
"repo_name": "rinsewester/SDFkit",
"id": "b69a173c526602cd6c8349a5e4183478d9866c35",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testall.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Haskell",
"bytes": "15706"
},
{
"name": "Makefile",
"bytes": "241"
},
{
"name": "Python",
"bytes": "159137"
},
{
"name": "Shell",
"bytes": "196"
},
{
"name": "TeX",
"bytes": "251146"
}
],
"symlink_target": ""
}
|
""" The base class for all test case controllers.
Copyright (c) 2014-2022 Kenn Takara
See LICENSE for details
"""
import logging
import time
from twisted.internet import reactor
from twisted.internet.endpoints import connectProtocol, clientFromString
from fixtest.base import FixtestTestInterruptedError, FixtestTimeoutError
from fixtest.base.utils import log_text
class TestCaseController:
    """The base class used to run an individual test case.

    Attributes:
        testcase_id: identifier shown in reports.
        description: human-readable description of the test.
        exit_value: the value returned when exiting from the
            command line (0 on success, 1 otherwise).
    """

    def __init__(self, **kwargs):
        """TestCaseController initialization.

        Args:
            config: currently unused.
        """
        # pylint: disable=unused-argument
        self.testcase_id = 'Enter your testcase id'
        self.description = 'Enter your testcase description'
        self.test_status = 'test: not-started'
        self.exit_value = 1

        self._is_cancelled = False
        self._logger = logging.getLogger(__name__)

    def servers(self):
        """Returns the dict of servers that need to be started
            indexed by server name.

            This is the responsibility of the subclass.
        """
        raise NotImplementedError()

    def clients(self):
        """Returns the dict of clients that need to be started
            indexed by client name.

            This is the responsibility of the subclass.
        """
        raise NotImplementedError()

    def _execute_test(self):
        """Runs the test.  This is the entrypoint from the
            TestCaseController.
        """
        # pylint: disable=broad-except
        try:
            if not self.pre_test():
                self.test_status = 'test: failed pre-test conditions'
                return

            self.test_status = 'test: in-progress'
            self.setup()
            self.run()
            self.teardown()
            self.test_status = 'ok'
            self.exit_value = 0
        except AssertionError as err:
            self.test_status = 'fail: assert failed : ' + str(err)
        except FixtestTestInterruptedError:
            self.test_status = 'fail: test cancelled'
        except FixtestTimeoutError as err:
            self.test_status = 'fail: timeout : ' + str(err)
        except Exception:
            self.test_status = 'fail: exception'
            self._logger.exception('fail: exception')
        finally:
            # Always shut the reactor down so the process can exit.
            if reactor.running:
                reactor.callFromThread(reactor.stop)

    def pre_test(self):
        """Override for any pre test checks.

            Returns: Return True if everything is ok.  Return
                False to stop the test.
        """
        return True

    def setup(self):
        """Override this to implement any setup."""

    def run(self):
        """Override this to implement the actual test.

            This is the responsibility of the subclass.
        """
        raise NotImplementedError()

    def teardown(self):
        """Override this to implement any cleanup.

            Note that this only runs in the normal case.  In the
            case of an exception, the connections will be torn
            down by shutting the reactor down.
        """

    def cancel_test(self):
        """Cancels the test.  Cancels any operations.

            Override this to take care of any cleanup/cancelling
            that needs to be done.
        """
        self._is_cancelled = True
        for node in self.clients().values():
            node['node'].cancel()
        for node in self.servers().values():
            node['factory'].cancel()

    def _start_client(self, client):
        """Helper that starts one client connection.

            Must be called on the reactor thread.

            Arguments:
                client: The client dict().
        """
        # pylint: disable=consider-using-f-string
        log_text(self._logger.info, None,
                 'client:{0} attempting {1}:{2}'.format(
                     client['name'],
                     client['host'],
                     client['port']))
        str_send = f"tcp:{client['host']}:{client['port']}:timeout=10"
        endpoint = clientFromString(reactor, str_send)
        node = client['node']
        deferred = connectProtocol(endpoint, node)
        deferred.addCallbacks(node.client_success,
                              callbackArgs=(client,),
                              errback=node.client_failure,
                              errbackArgs=(client,))

    def wait_for_client_connections(self, timeout):
        """Initiate and wait for all client connections to connect.

            Arguments:
                timeout: seconds to wait before giving up.

            Raises:
                FixtestTimeoutError
                FixtestTestInterruptedError
        """
        for client in self.clients().values():
            reactor.callFromThread(self._start_client, client)

        # Poll until every client reports connected.
        per_sec = 5
        # BUGFIX: start pessimistic so a zero/negative timeout raises
        # FixtestTimeoutError instead of silently succeeding.
        success = False
        for _ in range(timeout * per_sec):
            if self._is_cancelled:
                raise FixtestTestInterruptedError('test cancelled')

            success = True
            for client in self.clients().values():
                if client.get('error', None) is not None:
                    raise client['error']
                if client.get('connected', '') == '':
                    success = False
                    break
            if success:
                break
            time.sleep(1.0 / per_sec)

        if not success:
            raise FixtestTimeoutError('waiting for clients to connect')

    def wait_for_server_connections(self, timeout):
        """Wait for all server connections to connect.

            Arguments:
                timeout: seconds to wait before giving up.

            Raises:
                FixtestTestInterruptedError
                FixtestTimeoutError
        """
        per_sec = 5
        # BUGFIX: the original never initialized 'success' before the loop,
        # so a zero/negative timeout raised NameError at the final check.
        success = False
        for _ in range(timeout * per_sec):
            if self._is_cancelled:
                raise FixtestTestInterruptedError('test cancelled')

            success = True
            for server in self.servers().values():
                if server.get('error', None) is not None:
                    raise server['error']
                if len(server['factory'].servers) == 0:
                    success = False
                    break
            if success:
                break
            time.sleep(1.0 / per_sec)

        if not success:
            # BUGFIX: typo 'waitng' corrected in the error message.
            raise FixtestTimeoutError('waiting for servers to connect')
|
{
"content_hash": "bd8dd9fefd35a13bca53f7d1b7fda36e",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 73,
"avg_line_length": 31.42654028436019,
"alnum_prop": 0.5496908460262404,
"repo_name": "kennt/fixtest",
"id": "4a2262620533a6e3576d7bee0559ce6ecba80a15",
"size": "6631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixtest/base/controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "154248"
}
],
"symlink_target": ""
}
|
import datetime
import unittest2
import json
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from datafeeds.datafeed_fms_api import DatafeedFMSAPI
from helpers.match_helper import MatchHelper
from helpers.match_manipulator import MatchManipulator
from models.event import Event
from models.match import Match
class TestFMSAPIMatchTiebreaker(unittest2.TestCase):
    def setUp(self):
        """Activate a GAE testbed with every service stub the datafeed uses."""
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
        self.testbed.init_app_identity_stub()
        self.testbed.init_blobstore_stub()
        self.testbed.init_urlfetch_stub()
        self.testbed.init_taskqueue_stub(root_path=".")
        ndb.get_context().clear_cache()  # Prevent data from leaking between tests
def tearDown(self):
self.testbed.deactivate()
def test_2017flwp_sequence(self):
event = Event(
id='2017flwp',
event_short='flwp',
year=2017,
event_type_enum=0,
timezone_id='America/New_York'
)
event.put()
event_code = 'flwp'
file_prefix = 'frc-api-response/v2.0/2017/schedule/{}/playoff/hybrid/'.format(event_code)
context = ndb.get_context()
result = context.urlfetch('https://www.googleapis.com/storage/v1/b/bucket/o?bucket=tbatv-prod-hrd.appspot.com&prefix={}'.format(file_prefix)).get_result()
for item in json.loads(result.content)['items']:
filename = item['name']
time_str = filename.replace(file_prefix, '').replace('.json', '').strip()
file_time = datetime.datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S.%f")
query_time = file_time + datetime.timedelta(seconds=30)
MatchManipulator.createOrUpdate(DatafeedFMSAPI('v2.0', sim_time=query_time).getMatches('2017{}'.format(event_code)), run_post_update_hook=False)
MatchHelper.deleteInvalidMatches(event.matches, event)
sf_matches = Match.query(Match.event == ndb.Key(Event, '2017flwp'), Match.comp_level == 'sf').fetch()
self.assertEqual(len(sf_matches), 7)
self.assertEqual(Match.get_by_id('2017flwp_sf1m1').alliances['red']['score'], 305)
self.assertEqual(Match.get_by_id('2017flwp_sf1m1').alliances['blue']['score'], 255)
self.assertEqual(Match.get_by_id('2017flwp_sf1m1').score_breakdown['red']['totalPoints'], 305)
self.assertEqual(Match.get_by_id('2017flwp_sf1m1').score_breakdown['blue']['totalPoints'], 255)
self.assertEqual(Match.get_by_id('2017flwp_sf1m2').alliances['red']['score'], 165)
self.assertEqual(Match.get_by_id('2017flwp_sf1m2').alliances['blue']['score'], 258)
self.assertEqual(Match.get_by_id('2017flwp_sf1m2').score_breakdown['red']['totalPoints'], 165)
self.assertEqual(Match.get_by_id('2017flwp_sf1m2').score_breakdown['blue']['totalPoints'], 258)
self.assertEqual(Match.get_by_id('2017flwp_sf1m3').alliances['red']['score'], 255)
self.assertEqual(Match.get_by_id('2017flwp_sf1m3').alliances['blue']['score'], 255)
self.assertEqual(Match.get_by_id('2017flwp_sf1m3').score_breakdown['red']['totalPoints'], 255)
self.assertEqual(Match.get_by_id('2017flwp_sf1m3').score_breakdown['blue']['totalPoints'], 255)
self.assertEqual(Match.get_by_id('2017flwp_sf1m4').alliances['red']['score'], 255)
self.assertEqual(Match.get_by_id('2017flwp_sf1m4').alliances['blue']['score'], 255)
self.assertEqual(Match.get_by_id('2017flwp_sf1m4').score_breakdown['red']['totalPoints'], 255)
self.assertEqual(Match.get_by_id('2017flwp_sf1m4').score_breakdown['blue']['totalPoints'], 255)
self.assertEqual(Match.get_by_id('2017flwp_sf1m5').alliances['red']['score'], 165)
self.assertEqual(Match.get_by_id('2017flwp_sf1m5').alliances['blue']['score'], 263)
self.assertEqual(Match.get_by_id('2017flwp_sf1m5').score_breakdown['red']['totalPoints'], 165)
self.assertEqual(Match.get_by_id('2017flwp_sf1m5').score_breakdown['blue']['totalPoints'], 263)
def test_2017flwp(self):
event = Event(
id='2017flwp',
event_short='flwp',
year=2017,
event_type_enum=0,
timezone_id='America/New_York'
)
event.put()
MatchManipulator.createOrUpdate(DatafeedFMSAPI('v2.0', sim_time=datetime.datetime(2017, 3, 04, 21, 22)).getMatches('2017flwp'))
MatchHelper.deleteInvalidMatches(event.matches, event)
sf_matches = Match.query(Match.event == ndb.Key(Event, '2017flwp'), Match.comp_level == 'sf').fetch()
self.assertEqual(len(sf_matches), 5)
old_match = Match.get_by_id('2017flwp_sf1m3')
self.assertNotEqual(old_match, None)
self.assertEqual(old_match.alliances['red']['score'], 255)
self.assertEqual(old_match.alliances['blue']['score'], 255)
self.assertEqual(old_match.score_breakdown['red']['totalPoints'], 255)
self.assertEqual(old_match.score_breakdown['blue']['totalPoints'], 255)
ndb.get_context().clear_cache() # Prevent data from leaking between tests
MatchManipulator.createOrUpdate(DatafeedFMSAPI('v2.0', sim_time=datetime.datetime(2017, 3, 04, 21, 35)).getMatches('2017flwp'))
MatchHelper.deleteInvalidMatches(event.matches, event)
sf_matches = Match.query(Match.event == ndb.Key(Event, '2017flwp'), Match.comp_level == 'sf').fetch()
self.assertEqual(len(sf_matches), 6)
new_match = Match.get_by_id('2017flwp_sf1m3')
self.assertNotEqual(new_match, None)
self.assertEqual(old_match.alliances, new_match.alliances)
self.assertEqual(old_match.score_breakdown, new_match.score_breakdown)
tiebreaker_match = Match.get_by_id('2017flwp_sf1m4')
self.assertNotEqual(tiebreaker_match, None)
self.assertEqual(tiebreaker_match.alliances['red']['score'], 165)
self.assertEqual(tiebreaker_match.alliances['blue']['score'], 263)
self.assertEqual(tiebreaker_match.score_breakdown['red']['totalPoints'], 165)
self.assertEqual(tiebreaker_match.score_breakdown['blue']['totalPoints'], 263)
def test_2017pahat(self):
event = Event(
id='2017pahat',
event_short='pahat',
year=2017,
event_type_enum=0,
timezone_id='America/New_York'
)
event.put()
MatchManipulator.createOrUpdate(DatafeedFMSAPI('v2.0', sim_time=datetime.datetime(2017, 3, 05, 20, 45)).getMatches('2017pahat'))
MatchHelper.deleteInvalidMatches(event.matches, event)
f_matches = Match.query(Match.event == ndb.Key(Event, '2017pahat'), Match.comp_level == 'f').fetch()
self.assertEqual(len(f_matches), 3)
old_match = Match.get_by_id('2017pahat_f1m2')
self.assertNotEqual(old_match, None)
self.assertEqual(old_match.alliances['red']['score'], 255)
self.assertEqual(old_match.alliances['blue']['score'], 255)
self.assertEqual(old_match.score_breakdown['red']['totalPoints'], 255)
self.assertEqual(old_match.score_breakdown['blue']['totalPoints'], 255)
ndb.get_context().clear_cache() # Prevent data from leaking between tests
MatchManipulator.createOrUpdate(DatafeedFMSAPI('v2.0', sim_time=datetime.datetime(2017, 3, 05, 21, 02)).getMatches('2017pahat'))
MatchHelper.deleteInvalidMatches(event.matches, event)
f_matches = Match.query(Match.event == ndb.Key(Event, '2017pahat'), Match.comp_level == 'f').fetch()
self.assertEqual(len(f_matches), 4)
new_match = Match.get_by_id('2017pahat_f1m2')
self.assertNotEqual(new_match, None)
self.assertEqual(old_match.alliances, new_match.alliances)
self.assertEqual(old_match.score_breakdown, new_match.score_breakdown)
tiebreaker_match = Match.get_by_id('2017pahat_f1m4')
self.assertNotEqual(tiebreaker_match, None)
self.assertEqual(tiebreaker_match.alliances['red']['score'], 240)
self.assertEqual(tiebreaker_match.alliances['blue']['score'], 235)
self.assertEqual(tiebreaker_match.score_breakdown['red']['totalPoints'], 240)
self.assertEqual(tiebreaker_match.score_breakdown['blue']['totalPoints'], 235)
def test_2017scmb_sequence(self):
event = Event(
id='2017scmb',
event_short='scmb',
year=2017,
event_type_enum=0,
timezone_id='America/New_York'
)
event.put()
event_code = 'scmb'
file_prefix = 'frc-api-response/v2.0/2017/schedule/{}/playoff/hybrid/'.format(event_code)
context = ndb.get_context()
result = context.urlfetch('https://www.googleapis.com/storage/v1/b/bucket/o?bucket=tbatv-prod-hrd.appspot.com&prefix={}'.format(file_prefix)).get_result()
for item in json.loads(result.content)['items']:
filename = item['name']
time_str = filename.replace(file_prefix, '').replace('.json', '').strip()
file_time = datetime.datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S.%f")
query_time = file_time + datetime.timedelta(seconds=30)
MatchManipulator.createOrUpdate(DatafeedFMSAPI('v2.0', sim_time=query_time).getMatches('2017{}'.format(event_code)), run_post_update_hook=False)
MatchHelper.deleteInvalidMatches(event.matches, event)
qf_matches = Match.query(Match.event == ndb.Key(Event, '2017scmb'), Match.comp_level == 'qf').fetch()
self.assertEqual(len(qf_matches), 11)
sf_matches = Match.query(Match.event == ndb.Key(Event, '2017scmb'), Match.comp_level == 'sf').fetch()
self.assertEqual(len(sf_matches), 4)
f_matches = Match.query(Match.event == ndb.Key(Event, '2017scmb'), Match.comp_level == 'f').fetch()
self.assertEqual(len(f_matches), 3)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').alliances['red']['score'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').alliances['blue']['score'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').score_breakdown['red']['totalPoints'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').score_breakdown['blue']['totalPoints'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').alliances['red']['score'], 213)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').alliances['blue']['score'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').score_breakdown['red']['totalPoints'], 213)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').score_breakdown['blue']['totalPoints'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m3').alliances['red']['score'], 312)
self.assertEqual(Match.get_by_id('2017scmb_qf4m3').alliances['blue']['score'], 255)
self.assertEqual(Match.get_by_id('2017scmb_qf4m3').score_breakdown['red']['totalPoints'], 312)
self.assertEqual(Match.get_by_id('2017scmb_qf4m3').score_breakdown['blue']['totalPoints'], 255)
self.assertEqual(Match.get_by_id('2017scmb_qf4m4').alliances['red']['score'], 310)
self.assertEqual(Match.get_by_id('2017scmb_qf4m4').alliances['blue']['score'], 306)
self.assertEqual(Match.get_by_id('2017scmb_qf4m4').score_breakdown['red']['totalPoints'], 310)
self.assertEqual(Match.get_by_id('2017scmb_qf4m4').score_breakdown['blue']['totalPoints'], 306)
def test_2017scmb(self):
event = Event(
id='2017scmb',
event_short='scmb',
year=2017,
event_type_enum=0,
timezone_id='America/New_York'
)
event.put()
MatchManipulator.createOrUpdate(DatafeedFMSAPI('v2.0', sim_time=datetime.datetime(2017, 3, 04, 19, 17)).getMatches('2017scmb'))
MatchHelper.deleteInvalidMatches(event.matches, event)
qf_matches = Match.query(Match.event == ndb.Key(Event, '2017scmb'), Match.comp_level == 'qf').fetch()
self.assertEqual(len(qf_matches), 12)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').alliances['red']['score'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').alliances['blue']['score'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').score_breakdown['red']['totalPoints'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').score_breakdown['blue']['totalPoints'], 305)
ndb.get_context().clear_cache() # Prevent data from leaking between tests
MatchManipulator.createOrUpdate(DatafeedFMSAPI('v2.0', sim_time=datetime.datetime(2017, 3, 04, 19, 50)).getMatches('2017scmb'))
MatchHelper.deleteInvalidMatches(event.matches, event)
qf_matches = Match.query(Match.event == ndb.Key(Event, '2017scmb'), Match.comp_level == 'qf').fetch()
self.assertEqual(len(qf_matches), 12)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').alliances['red']['score'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').alliances['blue']['score'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').score_breakdown['red']['totalPoints'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').score_breakdown['blue']['totalPoints'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').alliances['red']['score'], 213)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').alliances['blue']['score'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').score_breakdown['red']['totalPoints'], 213)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').score_breakdown['blue']['totalPoints'], 305)
ndb.get_context().clear_cache() # Prevent data from leaking between tests
MatchManipulator.createOrUpdate(DatafeedFMSAPI('v2.0', sim_time=datetime.datetime(2017, 3, 04, 20, 12)).getMatches('2017scmb'))
MatchHelper.deleteInvalidMatches(event.matches, event)
qf_matches = Match.query(Match.event == ndb.Key(Event, '2017scmb'), Match.comp_level == 'qf').fetch()
self.assertEqual(len(qf_matches), 12)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').alliances['red']['score'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').alliances['blue']['score'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').score_breakdown['red']['totalPoints'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').score_breakdown['blue']['totalPoints'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').alliances['red']['score'], 213)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').alliances['blue']['score'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').score_breakdown['red']['totalPoints'], 213)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').score_breakdown['blue']['totalPoints'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m3').alliances['red']['score'], 312)
self.assertEqual(Match.get_by_id('2017scmb_qf4m3').alliances['blue']['score'], 255)
self.assertEqual(Match.get_by_id('2017scmb_qf4m3').score_breakdown['red']['totalPoints'], 312)
self.assertEqual(Match.get_by_id('2017scmb_qf4m3').score_breakdown['blue']['totalPoints'], 255)
ndb.get_context().clear_cache() # Prevent data from leaking between tests
MatchManipulator.createOrUpdate(DatafeedFMSAPI('v2.0', sim_time=datetime.datetime(2017, 3, 04, 20, 48)).getMatches('2017scmb'))
MatchHelper.deleteInvalidMatches(event.matches, event)
qf_matches = Match.query(Match.event == ndb.Key(Event, '2017scmb'), Match.comp_level == 'qf').fetch()
self.assertEqual(len(qf_matches), 13)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').alliances['red']['score'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').alliances['blue']['score'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').score_breakdown['red']['totalPoints'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m1').score_breakdown['blue']['totalPoints'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').alliances['red']['score'], 213)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').alliances['blue']['score'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').score_breakdown['red']['totalPoints'], 213)
self.assertEqual(Match.get_by_id('2017scmb_qf4m2').score_breakdown['blue']['totalPoints'], 305)
self.assertEqual(Match.get_by_id('2017scmb_qf4m3').alliances['red']['score'], 312)
self.assertEqual(Match.get_by_id('2017scmb_qf4m3').alliances['blue']['score'], 255)
self.assertEqual(Match.get_by_id('2017scmb_qf4m3').score_breakdown['red']['totalPoints'], 312)
self.assertEqual(Match.get_by_id('2017scmb_qf4m3').score_breakdown['blue']['totalPoints'], 255)
self.assertEqual(Match.get_by_id('2017scmb_qf4m4').alliances['red']['score'], 310)
self.assertEqual(Match.get_by_id('2017scmb_qf4m4').alliances['blue']['score'], 306)
self.assertEqual(Match.get_by_id('2017scmb_qf4m4').score_breakdown['red']['totalPoints'], 310)
self.assertEqual(Match.get_by_id('2017scmb_qf4m4').score_breakdown['blue']['totalPoints'], 306)
def test_2017ncwin(self):
event = Event(
id='2017ncwin',
event_short='ncwin',
year=2017,
event_type_enum=0,
timezone_id='America/New_York'
)
event.put()
MatchManipulator.createOrUpdate(DatafeedFMSAPI('v2.0', sim_time=datetime.datetime(2017, 3, 05, 21, 2)).getMatches('2017ncwin'))
MatchHelper.deleteInvalidMatches(event.matches, event)
sf_matches = Match.query(Match.event == ndb.Key(Event, '2017ncwin'), Match.comp_level == 'sf').fetch()
self.assertEqual(len(sf_matches), 6)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').alliances['red']['score'], 265)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').alliances['blue']['score'], 150)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').score_breakdown['red']['totalPoints'], 265)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').score_breakdown['blue']['totalPoints'], 150)
ndb.get_context().clear_cache() # Prevent data from leaking between tests
MatchManipulator.createOrUpdate(DatafeedFMSAPI('v2.0', sim_time=datetime.datetime(2017, 3, 05, 21, 30)).getMatches('2017ncwin'))
MatchHelper.deleteInvalidMatches(event.matches, event)
sf_matches = Match.query(Match.event == ndb.Key(Event, '2017ncwin'), Match.comp_level == 'sf').fetch()
self.assertEqual(len(sf_matches), 6)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').alliances['red']['score'], 265)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').alliances['blue']['score'], 150)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').score_breakdown['red']['totalPoints'], 265)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').score_breakdown['blue']['totalPoints'], 150)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m2').alliances['red']['score'], 205)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m2').alliances['blue']['score'], 205)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m2').score_breakdown['red']['totalPoints'], 205)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m2').score_breakdown['blue']['totalPoints'], 205)
ndb.get_context().clear_cache() # Prevent data from leaking between tests
MatchManipulator.createOrUpdate(DatafeedFMSAPI('v2.0', sim_time=datetime.datetime(2017, 3, 05, 21, 35)).getMatches('2017ncwin'))
MatchHelper.deleteInvalidMatches(event.matches, event)
sf_matches = Match.query(Match.event == ndb.Key(Event, '2017ncwin'), Match.comp_level == 'sf').fetch()
self.assertEqual(len(sf_matches), 6)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').alliances['red']['score'], 265)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').alliances['blue']['score'], 150)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').score_breakdown['red']['totalPoints'], 265)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').score_breakdown['blue']['totalPoints'], 150)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m2').alliances['red']['score'], 205)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m2').alliances['blue']['score'], 205)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m2').score_breakdown['red']['totalPoints'], 205)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m2').score_breakdown['blue']['totalPoints'], 205)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m3').alliances['red']['score'], 145)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m3').alliances['blue']['score'], 265)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m3').score_breakdown['red']['totalPoints'], 145)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m3').score_breakdown['blue']['totalPoints'], 265)
ndb.get_context().clear_cache() # Prevent data from leaking between tests
MatchManipulator.createOrUpdate(DatafeedFMSAPI('v2.0', sim_time=datetime.datetime(2017, 3, 05, 21, 51)).getMatches('2017ncwin'))
MatchHelper.deleteInvalidMatches(event.matches, event)
sf_matches = Match.query(Match.event == ndb.Key(Event, '2017ncwin'), Match.comp_level == 'sf').fetch()
self.assertEqual(len(sf_matches), 7)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').alliances['red']['score'], 265)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').alliances['blue']['score'], 150)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').score_breakdown['red']['totalPoints'], 265)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m1').score_breakdown['blue']['totalPoints'], 150)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m2').alliances['red']['score'], 205)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m2').alliances['blue']['score'], 205)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m2').score_breakdown['red']['totalPoints'], 205)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m2').score_breakdown['blue']['totalPoints'], 205)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m3').alliances['red']['score'], 145)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m3').alliances['blue']['score'], 265)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m3').score_breakdown['red']['totalPoints'], 145)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m3').score_breakdown['blue']['totalPoints'], 265)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m4').alliances['red']['score'], 180)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m4').alliances['blue']['score'], 305)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m4').score_breakdown['red']['totalPoints'], 180)
self.assertEqual(Match.get_by_id('2017ncwin_sf2m4').score_breakdown['blue']['totalPoints'], 305)
|
{
"content_hash": "a14654f0b43b4d584493f617cf6850a3",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 162,
"avg_line_length": 61.56233421750663,
"alnum_prop": 0.6637511310267569,
"repo_name": "fangeugene/the-blue-alliance",
"id": "a07ae32017d55362ad8f3226d3d14e40d7a94d6f",
"size": "23209",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_fms_api_match_tiebreaker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "299313"
},
{
"name": "Dockerfile",
"bytes": "1815"
},
{
"name": "HTML",
"bytes": "5829913"
},
{
"name": "JavaScript",
"bytes": "516241"
},
{
"name": "Less",
"bytes": "42810"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Pug",
"bytes": "1857"
},
{
"name": "Python",
"bytes": "2857775"
},
{
"name": "Ruby",
"bytes": "4677"
},
{
"name": "Shell",
"bytes": "17229"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: adds the 'core_comment' table for the Comment model
    (nullable user FK, AOI FK, 200-char text, auto-set creation stamp)."""

    def forwards(self, orm):
        # Adding model 'Comment'
        db.create_table(u'core_comment', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
            ('aoi', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.AOI'])),
            ('text', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'core', ['Comment'])

    def backwards(self, orm):
        # Deleting model 'Comment'
        db.delete_table(u'core_comment')

    # Frozen ORM state as of this migration. Auto-generated by South;
    # must match the models exactly -- do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'core.aoi': {
            'Meta': {'object_name': 'AOI'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'analyst': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aois'", 'to': u"orm['core.Job']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'polygon': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
            'priority': ('django.db.models.fields.SmallIntegerField', [], {'default': '5', 'max_length': '1'}),
            'properties': ('jsonfield.fields.JSONField', [], {'null': 'True'}),
            'reviewers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'aoi_reviewers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'Unassigned'", 'max_length': '15'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'core.comment': {
            'Meta': {'object_name': 'Comment'},
            'aoi': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AOI']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        u'core.job': {
            'Meta': {'ordering': "('-created_at',)", 'object_name': 'Job'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'analysts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'analysts'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'feature_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['maps.FeatureType']", 'null': 'True', 'blank': 'True'}),
            'grid': ('django.db.models.fields.CharField', [], {'default': "'usng'", 'max_length': '5'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.Map']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'progress': ('django.db.models.fields.SmallIntegerField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project'", 'to': u"orm['core.Project']"}),
            'properties': ('jsonfield.fields.JSONField', [], {'null': 'True'}),
            'reviewers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'reviewers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            'tags': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'core.project': {
            'Meta': {'ordering': "('-created_at',)", 'object_name': 'Project'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'contributors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'contributors'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'project_admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'project_admins'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            'project_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'properties': ('jsonfield.fields.JSONField', [], {'null': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'maps.featuretype': {
            'Meta': {'object_name': 'FeatureType'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'properties': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'style': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
        },
        u'maps.map': {
            'Meta': {'object_name': 'Map'},
            'center_x': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'center_y': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'max_length': '800', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'projection': ('django.db.models.fields.CharField', [], {'default': "'EPSG:4326'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'zoom': ('django.db.models.fields.IntegerField', [], {'default': '5'})
        }
    }

    complete_apps = ['core']
|
{
"content_hash": "38eca4b498e6d10a1a2a80857ebdf7dc",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 208,
"avg_line_length": 78.46808510638297,
"alnum_prop": 0.5490780911062907,
"repo_name": "Pkthunder/geoq",
"id": "4ccf009e5ca15598def5a40a17d28afe6368265e",
"size": "11088",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "geoq/core/migrations/0010_auto__add_comment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "71402"
},
{
"name": "HTML",
"bytes": "203736"
},
{
"name": "JavaScript",
"bytes": "1424580"
},
{
"name": "Python",
"bytes": "727105"
}
],
"symlink_target": ""
}
|
"""Startup utilities"""
#pylint:skip-file
import os
import sys
from functools import partial
import paste.script.command
import werkzeug.script
from lxml import etree
import logging
log = logging.getLogger(__name__)  # pylint: disable-msg=C0103
# Path helper rooted at the buildout's parts/etc directory.
etc = partial(os.path.join, 'parts', 'etc')
DEPLOY_INI = etc('deploy.ini')  # paste.deploy server config (used by _serve)
DEPLOY_CFG = etc('deploy.cfg')  # app config loaded by make_app
DEBUG_INI = etc('debug.ini')    # paste.deploy server config, debug mode
DEBUG_CFG = etc('debug.cfg')    # app config loaded in debug mode
# Walk upwards from this file to the buildout root: one dirname per dot in
# the package name, plus two for the file itself and its directory.
_buildout_path = __file__
for i in range(2 + __name__.count('.')):
    _buildout_path = os.path.dirname(_buildout_path)
abspath = partial(os.path.join, _buildout_path)  # join relative to buildout root
del _buildout_path
# bin/paster serve parts/etc/deploy.ini
def make_app(global_conf=None, config=DEPLOY_CFG, debug=False):
    """Build and configure the Flask application (paste.deploy factory).

    :param global_conf: paste.deploy global configuration; unused here.
        The original default was a shared mutable ``{}`` -- replaced with
        ``None`` to avoid the mutable-default-argument pitfall.
    :param config: path (relative to the buildout root) of the app config
        file loaded via ``from_pyfile``.
    :param debug: enable Flask debug mode.
    :return: the configured ``presence_analyzer`` Flask app.
    """
    from presence_analyzer import app
    app.config.from_pyfile(abspath(config))
    app.debug = debug
    return app
# bin/paster serve parts/etc/debug.ini
def make_debug(global_conf=None, **conf):
    """Build the app wrapped in werkzeug's interactive debugger.

    :param global_conf: paste.deploy global configuration, forwarded to
        :func:`make_app` (which ignores it). The original default was a
        shared mutable ``{}`` -- replaced with ``None`` to avoid the
        mutable-default-argument pitfall.
    :param conf: extra paste.deploy options; currently unused.
    :return: a ``DebuggedApplication`` with the in-browser evaluator on.
    """
    from werkzeug.debug import DebuggedApplication
    app = make_app(global_conf, config=DEBUG_CFG, debug=True)
    return DebuggedApplication(app, evalex=True)
# bin/flask-ctl shell
def make_shell():
    """Interactive Flask Shell"""
    # NOTE: the docstring above is passed to werkzeug.script.make_shell as
    # the shell banner, so it is kept verbatim.
    from flask import request
    application = make_app()
    # Namespace exposed to the interactive shell (same names the original
    # produced via locals()).
    return {
        'request': request,
        'app': application,
        'http': application.test_client(),
        'reqctx': application.test_request_context,
    }
def _serve(action, debug=False, dry_run=False):
    """Build paster command from 'action' and 'debug' flag.

    :param action: one of '', 'fg', 'foreground', 'start', 'stop',
        'restart', 'status' -- forwarded to ``paster serve``.
    :param debug: use the debug ini instead of the deploy ini.
    :param dry_run: only print the command; do not execute it.
    """
    config = DEBUG_INI if debug else DEPLOY_INI
    argv = ['bin/paster', 'serve', config]
    if action in ('start', 'restart'):
        argv += [action, '--daemon']
    elif action in ('', 'fg', 'foreground'):
        argv += ['--reload']
    else:
        argv += [action]
    # Print the 'paster' command. Parenthesized form prints the same
    # string under Python 2 and Python 3 (the original bare print
    # statement is Python-2-only syntax).
    print(' '.join(argv))
    if dry_run:
        return
    # Daemon-style actions additionally get a log file and a pid file.
    if action in ('start', 'stop', 'restart', 'status'):
        argv += [
            '--log-file', abspath('var', 'log', 'paster.log'),
            '--pid-file', abspath('var', 'log', '.paster.pid'),
        ]
    # paste reads its command line from sys.argv; the config path is
    # swapped for its absolute form before handing off.
    sys.argv = argv[:2] + [abspath(config)] + argv[3:]
    # Run the 'paster' command
    paste.script.command.run()
# bin/flask-ctl ...
def run():
    # NOTE(review): werkzeug.script.run() appears to collect the local
    # 'action_*' callables from the calling frame and expose each as a CLI
    # sub-command; their docstrings become the user-visible help text, so
    # the docstrings below must not be reworded casually -- confirm against
    # the werkzeug.script documentation.
    action_shell = werkzeug.script.make_shell(make_shell, make_shell.__doc__)

    # bin/flask-ctl serve [fg|start|stop|restart|status]
    def action_serve(action=('a', 'start'), dry_run=False):
        """Serve the application.
        This command serves a web application that uses a paste.deploy
        configuration file for the server and application.
        Options:
        - 'action' is one of [fg|start|stop|restart|status]
        - '--dry-run' print the paster command and exit
        """
        _serve(action, debug=False, dry_run=dry_run)

    # bin/flask-ctl debug [fg|start|stop|restart|status]
    def action_debug(action=('a', 'start'), dry_run=False):
        """Serve the debugging application."""
        _serve(action, debug=True, dry_run=dry_run)

    # bin/flask-ctl status
    def action_status(dry_run=False):
        """Status of the application."""
        _serve('status', dry_run=dry_run)

    # bin/flask-ctl stop
    def action_stop(dry_run=False):
        """Stop the application."""
        _serve('stop', dry_run=dry_run)

    werkzeug.script.run()
def get_user_xml():
    """
    Gets xml file with user data.

    Downloads the remote users.xml and overwrites the local copy only when
    the two documents' text content differs.
    """
    from presence_analyzer import app
    app.config.from_pyfile(abspath(DEPLOY_CFG))
    url = "http://bolt/~sargo/users.xml"
    remote_xml = etree.parse(url)
    local_xml = etree.parse(app.config['DATA_XML'])
    # Compare the flattened text content of both trees; structure-only
    # differences are ignored, matching the original behavior.
    if set(remote_xml.getroot().itertext()) \
            != set(local_xml.getroot().itertext()):
        # 'with' guarantees the handle is closed even if the write fails
        # (the original leaked the handle on exceptions).
        with open('runtime/data/users.xml', 'w') as xml_file:
            xml_file.write(etree.tostring(remote_xml))
        log.debug('xml overwritten')
    else:
        log.debug('xml files do not differ. skipping.')
|
{
"content_hash": "d19241725e5dd11958c8d77f57b6b122",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 77,
"avg_line_length": 27.728571428571428,
"alnum_prop": 0.6218444100978877,
"repo_name": "jchrusciel/presence-analyzer-jchrusciel",
"id": "54a93d068a0e32e8ef81439d9cf3ae0c75346869",
"size": "3906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/presence_analyzer/script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1540"
},
{
"name": "Python",
"bytes": "30381"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Alters UserCheckout.email to EmailField(unique=True, max_length=254).

    dependencies = [
        ('invoice', '0003_auto_20160921_1730'),
    ]

    operations = [
        migrations.AlterField(
            model_name='usercheckout',
            name='email',
            field=models.EmailField(unique=True, max_length=254),
        ),
    ]
|
{
"content_hash": "b03fb465ae6fd62e589b57b79365e52c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 65,
"avg_line_length": 22.11111111111111,
"alnum_prop": 0.6030150753768844,
"repo_name": "brittdawn/django-webstore",
"id": "1ec797461d9abcb98dc7cbf8184fb571d2d28b4b",
"size": "422",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "invoice/migrations/0004_auto_20160921_1732.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "333244"
},
{
"name": "HTML",
"bytes": "20808"
},
{
"name": "JavaScript",
"bytes": "527034"
},
{
"name": "Python",
"bytes": "48144"
}
],
"symlink_target": ""
}
|
"""Main Weather Server application module.
.. moduleauthor:: grzes71
"""
import argparse
import logging
from configparser import ConfigParser
import weatherserver.config as cfg
from weatherserver.config.configuration import create_configuration
from weatherserver.model.weathermodel import create_weather_model
from weatherserver.service.weatherservice import create_weather_service
def logger():
    """Return this module's logger.

    :return: logger instance
    :rtype: logging.Logger
    """
    module_logger = logging.getLogger(__name__)
    return module_logger
def do_parse_args():
    """Build the command-line parser and parse sys.argv.

    :return: parsed arguments namespace
    """
    parser = argparse.ArgumentParser(description='Weather Service')
    parser.add_argument('-d', '--data', required=True,
                        type=argparse.FileType('r'),
                        help="File with weather data")
    level_choices = [cfg.LEV_DEBUG, cfg.LEV_INFO, cfg.LEV_WARNING,
                     cfg.LEV_ERROR]
    parser.add_argument('-l', '--loglevel', choices=level_choices,
                        default=cfg.LEV_DEBUG,
                        help='logging level (default {debug})'.format(debug=cfg.LEV_DEBUG))
    parser.add_argument('-n', '--name', help='name of the service')
    parser.add_argument('-s', '--server', help='host to listen')
    parser.add_argument('-p', '--port', type=int, help='port to listen')
    return parser.parse_args()
def main():
    """Application entry point: wire up config, model and service, then serve.

    Blocks in ``serve_forever`` until interrupted with Control-C.
    """
    args = do_parse_args()
    logging.basicConfig(level=args.loglevel)
    parser = ConfigParser()
    parser.read_file(args.data)
    configuration = create_configuration(parser[cfg.SEC_CONF], args)
    model = create_weather_model(parser[cfg.SEC_SITE])
    server = create_weather_service(configuration, model)
    try:
        logger().info('Use Control-C to exit')
        server.serve_forever()
    except KeyboardInterrupt:
        logger().debug("Exiting")
|
{
"content_hash": "a86ac95270279064411b1da3fbc1e89c",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 91,
"avg_line_length": 31.26153846153846,
"alnum_prop": 0.6377952755905512,
"repo_name": "grzes71/PyWeatherServer",
"id": "ae376f2f9a4a0c729deb634838fd0bf648ac2c1f",
"size": "2032",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/weatherserver/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12754"
}
],
"symlink_target": ""
}
|
# OpenERP/Odoo addon manifest: a bare dict evaluated by the module loader
# (this file is read with eval-like machinery, not imported as a module).
{
    'name' : 'Email Templates',
    'version' : '1.1',
    'author' : 'OpenERP SA',
    'website' : 'https://www.odoo.com/page/mailing',
    'category' : 'Marketing',
    'depends' : ['mail'],
    'description': """
Email Templating (simplified version of the original Power Email by Openlabs).
==============================================================================
Lets you design complete email templates related to any OpenERP document (Sale
Orders, Invoices and so on), including sender, recipient, subject, body (HTML and
Text). You may also automatically attach files to your templates, or print and
attach a report.
For advanced use, the templates may include dynamic attributes of the document
they are related to. For example, you may use the name of a Partner's country
when writing to them, also providing a safe default in case the attribute is
not defined. Each template contains a built-in assistant to help with the
inclusion of these dynamic values.
If you enable the option, a composition assistant will also appear in the sidebar
of the OpenERP documents to which the template applies (e.g. Invoices).
This serves as a quick way to send a new email based on the template, after
reviewing and adapting the contents, if needed.
This composition assistant will also turn into a mass mailing system when called
for multiple documents at once.
These email templates are also at the heart of the marketing campaign system
(see the ``marketing_campaign`` application), if you need to automate larger
campaigns on any OpenERP document.
**Technical note:** only the templating system of the original Power Email by Openlabs was kept.
""",
    # View/security files loaded when the addon is installed.
    'data': [
        'wizard/email_template_preview_view.xml',
        'email_template_view.xml',
        'res_partner_view.xml',
        'ir_actions_view.xml',
        'wizard/mail_compose_message_view.xml',
        'security/ir.model.access.csv'
    ],
    'demo': [],
    'installable': True,
    # Installed automatically as soon as its dependencies ('mail') are.
    'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "43358884bcd0143a8908f71f4199208d",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 100,
"avg_line_length": 42.10204081632653,
"alnum_prop": 0.6965584100824043,
"repo_name": "cristianquaglio/odoo",
"id": "d93c08be11c440dcc4ee3d959234f1456a8b6da2",
"size": "3068",
"binary": false,
"copies": "254",
"ref": "refs/heads/master",
"path": "addons/email_template/__openerp__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "C++",
"bytes": "108790"
},
{
"name": "CSS",
"bytes": "671328"
},
{
"name": "HTML",
"bytes": "212829"
},
{
"name": "JavaScript",
"bytes": "5984109"
},
{
"name": "Makefile",
"bytes": "12332"
},
{
"name": "Mako",
"bytes": "561"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "8366254"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "19163"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "92945"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os
import sys
import json
from distutils import log
import sentry
JS_SDK_REGISTRY_URL = (
"https://release-registry.services.sentry.io/sdks/sentry.javascript.browser/versions"
)
LOADER_FOLDER = os.path.abspath(os.path.join(os.path.dirname(sentry.__file__), "loader"))
# We cannot leverage six here, so we need to vendor
# bits that we need.
# Vendored Python 2/3 compatibility shims (six is not available here).
if sys.version_info[0] == 3:
    # Python 3: dict.items() returns a view; wrap in iter() for parity.
    def iteritems(d, **kw):
        return iter(d.items(**kw))
    from urllib.request import urlopen
else:
    # Python 2: delegate to the native iterator method.
    def iteritems(d, **kw):
        return d.iteritems(**kw)  # NOQA
    from urllib2 import urlopen
def dump_registry(path, data):
    """Serialize *data* as pretty-printed JSON under ``LOADER_FOLDER``.

    :param path: file name relative to LOADER_FOLDER, without the ``.json``
        extension
    :param data: JSON-serializable object
    """
    fn = os.path.join(LOADER_FOLDER, path + ".json")
    directory = os.path.dirname(fn)
    try:
        os.makedirs(directory)
    except OSError:
        # Best-effort: the directory usually exists already.
        pass
    # BUG FIX: json.dump() emits str, so writing through a file opened in
    # binary mode ("wb") raises TypeError on Python 3. Text mode works on
    # both Python 2 and 3.
    with open(fn, "w") as f:
        json.dump(data, f, indent=2)
        f.write("\n")
def sync_registry():
    """Download the JS SDK version registry and store it on disk.

    Network I/O: fetches JSON from the Sentry release registry and writes
    it via dump_registry() as ``_registry.json``.
    """
    body = urlopen(JS_SDK_REGISTRY_URL).read().decode("utf-8")
    data = json.loads(body)
    dump_registry("_registry", data)
from .base import BaseBuildCommand
class BuildJsSdkRegistryCommand(BaseBuildCommand):
    """Distutils command that refreshes the bundled JS SDK registry."""

    description = "build js sdk registry"

    def run(self):
        """Download registry data; log (but do not fail the build) on error."""
        log.info("downloading js sdk information from the release registry")
        try:
            sync_registry()
        except Exception as e:
            # Keep the build going, but surface what went wrong. The original
            # caught BaseException, which also swallowed KeyboardInterrupt and
            # discarded the error detail entirely.
            log.error(
                "error occurred while trying to fetch js sdk information "
                "from the registry: %s", e)
|
{
"content_hash": "d2d0b82568b6d8acbb2c424056dd2282",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 97,
"avg_line_length": 24.114754098360656,
"alnum_prop": 0.6607749830047587,
"repo_name": "mvaled/sentry",
"id": "99a18c4325bbff47d6a2b3c0f6a2c6834744e2c5",
"size": "1605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/utils/distutils/commands/build_js_sdk_registry.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
}
|
"""
Created on Thu Dec 4 18:11:29 2014
@author: jarenas
"""
import numpy as np
from itertools import product
from scipy import spatial
import sys
def dd_hellinger(theta1,theta2):
""" Calcula la distancia de Hellinger entre distribuciones discretas.
Parametros de entrada:
* theta1 : Matriz de dimensiones (n1 x K)
* theta2 : Matriz de dimensiones (n2 x K)
Devuelve: Una matriz de dimensiones (n1 x n2), donde cada componente
se obtiene como la distancia de Hellinger entre las correspondientes filas
de theta1 y theta2
"""
_SQRT2 = np.sqrt(2)
(n1, col1) = theta1.shape
(n2, col2) = theta2.shape
if col1 != col2:
sys.exit("Error en llamada a Hellinger: Las dimensiones no concuerdan")
return spatial.distance.cdist(np.sqrt(theta1),np.sqrt(theta2),'euclidean') / _SQRT2
def dd_cosine(theta1,theta2):
""" Calcula la distancia coseno entre distribuciones discretas.
Parametros de entrada:
* theta1 : Matriz de dimensiones (n1 x K)
* theta2 : Matriz de dimensiones (n2 x K)
Devuelve: Una matriz de dimensiones (n1 x n2), donde cada componente
se obtiene como la distancia coseno entre las correspondientes filas
de theta1 y theta2
"""
(n1, col1) = theta1.shape
(n2, col2) = theta2.shape
if col1 != col2:
sys.exit("Error en llamada a D. Coseno: Las dimensiones no concuerdan")
#Normalize to get output between 0 and 1
return spatial.distance.cdist(theta1,theta2,'cosine')/2
def kl(p, q):
    """Kullback-Leibler divergence D(P || Q) for discrete distributions.

    (Should only be used for the Jensen-Shannon divergence.)

    Parameters
    ----------
    p, q : array-like, dtype=float, shape=n
        Discrete probability distributions.
    """
    # BUG FIX: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``float`` is the documented replacement and is equivalent.
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    # Standard practice would be p * log(p/q) = 0 for p = 0 but inf for q = 0.
    # Since this function is only called for the JS divergence, we can also
    # use p * log(p/q) = 0 for p = q = 0 (if q is 0, then p is also 0).
    return np.sum(np.where(np.all([p != 0, q != 0], axis=0),
                           p * np.log(p / q), 0))
def dd_js(theta1,theta2):
""" Calcula la distancia de Jensen-Shannon entre distribuciones discretas.
Parametros de entrada:
* theta1 : Matriz de dimensiones (n1 x K)
* theta2 : Matriz de dimensiones (n2 x K)
Devuelve: Una matriz de dimensiones (n1 x n2), donde cada componente
se obtiene como la distancia de J-S entre las correspondientes filas
de theta1 y theta2
"""
(n1, col1) = theta1.shape
(n2, col2) = theta2.shape
if col1 != col2:
sys.exit("Error en llamada a D. JS: Ambas matrices no tienen las mismas columnas")
js_div = np.empty( (n1,n2) )
for idx,pq in zip(product(range(n1),range(n2)),product(theta1,theta2)):
av = (pq[0] + pq[1])/2
js_div[idx[0],idx[1]] = 0.5 * (kl(pq[0],av) + kl(pq[1],av))
return js_div
|
{
"content_hash": "c29299dce62dee2b767639e3e08d419e",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 90,
"avg_line_length": 35.40425531914894,
"alnum_prop": 0.6048677884615384,
"repo_name": "ML4DS/ML4all",
"id": "bc0b975d64a358f3ac86b5a1d72974dc0708da03",
"size": "3353",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "TM1.IntrodNLP/NLP_py3_NSF/notebook/dist_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "47853395"
},
{
"name": "Jupyter Notebook",
"bytes": "46076505"
},
{
"name": "Python",
"bytes": "59377"
},
{
"name": "TeX",
"bytes": "242085"
}
],
"symlink_target": ""
}
|
import re
from bs4 import BeautifulSoup as bs
from sys import argv
from codecs import open
from itertools import combinations, islice
class FormeurTXT:
    """Extract aligned sentence pairs from an XML exercise file."""
    def __init__(self,xmlFile):
        """Parse *xmlFile* and immediately build the sentence list and corpus."""
        # NOTE(review): the file handle passed to BeautifulSoup is never
        # closed explicitly.
        self.entree = bs(open(xmlFile),"xml")
        self._phrases = []
        self.recupererTexte()
        self.genererCorpus()
    def recupererTexte(self):
        """Collect <text> node contents, skipping boilerplate/numbering lines."""
        for text in self.entree.find_all("text"):
            # Reject any node whose text contains one of these markers
            # (instructions, headings, exercise numbering, etc.).
            if not any(x in text.string for x in ['Imaginez','nées','−→ ',"mission","chaque","6","régissent","indiqué","phénomènes","1","2","3","5","4","(",")","Kalaba","lègues,","Corpus"]):
                self._phrases.append(text.string)
    def getPhrases(self):
        """Return the raw list of retained sentences."""
        return self._phrases
    def genererCorpus(self):
        """Zip alternating sentences into (francais, kalaba) pairs.

        Assumes sentences alternate kalaba/francais starting at index 0 —
        odd indices feed ``francais``, even indices feed ``kalaba``.
        """
        francais = [phrase for phrase in islice(self._phrases,1,len(self._phrases),2)]
        kalaba = [phrase for phrase in islice(self._phrases,0,len(self._phrases),2)]
        self._corpus = list(zip(francais,kalaba))
    def getCorpus(self):
        """Return the list of (francais, kalaba) pairs built by genererCorpus."""
        return self._corpus
    # Read-only property aliases for the two accessors above.
    phrases = property(fget=getPhrases)
    corpus = property(fget=getCorpus)
def longest_common_string(string1, string2):
    """Return the maximal common substrings of two strings.

    Each result is a tuple ``(substring, index_in_string1, index_in_string2)``.
    A common substring is "maximal" when it is contained in no other common
    substring besides itself.
    """
    def all_substrings(text):
        # Every non-empty slice text[begin:end] with begin < end.
        return {text[begin:end]
                for (begin, end) in combinations(range(len(text) + 1), 2)}

    common = all_substrings(string1) & all_substrings(string2)
    results = []
    for candidate in common:
        # candidate.find(candidate) == 0 and every other common substring
        # that does NOT contain it contributes -1 to the sum; so the sum is
        # -(len(common) - 1) exactly when no other substring contains it.
        if sum(s.find(candidate) for s in common) == -1 * (len(common) - 1):
            results.append((candidate,
                            string1.index(candidate),
                            string2.index(candidate)))
    return results
def main():
    """Parse the XML file named on the command line and print its corpus."""
    formeur = FormeurTXT(argv[1])
    for pair in formeur.getCorpus():
        print(pair)
if __name__ == "__main__":
    main()
|
{
"content_hash": "79b7a1ad705a0a0314be3820d1d523ca",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 190,
"avg_line_length": 36.2,
"alnum_prop": 0.6341313689379988,
"repo_name": "Krolov18/Languages",
"id": "4e42aaf0b8aba95656d734fd9dca48ff60e693d4",
"size": "1639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Kalaba_resolution/traitementKalaba.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "33126"
},
{
"name": "Common Lisp",
"bytes": "57183"
},
{
"name": "HTML",
"bytes": "35649"
},
{
"name": "Java",
"bytes": "4023"
},
{
"name": "Jupyter Notebook",
"bytes": "21794"
},
{
"name": "Python",
"bytes": "357107"
},
{
"name": "Shell",
"bytes": "1055"
},
{
"name": "TeX",
"bytes": "24707"
}
],
"symlink_target": ""
}
|
from wdom.tag import NewTagClass as NewTag
from wdom.themes import *
# Theme metadata for the Concise CSS framework.
name = 'Concise'
# BUG FIX: the URL scheme was duplicated ('http://http://...').
project_url = 'http://concisecss.com/'
project_repository = 'https://github.com/ConciseCSS/concise.css'
license = 'MIT License'
license_url = 'https://github.com/ConciseCSS/concise.css/blob/master/LICENSE'
# Stylesheets/scripts/headers injected into pages that use this theme.
css_files = [
    '//cdn.concisecss.com/v3.4.0/concise.min.css',
]
js_files = []
headers = []
# Re-bind the generic wdom widget classes to Concise CSS classes
# (bg--primary, bg--success, button--flat, ...).
DefaultButton = NewTag('DefaultButton', 'button', DefaultButton)
PrimaryButton = NewTag('PrimaryButton', 'button', PrimaryButton, class_='bg--primary')
SecondaryButton = NewTag('SecondaryButton', 'button', SecondaryButton, class_='bg--success')
SuccessButton = NewTag('SuccessButton', 'button', SuccessButton, class_='bg--success')
InfoButton = NewTag('InfoButton', 'button', InfoButton, class_='button--flat')
WarningButton = NewTag('WarningButton', 'button', WarningButton, class_='bg--warning')
DangerButton = NewTag('DangerButton', 'button', DangerButton, class_='bg--error')
ErrorButton = NewTag('ErrorButton', 'button', ErrorButton, class_='bg--error')
LinkButton = NewTag('LinkButton', 'button', LinkButton, class_='button--flat')
# Layout containers; Concise uses 'container' for both Container and Wrapper.
Container = NewTag('Container', 'div', Container, class_='container')
Wrapper = NewTag('Wrapper', 'div', Wrapper, class_='container')
Row = NewTag('Row', 'div', Row, class_='row')
class Col(Div):
    """Grid-column base element; subclasses set ``column`` to a span width."""
    # Number of grid columns this element spans; None on the base class.
    column = None
    is_ = 'col'
    def __init__(self, *args, **kwargs):
        """Initialize the div and mirror ``column`` into an HTML attribute."""
        super().__init__(*args, **kwargs)
        if self.column:
            self.setAttribute('column', str(self.column))
# One subclass per grid width (1..12); Col.__init__ writes ``column``
# into the element's attribute.
Col1 = NewTag('Col1', 'div', Col, column=1, is_='col1')
Col2 = NewTag('Col2', 'div', Col, column=2, is_='col2')
Col3 = NewTag('Col3', 'div', Col, column=3, is_='col3')
Col4 = NewTag('Col4', 'div', Col, column=4, is_='col4')
Col5 = NewTag('Col5', 'div', Col, column=5, is_='col5')
Col6 = NewTag('Col6', 'div', Col, column=6, is_='col6')
Col7 = NewTag('Col7', 'div', Col, column=7, is_='col7')
Col8 = NewTag('Col8', 'div', Col, column=8, is_='col8')
Col9 = NewTag('Col9', 'div', Col, column=9, is_='col9')
Col10 = NewTag('Col10', 'div', Col, column=10, is_='col10')
Col11 = NewTag('Col11', 'div', Col, column=11, is_='col11')
Col12 = NewTag('Col12', 'div', Col, column=12, is_='col12')
# The widget set this theme exports.
extended_classes = [
    Button,
    DefaultButton,
    PrimaryButton,
    SecondaryButton,
    SuccessButton,
    InfoButton,
    WarningButton,
    DangerButton,
    ErrorButton,
    LinkButton,
    Container,
    Wrapper,
    Row,
    Col,
    Col1,
    Col2,
    Col3,
    Col4,
    Col5,
    Col6,
    Col7,
    Col8,
    Col9,
    Col10,
    Col11,
    Col12,
]
|
{
"content_hash": "eb2f6aedd8f0797f26aec95bd02092ac",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 92,
"avg_line_length": 33.14102564102564,
"alnum_prop": 0.6479690522243714,
"repo_name": "miyakogi/wdom",
"id": "f2193b00818ff581c103159b5eeace4869cadec4",
"size": "2648",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "wdom/themes/concise.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "540"
},
{
"name": "HTML",
"bytes": "1316"
},
{
"name": "JavaScript",
"bytes": "16122"
},
{
"name": "Makefile",
"bytes": "2938"
},
{
"name": "Python",
"bytes": "512498"
}
],
"symlink_target": ""
}
|
from django.contrib.auth import authenticate
from django.contrib.auth.views import redirect_to_login
from django.shortcuts import render, redirect
from django.http.response import HttpResponse, HttpResponseForbidden, HttpResponseRedirect
from django.utils import timezone
from datetime import timedelta
from common.shortcuts import render_json, render_json_err
from common.sslredirect import get_secure_url
from control import controls, settings, models
from common import tempsensor, shortcuts
# TODO: With override, maybe have a timer option for how long the override should stay enabled.
def index(req):
    """Render the main control page with pin states and current temperature."""
    #if req.user.is_authenticated() and not req.is_secure():
    #    # Request not secure but user logged in. Change it.
    #    return HttpResponseRedirect(get_secure_url(req, True))
    params = {}
    # Current override flag and raw GPIO pin states for the template.
    params['override'] = int(settings.getGlobal('override_auto', 0))
    params['fan'] = controls.readPin(settings.PIN_FANCTRL)
    params['cool'] = controls.readPin(settings.PIN_COOLCTRL)
    params['heat'] = controls.readPin(settings.PIN_HEATCTRL)
    params['mobile'] = req.mobile
    params['authenticated'] = req.user.is_authenticated()
    params['user'] = req.user
    # Only the first detected sensor is used; 'temp' is a 1/0 success flag.
    sensors = tempsensor.getSensorIDs()
    if len(sensors) > 0:
        sensor = tempsensor.getSensor(sensors[0])
        try:
            # read_temp() returns a pair; index 0 is used as Celsius and
            # index 1 as Fahrenheit below.
            temp = sensor.read_temp()
            params['temp'] = 1
            params['temp_c'] = "%.2f" % temp[0]
            params['temp_f'] = "%.2f" % temp[1]
        except tempsensor.CouldNotReadError as e:
            params['temp'] = 0
            params['temp_err'] = "Could not read sensor file!"
    else:
        params['temp'] = 0
        params['temp_err'] = "No sensor detected!"
    return render(req, "control/index.html", params)
def switch(req):
    """Set or toggle the fan/cool/heat pins via GET parameters.

    Recognized values for 'override', 'fan', 'cool' and 'heat' are
    'on', 'off' and 'toggle'. Manual pin control only works while the
    'override_auto' flag is enabled; turning the override off returns
    immediately without touching the pins.
    """
    #if not req.user.is_authenticated():
    #    return render_json_err("You must be logged in to toggle the controls!");
    response = {}
    override = req.GET.get('override', None)
    fan = req.GET.get('fan', None)
    cool = req.GET.get('cool', None)
    heat = req.GET.get('heat', None)
    # Read in the current values
    coverride = settings.getGlobal('override_auto', '0') == '1'
    # Toggle? Figure out the new state.
    if override == 'toggle':
        override = coverride and 'off' or 'on'
    # Switch override on/off first
    if override == 'on' and not coverride:
        controls.controlOverride(1)
    elif override == 'off' and coverride:
        # Return because none of the logic will be enabled afterwards
        controls.controlOverride(0)
        return render_json({
            "status": "success",
            "override": "off",
        })
    # NOTE(review): the default here is int 0 but the comparison is against
    # the string '0' — an unset flag therefore passes this check; confirm
    # what getGlobal returns for missing keys.
    if settings.getGlobal('override_auto', 0) == '0':
        return render_json_err("Automatic controls aren't overridden!")
    # Okay! Terminate now then.
    if not fan and not cool and not heat:
        return render_json({"override": override, "status": "success"})
    # Current pin states, used to resolve 'toggle' and for safety checks.
    cfan = controls.readPin(settings.PIN_FANCTRL)
    ccool = controls.readPin(settings.PIN_COOLCTRL)
    cheat = controls.readPin(settings.PIN_HEATCTRL)
    if fan == 'toggle':
        fan = cfan and 'off' or 'on'
    if cool == 'toggle':
        cool = ccool and 'off' or 'on'
    if heat == 'toggle':
        heat = cheat and 'off' or 'on'
    # Can't have them both on at the same time!
    if (cool == 'on' and heat == 'on') or (ccool and heat == 'on') or (cheat and cool == 'on'):
        return render_json_err("Cannot turn heat and cool on at the same time!")
    # Don't allow the fan to be turned off if the cooler/heater is on.
    if fan == 'off' and ((cheat and heat != "off") or (ccool and cool != "off")):
        return render_json_err("Cannot turn off fan if cooler/heater is on!")
    # The fan needs to be turned on if cool/heat are turned on!
    if cool == 'on' or heat == 'on':
        fan = 'on'
    # Fan control
    if fan == 'on':
        controls.setPin(settings.PIN_FANCTRL, 1)
        #settings.setGlobal('override_fan', 1)
    elif fan == 'off':
        controls.setPin(settings.PIN_FANCTRL, 0)
        #settings.setGlobal('override_fan', 0)
    # Cool control
    if cool == 'on':
        controls.setPin(settings.PIN_COOLCTRL, 1)
        #settings.setGlobal('override_cool', 1)
    elif cool == 'off':
        controls.setPin(settings.PIN_COOLCTRL, 0)
        #settings.setGlobal('override_cool', 0)
    # Heat control
    if heat == 'on':
        controls.setPin(settings.PIN_HEATCTRL, 1)
        #settings.setGlobal('override_heat', 1)
    elif heat == 'off':
        controls.setPin(settings.PIN_HEATCTRL, 0)
        #settings.setGlobal('override_heat', 0)
    # Echo back only the parameters that were actually supplied.
    response['status'] = 'success'
    if override:
        response['override'] = override
    if fan:
        response['fan'] = fan
    if cool:
        response['cool'] = cool
    if heat:
        response['heat'] = heat
    return render_json(response)
def switch_temp(req):
    """Set the target temperature from 'target_temp_f' or 'target_temp_c'.

    A Fahrenheit value, when present, is converted to Celsius and takes
    precedence over any supplied Celsius value.
    """
    #if not req.user.is_authenticated():
    #    return render_json_err("You must be logged in to control the temperature!");
    response = {}
    target_f = req.GET.get('target_temp_f', None)
    target_c = req.GET.get('target_temp_c', None)
    # Convert it to celsius
    if target_f:
        # Make sure the user isn't an idiot and put in a letter for a number
        try:
            float(target_f)
        except ValueError:
            return render_json_err("The specified temperature is not a valid number!")
        target_c = shortcuts.temp_to_c(float(target_f))
    if target_c is not None:
        # Make sure the user isn't an idiot and put in a letter for a number
        try:
            float(target_c)
        except ValueError:
            return render_json_err("The specified temperature is not a valid number!")
        settings.set_target_temp(target_c)
        response['status'] = 'success'
        response['target_temp_c'] = target_c
    else:
        return render_json_err("No temperature has been specified.")
    return render_json(response)
def getLatestControlEvent(req):
    """
    Returns the last control event
    """
    response = {}
    # BUG FIX: QuerySet.latest() raises DoesNotExist when the table is empty —
    # it never returns a falsy value — so the original ``if evt:`` check could
    # not reach the 'failed' branch and the view crashed instead.
    try:
        evt = models.ControlEvent.objects.latest('time')
    except models.ControlEvent.DoesNotExist:
        evt = None
    if evt:
        response['status'] = 'success'
        response['evt'] = {
            'type': evt.type,
            'val': evt.val,
            'spec_data': evt.spec_data,
        }
    else:
        response['status'] = 'failed'
        response['message'] = 'No latest control event found!'
        response['evt'] = None
    return render_json(response)
def status(req):
    """
    Returns the current state of the controls (fan state, cooling, heating)
    """
    override_on = int(settings.getGlobal('override_auto', 0))
    # Read the three pins in the same fan/cool/heat order as elsewhere.
    pin_states = {
        'fan': controls.readPin(settings.PIN_FANCTRL),
        'cool': controls.readPin(settings.PIN_COOLCTRL),
        'heat': controls.readPin(settings.PIN_HEATCTRL),
    }
    response = {
        'status': 'success',
        'override': override_on and 'on' or 'off',
    }
    for key, state in pin_states.items():
        response[key] = state and 'on' or 'off'
    return render_json(response)
def status_temp(req):
    """
    Returns the temperature status: the configured target temperature plus
    the current sensor reading (both in Celsius and Fahrenheit).
    """
    response = {}
    response['target_temp_c'] = "%.2f" % settings.get_target_temp()
    response['target_temp_f'] = "%.2f" % shortcuts.temp_to_f(settings.get_target_temp())
    # Only the first detected sensor is queried.
    sensors = tempsensor.getSensorIDs()
    if len(sensors) > 0:
        sensor = tempsensor.getSensor(sensors[0])
        try:
            temp = sensor.read_temp()
            # Return the temp in both celsius and fahrenheit
            response['temp_c'] = "%.2f" % temp[0]
            response['temp_f'] = "%.2f" % temp[1]
            response['status'] = 'success'
        except tempsensor.CouldNotReadError as e:
            response['status'] = 'failed'
            response['message'] = 'Could not read temperature from sensor! (error reading from file)'
    else:
        response['status'] = 'failed'
        response['message'] = 'Could not read temperature from sensor! (no sensor detected)'
    return render_json(response)
|
{
"content_hash": "88c2e7133018772fa9b1bd837dcf7586",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 101,
"avg_line_length": 33.04471544715447,
"alnum_prop": 0.6160659367695903,
"repo_name": "DrChat/thermoctrl",
"id": "4793cb54d98eca3fb4228520803c56f169100b21",
"size": "8131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "control/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "40"
},
{
"name": "HTML",
"bytes": "14022"
},
{
"name": "JavaScript",
"bytes": "72956"
},
{
"name": "Python",
"bytes": "39008"
},
{
"name": "Shell",
"bytes": "67"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.db import models
import os
def get_upload_path(instance, filename):
    """Build the storage path for an uploaded face image.

    Files are grouped per user under ``data/faces/raw/<username>/``;
    images without an owner go into the ``temp`` folder instead.
    """
    folder = instance.user.username if instance.user else "temp"
    return os.path.join("data/faces/raw", "%s" % folder, filename)
class CorrectionDegree(models.Model):
    """Stores per-feature face correction amounts (defaulting to 0)."""
    eyes = models.FloatField(default=0)  # correction degree for the eyes
    chin = models.FloatField(default=0)  # correction degree for the chin
    def return_json(self):
        """Return the degrees as a plain JSON-serializable dict."""
        return {
            'eyes': self.eyes,
            'chin': self.chin
        }
class User(models.Model):
    """Application user account; ``username`` is the primary key."""
    username = models.CharField(max_length=30, primary_key=True)
    # NOTE(review): password is a plain CharField — no hashing is visible
    # here; confirm how it is set before relying on this in production.
    password = models.CharField(max_length=20)
    email = models.CharField(max_length=30)
    # Optional per-user correction settings.
    correction_degree = models.OneToOneField(CorrectionDegree, blank=True, null=True)
class FaceImage(models.Model):
    """An uploaded raw face image, optionally owned by a user."""
    user = models.ForeignKey(User, null=True, blank=True)
    # Stored under data/faces/raw/<username>/ (or .../temp/ when user is None),
    # see get_upload_path.
    file = models.ImageField(upload_to=get_upload_path)
    uploaded_at = models.DateTimeField(null=True, blank=True)
|
{
"content_hash": "e7b9063d59bbb64eb45c15bea9d1422e",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 85,
"avg_line_length": 29.153846153846153,
"alnum_prop": 0.6772207563764292,
"repo_name": "helloworldajou/webserver",
"id": "d895b8f8243f929d1e0eb107017d6bfaae5291b9",
"size": "1161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apiserver/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2202"
},
{
"name": "HTML",
"bytes": "5204"
},
{
"name": "JavaScript",
"bytes": "236"
},
{
"name": "Lua",
"bytes": "52269"
},
{
"name": "Makefile",
"bytes": "7417"
},
{
"name": "Nginx",
"bytes": "428"
},
{
"name": "Python",
"bytes": "134625"
},
{
"name": "Shell",
"bytes": "5471"
}
],
"symlink_target": ""
}
|
# Entry point for ``python -m airtest``: delegate to the interactive console.
if __name__ == '__main__':
    from airtest import console
    console.main()
|
{
"content_hash": "1cc3fa79b1a4cf641625c38cb6bc5020",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 31,
"avg_line_length": 26,
"alnum_prop": 0.5769230769230769,
"repo_name": "NetEase/airtest",
"id": "219c610599f9ddaff39fce72b81ad667eb906d8f",
"size": "101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airtest/__main__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "230"
},
{
"name": "CSS",
"bytes": "8092"
},
{
"name": "Go",
"bytes": "13043"
},
{
"name": "HTML",
"bytes": "17563"
},
{
"name": "JavaScript",
"bytes": "3134"
},
{
"name": "Makefile",
"bytes": "348"
},
{
"name": "Python",
"bytes": "257125"
},
{
"name": "Shell",
"bytes": "2790"
}
],
"symlink_target": ""
}
|
def arg(*args, **kwargs):
    """Decorator that attaches one CLI argument spec to a command function."""
    def _decorator(func):
        add_arg(func, *args, **kwargs)
        return func
    return _decorator


def add_arg(f, *args, **kwargs):
    """Bind CLI arguments to a shell.py ``do_foo`` function."""
    if not hasattr(f, 'arguments'):
        f.arguments = []
    spec = (args, kwargs)
    # NOTE(sirp): avoid dups that can occur when the module is shared across
    # tests.
    if spec not in f.arguments:
        # Because of the semantics of decorator composition if we just append
        # to the options list positional options will appear to be backwards.
        f.arguments.insert(0, spec)
|
{
"content_hash": "1d80fb91eddfdcbdf787594ae7ea7186",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 77,
"avg_line_length": 34.73684210526316,
"alnum_prop": 0.6287878787878788,
"repo_name": "trebuchet-deploy/trigger",
"id": "b039cf04c09851b98e8dd94af9a4568a9f268898",
"size": "1235",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "trigger/utils/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "42687"
}
],
"symlink_target": ""
}
|
DOCUMENTATION = '''
---
module: win_s3
version_added: ""
short_description: Upload and download from AWS S3
description:
- Uses AWS_SDK for Powershell. If the module is not found it will be downloaded. More Info: http://aws.amazon.com/powershell/. Uses the SDK, with either provided credentials or IAM role credentials on EC2 instances to upload and download files from S3. If provided, the credentials are set on the remote machine as the default profile (but only for this session).
options:
bucket:
description:
- S3 Bucket (Must exist)
required: true
default: null
aliases: []
key:
description:
- S3 Key
required: true
default: null
aliases: []
local:
description:
- Local file/directory to upload or download to.
required: yes
default: null
aliases: []
overwrite:
description:
- If true, download the file or directory even if it already exists on the host.
required: no
default: false
aliases: []
method:
description:
- S3 method to carry out. Upload: upload file or entire directory to s3. Download: download a file or directory from s3.
required: yes
choices:
- upload
- download
default: null
aliases: []
rm:
description:
- Remove the local file after upload?
required: no
choices:
- true
- yes
- false
- no
default: false
aliases: []
access_key:
description:
- AWS_ACCESS_KEY_ID: Not required if there are credentials configured on the machine, or if the machine is an ec2 instance with an IAM role.
required: no
default: none
aliases: []
secret_key:
description:
- AWS_SECRET_ACCESS_KEY: Not required if there are credentials configured on the machine, or if the machine is an ec2 instance with an IAM role.
required: no
default: none
aliases: []
author: Phil Schwartz
'''
# BUG FIX: this string contains Windows paths such as ``C:\Users`` — in a
# normal (non-raw) Python 3 string literal, ``\U`` is a hard SyntaxError
# (truncated \UXXXXXXXX escape). A raw string keeps every backslash literal
# on both Python 2 and 3; the previously-escaped ``\\`` was reduced to ``\``
# so the rendered text is unchanged.
EXAMPLES = r'''
# Upload a local file to S3
$ ansible -i hosts -m win_s3 -a "bucket=server_logs key=QA/WebServer/2015-1-1.App.log local=C:\inetpub\wwwroot\Logs\log.log method=upload rm=true access_key=EXAMPLE secret_key=EXAMPLE" all
# Download an entire directory from S3
$ ansible -i hosts -m win_s3 -a "bucket=apps key=My/Web/APP/ local=C:\Users\Me\WebApp method=download access_key=EXAMPLE secret_key=EXAMPLE" all
# Playbook example
---
- name: Download Application Zip from S3
  hosts: all
  gather_facts: false
  tasks:
    - name: Download app
      win_s3:
        bucket: 'app_deploys'
        key: 'app/latest/Application.zip'
        method: 'download'
        overwrite: true
        local: 'C:\Applications\'
        access_key: 'EXAMPLECRED'
        secret_key: 'EXAMPLESECRET'
'''
|
{
"content_hash": "f52ec9710c0019a70c3c203d3489e9b9",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 372,
"avg_line_length": 30.1,
"alnum_prop": 0.6710963455149501,
"repo_name": "schwartzmx/ansible-module-devel",
"id": "7ccfc901cc0dd9d94838addb3cb83db86b433ac4",
"size": "3552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "windows/win_s3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "41316"
},
{
"name": "Python",
"bytes": "21567"
}
],
"symlink_target": ""
}
|
import pytest
from hijri_converter import locales
@pytest.fixture(scope="class")
def all_locales(request):
request.cls.locales = locales._locale_map
@pytest.mark.usefixtures("all_locales")
class TestLocalesValidity:
def test_locale_data_structure(self):
for locale_cls in self.locales.values():
assert len(locale_cls.language_tag) == 2
assert locale_cls.language_tag.islower()
assert len(locale_cls.month_names) == 12
assert all(locale_cls.month_names) # not blank or None
assert len(locale_cls.gregorian_month_names) == 12
assert all(locale_cls.gregorian_month_names) # not blank or None
assert len(locale_cls.day_names) == 7
assert all(locale_cls.day_names) # not blank or None
assert locale_cls.notation is not None
assert locale_cls.gregorian_notation is not None
def test_locale_map(self):
assert len(locales._locale_map) > 0
assert "en" in locales._locale_map.keys()
def test_duplicated_language_tag(self):
with pytest.raises(LookupError):
class ExtraLocale(locales.Locale):
language_tag = "en"
class TestGettingLocale:
    """Lookup behavior of ``locales.get_locale``."""
    # Registering this subclass makes the 'xx' tag resolvable in the tests.
    class CustomLocale(locales.EnglishLocale):
        language_tag = "xx"
    @pytest.mark.parametrize(
        "test_input",
        [
            "xx",
            "XX",
            "xx-YY",
            "xx-yy",
            "xx_yy",
            "xx_YY",
            "xx_YY.UTF-8",
        ],
    )
    def test_locale_possible_names(self, test_input):
        """Case, region suffix and encoding suffix are all normalized away."""
        assert locales.get_locale(test_input).__class__ == self.CustomLocale
    def test_unsupported_language(self):
        """An unknown tag raises ValueError."""
        with pytest.raises(ValueError):
            locales.get_locale("xy")
|
{
"content_hash": "ac7099e8652555ff6577e4bec85bd63b",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 30.810344827586206,
"alnum_prop": 0.599888080581981,
"repo_name": "dralshehri/hijri-converter",
"id": "7a665c8da3798426573a246589fcc82633df7b05",
"size": "1787",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/test_locales.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43246"
}
],
"symlink_target": ""
}
|
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        """Point the comparison harness at the expected/generated file pair."""
        self.maxDiff = None
        filename = 'image_anchor06.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        self.image_dir = test_dir + 'images/'
        # Generated output vs. the Excel-produced reference file.
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename
        self.ignore_files = []
        self.ignore_elements = {}
    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        # Insert with pixel offsets and 'move but don't size with cells'
        # positioning (1), matching the reference workbook.
        worksheet.insert_image(
            'D7', self.image_dir + 'yellow.png',
            {'x_offset': 1, 'y_offset': 2, 'positioning': 1})
        workbook.close()
        self.assertExcelEqual()
|
{
"content_hash": "d479071ae4373d80593186311876472b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 74,
"avg_line_length": 27.243243243243242,
"alnum_prop": 0.6130952380952381,
"repo_name": "jkyeung/XlsxWriter",
"id": "32d5e34d520e1baf212ae0adb3ff7c1496e3345f",
"size": "1181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xlsxwriter/test/comparison/test_image_anchor06.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7819"
},
{
"name": "Perl",
"bytes": "3504"
},
{
"name": "Python",
"bytes": "2430294"
},
{
"name": "Shell",
"bytes": "6064"
}
],
"symlink_target": ""
}
|
"""
Test pdtContext
"""
import time
import unittest
import parsedatetime as pdt
from parsedatetime.context import pdtContext
class test(unittest.TestCase):
    """Tests for the pdtContext accuracy bitmask returned by Calendar.parse()."""

    def setUp(self):
        # Context-style calendar: parse() returns a pdtContext accuracy
        # bitmask instead of the legacy integer status flag.
        self.cal = pdt.Calendar(version=pdt.VERSION_CONTEXT_STYLE)
        (self.yr, self.mth, self.dy, self.hr, self.mn,
         self.sec, self.wd, self.yd, self.isdst) = time.localtime()

    def testContext(self):
        # Successful parses report exactly which date/time units were
        # recognised; flag-style parses still return the legacy integers.
        self.assertEqual(self.cal.parse('5 min from now')[1],
                         pdtContext(pdtContext.ACU_MIN | pdtContext.ACU_NOW))
        self.assertEqual(self.cal.parse('5 min from now',
                                        version=pdt.VERSION_FLAG_STYLE)[1], 2)
        self.assertEqual(self.cal.parse('7/11/2015')[1],
                         pdtContext(pdtContext.ACU_YEAR |
                                    pdtContext.ACU_MONTH | pdtContext.ACU_DAY))
        self.assertEqual(self.cal.parse('7/11/2015',
                                        version=pdt.VERSION_FLAG_STYLE)[1], 1)
        # Invalid dates/times yield an empty (falsy) context.
        self.assertEqual(self.cal.parse('14/32/2015')[1],
                         pdtContext(0))
        self.assertEqual(self.cal.parse('25:23')[1],
                         pdtContext())

    def testSources(self):
        # "Source" words (afternoon/morning/night) map to half-day accuracy.
        self.assertEqual(self.cal.parse('afternoon 5pm')[1],
                         pdtContext(pdtContext.ACU_HALFDAY |
                                    pdtContext.ACU_HOUR))
        self.assertEqual(self.cal.parse('morning')[1],
                         pdtContext(pdtContext.ACU_HALFDAY))
        self.assertEqual(self.cal.parse('night', version=1)[1], 2)

    def testThreadRun(self):
        # Regression test: Calendar must be usable from a worker thread.
        from threading import Thread
        t = Thread(target=lambda: self.cal.evalRanges('4p-6p'))
        # should not throw out AttributeError
        t.start()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "a69162522f62591fac2e04583913eb60",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 35.88235294117647,
"alnum_prop": 0.5606557377049181,
"repo_name": "cpatulea/parsedatetime",
"id": "6317f7771e3a3df48f3e4c91068d86582750b375",
"size": "1854",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "parsedatetime/tests/TestContext.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "85355"
},
{
"name": "Makefile",
"bytes": "871"
},
{
"name": "Python",
"bytes": "237168"
}
],
"symlink_target": ""
}
|
class Player():
    """A leetcoin player and its scoreboard attributes."""

    def __init__(self, key, kills, deaths, name, weapon, rank):
        # The platform key doubles as the 'platformID' field in to_dict().
        self.key = key
        self.kills = kills
        self.deaths = deaths
        self.name = name
        self.rank = rank
        self.weapon = weapon

    def to_dict(self):
        """Return the player's attributes as a plain dict."""
        data = {
            u'key': self.key,
            u'platformID': self.key,
            u'kills': self.kills,
            u'deaths': self.deaths,
            u'name': self.name,
            u'rank': self.rank,
            u'weapon': self.weapon,
        }
        return data
|
{
"content_hash": "90179d92fb9dff189407bcf1e00ea924",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 63,
"avg_line_length": 30,
"alnum_prop": 0.44666666666666666,
"repo_name": "LeetCoinTeam/lc_tactoe",
"id": "bef063f8b0c1c1389190e50b23253f9be05f8f92",
"size": "601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "player.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1449"
},
{
"name": "HTML",
"bytes": "9353"
},
{
"name": "Python",
"bytes": "906908"
}
],
"symlink_target": ""
}
|
import sys
# Make the current working directory importable so the local ``pyactors``
# package and the ``tests`` helper modules can be found when this file is
# run directly.
if '' not in sys.path:
    sys.path.append('')
import time
import unittest
from pyactors.logs import file_logger
from pyactors.generator import GeneratorActor
from pyactors.thread import ThreadedGeneratorActor
from pyactors.exceptions import EmptyInboxException
from tests import TestGeneratorActor as TestActor
from tests import SenderGeneratorActor as SenderActor
from tests import ReceiverGeneratorActor as ReceiverActor
class ThreadedActor(ThreadedGeneratorActor):
    ''' Threaded actor that sums the integers 0..9, reporting the running
    total to its parent (when one is set) after every step.
    '''
    def __init__(self):
        super(ThreadedActor, self).__init__()
        # Running total; reaches 45 when the loop completes.
        self.result = 0

    def loop(self):
        # BUG FIX: the original used xrange(), which does not exist on
        # Python 3; range() iterates identically on both 2 and 3 here.
        for i in range(10):
            if self.processing:
                self.result += i
                if self.parent is not None:
                    self.parent.send(self.result)
                yield
            else:
                # Externally stopped mid-run: bail out early.
                break
        self.stop()
class LongRunningActor(ThreadedGeneratorActor):
    ''' Actor that increments a counter forever until externally stopped,
    reporting the running total to its parent (when one is set).
    '''
    def __init__(self):
        super(LongRunningActor, self).__init__()
        # Monotonically increasing counter; > 0 once loop() has run.
        self.result = 0

    def loop(self):
        while self.processing:
            self.result += 1
            if self.parent is not None:
                self.parent.send(self.result)
            # Yield control back to the scheduler after every increment.
            yield
        self.stop()
class ThreadedSenderActor(ThreadedGeneratorActor):
    ''' Threaded actor that sends one message to every actor named
    'Receiver' in the actor hierarchy, then stops itself.
    '''
    def loop(self):
        for actor in self.find(actor_name='Receiver'):
            actor.send('message from sender')
        self.stop()
class ThreadedReceiverActor(ThreadedGeneratorActor):
    ''' Threaded actor that waits until a single message arrives in its
    inbox, stores it in ``self.message`` and then stops.
    '''
    def __init__(self, name=None):
        super(ThreadedReceiverActor, self).__init__(name=name)
        # Last message received; stays None until something arrives.
        self.message = None

    def loop(self):
        while self.processing:
            try:
                self.message = self.inbox.get()
            except EmptyInboxException:
                # Nothing queued yet: flag ourselves as idle and yield
                # control back to the scheduler.
                self.waiting = True
                yield
            if self.message:
                break
        self.stop()
class ThreadedGeneratorActorTest(unittest.TestCase):
    """End-to-end tests for the threaded actors defined above.

    Each test creates a per-test file logger for its side effect only
    (log output under logs/<test_name>.log); the ``logger`` variable
    itself is intentionally unused.
    """

    def test_incorrect_processing_value_set(self):
        ''' test_threaded_actors.test_incorrect_processing_value_set
        '''
        test_name = 'test_threaded_actors.test_incorrect_processing_value_set'
        logger = file_logger(test_name, filename='logs/%s.log' % test_name)

        actor = ThreadedActor()
        # BUG FIX: the original try/except RuntimeError: pass succeeded
        # even when no exception was raised; assertRaises makes the
        # expectation an actual assertion.
        with self.assertRaises(RuntimeError):
            actor.processing = 1

    def test_incorrect_waiting_value_set(self):
        ''' test_threaded_actors.test_incorrect_waiting_value_set
        '''
        test_name = 'test_threaded_actors.test_incorrect_waiting_value_set'
        logger = file_logger(test_name, filename='logs/%s.log' % test_name)

        actor = ThreadedActor()
        # BUG FIX: same silent-pass problem as above.
        with self.assertRaises(RuntimeError):
            actor.waiting = 1

    def test_set_waiting_flag(self):
        ''' test_threaded_actors.test_set_waiting_flag
        '''
        test_name = 'test_threaded_actors.test_set_waiting_flag'
        logger = file_logger(test_name, filename='logs/%s.log' % test_name)

        actor = ThreadedActor()
        actor.waiting = True
        self.assertEqual(actor.waiting, True)

    def test_run(self):
        ''' test_threaded_actors.test_run
        '''
        test_name = 'test_threaded_actors.test_run'
        logger = file_logger(test_name, filename='logs/%s.log' % test_name)

        actor = ThreadedActor()
        actor.start()
        # Poll until the actor's loop() finishes on its own.
        while actor.processing:
            time.sleep(0.1)
        self.assertEqual(actor.result, 45)
        self.assertEqual(actor.processing, False)
        self.assertEqual(actor.waiting, False)

    def test_stop_in_the_middle(self):
        ''' test_threaded_actors.test_stop_in_the_middle
        '''
        test_name = 'test_threaded_actors.test_stop_in_the_middle'
        logger = file_logger(test_name, filename='logs/%s.log' % test_name)

        actor = LongRunningActor()
        actor.start()
        self.assertEqual(actor.processing, True)
        time.sleep(0.1)
        actor.stop()
        # The actor ran for some time before being interrupted.
        self.assertGreater(actor.result, 0)
        self.assertEqual(actor.processing, False)
        self.assertEqual(actor.waiting, False)

    def test_processing_with_children(self):
        ''' test_threaded_actors.test_processing_with_children
        '''
        test_name = 'test_threaded_actors.test_processing_with_children'
        logger = file_logger(test_name, filename='logs/%s.log' % test_name)

        parent = ThreadedActor()
        for _ in range(5):
            parent.add_child(TestActor())
        parent.start()
        while parent.processing:
            time.sleep(0.1)
        # Drain everything the children sent to the parent's inbox.
        result = []
        while True:
            try:
                result.append(parent.inbox.get())
            except EmptyInboxException:
                break
        self.assertEqual(len(result), 50)
        self.assertEqual(parent.processing, False)
        self.assertEqual(parent.waiting, False)

    def test_processing_with_diff_timelife_children(self):
        ''' test_threaded_actors.test_processing_with_diff_timelife_children
        '''
        test_name = 'test_threaded_actors.test_processing_with_diff_timelife_children'
        logger = file_logger(test_name, filename='logs/%s.log' % test_name)

        parent = ThreadedActor()
        # Children with 0..4 iterations finish at different times.
        for i in range(5):
            parent.add_child(TestActor(iters=i))
        parent.start()
        while parent.processing:
            time.sleep(0.1)
        result = []
        while True:
            try:
                result.append(parent.inbox.get())
            except EmptyInboxException:
                break
        self.assertEqual(result, [0,0,0,0,1,1,1,3,3,6])
        self.assertEqual(len(result), 10)
        self.assertEqual(parent.processing, False)
        self.assertEqual(parent.waiting, False)

    def test_send_msg_between_actors(self):
        ''' test_threaded_actors.test_send_msg_between_actors
        '''
        test_name = 'test_threaded_actors.test_send_msg_between_actors'
        logger = file_logger(test_name, filename='logs/%s.log' % test_name)

        parent = ThreadedActor()
        parent.add_child(SenderActor(name='Sender'))
        parent.add_child(ReceiverActor(name='Receiver'))
        parent.start()
        while parent.processing:
            time.sleep(0.1)
        parent.stop()
        self.assertEqual(parent.inbox.get(), 'message from sender')

    def test_threaded_actor_in_actor(self):
        ''' test_threaded_actors.test_threaded_actor_in_actor
        '''
        test_name = 'test_threaded_actors.test_threaded_actor_in_actor'
        logger = file_logger(test_name, filename='logs/%s.log' % test_name)

        parent = ThreadedActor()
        parent.add_child(ThreadedActor())
        parent.add_child(ThreadedActor())
        parent.start()
        while parent.processing:
            time.sleep(0.1)
        parent.stop()
        # Each child independently sums 0..9.
        self.assertEqual([child.result for child in parent.children], [45,45])
        self.assertEqual(parent.processing, False)
        self.assertEqual(parent.waiting, False)

    def test_send_msg_between_threaded_actors(self):
        ''' test_threaded_actors.test_send_msg_between_threaded_actors
        '''
        test_name = 'test_threaded_actors.test_send_msg_between_threaded_actors'
        logger = file_logger(test_name, filename='logs/%s.log' % test_name)

        parent = TestActor()
        parent.add_child(ThreadedSenderActor(name='Sender'))
        parent.add_child(ThreadedReceiverActor(name='Receiver'))
        parent.start()
        parent.run()
        parent.stop()
        self.assertEqual(
            [actor.message for actor in parent.find(actor_name='Receiver')],
            ['message from sender']
        )

    def test_send_msg_between_threaded_actors_in_thread(self):
        ''' test_threaded_actors.test_send_msg_between_threaded_actors_in_thread
        '''
        test_name = 'test_threaded_actors.test_send_msg_between_threaded_actors_in_thread'
        logger = file_logger(test_name, filename='logs/%s.log' % test_name)

        parent = ThreadedActor()
        parent.add_child(ThreadedSenderActor(name='Sender'))
        parent.add_child(ThreadedReceiverActor(name='Receiver'))
        parent.start()
        while parent.processing:
            time.sleep(0.1)
        parent.stop()
        self.assertEqual(
            [actor.message for actor in parent.find(actor_name='Receiver')],
            ['message from sender']
        )
|
{
"content_hash": "f56be74fa7b8b4afcc96ae920f7297d5",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 90,
"avg_line_length": 34.05791505791506,
"alnum_prop": 0.5906359823149303,
"repo_name": "snakeego/pyactors",
"id": "999119d47eedb85ceabeeb903237cc5021eb502b",
"size": "8821",
"binary": false,
"copies": "1",
"ref": "refs/heads/py3-master",
"path": "tests/test_threaded_actors.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "441"
},
{
"name": "Python",
"bytes": "80377"
}
],
"symlink_target": ""
}
|
import hec.heclib
import hec.io
import os
from datetime import datetime
from hec.heclib.util import HecTime
from os import path
def relativeFolder(folder, dssFilePath, createFolder='ifrelative'):
    """
    Return an absolute path to ``folder`` relative to ``dssFilePath``.

    If ``folder`` is already absolute it is returned unchanged; otherwise it
    is resolved against the directory containing ``dssFilePath``.

    ``createFolder`` controls when a missing folder is created on disk and is
    one of 'ifrelative' (default), 'ifabsolute' or 'allways' (the correct
    spelling 'always' is also accepted, backward-compatibly).
    """
    mode = createFolder.lower()
    if path.isabs(folder):
        absPath = folder
        createWhen = ('allways', 'always', 'ifabsolute')
    else:
        absPath = path.join(path.dirname(dssFilePath), folder)
        createWhen = ('allways', 'always', 'ifrelative')
    # Single creation point instead of the duplicated branch logic.
    if not path.isdir(absPath) and mode in createWhen:
        os.mkdir(absPath)
    return absPath
class ValidationError(Exception):
    """Raised when input data fails validation."""
class CancelledError(Exception):
    """Raised when an operation is cancelled/interrupted by the user."""
def _tscFromRecord(record):
    """
    Convert a simple record object to a HEC TimeSeriesContainer.

    :param record: Record object
    :type record: :class:`monitoring.Record`
    :return: container populated from the record's fields
    :rtype: :class:`hec.io.TimeSeriesContainer`
    """
    tsc = hec.io.TimeSeriesContainer()
    tsc.watershed = record.site
    tsc.location = record.location
    tsc.parameter = record.parameter
    tsc.version = record.version
    tsc.interval = record.interval
    tsc.fullName = record.fullName
    tsc.values = record.values
    # Quality flags are optional on the record.
    # IDIOM FIX: 'record.qualities is not None' replaces the confusing
    # 'not record.qualities is None'.
    if record.qualities is not None:
        tsc.quality = record.qualities
    tsc.times = record.times
    tsc.startTime = record.startTime
    tsc.endTime = record.endTime
    tsc.numberValues = len(record)
    tsc.units = record.units
    tsc.type = record.type
    return tsc
def saveRecords(records, dssFilePath):
    """
    Save simple record objects to a DSS file.

    :param records: iterable of Record objects
    :type records: iterable of :class:`monitoring.Record`
    :param dssFilePath: HEC-DSS database to save records to
    :type dssFilePath: str
    :return: Number of records saved
    :rtype: int
    """
    saved = 0
    # BUG FIX: open the database *before* entering try. Previously a
    # failure in open() left dssFile unbound and the finally clause raised
    # NameError, masking the original error.
    dssFile = hec.heclib.dss.HecDss.open(dssFilePath)
    try:
        for record in records:
            dssFile.put(_tscFromRecord(record))
            saved += 1
    finally:
        dssFile.close()
    return saved
def parseMeasurement(valueStr):
    """
    Return numeric value of measurement string and quality flag as tuple.

    If ``valueStr`` starts with ``<`` (i.e. below limit of detection), the
    returned value is 50% of the value after the ``<``.
    """
    # HEC quality flag bits
    TESTED = 1 << 0
    VALID = 1 << 1
    MISSING = 1 << 3
    CHANGED = 1 << 7
    USER_DEF = 1 << 24

    try:
        number = float(valueStr)
    except ValueError:
        pass
    else:
        return number, TESTED | VALID

    if valueStr.strip().startswith('<'):
        # Below detection limit: report half the limit, flagged as a
        # user-defined change.
        halved = float(valueStr.strip(' <')) * 0.5
        return halved, TESTED | VALID | CHANGED | USER_DEF
    return None, TESTED | MISSING
def parseDateTime(dateStr, timeStr, dateFmt='%Y/%m/%d'):
    """
    Return HecTime from date and time strings.

    Time format is always `%H:%M:%S`.
    """
    combined = dateStr + timeStr
    parsed = datetime.strptime(combined, dateFmt + "%H:%M:%S")
    # HecTime must be constructed from the USA-style date representation.
    usaStyle = parsed.strftime("%m/%d/%Y %H:%M:%S")
    return HecTime(usaStyle)
|
{
"content_hash": "2b95c6073fae35aed82f412a5fe68322",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 89,
"avg_line_length": 28.895652173913042,
"alnum_prop": 0.6370749322900993,
"repo_name": "jprine/monitoring-module",
"id": "56041444d80b946d04cfb7ccff2e901291ce4bc6",
"size": "3347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/toolbox/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "NSIS",
"bytes": "1585"
},
{
"name": "PowerShell",
"bytes": "615"
},
{
"name": "Python",
"bytes": "316882"
}
],
"symlink_target": ""
}
|
"""Fichier contenant le paramètre 'supprimer' de la commande 'groupe inclus'.
"""
from primaires.interpreteur.masque.parametre import Parametre
from primaires.interpreteur.masque.exceptions.erreur_interpretation import \
ErreurInterpretation
class PrmInclusSupprimer(Parametre):

    """Parameter 'supprimer' ('del') of the command 'groupe inclus'.

    Removes a group from another group's list of included groups.
    (The original docstring wrongly said 'ajouter'/add.)
    """

    def __init__(self):
        """Build the parameter: name 'supprimer', English alias 'del'."""
        Parametre.__init__(self, "supprimer", "del")
        self.schema = "<groupe1:groupe_existant> <groupe2:groupe_existant>"
        self.aide_courte = "supprime un groupe inclus"
        self.aide_longue = \
            "Cette commande permet de supprimer un groupe inclus. Le " \
            "premier groupe à entrer est celui dans lequel on doit " \
            "supprimer le second groupe précisé. Exemple : %groupe% " \
            "%groupe:inclus% %groupe:inclus:supprimer% |cmd|administrateur " \
            "joueur|ff| supprimera le groupe |tit|joueur|ff| des groupes " \
            "inclus de |tit|administrateur|ff|."

    def interpreter(self, personnage, dic_masques):
        """Interpret the parameter: remove groupe2 from groupe1's includes."""
        nom_groupe = dic_masques["groupe1"].nom_groupe
        groupe = type(self).importeur.interpreteur.groupes[nom_groupe]
        nom_a_supprimer = dic_masques["groupe2"].nom_groupe
        # Refuse when groupe2 is not currently included in groupe1.
        if nom_a_supprimer not in groupe.groupes_inclus:
            raise ErreurInterpretation(
                "|err|Le groupe {} n'est pas inclus dans {}.|ff|".format(
                    nom_a_supprimer, nom_groupe))

        groupe.supprimer_groupe_inclus(nom_a_supprimer)
        personnage << "Le groupe {} a bien été supprimé de {}.".format(
            nom_a_supprimer, nom_groupe)
|
{
"content_hash": "b52ee55f89d4399642d8b49de5857995",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 78,
"avg_line_length": 42.46341463414634,
"alnum_prop": 0.630097645031591,
"repo_name": "vlegoff/tsunami",
"id": "10e89604e759466cec9353f2114792ba9bc9202b",
"size": "3321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/joueur/commandes/groupe/inclus_supprimer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
from setuptools import setup
# Minimal packaging stub for the quokka module template.
setup(name='quokka_module_template')
|
{
"content_hash": "523aa75bcf46104ad9c9d0ddbc1d87c1",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 36,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.8059701492537313,
"repo_name": "abnerpc/quokka",
"id": "eb94a75612c42c7943289883b60e1bd94bbeb06d",
"size": "67",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "quokka/module_template/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "104"
},
{
"name": "CSS",
"bytes": "32332"
},
{
"name": "HTML",
"bytes": "119354"
},
{
"name": "JavaScript",
"bytes": "494398"
},
{
"name": "Makefile",
"bytes": "503"
},
{
"name": "Python",
"bytes": "199573"
},
{
"name": "Shell",
"bytes": "12305"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.utils.html import format_html

from open_news.models import NewsWebsite, Article
class NewsWebsiteAdmin(admin.ModelAdmin):
    """Admin for NewsWebsite with a clickable, safely-escaped site link."""
    list_display = ('id', 'name', 'url_', 'scraper')
    list_display_links = ('name',)

    def url_(self, instance):
        # SECURITY FIX: format_html() escapes the URL before it is marked
        # safe; the previous raw %-interpolation combined with allow_tags
        # allowed stored XSS via the url field.
        return format_html('<a href="{0}" target="_blank">{0}</a>',
                           instance.url)
    url_.allow_tags = True  # needed by older Django to render HTML columns
class ArticleAdmin(admin.ModelAdmin):
    """Admin for Article with a clickable, safely-escaped article link."""
    list_display = ('id', 'title', 'news_website', 'url_',)
    list_display_links = ('title',)
    raw_id_fields = ('checker_runtime',)

    def url_(self, instance):
        # SECURITY FIX: escape via format_html() instead of raw
        # %-interpolation marked safe by allow_tags (stored XSS risk).
        return format_html('<a href="{0}" target="_blank">{0}</a>',
                           instance.url)
    url_.allow_tags = True  # needed by older Django to render HTML columns
# Expose both models in the Django admin with the custom options above.
admin.site.register(NewsWebsite, NewsWebsiteAdmin)
admin.site.register(Article, ArticleAdmin)
|
{
"content_hash": "a5507638e2de87c29fc2c5e911612c1f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 83,
"avg_line_length": 35.45454545454545,
"alnum_prop": 0.6474358974358975,
"repo_name": "kholidfu/django-dynamic-scraper",
"id": "c5f5a0140ccc6ba739d01db4eff8ffc70ba8f76d",
"size": "780",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "example_project/open_news/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "7060"
},
{
"name": "Python",
"bytes": "200285"
},
{
"name": "Shell",
"bytes": "4479"
}
],
"symlink_target": ""
}
|
from sqlalchemy import create_engine
def _get_version(conn):
# this is the suggested way of finding the mode, from
# https://python-oracledb.readthedocs.io/en/latest/user_guide/tracing.html#vsessconinfo
sql = (
"SELECT UNIQUE CLIENT_DRIVER "
"FROM V$SESSION_CONNECT_INFO "
"WHERE SID = SYS_CONTEXT('USERENV', 'SID')"
)
return conn.exec_driver_sql(sql).scalar()
def run_thin_mode(url, queue):
    """Connect in default (thin) mode and report (client driver, is_thin)."""
    engine = create_engine(url)
    with engine.connect() as conn:
        info = _get_version(conn)
        queue.put((info, engine.dialect.is_thin_mode(conn)))
    engine.dispose()
def run_thick_mode(url, queue):
    """Connect in thick mode and report (client driver, is_thin)."""
    engine = create_engine(url, thick_mode={"driver_name": "custom-driver-name"})
    with engine.connect() as conn:
        info = _get_version(conn)
        queue.put((info, engine.dialect.is_thin_mode(conn)))
    engine.dispose()
|
{
"content_hash": "a3b9dc7d8b53f7b39288667e69765bf6",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 91,
"avg_line_length": 30.214285714285715,
"alnum_prop": 0.6335697399527187,
"repo_name": "sqlalchemy/sqlalchemy",
"id": "21743d6ec73408ae39d2574ef3afb1bb03214465",
"size": "957",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "test/dialect/oracle/_oracledb_mode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "21698"
},
{
"name": "Python",
"bytes": "16838583"
}
],
"symlink_target": ""
}
|
class ClassRegistry(type):
    """Maintain a registry of classes, indexed by name.

    Note that this implementation requires that the names be unique, as it uses
    a dictionary to hold the classes by name.

    The name in the registry can be overridden via the 'name' attribute of the
    class, and the 'priority' attribute controls priority. The prioritized()
    method returns the registered classes in priority order.

    Subclasses of ClassRegistry may define an 'implemented' property to exert
    control over whether the class will be added to the registry (e.g. to keep
    abstract base classes out of the registry)."""
    priority = 0

    class __metaclass__(type):
        """Give each ClassRegistry its own registry (Python 2 metaclass)."""
        def __init__(cls, name, bases, attrs):
            cls.registry = {}
            type.__init__(cls, name, bases, attrs)

    def __init__(cls, name, bases, attrs):
        super(ClassRegistry, cls).__init__(name, bases, attrs)
        # Classes that declare implemented = False stay out of the registry.
        try:
            if not cls.implemented:
                return
        except AttributeError:
            pass

        # Default the registry key to the class name when no explicit
        # 'name' attribute is set.
        try:
            cls.name
        except AttributeError:
            cls.name = name

        cls.registry[cls.name] = cls

    @classmethod
    def prioritized(tcls):
        """Return registered classes sorted by descending priority."""
        return sorted(tcls.registry.values(),
                      key=lambda v: v.priority, reverse=True)

    def unregister(cls):
        """Remove this class from the registry.

        BUG FIX: iterate over a snapshot of the keys. Deleting from the
        dict while iterating its live keys() view raises RuntimeError on
        Python 3.
        """
        for key in list(cls.registry.keys()):
            if cls.registry[key] is cls:
                del cls.registry[key]
|
{
"content_hash": "a5b79873507a3ff9bf1f5e3ea3bf2b11",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 75,
"avg_line_length": 34.74418604651163,
"alnum_prop": 0.6311914323962516,
"repo_name": "PhiInnovations/mdp28-linux-bsp",
"id": "58188fdd6e09fa6e7ad78ca1f7f497e2d15839c4",
"size": "1494",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "meta/lib/oe/classutils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "1488"
},
{
"name": "BlitzBasic",
"bytes": "2596257"
},
{
"name": "C",
"bytes": "1103490"
},
{
"name": "C++",
"bytes": "262815"
},
{
"name": "CSS",
"bytes": "128751"
},
{
"name": "D",
"bytes": "3548"
},
{
"name": "JavaScript",
"bytes": "2692"
},
{
"name": "Lua",
"bytes": "1194"
},
{
"name": "Objective-C",
"bytes": "251196"
},
{
"name": "PHP",
"bytes": "1480"
},
{
"name": "Perl",
"bytes": "106164"
},
{
"name": "Python",
"bytes": "2428143"
},
{
"name": "Ruby",
"bytes": "692"
},
{
"name": "Shell",
"bytes": "774669"
},
{
"name": "VimL",
"bytes": "8401"
},
{
"name": "XSLT",
"bytes": "208760"
}
],
"symlink_target": ""
}
|
import roslib; roslib.load_manifest('beginner_tutorials')
import rospy
from std_msgs.msg import String
def talker():
    """Publish a timestamped hello-world string on 'chatter' once a second."""
    pub = rospy.Publisher('chatter', String)
    rospy.init_node('talker')
    while not rospy.is_shutdown():
        # IDIOM FIX: don't shadow the builtin str().
        message = "hello world %s" % rospy.get_time()
        rospy.loginfo(message)
        pub.publish(String(message))
        rospy.sleep(1.0)
if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        # Raised by rospy.sleep() when the node is shut down; exit quietly.
        pass
|
{
"content_hash": "b9360471fc2a427ca1a5f5ac37d47f4f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 57,
"avg_line_length": 23.6,
"alnum_prop": 0.6186440677966102,
"repo_name": "rhogroup/simple",
"id": "33220871b96bccf30f97235796692379e16df334",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/talker.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5973"
},
{
"name": "C++",
"bytes": "49599"
},
{
"name": "Python",
"bytes": "9845"
},
{
"name": "Shell",
"bytes": "95"
}
],
"symlink_target": ""
}
|
""" Tests of the chemistry utilities
:Author: Jonathan Karr <jonrkarr@gmail.com>
:Date: 2018-02-07
:Copyright: 2018, Karr Lab
:License: MIT
"""
from wc_utils.util import chem
import attrdict
import openbabel
import unittest
class EmpiricalFormulaTestCase(unittest.TestCase):
    """Tests for chem.EmpiricalFormula construction, arithmetic,
    formatting, containment and hashing."""

    def test_EmpiricalFormula_constructor(self):
        # The constructor accepts nothing, formula strings (including float
        # and scientific-notation coefficients), dict-like objects and other
        # EmpiricalFormula instances.
        f = chem.EmpiricalFormula()
        self.assertEqual(f, {})

        f = chem.EmpiricalFormula('H')
        self.assertEqual(f, {'H': 1})

        f = chem.EmpiricalFormula('H2')
        self.assertEqual(f, {'H': 2})

        f = chem.EmpiricalFormula('H2.5')
        self.assertEqual(f, {'H': 2.5})

        f = chem.EmpiricalFormula('H2.5e3')
        self.assertEqual(f, {'H': 2.5e3})

        f = chem.EmpiricalFormula('H-2.5e3')
        self.assertEqual(f, {'H': -2.5e3})

        f = chem.EmpiricalFormula('H2.5e+3')
        self.assertEqual(f, {'H': 2.5e3})

        f = chem.EmpiricalFormula('H2.5e-3')
        self.assertEqual(f, {'H': 2.5e-3})

        f = chem.EmpiricalFormula('He2')
        self.assertEqual(f, {'He': 2})

        f = chem.EmpiricalFormula('He-2')
        self.assertEqual(f, {'He': -2})

        f = chem.EmpiricalFormula('He-20')
        self.assertEqual(f, {'He': -20})

        f = chem.EmpiricalFormula('H2O')
        self.assertEqual(f, {'H': 2, 'O': 1})

        # Repeated occurrences of an element accumulate.
        f = chem.EmpiricalFormula('He-20He30')
        self.assertEqual(f, {'He': 10})

        f = chem.EmpiricalFormula('RaRb')
        self.assertEqual(f, {'Ra': 1, 'Rb': 1})

        f = chem.EmpiricalFormula(attrdict.AttrDict({'Ra': 1, 'Rb': 1}))
        self.assertEqual(f, {'Ra': 1, 'Rb': 1})

        f = chem.EmpiricalFormula(attrdict.AttrDefault(int, {'Ra': 1, 'Rb': 1}))
        self.assertEqual(f, {'Ra': 1, 'Rb': 1})

        f = chem.EmpiricalFormula(chem.EmpiricalFormula('RaRb'))
        self.assertEqual(f, {'Ra': 1, 'Rb': 1})

        # Invalid element symbols are rejected.
        with self.assertRaisesRegex(ValueError, 'not a valid formula'):
            chem.EmpiricalFormula('Hee2')
        with self.assertRaisesRegex(ValueError, 'not a valid formula'):
            chem.EmpiricalFormula('h2')

    def test_EmpiricalFormula_get_attr(self):
        # Unset elements read as 0 via attribute or item access.
        f = chem.EmpiricalFormula()
        self.assertEqual(f.C, 0)
        self.assertEqual(f['C'], 0)

    def test_EmpiricalFormula___setitem__(self):
        # Setting a coefficient to 0 removes the element entirely.
        f = chem.EmpiricalFormula()
        f.C = 0
        self.assertEqual(f, {})
        self.assertEqual(dict(f), {})
        self.assertEqual(str(f), '')

        f = chem.EmpiricalFormula()
        f.A = 1
        self.assertEqual(f, {'A': 1})
        f.A = 0
        self.assertEqual(f, {})
        self.assertEqual(dict(f), {})
        self.assertEqual(str(f), '')
        f.A = 1.5
        self.assertEqual(f, {'A': 1.5})

        # Coefficients must be numeric; element symbols one or two letters.
        f = chem.EmpiricalFormula()
        with self.assertRaisesRegex(ValueError, 'Coefficient must be a float'):
            f.A = 'a'

        f = chem.EmpiricalFormula()
        with self.assertRaisesRegex(ValueError, 'Element must be a one or two letter string'):
            f.Aaa = 1

    def test_EmpiricalFormula_get_molecular_weight(self):
        f = chem.EmpiricalFormula('H2O')
        self.assertAlmostEqual(f.get_molecular_weight(), 18.015)

    def test_EmpiricalFormula___add__(self):
        # Addition accepts another formula or a formula string.
        f = chem.EmpiricalFormula('H2O')
        g = chem.EmpiricalFormula('HO')
        self.assertEqual(str(f + g), 'H3O2')
        self.assertEqual(str(f + 'HO'), 'H3O2')

    def test_EmpiricalFormula___sub__(self):
        f = chem.EmpiricalFormula('H2O')
        g = chem.EmpiricalFormula('HO')
        self.assertEqual(str(f - g), 'H')
        self.assertEqual(str(f - 'HO'), 'H')

    def test_EmpiricalFormula___mul__(self):
        f = chem.EmpiricalFormula('H2O')
        self.assertEqual(str(f * 2), 'H4O2')

    def test_EmpiricalFormula___truediv__(self):
        f = chem.EmpiricalFormula('H4O2')
        self.assertEqual(f / 2, chem.EmpiricalFormula({'H': 2, 'O': 1}))

    def test_EmpiricalFormula___str__(self):
        # str() canonicalises element order and drops zero coefficients.
        f = chem.EmpiricalFormula('H2O')
        self.assertEqual(str(f), 'H2O')

        f = chem.EmpiricalFormula('OH2')
        self.assertEqual(str(f), 'H2O')

        f = chem.EmpiricalFormula('N0OH2')
        self.assertEqual(str(f), 'H2O')

        f = chem.EmpiricalFormula('H2O1.1')
        self.assertEqual(str(f), 'H2O1.1')

        f = chem.EmpiricalFormula('H2O1.1e-3')
        self.assertEqual(str(f), 'H2O0.0011')

        f = chem.EmpiricalFormula('H2O1.1e+3')
        self.assertEqual(str(f), 'H2O1100')

        f = chem.EmpiricalFormula('H2O-1.1e+3')
        self.assertEqual(str(f), 'H2O-1100')

    def test_EmpiricalFormula___contains__(self):
        # Membership means "is a valid element key", not "has nonzero count":
        # note 'C' is in the H2O formula below.
        f = chem.EmpiricalFormula('H2O')
        self.assertIn('H', f)
        self.assertIn('C', f)
        self.assertNotIn('Ccc', f)

    def test_EmpiricalFormula___hash__(self):
        # Equal formulas hash equally and interoperate with lists, sets
        # and dict keys.
        f = chem.EmpiricalFormula('H2O')
        g = chem.EmpiricalFormula('H2O')
        h = chem.EmpiricalFormula('H')

        self.assertIn(f, [g])
        self.assertIn(f, set([g]))
        self.assertIn(f, {g: True})

        self.assertNotIn(f, [h])
        self.assertNotIn(f, set([h]))
        self.assertNotIn(f, {h: True})
class OpenBabelUtilsTestCase(unittest.TestCase):
    """Tests for chem.OpenBabelUtils formula/InChI/SMILES conversions."""

    def test_get_formula(self):
        # Glycine read from InChI should report the expected formula.
        gly_inchi = 'InChI=1S/C2H5NO2/c3-1-2(4)5/h1,3H2,(H,4,5)'
        gly_formula = 'C2H5NO2'
        mol = openbabel.OBMol()
        conversion = openbabel.OBConversion()
        conversion.SetInFormat('inchi')
        conversion.ReadString(mol, gly_inchi)
        self.assertEqual(chem.OpenBabelUtils.get_formula(mol), chem.EmpiricalFormula('C2H5NO2'))

    def test_get_inchi(self):
        # Round trip: reading an InChI then exporting it is the identity.
        gly_inchi = 'InChI=1S/C2H5NO2/c3-1-2(4)5/h1,3H2,(H,4,5)'
        mol = openbabel.OBMol()
        conversion = openbabel.OBConversion()
        conversion.SetInFormat('inchi')
        conversion.ReadString(mol, gly_inchi)
        self.assertEqual(chem.OpenBabelUtils.get_inchi(mol), gly_inchi)

    def test_export(self):
        gly_smiles = 'C([N+])C([O-])=O'
        mol = openbabel.OBMol()
        conversion = openbabel.OBConversion()
        conversion.SetInFormat('can')
        conversion.ReadString(mol, gly_smiles)
        # Export canonicalises the SMILES; the 'c' option changes atom order.
        self.assertEqual(chem.OpenBabelUtils.export(mol, 'smi'), 'C([N+])C(=O)[O-]')
        self.assertEqual(chem.OpenBabelUtils.export(mol, 'smi', options=('c',)), '[O-]C(=O)C[N+]')

        gly_inchi = 'InChI=1S/C2H5NO2/c3-1-2(4)5/h1,3H2,(H,4,5)'
        mol = openbabel.OBMol()
        conversion = openbabel.OBConversion()
        conversion.SetInFormat('inchi')
        conversion.ReadString(mol, gly_inchi)
        self.assertEqual(chem.OpenBabelUtils.export(mol, 'inchi'), gly_inchi)
        # MDL mol export with the 'm' option should end with an END record.
        self.assertTrue(chem.OpenBabelUtils.export(mol, 'mol', options='m').endswith('END'))
|
{
"content_hash": "98b2d5274656a71a7cb360eb463b71ac",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 98,
"avg_line_length": 32.88292682926829,
"alnum_prop": 0.5895267764426643,
"repo_name": "KarrLab/wc_utils",
"id": "360ace8875cd29af2d5d4fc3b540dc758c14d22f",
"size": "6741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/util/chem/test_chem_core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "6784"
},
{
"name": "Python",
"bytes": "415285"
},
{
"name": "Shell",
"bytes": "213"
}
],
"symlink_target": ""
}
|
from collections import deque
from time import time
from twisted.application.service import Service
from twisted.internet import reactor
from twisted.internet.defer import Deferred, DeferredList
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.protocols.basic import LineOnlyReceiver, Int32StringReceiver
from carbon.conf import settings
from carbon.util import pickle
from carbon import instrumentation, log, pipeline, state
from carbon.util import PluginRegistrar
# signal is unavailable on some platforms (e.g. certain embedded or
# Windows setups); carbon degrades gracefully without it.
try:
  import signal
except ImportError:
  log.debug("Couldn't import signal module")


# Once a full send queue has drained below this size, blocked writers
# waiting on queueHasSpace are released (see sendQueued()).
SEND_QUEUE_LOW_WATERMARK = settings.MAX_QUEUE_SIZE * settings.QUEUE_LOW_WATERMARK_PCT
class CarbonClientProtocol(object):
  def connectionMade(self):
    """Twisted callback: the connection to the destination is up.

    Registers this protocol as a streaming producer, initialises the
    per-destination instrumentation metric names, fires the factory's
    connectionMade deferred and flushes anything already queued.
    """
    log.clients("%s::connectionMade" % self)
    self.paused = False
    self.connected = True
    self.transport.registerProducer(self, streaming=True)
    # Define internal metric names
    self.lastResetTime = time()
    self.destinationName = self.factory.destinationName
    self.queuedUntilReady = 'destinations.%s.queuedUntilReady' % self.destinationName
    self.sent = 'destinations.%s.sent' % self.destinationName
    self.batchesSent = 'destinations.%s.batchesSent' % self.destinationName
    self.slowConnectionReset = 'destinations.%s.slowConnectionReset' % self.destinationName

    # Fire the current deferred, then immediately arm a fresh one for the
    # next (re)connection.
    self.factory.connectionMade.callback(self)
    self.factory.connectionMade = Deferred()
    self.sendQueued()
def connectionLost(self, reason):
log.clients("%s::connectionLost %s" % (self, reason.getErrorMessage()))
self.connected = False
  def pauseProducing(self):
    """IPushProducer hook: transport buffer is full, stop sending."""
    self.paused = True
  def resumeProducing(self):
    """IPushProducer hook: transport drained, resume and flush the queue."""
    self.paused = False
    self.sendQueued()
  def stopProducing(self):
    """IPushProducer hook: tear the connection down."""
    self.disconnect()
def disconnect(self):
if self.connected:
self.transport.unregisterProducer()
self.transport.loseConnection()
self.connected = False
def sendDatapoint(self, metric, datapoint):
self.factory.enqueue(metric, datapoint)
self.factory.scheduleSend()
  def _sendDatapointsNow(self, datapoints):
    """Implement this function to actually send datapoints.

    Subclasses override this with the wire-format-specific send logic.
    """
    raise NotImplementedError()
def sendDatapointsNow(self, datapoints):
self._sendDatapointsNow(datapoints)
instrumentation.increment(self.sent, len(datapoints))
instrumentation.increment(self.batchesSent)
self.factory.checkQueue()
def sendQueued(self):
"""This should be the only method that will be used to send stats.
In order to not hold the event loop and prevent stats from flowing
in while we send them out, this will process
settings.MAX_DATAPOINTS_PER_MESSAGE stats, send them, and if there
are still items in the queue, this will invoke reactor.callLater
to schedule another run of sendQueued after a reasonable enough time
for the destination to process what it has just received.
Given a queue size of one million stats, and using a
chained_invocation_delay of 0.0001 seconds, you'd get 1,000
sendQueued() invocations/second max. With a
settings.MAX_DATAPOINTS_PER_MESSAGE of 100, the rate of stats being
sent could theoretically be as high as 100,000 stats/sec, or
6,000,000 stats/minute. This is probably too high for a typical
receiver to handle.
In practice this theoretical max shouldn't be reached because
network delays should add an extra delay - probably on the order
of 10ms per send, so the queue should drain with an order of
minutes, which seems more realistic.
"""
queueSize = self.factory.queueSize
if self.paused:
instrumentation.max(self.queuedUntilReady, queueSize)
return
if not self.factory.hasQueuedDatapoints():
return
if settings.USE_RATIO_RESET is True:
if not self.connectionQualityMonitor():
self.resetConnectionForQualityReasons("Sent: {0}, Received: {1}".format(
instrumentation.prior_stats.get(self.sent, 0),
instrumentation.prior_stats.get('metricsReceived', 0)))
self.sendDatapointsNow(self.factory.takeSomeFromQueue())
if (self.factory.queueFull.called and
queueSize < SEND_QUEUE_LOW_WATERMARK):
if not self.factory.queueHasSpace.called:
self.factory.queueHasSpace.callback(queueSize)
if self.factory.hasQueuedDatapoints():
self.factory.scheduleSend()
def connectionQualityMonitor(self):
"""Checks to see if the connection for this factory appears to
be delivering stats at a speed close to what we're receiving
them at.
This is open to other measures of connection quality.
Returns a Bool
True means that quality is good, OR
True means that the total received is less than settings.MIN_RESET_STAT_FLOW
False means that quality is bad
"""
destination_sent = float(instrumentation.prior_stats.get(self.sent, 0))
total_received = float(instrumentation.prior_stats.get('metricsReceived', 0))
instrumentation.increment(self.slowConnectionReset, 0)
if total_received < settings.MIN_RESET_STAT_FLOW:
return True
if (destination_sent / total_received) < settings.MIN_RESET_RATIO:
return False
else:
return True
def resetConnectionForQualityReasons(self, reason):
"""Only re-sets the connection if it's been
settings.MIN_RESET_INTERVAL seconds since the last re-set.
Reason should be a string containing the quality info that led to
a re-set.
"""
if (time() - self.lastResetTime) < float(settings.MIN_RESET_INTERVAL):
return
else:
self.factory.connectedProtocol.disconnect()
self.lastResetTime = time()
instrumentation.increment(self.slowConnectionReset)
log.clients("%s:: resetConnectionForQualityReasons: %s" % (self, reason))
def __str__(self):
return 'CarbonClientProtocol(%s:%d:%s)' % (self.factory.destination)
__repr__ = __str__
class CarbonClientFactory(object, ReconnectingClientFactory):
    """Reconnecting factory for a single carbon destination.

    Owns the send queue, so datapoints survive reconnects; protocols built
    by this factory drain it.  Subclasses are registered as plugins keyed by
    ``plugin_name`` and selected via the DESTINATION_PROTOCOL setting.
    """

    __metaclass__ = PluginRegistrar
    plugins = {}
    maxDelay = 5

    def __init__(self, destination):
        self.destination = destination
        self.destinationName = ('%s:%d:%s' % destination).replace('.', '_')
        self.host, self.port, self.carbon_instance = destination
        self.addr = (self.host, self.port)
        self.started = False
        # This factory maintains protocol state across reconnects
        self.queue = deque()  # Change to make this the sole source of metrics to be sent.
        self.connectedProtocol = None
        # One-shot deferreds signalling state transitions; each is replaced
        # with a fresh Deferred after it fires.
        self.queueEmpty = Deferred()
        self.queueFull = Deferred()
        self.queueFull.addCallback(self.queueFullCallback)
        self.queueHasSpace = Deferred()
        self.queueHasSpace.addCallback(self.queueSpaceCallback)
        self.connectFailed = Deferred()
        self.connectionMade = Deferred()
        self.connectionLost = Deferred()
        self.deferSendPending = None
        # Define internal metric names
        self.attemptedRelays = 'destinations.%s.attemptedRelays' % self.destinationName
        self.fullQueueDrops = 'destinations.%s.fullQueueDrops' % self.destinationName
        self.queuedUntilConnected = 'destinations.%s.queuedUntilConnected' % self.destinationName
        self.relayMaxQueueLength = 'destinations.%s.relayMaxQueueLength' % self.destinationName

    def clientProtocol(self):
        """Return a new protocol instance for this destination (abstract)."""
        raise NotImplementedError()

    def scheduleSend(self):
        # Coalesce bursts of send requests into one delayed flush.
        if self.deferSendPending and self.deferSendPending.active():
            return
        self.deferSendPending = reactor.callLater(settings.TIME_TO_DEFER_SENDING, self.sendQueued)

    def sendQueued(self):
        if self.connectedProtocol:
            self.connectedProtocol.sendQueued()

    def queueFullCallback(self, result):
        state.events.cacheFull()
        log.clients('%s send queue is full (%d datapoints)' % (self, result))

    def queueSpaceCallback(self, result):
        if self.queueFull.called:
            log.clients('%s send queue has space available' % self.connectedProtocol)
            self.queueFull = Deferred()
            self.queueFull.addCallback(self.queueFullCallback)
            state.events.cacheSpaceAvailable()
        self.queueHasSpace = Deferred()
        self.queueHasSpace.addCallback(self.queueSpaceCallback)

    def buildProtocol(self, addr):
        self.connectedProtocol = self.clientProtocol()
        self.connectedProtocol.factory = self
        return self.connectedProtocol

    def startConnecting(self):  # calling this startFactory yields recursion problems
        self.started = True
        self.connector = reactor.connectTCP(self.host, self.port, self)

    def stopConnecting(self):
        self.started = False
        self.stopTrying()
        if self.connectedProtocol and self.connectedProtocol.connected:
            return self.connectedProtocol.disconnect()

    @property
    def queueSize(self):
        return len(self.queue)

    def hasQueuedDatapoints(self):
        return bool(self.queue)

    def takeSomeFromQueue(self):
        """Pop up to settings.MAX_DATAPOINTS_PER_MESSAGE datapoints off the
        left of the queue and return them as a list.

        NOTE: the previous implementation terminated a generator with
        ``raise StopIteration``, which PEP 479 (Python 3.7+) turns into a
        RuntimeError; a plain loop with ``break`` behaves identically on
        Python 2 and stays correct on Python 3.
        """
        taken = []
        for _ in range(settings.MAX_DATAPOINTS_PER_MESSAGE):
            try:
                taken.append(self.queue.popleft())
            except IndexError:
                break
        return taken

    def checkQueue(self):
        """Check if the queue is empty. If the queue isn't empty or
        doesn't exist yet, then this will invoke the callback chain on the
        self.queryEmpty Deferred chain with the argument 0, and will
        re-set the queueEmpty callback chain with a new Deferred
        object.
        """
        if not self.queue:
            self.queueEmpty.callback(0)
            self.queueEmpty = Deferred()

    def enqueue(self, metric, datapoint):
        self.queue.append((metric, datapoint))

    def enqueue_from_left(self, metric, datapoint):
        self.queue.appendleft((metric, datapoint))

    def sendDatapoint(self, metric, datapoint):
        instrumentation.increment(self.attemptedRelays)
        instrumentation.max(self.relayMaxQueueLength, self.queueSize)
        if self.queueSize >= settings.MAX_QUEUE_SIZE:
            # Queue full: drop the datapoint and fire queueFull once.
            if not self.queueFull.called:
                self.queueFull.callback(self.queueSize)
            instrumentation.increment(self.fullQueueDrops)
        else:
            self.enqueue(metric, datapoint)
            if self.connectedProtocol:
                self.scheduleSend()
            else:
                instrumentation.increment(self.queuedUntilConnected)

    def sendHighPriorityDatapoint(self, metric, datapoint):
        """The high priority datapoint is one relating to the carbon
        daemon itself.  It puts the datapoint on the left of the deque,
        ahead of other stats, so that when the carbon-relay, specifically,
        is overwhelmed its stats are more likely to make it through and
        expose the issue at hand.

        In addition, these stats go on the deque even when the max stats
        capacity has been reached.  This relies on not creating the deque
        with a fixed max size.
        """
        instrumentation.increment(self.attemptedRelays)
        self.enqueue_from_left(metric, datapoint)
        if self.connectedProtocol:
            self.scheduleSend()
        else:
            instrumentation.increment(self.queuedUntilConnected)

    def startedConnecting(self, connector):
        log.clients("%s::startedConnecting (%s:%d)" % (self, connector.host, connector.port))

    def clientConnectionLost(self, connector, reason):
        ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
        log.clients("%s::clientConnectionLost (%s:%d) %s" % (self, connector.host, connector.port, reason.getErrorMessage()))
        self.connectedProtocol = None
        # Fire and re-arm the connectionLost deferred.
        self.connectionLost.callback(0)
        self.connectionLost = Deferred()

    def clientConnectionFailed(self, connector, reason):
        ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
        log.clients("%s::clientConnectionFailed (%s:%d) %s" % (self, connector.host, connector.port, reason.getErrorMessage()))
        self.connectFailed.callback(dict(connector=connector, reason=reason))
        self.connectFailed = Deferred()

    def disconnect(self):
        """Drain the queue, then tear down the connection.  Returns a
        Deferred that fires once the connection is lost or the connect
        attempt fails."""
        self.queueEmpty.addCallback(lambda result: self.stopConnecting())
        readyToStop = DeferredList(
            [self.connectionLost, self.connectFailed],
            fireOnOneCallback=True,
            fireOnOneErrback=True)
        self.checkQueue()
        # This can happen if the client is stopped before a connection is ever made
        if (not readyToStop.called) and (not self.started):
            readyToStop.callback(None)
        return readyToStop

    def __str__(self):
        return 'CarbonClientFactory(%s:%d:%s)' % self.destination
    __repr__ = __str__
# Basic clients and associated factories.
class CarbonPickleClientProtocol(CarbonClientProtocol, Int32StringReceiver):
    """Carbon client speaking the pickle wire protocol: each batch of
    datapoints is pickled and shipped as one length-prefixed message."""

    def _sendDatapointsNow(self, datapoints):
        # protocol=-1 selects the highest pickle protocol available;
        # Int32StringReceiver supplies the length-prefix framing.
        payload = pickle.dumps(datapoints, protocol=-1)
        self.sendString(payload)
class CarbonPickleClientFactory(CarbonClientFactory):
    """Factory registered under the "pickle" DESTINATION_PROTOCOL name."""

    plugin_name = "pickle"

    def clientProtocol(self):
        # One fresh protocol instance per (re)connection.
        proto = CarbonPickleClientProtocol()
        return proto
class CarbonLineClientProtocol(CarbonClientProtocol, LineOnlyReceiver):
    """Carbon client speaking the plaintext line protocol: one
    "<metric> <value> <timestamp>" line per datapoint."""

    def _sendDatapointsNow(self, datapoints):
        for metric, datapoint in datapoints:
            # datapoint is a (timestamp, value) tuple throughout carbon, and
            # the plaintext protocol is "<metric> <value> <timestamp>", so the
            # value (datapoint[1]) comes first and the integral timestamp
            # (datapoint[0]) last.  The previous code emitted them swapped.
            self.sendLine("%s %s %d" % (metric, datapoint[1], datapoint[0]))
class CarbonLineClientFactory(CarbonClientFactory):
    """Factory registered under the "line" DESTINATION_PROTOCOL name."""

    plugin_name = "line"

    def clientProtocol(self):
        # One fresh protocol instance per (re)connection.
        proto = CarbonLineClientProtocol()
        return proto
class CarbonClientManager(Service):
    """Twisted service that owns one CarbonClientFactory per destination and
    routes datapoints to them through the configured router."""

    def __init__(self, router):
        self.router = router
        self.client_factories = {}  # { destination : CarbonClientFactory() }

    def createFactory(self, destination):
        """Instantiate the factory class selected by DESTINATION_PROTOCOL."""
        from carbon.conf import settings
        factory_name = settings["DESTINATION_PROTOCOL"]
        factory_class = CarbonClientFactory.plugins.get(factory_name)
        if not factory_class:
            print ("In carbon.conf, DESTINATION_PROTOCOL must be one of %s. "
                   "Invalid value: '%s'" % (', '.join(CarbonClientFactory.plugins), factory_name))
            raise SystemExit(1)
        return factory_class(destination)

    def startService(self):
        # 'signal' may be absent (see the guarded import at module top);
        # membership test on globals() directly instead of globals().keys().
        if 'signal' in globals():
            log.debug("Installing SIG_IGN for SIGHUP")
            signal.signal(signal.SIGHUP, signal.SIG_IGN)
        Service.startService(self)
        for factory in self.client_factories.values():
            if not factory.started:
                factory.startConnecting()

    def stopService(self):
        Service.stopService(self)
        return self.stopAllClients()

    def startClient(self, destination):
        """Create and start a client factory for *destination*; returns a
        Deferred firing on first connect success or failure (or None when a
        client for the destination already exists)."""
        if destination in self.client_factories:
            return
        log.clients("connecting to carbon daemon at %s:%d:%s" % destination)
        self.router.addDestination(destination)
        factory = self.createFactory(destination)
        self.client_factories[destination] = factory
        connectAttempted = DeferredList(
            [factory.connectionMade, factory.connectFailed],
            fireOnOneCallback=True,
            fireOnOneErrback=True)
        if self.running:
            factory.startConnecting()  # this can trigger & replace connectFailed
        return connectAttempted

    def stopClient(self, destination):
        factory = self.client_factories.get(destination)
        if factory is None:
            return
        self.router.removeDestination(destination)
        stopCompleted = factory.disconnect()
        stopCompleted.addCallback(lambda result: self.disconnectClient(destination))
        return stopCompleted

    def disconnectClient(self, destination):
        factory = self.client_factories.pop(destination)
        c = factory.connector
        # Abort a still-pending connection attempt, but only when nothing
        # remains queued for this destination.
        if c and c.state == 'connecting' and not factory.hasQueuedDatapoints():
            c.stopConnecting()

    def stopAllClients(self):
        deferreds = []
        for destination in list(self.client_factories):
            deferreds.append(self.stopClient(destination))
        return DeferredList(deferreds)

    def sendDatapoint(self, metric, datapoint):
        for destination in self.router.getDestinations(metric):
            self.client_factories[destination].sendDatapoint(metric, datapoint)

    def sendHighPriorityDatapoint(self, metric, datapoint):
        for destination in self.router.getDestinations(metric):
            self.client_factories[destination].sendHighPriorityDatapoint(metric, datapoint)

    def __str__(self):
        return "<%s[%x]>" % (self.__class__.__name__, id(self))
class RelayProcessor(pipeline.Processor):
    """Pipeline stage that hands each datapoint to the global client
    manager for routing to destinations; produces no downstream output."""

    plugin_name = 'relay'

    def process(self, metric, datapoint):
        state.client_manager.sendDatapoint(metric, datapoint)
        return pipeline.Processor.NO_OUTPUT
|
{
"content_hash": "daf0e6cade745378f07b657879d5fe3f",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 123,
"avg_line_length": 35.407407407407405,
"alnum_prop": 0.727602756583805,
"repo_name": "iain-buclaw-sociomantic/carbon",
"id": "2d9dd8fbe79d18caa62cdfef11f8a6fd52867297",
"size": "16252",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/carbon/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Protocol Buffer",
"bytes": "261"
},
{
"name": "Python",
"bytes": "242442"
},
{
"name": "Shell",
"bytes": "14106"
}
],
"symlink_target": ""
}
|
import json
from datetime import date
from elasticsearch import helpers as es_helpers
from oxyio import settings
from oxyio.log import logger
from oxyio.app import redis_client, es_client
from oxyio.mappings.stats import object_stat_mapping
from .base import Task
class IndexStats(Task):
    """Task that drains object-stat documents from a Redis queue and
    bulk-indexes them into a daily Elasticsearch index."""

    NAME = 'core/index_stats'

    # Cached name of the daily index we last verified/created.
    es_index = None

    def __init__(self):
        # Documents waiting to be bulk-indexed.
        self.doc_buffer = []

    def start(self):
        '''
        Read stats from the Redis queue and append to internal buffer.
        '''
        while True:
            # Block for up to 5s waiting for the next queued document.
            es_doc = redis_client.brpop(settings.REDIS_INDEX_QUEUE, 5)
            if es_doc:
                _, es_doc = es_doc
                es_doc = json.loads(es_doc)
                self.doc_buffer.append(es_doc)
            # Flush once the batch threshold is reached.
            if len(self.doc_buffer) >= settings.ES_INDEX_BATCH:
                self.index()

    def stop(self):
        # Make sure we index anything left over
        self.index()

    def index(self):
        '''
        Actually indexes the current doc buffer in ES.
        '''
        # Nothing buffered (e.g. stop() right after a flush): skip the
        # pointless bulk round-trip to ES.
        if not self.doc_buffer:
            return
        logger.debug('Indexing {0} stats -> ES...'.format(len(self.doc_buffer)))
        # Make ES docs
        docs = [{
            '_index': self.get_index(),
            '_type': 'object_stat',
            '_source': es_source,
        } for es_source in self.doc_buffer]
        n_inserted, errors = es_helpers.bulk(es_client, docs)
        logger.info('Indexed {0} stats -> ES'.format(n_inserted))
        self.emit('indexed_documents', n_inserted)
        for error in errors:
            logger.error('Error inserting ES doc: {0}'.format(error))
        # Reset the buffer
        self.doc_buffer = []

    def get_index(self):
        '''
        Returns the current (daily) index, after ensuring it exists.
        '''
        # Get the index name
        today = date.today().strftime('%Y%m%d')
        index_name = '{0}_{1}'.format(settings.ES_STATS_INDEX, today)
        # Does our cached index check match
        if self.es_index == index_name:
            return index_name
        # Check the index exists
        if not es_client.indices.exists(index=index_name):
            # Create index, put mappings
            es_client.indices.create(index=index_name)
            es_client.indices.put_mapping(
                index=index_name,
                doc_type='object_stat',
                body=object_stat_mapping
            )
            # Update the alias
            es_client.indices.put_alias(
                name=settings.ES_STATS_INDEX,
                index='{0}_*'.format(settings.ES_STATS_INDEX),
            )
        # Cache & return
        self.es_index = index_name
        return index_name
|
{
"content_hash": "52266951826085d2fbe3f6cdfd208e9e",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 80,
"avg_line_length": 27.377551020408163,
"alnum_prop": 0.555721207603429,
"repo_name": "oxyio/oxyio",
"id": "1cf877a1d80bb6c42076b63452dc979212573131",
"size": "2788",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "oxyio/tasks/index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "57487"
},
{
"name": "HTML",
"bytes": "38296"
},
{
"name": "JavaScript",
"bytes": "26598"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "114316"
},
{
"name": "Shell",
"bytes": "556"
}
],
"symlink_target": ""
}
|
# The slug of the dashboard this "enabled" file registers.
DASHBOARD = 'project'

# If set to True, this dashboard will be set as the default dashboard.
DEFAULT = True

# A dictionary of exception classes to be added to HORIZON['exceptions'].
ADD_EXCEPTIONS = {}

# A list of applications to be added to INSTALLED_APPS.
ADD_INSTALLED_APPS = ['openstack_dashboard.dashboards.project']

# Angular modules to register with the dashboard's root module.
ADD_ANGULAR_MODULES = ['hz.dashboard']

# Common path prefix shared by the launch-instance workflow assets below.
LAUNCH_INST = 'dashboard/launch-instance/'

# JavaScript source files loaded for this dashboard.
ADD_JS_FILES = [
    'dashboard/dashboard.module.js',
    'dashboard/workflow/workflow.js',
    'dashboard/cloud-services/cloud-services.js',
    LAUNCH_INST + 'launch-instance.js',
    LAUNCH_INST + 'launch-instance.model.js',
    LAUNCH_INST + 'source/source.js',
    LAUNCH_INST + 'flavor/flavor.js',
    LAUNCH_INST + 'flavor/select-flavor-table.js',
    LAUNCH_INST + 'network/network.js',
    LAUNCH_INST + 'security-groups/security-groups.js',
    LAUNCH_INST + 'keypair/keypair.js',
    LAUNCH_INST + 'configuration/configuration.js',
    LAUNCH_INST + 'configuration/load-edit.js',
    'dashboard/tech-debt/tech-debt.module.js',
    'dashboard/tech-debt/image-form-ctrl.js',
]

# Jasmine spec files mirroring the JS sources above.
ADD_JS_SPEC_FILES = [
    'dashboard/dashboard.module.spec.js',
    'dashboard/workflow/workflow.spec.js',
    'dashboard/cloud-services/cloud-services.spec.js',
    LAUNCH_INST + 'launch-instance.spec.js',
    LAUNCH_INST + 'launch-instance.model.spec.js',
    LAUNCH_INST + 'source/source.spec.js',
    LAUNCH_INST + 'flavor/flavor.spec.js',
    LAUNCH_INST + 'network/network.spec.js',
    LAUNCH_INST + 'security-groups/security-groups.spec.js',
    LAUNCH_INST + 'keypair/keypair.spec.js',
    LAUNCH_INST + 'configuration/configuration.spec.js',
]
|
{
"content_hash": "32d963427e3cff47c8767b19699f2a6d",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 73,
"avg_line_length": 37.47727272727273,
"alnum_prop": 0.7016373559733171,
"repo_name": "RudoCris/horizon",
"id": "be3200c0b08aba8c05c98326c64d27735fe6389d",
"size": "2331",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/enabled/_10_project.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "155162"
},
{
"name": "HTML",
"bytes": "477809"
},
{
"name": "JavaScript",
"bytes": "781655"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4653707"
},
{
"name": "Shell",
"bytes": "19344"
}
],
"symlink_target": ""
}
|
import struct
import time
from packet_utils import *
from tcp import *
from udp import *
from icmp import *
from igmp import *
from packet_base import packet_base
from pox.lib.addresses import IPAddr, IP_ANY, IP_BROADCAST
class ipv4(packet_base):
    """IP packet struct.

    Parses/serializes an IPv4 header and dispatches the payload to the
    matching upper-layer parser (tcp/udp/icmp/igmp) based on the protocol
    field.
    """

    MIN_LEN = 20

    IPv4 = 4
    ICMP_PROTOCOL = 1
    TCP_PROTOCOL = 6
    UDP_PROTOCOL = 17
    IGMP_PROTOCOL = 2

    DF_FLAG = 0x02
    MF_FLAG = 0x01

    # Rolling IP identification counter shared by all instances; seeded
    # from the clock and incremented (mod 2**16) per packet built.
    ip_id = int(time.time())

    def __init__(self, raw=None, prev=None, **kw):
        packet_base.__init__(self)
        self.prev = prev

        self.v = 4
        # Header length in 32-bit words.  Floor division keeps this an int
        # under Python 3 ('/' would yield 5.0 and later break struct packing
        # in hdr()); on Python 2 the result is identical.
        self.hl = ipv4.MIN_LEN // 4
        self.tos = 0
        self.iplen = ipv4.MIN_LEN
        ipv4.ip_id = (ipv4.ip_id + 1) & 0xffff
        self.id = ipv4.ip_id
        self.flags = 0
        self.frag = 0
        self.ttl = 64
        self.protocol = 0
        self.csum = 0
        self.srcip = IP_ANY
        self.dstip = IP_ANY
        self.next = b''

        if raw is not None:
            self.parse(raw)

        self._init(kw)

    def __str__(self):
        s = "[IP+%s %s>%s (cs:%02x v:%s hl:%s l:%s t:%s)]" % (
            ipproto_to_str(self.protocol),
            self.srcip, self.dstip,
            self.csum,
            self.v, self.hl, self.iplen, self.ttl)
        return s

    def parse(self, raw):
        """Parse *raw* bytes into header fields and the upper-layer payload.

        On any malformed input this logs a warning and returns with
        self.parsed left False rather than raising.
        """
        assert isinstance(raw, bytes)
        self.next = None  # In case of unfinished parsing
        self.raw = raw
        dlen = len(raw)
        if dlen < ipv4.MIN_LEN:
            self.msg('warning IP packet data too short to parse header: data len %u' % (dlen,))
            return

        (vhl, self.tos, self.iplen, self.id, self.frag, self.ttl,
         self.protocol, self.csum, self.srcip, self.dstip) \
            = struct.unpack('!BBHHHBBHII', raw[:ipv4.MIN_LEN])

        # Version and IHL share the first byte; flags share the frag field.
        self.v = vhl >> 4
        self.hl = vhl & 0x0f
        self.flags = self.frag >> 13
        self.frag = self.frag & 0x1fff

        self.dstip = IPAddr(self.dstip)
        self.srcip = IPAddr(self.srcip)

        if self.v != ipv4.IPv4:
            self.msg('(ip parse) warning: IP version %u not IPv4' % self.v)
            return
        if self.hl < 5:
            self.msg('(ip parse) warning: IP header length shorter than MIN_LEN (IHL=%u => header len=%u)' \
                     % (self.hl, 4 * self.hl))
            return
        if self.iplen < ipv4.MIN_LEN:
            self.msg('(ip parse) warning: Invalid IP len %u' % self.iplen)
            return
        if (self.hl * 4) > self.iplen:
            self.msg('(ip parse) warning: IP header longer than IP length including payload (%u vs %u)' \
                     % (self.hl, self.iplen))
            return
        if (self.hl * 4) > dlen:
            self.msg('(ip parse) warning: IP header is truncated')
            return

        # At this point, we are reasonably certain that we have an IP
        # packet
        self.parsed = True

        length = self.iplen
        if length > dlen:
            length = dlen  # Clamp to what we've got

        # Hand the payload to the matching upper-layer parser; unknown
        # protocols keep the raw bytes.
        if self.protocol == ipv4.UDP_PROTOCOL:
            self.next = udp(raw=raw[self.hl*4:length], prev=self)
        elif self.protocol == ipv4.TCP_PROTOCOL:
            self.next = tcp(raw=raw[self.hl*4:length], prev=self)
        elif self.protocol == ipv4.ICMP_PROTOCOL:
            self.next = icmp(raw=raw[self.hl*4:length], prev=self)
        elif self.protocol == ipv4.IGMP_PROTOCOL:
            self.next = igmp(raw=raw[self.hl*4:length], prev=self)
        elif dlen < self.iplen:
            self.msg('(ip parse) warning IP packet data shorter than IP len: %u < %u' % (dlen, self.iplen))
        else:
            self.next = raw[self.hl*4:length]

        # Fall back to raw bytes if the upper-layer parser failed.
        if isinstance(self.next, packet_base) and not self.next.parsed:
            self.next = raw[self.hl*4:length]

    def checksum(self):
        """Return the header checksum computed over the header with the
        checksum field zeroed."""
        data = struct.pack('!BBHHHBBHII', (self.v << 4) + self.hl, self.tos,
                           self.iplen, self.id,
                           (self.flags << 13) | self.frag, self.ttl,
                           self.protocol, 0, self.srcip.toUnsigned(),
                           self.dstip.toUnsigned())
        return checksum(data, 0)

    def hdr(self, payload):
        """Serialize the header (with refreshed length and checksum) for the
        given payload bytes."""
        self.iplen = self.hl * 4 + len(payload)
        self.csum = self.checksum()
        return struct.pack('!BBHHHBBHII', (self.v << 4) + self.hl, self.tos,
                           self.iplen, self.id,
                           (self.flags << 13) | self.frag, self.ttl,
                           self.protocol, self.csum, self.srcip.toUnsigned(),
                           self.dstip.toUnsigned())
|
{
"content_hash": "7fff67f7b272b40a630fbe6068e9c422",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 108,
"avg_line_length": 32.95070422535211,
"alnum_prop": 0.5216926693737978,
"repo_name": "hsnlab/escape",
"id": "6c47a2e5ee6143410d162c80617f69459cc6c80e",
"size": "6630",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pox/pox/lib/packet/ipv4.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "381"
},
{
"name": "C",
"bytes": "9773701"
},
{
"name": "C++",
"bytes": "1144774"
},
{
"name": "Dockerfile",
"bytes": "4497"
},
{
"name": "HTML",
"bytes": "423218"
},
{
"name": "JavaScript",
"bytes": "9048"
},
{
"name": "Makefile",
"bytes": "121260"
},
{
"name": "Objective-C",
"bytes": "2964"
},
{
"name": "Python",
"bytes": "2856844"
},
{
"name": "Roff",
"bytes": "80820"
},
{
"name": "Shell",
"bytes": "190566"
}
],
"symlink_target": ""
}
|
"""Handle invoking netconan from the command line."""
# Copyright 2018 Intentionet
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import argparse
import logging
import sys
import configargparse
from . import __version__
from .anonymize_files import anonymize_files
from .ip_anonymization import IpAnonymizer
def host_bits(x):
    """Argparse type function for --preserve-host-bits.

    Converts *x* to an int and validates that it is a bit count in [0, 32].

    Raises argparse.ArgumentTypeError on out-of-range values -- the
    exception argparse expects from type= callables (its message becomes the
    usage error, prefixed by this function's name).  The previous code
    raised argparse.ArgumentError, whose constructor takes two arguments
    (argument, message), so the single-argument call crashed with a
    TypeError instead of reporting a clean usage error.
    """
    val = int(x)
    if val < 0 or val > 32:
        raise argparse.ArgumentTypeError("valid values are [0, 32]")
    return val
def _parse_args(argv):
    """Parse arguments from the given list.

    Uses configargparse, so every ``--`` option can also be supplied via a
    config file (``-c``); command-line values override config-file values,
    which override defaults.  Returns the parsed namespace.
    """
    parser = configargparse.ArgParser(
        # Replace the default config file help with custom message
        # To fix some syntax issues
        add_config_file_help=False,
        description="""
        Args that can start with '--' can also be set in a config file (specified
        via -c). If an arg is specified in more than one place, then command line
        values override config file values which override defaults. Config file
        syntax allows: key=value, flag=true, stuff=[a,b,c] (for more details, see
        here https://goo.gl/R74nmi).
        """,
    )
    parser.add_argument(
        "--version",
        action="version",
        version=__version__,
        help="Print version number and exit",
    )
    parser.add_argument(
        "-a",
        "--anonymize-ips",
        action="store_true",
        default=False,
        help="Anonymize IP addresses",
    )
    parser.add_argument(
        "-c",
        "--config",
        is_config_file=True,
        help="Netconan configuration file with defaults for these CLI parameters",
    )
    parser.add_argument(
        "-d",
        "--dump-ip-map",
        default=None,
        help="Dump IP address anonymization map to specified file",
    )
    parser.add_argument(
        "-i",
        "--input",
        required=True,
        help="Input file or directory containing files to anonymize",
    )
    parser.add_argument(
        "-l",
        "--log-level",
        default="INFO",
        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        help="Determines what level of logs to display",
    )
    parser.add_argument(
        "-n",
        "--as-numbers",
        default=None,
        help="List of comma separated AS numbers to anonymize",
    )
    parser.add_argument(
        "-o",
        "--output",
        required=True,
        help="Output file or directory where anonymized files are placed",
    )
    parser.add_argument(
        "-p",
        "--anonymize-passwords",
        action="store_true",
        default=False,
        help="Anonymize password and snmp community lines",
    )
    parser.add_argument(
        "-r",
        "--reserved-words",
        default=None,
        help="List of comma separated words that should not be anonymized",
    )
    parser.add_argument(
        "-s",
        "--salt",
        default=None,
        help="Salt for IP and sensitive keyword anonymization",
    )
    parser.add_argument(
        "-u",
        "--undo",
        action="store_true",
        default=False,
        help="Undo reversible anonymization (must specify salt)",
    )
    parser.add_argument(
        "-w",
        "--sensitive-words",
        default=None,
        help="List of comma separated keywords to anonymize",
    )
    # Long-form-only options controlling which address bits are preserved.
    parser.add_argument(
        "--preserve-prefixes",
        default=",".join(IpAnonymizer.DEFAULT_PRESERVED_PREFIXES),
        help="List of comma separated IP prefixes to preserve. Specified prefixes are preserved, but the host bits within those prefixes are still anonymized. To preserve prefixes and host bits in specified blocks, use --preserve-addresses instead",
    )
    parser.add_argument(
        "--preserve-addresses",
        default=None,
        help="List of comma separated IP addresses or networks to preserve. Prefixes and host bits within those networks are preserved. To preserve just prefixes and anonymize host bits, use --preserve-prefixes",
    )
    parser.add_argument(
        "--preserve-private-addresses",
        action="store_true",
        default=False,
        help="Preserve private-use IP addresses. Prefixes and host bits within the private-use IP networks are preserved. To preserve specific addresses or networks, use --preserve-addresses instead. To preserve just prefixes and anonymize host bits, use --preserve-prefixes",
    )
    parser.add_argument(
        "--preserve-host-bits",
        # host_bits validates the value is an int in [0, 32].
        type=host_bits,
        default=8,
        help="Preserve the trailing bits of IP addresses, aka the host bits of a network. Set this value large enough to represent the largest interface network (e.g., 8 for a /24 or 12 for a /20) or NAT pool.",
    )
    return parser.parse_args(argv)
def main(argv=sys.argv[1:]):
"""Netconan tool entry point."""
args = _parse_args(argv)
if not args.input:
raise ValueError("Input must be specified")
log_level = logging.getLevelName(args.log_level)
logging.basicConfig(format="%(levelname)s %(message)s", level=log_level)
if not args.output:
raise ValueError("Output must be specified")
if args.undo:
if args.anonymize_ips:
raise ValueError(
"Cannot anonymize and undo anonymization, select only one."
)
if args.salt is None:
raise ValueError(
"Salt used for anonymization must be specified in order to undo anonymization."
)
if args.dump_ip_map is not None:
if not args.anonymize_ips:
raise ValueError(
"Can only dump IP address map when anonymizing IP addresses."
)
as_numbers = None
if args.as_numbers is not None:
as_numbers = args.as_numbers.split(",")
reserved_words = None
if args.reserved_words is not None:
reserved_words = args.reserved_words.split(",")
sensitive_words = None
if args.sensitive_words is not None:
sensitive_words = args.sensitive_words.split(",")
preserve_prefixes = None
if args.preserve_prefixes is not None:
preserve_prefixes = args.preserve_prefixes.split(",")
preserve_addresses = None
if args.preserve_addresses is not None:
preserve_addresses = args.preserve_addresses.split(",")
if args.preserve_private_addresses:
addrs = list(IpAnonymizer.RFC_1918_NETWORKS)
# Merge private addresses with explicitly preserved addresses
preserve_addresses = (
addrs if preserve_addresses is None else (preserve_addresses + addrs)
)
if not any(
[
as_numbers,
sensitive_words,
args.anonymize_passwords,
args.anonymize_ips,
args.undo,
]
):
logging.warning(
"No anonymization options turned on, "
"no output file(s) will be generated."
)
else:
anonymize_files(
args.input,
args.output,
args.anonymize_passwords,
args.anonymize_ips,
args.salt,
args.dump_ip_map,
sensitive_words,
args.undo,
as_numbers,
reserved_words,
preserve_prefixes,
preserve_addresses,
preserve_suffix_v4=args.preserve_host_bits,
preserve_suffix_v6=args.preserve_host_bits,
)
if __name__ == "__main__":
main()
|
{
"content_hash": "1a3ac553c770227c1b008f591e09f32f",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 276,
"avg_line_length": 32.626506024096386,
"alnum_prop": 0.6152141802067946,
"repo_name": "intentionet/netconan",
"id": "cc586a40b3e29b09990f25e8c211d296ca72b9e3",
"size": "8124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netconan/netconan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "218794"
},
{
"name": "Starlark",
"bytes": "4250"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.